Lines matching full:adapter (full-identifier search) in drivers/net/ethernet/ibm/ibmvnic.c
97 static void send_query_map(struct ibmvnic_adapter *adapter);
100 static int send_login(struct ibmvnic_adapter *adapter);
101 static void send_query_cap(struct ibmvnic_adapter *adapter);
103 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
107 static int init_crq_queue(struct ibmvnic_adapter *adapter);
108 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
109 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
146 static int send_crq_init_complete(struct ibmvnic_adapter *adapter) in send_crq_init_complete() argument
154 return ibmvnic_send_crq(adapter, &crq); in send_crq_init_complete()
157 static int send_version_xchg(struct ibmvnic_adapter *adapter) in send_version_xchg() argument
166 return ibmvnic_send_crq(adapter, &crq); in send_version_xchg()
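Both fragments above end in the driver's standard CRQ send idiom. A sketch of send_crq_init_complete() under the assumption that it follows the usual pattern (zero a stack union ibmvnic_crq, set the generic command fields, send); the IBMVNIC_CRQ_INIT_* constant names are taken from ibmvnic.h and may differ by kernel version:

/* Sketch, not verbatim: build a CRQ "init complete" message and send it
 * to the partner. crq.generic and the INIT constants are assumptions
 * from ibmvnic.h.
 */
static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
        union ibmvnic_crq crq;

        memset(&crq, 0, sizeof(crq));
        crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
        crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;

        return ibmvnic_send_crq(adapter, &crq);
}

send_version_xchg() has the same shape, filling crq.version_exchange instead of crq.generic.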
185 * @adapter: private device data
192 static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter, in ibmvnic_wait_for_completion() argument
200 netdev = adapter->netdev; in ibmvnic_wait_for_completion()
204 if (!adapter->crq.active) { in ibmvnic_wait_for_completion()
217 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, in alloc_long_term_buff() argument
220 struct device *dev = &adapter->vdev->dev; in alloc_long_term_buff()
231 ltb->map_id = adapter->map_id; in alloc_long_term_buff()
232 adapter->map_id++; in alloc_long_term_buff()
234 mutex_lock(&adapter->fw_lock); in alloc_long_term_buff()
235 adapter->fw_done_rc = 0; in alloc_long_term_buff()
236 reinit_completion(&adapter->fw_done); in alloc_long_term_buff()
238 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); in alloc_long_term_buff()
244 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); in alloc_long_term_buff()
252 if (adapter->fw_done_rc) { in alloc_long_term_buff()
254 adapter->fw_done_rc); in alloc_long_term_buff()
264 mutex_unlock(&adapter->fw_lock); in alloc_long_term_buff()
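alloc_long_term_buff() above is the first of several call sites (reset_long_term_buff(), ibmvnic_get_vpd(), __ibmvnic_set_mac() further down) that repeat one firmware-completion idiom. A hypothetical helper, not in the driver, condensing that idiom using only the fields visible in the hits (fw_lock, fw_done, fw_done_rc):

/* Hypothetical helper: serialize one firmware request under fw_lock,
 * arm the completion *before* sending so the response cannot race the
 * wait, then wait with the same 10 s timeout the call sites above use.
 */
static int ibmvnic_fw_request(struct ibmvnic_adapter *adapter,
                              int (*send)(struct ibmvnic_adapter *))
{
        int rc;

        mutex_lock(&adapter->fw_lock);
        adapter->fw_done_rc = 0;
        reinit_completion(&adapter->fw_done);

        rc = send(adapter);                     /* e.g. queue a CRQ request */
        if (rc)
                goto out;

        rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
        if (rc)
                goto out;                       /* timed out / CRQ inactive */

        rc = adapter->fw_done_rc;               /* status from firmware */
out:
        mutex_unlock(&adapter->fw_lock);
        return rc;
}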
268 static void free_long_term_buff(struct ibmvnic_adapter *adapter, in free_long_term_buff() argument
271 struct device *dev = &adapter->vdev->dev; in free_long_term_buff()
280 if (adapter->reset_reason != VNIC_RESET_FAILOVER && in free_long_term_buff()
281 adapter->reset_reason != VNIC_RESET_MOBILITY && in free_long_term_buff()
282 adapter->reset_reason != VNIC_RESET_TIMEOUT) in free_long_term_buff()
283 send_request_unmap(adapter, ltb->map_id); in free_long_term_buff()
289 static int reset_long_term_buff(struct ibmvnic_adapter *adapter, in reset_long_term_buff() argument
292 struct device *dev = &adapter->vdev->dev; in reset_long_term_buff()
297 mutex_lock(&adapter->fw_lock); in reset_long_term_buff()
298 adapter->fw_done_rc = 0; in reset_long_term_buff()
300 reinit_completion(&adapter->fw_done); in reset_long_term_buff()
301 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); in reset_long_term_buff()
303 mutex_unlock(&adapter->fw_lock); in reset_long_term_buff()
307 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); in reset_long_term_buff()
311 mutex_unlock(&adapter->fw_lock); in reset_long_term_buff()
315 if (adapter->fw_done_rc) { in reset_long_term_buff()
318 free_long_term_buff(adapter, ltb); in reset_long_term_buff()
319 mutex_unlock(&adapter->fw_lock); in reset_long_term_buff()
320 return alloc_long_term_buff(adapter, ltb, ltb->size); in reset_long_term_buff()
322 mutex_unlock(&adapter->fw_lock); in reset_long_term_buff()
326 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter) in deactivate_rx_pools() argument
330 for (i = 0; i < adapter->num_active_rx_pools; i++) in deactivate_rx_pools()
331 adapter->rx_pool[i].active = 0; in deactivate_rx_pools()
334 static void replenish_rx_pool(struct ibmvnic_adapter *adapter, in replenish_rx_pool() argument
338 u64 handle = adapter->rx_scrq[pool->index]->handle; in replenish_rx_pool()
339 struct device *dev = &adapter->vdev->dev; in replenish_rx_pool()
356 rx_scrq = adapter->rx_scrq[pool->index]; in replenish_rx_pool()
366 skb = netdev_alloc_skb(adapter->netdev, pool->buff_size); in replenish_rx_pool()
369 adapter->replenish_no_mem++; in replenish_rx_pool()
412 send_subcrq_indirect(adapter, handle, in replenish_rx_pool()
418 adapter->replenish_add_buff_success += ind_bufp->index; in replenish_rx_pool()
441 adapter->replenish_add_buff_failure += ind_bufp->index; in replenish_rx_pool()
444 if (lpar_rc == H_CLOSED || adapter->failover_pending) { in replenish_rx_pool()
450 deactivate_rx_pools(adapter); in replenish_rx_pool()
451 netif_carrier_off(adapter->netdev); in replenish_rx_pool()
455 static void replenish_pools(struct ibmvnic_adapter *adapter) in replenish_pools() argument
459 adapter->replenish_task_cycles++; in replenish_pools()
460 for (i = 0; i < adapter->num_active_rx_pools; i++) { in replenish_pools()
461 if (adapter->rx_pool[i].active) in replenish_pools()
462 replenish_rx_pool(adapter, &adapter->rx_pool[i]); in replenish_pools()
465 netdev_dbg(adapter->netdev, "Replenished %d pools\n", i); in replenish_pools()
468 static void release_stats_buffers(struct ibmvnic_adapter *adapter) in release_stats_buffers() argument
470 kfree(adapter->tx_stats_buffers); in release_stats_buffers()
471 kfree(adapter->rx_stats_buffers); in release_stats_buffers()
472 adapter->tx_stats_buffers = NULL; in release_stats_buffers()
473 adapter->rx_stats_buffers = NULL; in release_stats_buffers()
476 static int init_stats_buffers(struct ibmvnic_adapter *adapter) in init_stats_buffers() argument
478 adapter->tx_stats_buffers = in init_stats_buffers()
482 if (!adapter->tx_stats_buffers) in init_stats_buffers()
485 adapter->rx_stats_buffers = in init_stats_buffers()
489 if (!adapter->rx_stats_buffers) in init_stats_buffers()
495 static void release_stats_token(struct ibmvnic_adapter *adapter) in release_stats_token() argument
497 struct device *dev = &adapter->vdev->dev; in release_stats_token()
499 if (!adapter->stats_token) in release_stats_token()
502 dma_unmap_single(dev, adapter->stats_token, in release_stats_token()
505 adapter->stats_token = 0; in release_stats_token()
508 static int init_stats_token(struct ibmvnic_adapter *adapter) in init_stats_token() argument
510 struct device *dev = &adapter->vdev->dev; in init_stats_token()
513 stok = dma_map_single(dev, &adapter->stats, in init_stats_token()
521 adapter->stats_token = stok; in init_stats_token()
522 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok); in init_stats_token()
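The hits above bracket a short function: map the adapter's statistics area once and cache the DMA token. A reconstruction, assuming the buffer is adapter->stats of type struct ibmvnic_statistics and a simple error path:

/* Sketch of init_stats_token(); the size argument and error handling
 * are assumptions, the dma_map_single() and the debug message come
 * from the hits above.
 */
static int init_stats_token(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        dma_addr_t stok;

        stok = dma_map_single(dev, &adapter->stats,
                              sizeof(struct ibmvnic_statistics),
                              DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, stok)) {
                dev_err(dev, "Couldn't map stats buffer\n");
                return -1;
        }

        adapter->stats_token = stok;
        netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
        return 0;
}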
526 static int reset_rx_pools(struct ibmvnic_adapter *adapter) in reset_rx_pools() argument
533 if (!adapter->rx_pool) in reset_rx_pools()
536 buff_size = adapter->cur_rx_buf_sz; in reset_rx_pools()
537 rx_scrqs = adapter->num_active_rx_pools; in reset_rx_pools()
539 rx_pool = &adapter->rx_pool[i]; in reset_rx_pools()
541 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i); in reset_rx_pools()
544 free_long_term_buff(adapter, &rx_pool->long_term_buff); in reset_rx_pools()
546 rc = alloc_long_term_buff(adapter, in reset_rx_pools()
551 rc = reset_long_term_buff(adapter, in reset_rx_pools()
573 static void release_rx_pools(struct ibmvnic_adapter *adapter) in release_rx_pools() argument
578 if (!adapter->rx_pool) in release_rx_pools()
581 for (i = 0; i < adapter->num_active_rx_pools; i++) { in release_rx_pools()
582 rx_pool = &adapter->rx_pool[i]; in release_rx_pools()
584 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i); in release_rx_pools()
587 free_long_term_buff(adapter, &rx_pool->long_term_buff); in release_rx_pools()
602 kfree(adapter->rx_pool); in release_rx_pools()
603 adapter->rx_pool = NULL; in release_rx_pools()
604 adapter->num_active_rx_pools = 0; in release_rx_pools()
609 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in init_rx_pools() local
610 struct device *dev = &adapter->vdev->dev; in init_rx_pools()
616 rxadd_subcrqs = adapter->num_active_rx_scrqs; in init_rx_pools()
617 buff_size = adapter->cur_rx_buf_sz; in init_rx_pools()
619 adapter->rx_pool = kcalloc(rxadd_subcrqs, in init_rx_pools()
622 if (!adapter->rx_pool) { in init_rx_pools()
627 adapter->num_active_rx_pools = rxadd_subcrqs; in init_rx_pools()
630 rx_pool = &adapter->rx_pool[i]; in init_rx_pools()
632 netdev_dbg(adapter->netdev, in init_rx_pools()
634 i, adapter->req_rx_add_entries_per_subcrq, in init_rx_pools()
637 rx_pool->size = adapter->req_rx_add_entries_per_subcrq; in init_rx_pools()
645 release_rx_pools(adapter); in init_rx_pools()
654 release_rx_pools(adapter); in init_rx_pools()
658 if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff, in init_rx_pools()
660 release_rx_pools(adapter); in init_rx_pools()
675 static int reset_one_tx_pool(struct ibmvnic_adapter *adapter, in reset_one_tx_pool() argument
680 rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff); in reset_one_tx_pool()
697 static int reset_tx_pools(struct ibmvnic_adapter *adapter) in reset_tx_pools() argument
702 if (!adapter->tx_pool) in reset_tx_pools()
705 tx_scrqs = adapter->num_active_tx_pools; in reset_tx_pools()
707 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]); in reset_tx_pools()
708 rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]); in reset_tx_pools()
711 rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]); in reset_tx_pools()
719 static void release_vpd_data(struct ibmvnic_adapter *adapter) in release_vpd_data() argument
721 if (!adapter->vpd) in release_vpd_data()
724 kfree(adapter->vpd->buff); in release_vpd_data()
725 kfree(adapter->vpd); in release_vpd_data()
727 adapter->vpd = NULL; in release_vpd_data()
730 static void release_one_tx_pool(struct ibmvnic_adapter *adapter, in release_one_tx_pool() argument
735 free_long_term_buff(adapter, &tx_pool->long_term_buff); in release_one_tx_pool()
738 static void release_tx_pools(struct ibmvnic_adapter *adapter) in release_tx_pools() argument
742 if (!adapter->tx_pool) in release_tx_pools()
745 for (i = 0; i < adapter->num_active_tx_pools; i++) { in release_tx_pools()
746 release_one_tx_pool(adapter, &adapter->tx_pool[i]); in release_tx_pools()
747 release_one_tx_pool(adapter, &adapter->tso_pool[i]); in release_tx_pools()
750 kfree(adapter->tx_pool); in release_tx_pools()
751 adapter->tx_pool = NULL; in release_tx_pools()
752 kfree(adapter->tso_pool); in release_tx_pools()
753 adapter->tso_pool = NULL; in release_tx_pools()
754 adapter->num_active_tx_pools = 0; in release_tx_pools()
761 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in init_one_tx_pool() local
770 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, in init_one_tx_pool()
791 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in init_tx_pools() local
796 tx_subcrqs = adapter->num_active_tx_scrqs; in init_tx_pools()
797 adapter->tx_pool = kcalloc(tx_subcrqs, in init_tx_pools()
799 if (!adapter->tx_pool) in init_tx_pools()
802 adapter->tso_pool = kcalloc(tx_subcrqs, in init_tx_pools()
804 if (!adapter->tso_pool) { in init_tx_pools()
805 kfree(adapter->tx_pool); in init_tx_pools()
806 adapter->tx_pool = NULL; in init_tx_pools()
810 adapter->num_active_tx_pools = tx_subcrqs; in init_tx_pools()
813 buff_size = adapter->req_mtu + VLAN_HLEN; in init_tx_pools()
815 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i], in init_tx_pools()
816 adapter->req_tx_entries_per_subcrq, in init_tx_pools()
819 release_tx_pools(adapter); in init_tx_pools()
823 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i], in init_tx_pools()
827 release_tx_pools(adapter); in init_tx_pools()
835 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter) in ibmvnic_napi_enable() argument
839 if (adapter->napi_enabled) in ibmvnic_napi_enable()
842 for (i = 0; i < adapter->req_rx_queues; i++) in ibmvnic_napi_enable()
843 napi_enable(&adapter->napi[i]); in ibmvnic_napi_enable()
845 adapter->napi_enabled = true; in ibmvnic_napi_enable()
848 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter) in ibmvnic_napi_disable() argument
852 if (!adapter->napi_enabled) in ibmvnic_napi_disable()
855 for (i = 0; i < adapter->req_rx_queues; i++) { in ibmvnic_napi_disable()
856 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i); in ibmvnic_napi_disable()
857 napi_disable(&adapter->napi[i]); in ibmvnic_napi_disable()
860 adapter->napi_enabled = false; in ibmvnic_napi_disable()
863 static int init_napi(struct ibmvnic_adapter *adapter) in init_napi() argument
867 adapter->napi = kcalloc(adapter->req_rx_queues, in init_napi()
869 if (!adapter->napi) in init_napi()
872 for (i = 0; i < adapter->req_rx_queues; i++) { in init_napi()
873 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i); in init_napi()
874 netif_napi_add(adapter->netdev, &adapter->napi[i], in init_napi()
878 adapter->num_active_rx_napi = adapter->req_rx_queues; in init_napi()
882 static void release_napi(struct ibmvnic_adapter *adapter) in release_napi() argument
886 if (!adapter->napi) in release_napi()
889 for (i = 0; i < adapter->num_active_rx_napi; i++) { in release_napi()
890 netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i); in release_napi()
891 netif_napi_del(&adapter->napi[i]); in release_napi()
894 kfree(adapter->napi); in release_napi()
895 adapter->napi = NULL; in release_napi()
896 adapter->num_active_rx_napi = 0; in release_napi()
897 adapter->napi_enabled = false; in release_napi()
927 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_login() local
941 adapter->init_done_rc = 0; in ibmvnic_login()
942 reinit_completion(&adapter->init_done); in ibmvnic_login()
943 rc = send_login(adapter); in ibmvnic_login()
947 if (!wait_for_completion_timeout(&adapter->init_done, in ibmvnic_login()
951 adapter->init_done_rc = 0; in ibmvnic_login()
956 if (adapter->init_done_rc == ABORTED) { in ibmvnic_login()
959 adapter->init_done_rc = 0; in ibmvnic_login()
965 } else if (adapter->init_done_rc == PARTIALSUCCESS) { in ibmvnic_login()
967 release_sub_crqs(adapter, 1); in ibmvnic_login()
972 adapter->init_done_rc = 0; in ibmvnic_login()
973 reinit_completion(&adapter->init_done); in ibmvnic_login()
974 send_query_cap(adapter); in ibmvnic_login()
975 if (!wait_for_completion_timeout(&adapter->init_done, in ibmvnic_login()
982 rc = init_sub_crqs(adapter); in ibmvnic_login()
989 rc = init_sub_crq_irqs(adapter); in ibmvnic_login()
995 } else if (adapter->init_done_rc) { in ibmvnic_login()
996 netdev_warn(netdev, "Adapter login failed\n"); in ibmvnic_login()
1001 __ibmvnic_set_mac(netdev, adapter->mac_addr); in ibmvnic_login()
1003 netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state)); in ibmvnic_login()
1007 static void release_login_buffer(struct ibmvnic_adapter *adapter) in release_login_buffer() argument
1009 kfree(adapter->login_buf); in release_login_buffer()
1010 adapter->login_buf = NULL; in release_login_buffer()
1013 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter) in release_login_rsp_buffer() argument
1015 kfree(adapter->login_rsp_buf); in release_login_rsp_buffer()
1016 adapter->login_rsp_buf = NULL; in release_login_rsp_buffer()
1019 static void release_resources(struct ibmvnic_adapter *adapter) in release_resources() argument
1021 release_vpd_data(adapter); in release_resources()
1023 release_tx_pools(adapter); in release_resources()
1024 release_rx_pools(adapter); in release_resources()
1026 release_napi(adapter); in release_resources()
1027 release_login_buffer(adapter); in release_resources()
1028 release_login_rsp_buffer(adapter); in release_resources()
1031 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) in set_link_state() argument
1033 struct net_device *netdev = adapter->netdev; in set_link_state()
1049 reinit_completion(&adapter->init_done); in set_link_state()
1050 rc = ibmvnic_send_crq(adapter, &crq); in set_link_state()
1056 if (!wait_for_completion_timeout(&adapter->init_done, in set_link_state()
1062 if (adapter->init_done_rc == PARTIALSUCCESS) { in set_link_state()
1066 } else if (adapter->init_done_rc) { in set_link_state()
1068 adapter->init_done_rc); in set_link_state()
1069 return adapter->init_done_rc; in set_link_state()
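The set_link_state() hits outline a retry loop on adapter->init_done: re-arm the completion, send the CRQ, wait, and retry on PARTIALSUCCESS. A sketch of that loop; the crq.logical_link_state field names, the retry cap, and the timeout value are assumptions:

/* Sketch: PARTIALSUCCESS means the partner asked us to resend; any
 * other nonzero init_done_rc is a hard failure.
 */
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
        struct net_device *netdev = adapter->netdev;
        union ibmvnic_crq crq;
        bool resend = true;
        int retries = 10;                       /* assumed cap */
        int rc;

        memset(&crq, 0, sizeof(crq));
        crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
        crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
        crq.logical_link_state.link_state = link_state;

        while (resend && retries--) {
                resend = false;

                reinit_completion(&adapter->init_done);
                rc = ibmvnic_send_crq(adapter, &crq);
                if (rc)
                        return rc;

                if (!wait_for_completion_timeout(&adapter->init_done,
                                                 msecs_to_jiffies(10000))) {
                        netdev_err(netdev, "timeout setting link state\n");
                        return -ETIMEDOUT;
                }

                if (adapter->init_done_rc == PARTIALSUCCESS)
                        resend = true;          /* partner not ready yet */
                else if (adapter->init_done_rc)
                        return adapter->init_done_rc;
        }
        return 0;
}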
1078 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in set_real_num_queues() local
1082 adapter->req_tx_queues, adapter->req_rx_queues); in set_real_num_queues()
1084 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues); in set_real_num_queues()
1090 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues); in set_real_num_queues()
1097 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) in ibmvnic_get_vpd() argument
1099 struct device *dev = &adapter->vdev->dev; in ibmvnic_get_vpd()
1104 if (adapter->vpd->buff) in ibmvnic_get_vpd()
1105 len = adapter->vpd->len; in ibmvnic_get_vpd()
1107 mutex_lock(&adapter->fw_lock); in ibmvnic_get_vpd()
1108 adapter->fw_done_rc = 0; in ibmvnic_get_vpd()
1109 reinit_completion(&adapter->fw_done); in ibmvnic_get_vpd()
1113 rc = ibmvnic_send_crq(adapter, &crq); in ibmvnic_get_vpd()
1115 mutex_unlock(&adapter->fw_lock); in ibmvnic_get_vpd()
1119 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); in ibmvnic_get_vpd()
1122 mutex_unlock(&adapter->fw_lock); in ibmvnic_get_vpd()
1125 mutex_unlock(&adapter->fw_lock); in ibmvnic_get_vpd()
1127 if (!adapter->vpd->len) in ibmvnic_get_vpd()
1130 if (!adapter->vpd->buff) in ibmvnic_get_vpd()
1131 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL); in ibmvnic_get_vpd()
1132 else if (adapter->vpd->len != len) in ibmvnic_get_vpd()
1133 adapter->vpd->buff = in ibmvnic_get_vpd()
1134 krealloc(adapter->vpd->buff, in ibmvnic_get_vpd()
1135 adapter->vpd->len, GFP_KERNEL); in ibmvnic_get_vpd()
1137 if (!adapter->vpd->buff) { in ibmvnic_get_vpd()
1142 adapter->vpd->dma_addr = in ibmvnic_get_vpd()
1143 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len, in ibmvnic_get_vpd()
1145 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) { in ibmvnic_get_vpd()
1147 kfree(adapter->vpd->buff); in ibmvnic_get_vpd()
1148 adapter->vpd->buff = NULL; in ibmvnic_get_vpd()
1152 mutex_lock(&adapter->fw_lock); in ibmvnic_get_vpd()
1153 adapter->fw_done_rc = 0; in ibmvnic_get_vpd()
1154 reinit_completion(&adapter->fw_done); in ibmvnic_get_vpd()
1158 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr); in ibmvnic_get_vpd()
1159 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len); in ibmvnic_get_vpd()
1160 rc = ibmvnic_send_crq(adapter, &crq); in ibmvnic_get_vpd()
1162 kfree(adapter->vpd->buff); in ibmvnic_get_vpd()
1163 adapter->vpd->buff = NULL; in ibmvnic_get_vpd()
1164 mutex_unlock(&adapter->fw_lock); in ibmvnic_get_vpd()
1168 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); in ibmvnic_get_vpd()
1171 kfree(adapter->vpd->buff); in ibmvnic_get_vpd()
1172 adapter->vpd->buff = NULL; in ibmvnic_get_vpd()
1173 mutex_unlock(&adapter->fw_lock); in ibmvnic_get_vpd()
1177 mutex_unlock(&adapter->fw_lock); in ibmvnic_get_vpd()
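One hedged observation on the ibmvnic_get_vpd() fragments: the krealloc() result appears to be assigned straight back to adapter->vpd->buff. krealloc() returns NULL on failure without freeing the old block, so that pattern can leak the previous buffer. The leak-safe idiom, as a hypothetical helper (field types assumed from the fragments):

/* Hypothetical helper showing the leak-safe krealloc() idiom. */
static int vpd_resize_buff(struct ibmvnic_vpd *vpd, u64 new_len)
{
        unsigned char *tmp = krealloc(vpd->buff, new_len, GFP_KERNEL);

        if (!tmp)
                return -ENOMEM;         /* old vpd->buff is still valid */

        vpd->buff = tmp;
        return 0;
}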
1181 static int init_resources(struct ibmvnic_adapter *adapter) in init_resources() argument
1183 struct net_device *netdev = adapter->netdev; in init_resources()
1190 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL); in init_resources()
1191 if (!adapter->vpd) in init_resources()
1195 rc = ibmvnic_get_vpd(adapter); in init_resources()
1201 adapter->map_id = 1; in init_resources()
1203 rc = init_napi(adapter); in init_resources()
1207 send_query_map(adapter); in init_resources()
1219 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in __ibmvnic_open() local
1220 enum vnic_state prev_state = adapter->state; in __ibmvnic_open()
1223 adapter->state = VNIC_OPENING; in __ibmvnic_open()
1224 replenish_pools(adapter); in __ibmvnic_open()
1225 ibmvnic_napi_enable(adapter); in __ibmvnic_open()
1230 for (i = 0; i < adapter->req_rx_queues; i++) { in __ibmvnic_open()
1233 enable_irq(adapter->rx_scrq[i]->irq); in __ibmvnic_open()
1234 enable_scrq_irq(adapter, adapter->rx_scrq[i]); in __ibmvnic_open()
1237 for (i = 0; i < adapter->req_tx_queues; i++) { in __ibmvnic_open()
1240 enable_irq(adapter->tx_scrq[i]->irq); in __ibmvnic_open()
1241 enable_scrq_irq(adapter, adapter->tx_scrq[i]); in __ibmvnic_open()
1245 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); in __ibmvnic_open()
1247 ibmvnic_napi_disable(adapter); in __ibmvnic_open()
1248 release_resources(adapter); in __ibmvnic_open()
1255 for (i = 0; i < adapter->req_rx_queues; i++) in __ibmvnic_open()
1256 napi_schedule(&adapter->napi[i]); in __ibmvnic_open()
1259 adapter->state = VNIC_OPEN; in __ibmvnic_open()
1265 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_open() local
1274 * It should be safe to overwrite the adapter->state here. Since in ibmvnic_open()
1281 if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) { in ibmvnic_open()
1283 adapter_state_to_string(adapter->state), in ibmvnic_open()
1284 adapter->failover_pending); in ibmvnic_open()
1285 adapter->state = VNIC_OPEN; in ibmvnic_open()
1290 if (adapter->state != VNIC_CLOSED) { in ibmvnic_open()
1295 rc = init_resources(adapter); in ibmvnic_open()
1298 release_resources(adapter); in ibmvnic_open()
1311 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) { in ibmvnic_open()
1312 adapter->state = VNIC_OPEN; in ibmvnic_open()
1318 static void clean_rx_pools(struct ibmvnic_adapter *adapter) in clean_rx_pools() argument
1326 if (!adapter->rx_pool) in clean_rx_pools()
1329 rx_scrqs = adapter->num_active_rx_pools; in clean_rx_pools()
1330 rx_entries = adapter->req_rx_add_entries_per_subcrq; in clean_rx_pools()
1334 rx_pool = &adapter->rx_pool[i]; in clean_rx_pools()
1338 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); in clean_rx_pools()
1349 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter, in clean_one_tx_pool() argument
1370 static void clean_tx_pools(struct ibmvnic_adapter *adapter) in clean_tx_pools() argument
1375 if (!adapter->tx_pool || !adapter->tso_pool) in clean_tx_pools()
1378 tx_scrqs = adapter->num_active_tx_pools; in clean_tx_pools()
1382 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i); in clean_tx_pools()
1383 clean_one_tx_pool(adapter, &adapter->tx_pool[i]); in clean_tx_pools()
1384 clean_one_tx_pool(adapter, &adapter->tso_pool[i]); in clean_tx_pools()
1388 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter) in ibmvnic_disable_irqs() argument
1390 struct net_device *netdev = adapter->netdev; in ibmvnic_disable_irqs()
1393 if (adapter->tx_scrq) { in ibmvnic_disable_irqs()
1394 for (i = 0; i < adapter->req_tx_queues; i++) in ibmvnic_disable_irqs()
1395 if (adapter->tx_scrq[i]->irq) { in ibmvnic_disable_irqs()
1398 disable_scrq_irq(adapter, adapter->tx_scrq[i]); in ibmvnic_disable_irqs()
1399 disable_irq(adapter->tx_scrq[i]->irq); in ibmvnic_disable_irqs()
1403 if (adapter->rx_scrq) { in ibmvnic_disable_irqs()
1404 for (i = 0; i < adapter->req_rx_queues; i++) { in ibmvnic_disable_irqs()
1405 if (adapter->rx_scrq[i]->irq) { in ibmvnic_disable_irqs()
1408 disable_scrq_irq(adapter, adapter->rx_scrq[i]); in ibmvnic_disable_irqs()
1409 disable_irq(adapter->rx_scrq[i]->irq); in ibmvnic_disable_irqs()
1417 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_cleanup() local
1420 if (test_bit(0, &adapter->resetting)) in ibmvnic_cleanup()
1425 ibmvnic_napi_disable(adapter); in ibmvnic_cleanup()
1426 ibmvnic_disable_irqs(adapter); in ibmvnic_cleanup()
1428 clean_rx_pools(adapter); in ibmvnic_cleanup()
1429 clean_tx_pools(adapter); in ibmvnic_cleanup()
1434 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in __ibmvnic_close() local
1437 adapter->state = VNIC_CLOSING; in __ibmvnic_close()
1438 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); in __ibmvnic_close()
1439 adapter->state = VNIC_CLOSED; in __ibmvnic_close()
1445 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_close() local
1449 adapter_state_to_string(adapter->state), in ibmvnic_close()
1450 adapter->failover_pending, in ibmvnic_close()
1451 adapter->force_reset_recovery); in ibmvnic_close()
1456 if (adapter->failover_pending) { in ibmvnic_close()
1457 adapter->state = VNIC_CLOSED; in ibmvnic_close()
1619 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, in ibmvnic_tx_scrq_clean_buffer() argument
1641 tx_pool = &adapter->tso_pool[queue_num]; in ibmvnic_tx_scrq_clean_buffer()
1644 tx_pool = &adapter->tx_pool[queue_num]; in ibmvnic_tx_scrq_clean_buffer()
1651 adapter->netdev->stats.tx_packets--; in ibmvnic_tx_scrq_clean_buffer()
1652 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len; in ibmvnic_tx_scrq_clean_buffer()
1653 adapter->tx_stats_buffers[queue_num].packets--; in ibmvnic_tx_scrq_clean_buffer()
1654 adapter->tx_stats_buffers[queue_num].bytes -= in ibmvnic_tx_scrq_clean_buffer()
1658 adapter->netdev->stats.tx_dropped++; in ibmvnic_tx_scrq_clean_buffer()
1662 (adapter->req_tx_entries_per_subcrq / 2) && in ibmvnic_tx_scrq_clean_buffer()
1663 __netif_subqueue_stopped(adapter->netdev, queue_num) && in ibmvnic_tx_scrq_clean_buffer()
1664 !test_bit(0, &adapter->resetting)) { in ibmvnic_tx_scrq_clean_buffer()
1665 netif_wake_subqueue(adapter->netdev, queue_num); in ibmvnic_tx_scrq_clean_buffer()
1666 netdev_dbg(adapter->netdev, "Started queue %d\n", in ibmvnic_tx_scrq_clean_buffer()
1671 static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter, in ibmvnic_tx_scrq_flush() argument
1687 rc = send_subcrq_indirect(adapter, handle, dma_addr, entries); in ibmvnic_tx_scrq_flush()
1689 ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq); in ibmvnic_tx_scrq_flush()
1697 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_xmit() local
1699 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req; in ibmvnic_xmit()
1700 struct device *dev = &adapter->vdev->dev; in ibmvnic_xmit()
1722 tx_scrq = adapter->tx_scrq[queue_num]; in ibmvnic_xmit()
1726 if (test_bit(0, &adapter->resetting)) { in ibmvnic_xmit()
1741 ibmvnic_tx_scrq_flush(adapter, tx_scrq); in ibmvnic_xmit()
1745 tx_pool = &adapter->tso_pool[queue_num]; in ibmvnic_xmit()
1747 tx_pool = &adapter->tx_pool[queue_num]; in ibmvnic_xmit()
1755 ibmvnic_tx_scrq_flush(adapter, tx_scrq); in ibmvnic_xmit()
1813 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) { in ibmvnic_xmit()
1848 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); in ibmvnic_xmit()
1860 lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); in ibmvnic_xmit()
1866 >= adapter->req_tx_entries_per_subcrq) { in ibmvnic_xmit()
1888 if (lpar_rc == H_CLOSED || adapter->failover_pending) { in ibmvnic_xmit()
1901 adapter->tx_send_failed += tx_send_failed; in ibmvnic_xmit()
1902 adapter->tx_map_failed += tx_map_failed; in ibmvnic_xmit()
1903 adapter->tx_stats_buffers[queue_num].packets += tx_packets; in ibmvnic_xmit()
1904 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; in ibmvnic_xmit()
1905 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; in ibmvnic_xmit()
1912 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_set_multi() local
1921 if (!adapter->promisc_supported) in ibmvnic_set_multi()
1930 ibmvnic_send_crq(adapter, &crq); in ibmvnic_set_multi()
1937 ibmvnic_send_crq(adapter, &crq); in ibmvnic_set_multi()
1947 ibmvnic_send_crq(adapter, &crq); in ibmvnic_set_multi()
1955 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in __ibmvnic_set_mac() local
1969 mutex_lock(&adapter->fw_lock); in __ibmvnic_set_mac()
1970 adapter->fw_done_rc = 0; in __ibmvnic_set_mac()
1971 reinit_completion(&adapter->fw_done); in __ibmvnic_set_mac()
1973 rc = ibmvnic_send_crq(adapter, &crq); in __ibmvnic_set_mac()
1976 mutex_unlock(&adapter->fw_lock); in __ibmvnic_set_mac()
1980 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); in __ibmvnic_set_mac()
1982 if (rc || adapter->fw_done_rc) { in __ibmvnic_set_mac()
1984 mutex_unlock(&adapter->fw_lock); in __ibmvnic_set_mac()
1987 mutex_unlock(&adapter->fw_lock); in __ibmvnic_set_mac()
1990 ether_addr_copy(adapter->mac_addr, netdev->dev_addr); in __ibmvnic_set_mac()
1996 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_set_mac() local
2004 ether_addr_copy(adapter->mac_addr, addr->sa_data); in ibmvnic_set_mac()
2005 if (adapter->state != VNIC_PROBED) in ibmvnic_set_mac()
2036 static int do_reset(struct ibmvnic_adapter *adapter, in do_reset() argument
2041 struct net_device *netdev = adapter->netdev; in do_reset()
2044 netdev_dbg(adapter->netdev, in do_reset()
2046 adapter_state_to_string(adapter->state), in do_reset()
2047 adapter->failover_pending, in do_reset()
2051 adapter->reset_reason = rwi->reset_reason; in do_reset()
2053 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) in do_reset()
2061 adapter->failover_pending = false; in do_reset()
2064 reset_state = adapter->state; in do_reset()
2073 old_num_rx_queues = adapter->req_rx_queues; in do_reset()
2074 old_num_tx_queues = adapter->req_tx_queues; in do_reset()
2075 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq; in do_reset()
2076 old_num_tx_slots = adapter->req_tx_entries_per_subcrq; in do_reset()
2081 adapter->reset_reason != VNIC_RESET_MOBILITY && in do_reset()
2082 adapter->reset_reason != VNIC_RESET_FAILOVER) { in do_reset()
2083 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { in do_reset()
2088 adapter->state = VNIC_CLOSING; in do_reset()
2096 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); in do_reset()
2101 if (adapter->state == VNIC_OPEN) { in do_reset()
2104 * set the adapter state to OPEN. Update our in do_reset()
2112 adapter->state = VNIC_CLOSING; in do_reset()
2115 if (adapter->state != VNIC_CLOSING) { in do_reset()
2116 /* If someone else changed the adapter state in do_reset()
2122 adapter->state = VNIC_CLOSED; in do_reset()
2126 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { in do_reset()
2127 release_resources(adapter); in do_reset()
2128 release_sub_crqs(adapter, 1); in do_reset()
2129 release_crq_queue(adapter); in do_reset()
2132 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) { in do_reset()
2136 adapter->state = VNIC_PROBED; in do_reset()
2138 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { in do_reset()
2139 rc = init_crq_queue(adapter); in do_reset()
2140 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { in do_reset()
2141 rc = ibmvnic_reenable_crq_queue(adapter); in do_reset()
2142 release_sub_crqs(adapter, 1); in do_reset()
2144 rc = ibmvnic_reset_crq(adapter); in do_reset()
2146 rc = vio_enable_interrupts(adapter->vdev); in do_reset()
2148 netdev_err(adapter->netdev, in do_reset()
2155 netdev_err(adapter->netdev, in do_reset()
2160 rc = ibmvnic_reset_init(adapter, true); in do_reset()
2166 /* If the adapter was in PROBE or DOWN state prior to the reset, in do_reset()
2178 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { in do_reset()
2179 rc = init_resources(adapter); in do_reset()
2182 } else if (adapter->req_rx_queues != old_num_rx_queues || in do_reset()
2183 adapter->req_tx_queues != old_num_tx_queues || in do_reset()
2184 adapter->req_rx_add_entries_per_subcrq != in do_reset()
2186 adapter->req_tx_entries_per_subcrq != in do_reset()
2188 !adapter->rx_pool || in do_reset()
2189 !adapter->tso_pool || in do_reset()
2190 !adapter->tx_pool) { in do_reset()
2191 release_rx_pools(adapter); in do_reset()
2192 release_tx_pools(adapter); in do_reset()
2193 release_napi(adapter); in do_reset()
2194 release_vpd_data(adapter); in do_reset()
2196 rc = init_resources(adapter); in do_reset()
2201 rc = reset_tx_pools(adapter); in do_reset()
2203 netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n", in do_reset()
2208 rc = reset_rx_pools(adapter); in do_reset()
2210 netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n", in do_reset()
2215 ibmvnic_disable_irqs(adapter); in do_reset()
2217 adapter->state = VNIC_CLOSED; in do_reset()
2233 if (adapter->reset_reason == VNIC_RESET_FAILOVER || in do_reset()
2234 adapter->reset_reason == VNIC_RESET_MOBILITY) in do_reset()
2240 /* restore the adapter state if reset failed */ in do_reset()
2242 adapter->state = reset_state; in do_reset()
2244 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) in do_reset()
2247 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n", in do_reset()
2248 adapter_state_to_string(adapter->state), in do_reset()
2249 adapter->failover_pending, rc); in do_reset()
2253 static int do_hard_reset(struct ibmvnic_adapter *adapter, in do_hard_reset() argument
2256 struct net_device *netdev = adapter->netdev; in do_hard_reset()
2259 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n", in do_hard_reset()
2263 reset_state = adapter->state; in do_hard_reset()
2271 adapter->reset_reason = rwi->reset_reason; in do_hard_reset()
2274 release_resources(adapter); in do_hard_reset()
2275 release_sub_crqs(adapter, 0); in do_hard_reset()
2276 release_crq_queue(adapter); in do_hard_reset()
2281 adapter->state = VNIC_PROBED; in do_hard_reset()
2283 reinit_completion(&adapter->init_done); in do_hard_reset()
2284 rc = init_crq_queue(adapter); in do_hard_reset()
2286 netdev_err(adapter->netdev, in do_hard_reset()
2291 rc = ibmvnic_reset_init(adapter, false); in do_hard_reset()
2295 /* If the adapter was in PROBE or DOWN state prior to the reset, in do_hard_reset()
2305 rc = init_resources(adapter); in do_hard_reset()
2309 ibmvnic_disable_irqs(adapter); in do_hard_reset()
2310 adapter->state = VNIC_CLOSED; in do_hard_reset()
2323 /* restore adapter state if reset failed */ in do_hard_reset()
2325 adapter->state = reset_state; in do_hard_reset()
2326 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n", in do_hard_reset()
2327 adapter_state_to_string(adapter->state), in do_hard_reset()
2328 adapter->failover_pending, rc); in do_hard_reset()
2332 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) in get_next_rwi() argument
2337 spin_lock_irqsave(&adapter->rwi_lock, flags); in get_next_rwi()
2339 if (!list_empty(&adapter->rwi_list)) { in get_next_rwi()
2340 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, in get_next_rwi()
2347 spin_unlock_irqrestore(&adapter->rwi_lock, flags); in get_next_rwi()
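These hits are nearly the whole of get_next_rwi(): pop the head of the reset work-item list under rwi_lock. Reconstructed, with the list_del() (not matched above, since that line contains no "adapter") assumed:

static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
        struct ibmvnic_rwi *rwi;
        unsigned long flags;

        spin_lock_irqsave(&adapter->rwi_lock, flags);

        if (!list_empty(&adapter->rwi_list)) {
                rwi = list_first_entry(&adapter->rwi_list,
                                       struct ibmvnic_rwi, list);
                list_del(&rwi->list);           /* assumed: detach the item */
        } else {
                rwi = NULL;
        }

        spin_unlock_irqrestore(&adapter->rwi_lock, flags);
        return rwi;
}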
2353 * @adapter: ibmvnic_adapter struct
2365 static int do_passive_init(struct ibmvnic_adapter *adapter) in do_passive_init() argument
2368 struct net_device *netdev = adapter->netdev; in do_passive_init()
2369 struct device *dev = &adapter->vdev->dev; in do_passive_init()
2374 adapter->state = VNIC_PROBING; in do_passive_init()
2375 reinit_completion(&adapter->init_done); in do_passive_init()
2376 adapter->init_done_rc = 0; in do_passive_init()
2377 adapter->crq.active = true; in do_passive_init()
2379 rc = send_crq_init_complete(adapter); in do_passive_init()
2383 rc = send_version_xchg(adapter); in do_passive_init()
2385 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc); in do_passive_init()
2387 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { in do_passive_init()
2393 rc = init_sub_crqs(adapter); in do_passive_init()
2399 rc = init_sub_crq_irqs(adapter); in do_passive_init()
2405 netdev->mtu = adapter->req_mtu - ETH_HLEN; in do_passive_init()
2406 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; in do_passive_init()
2407 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; in do_passive_init()
2409 adapter->state = VNIC_PROBED; in do_passive_init()
2415 release_sub_crqs(adapter, 1); in do_passive_init()
2417 adapter->state = VNIC_DOWN; in do_passive_init()
2423 struct ibmvnic_adapter *adapter; in __ibmvnic_reset() local
2431 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); in __ibmvnic_reset()
2433 if (test_and_set_bit_lock(0, &adapter->resetting)) { in __ibmvnic_reset()
2435 &adapter->ibmvnic_delayed_reset, in __ibmvnic_reset()
2440 rwi = get_next_rwi(adapter); in __ibmvnic_reset()
2442 spin_lock_irqsave(&adapter->state_lock, flags); in __ibmvnic_reset()
2444 if (adapter->state == VNIC_REMOVING || in __ibmvnic_reset()
2445 adapter->state == VNIC_REMOVED) { in __ibmvnic_reset()
2446 spin_unlock_irqrestore(&adapter->state_lock, flags); in __ibmvnic_reset()
2453 reset_state = adapter->state; in __ibmvnic_reset()
2456 spin_unlock_irqrestore(&adapter->state_lock, flags); in __ibmvnic_reset()
2460 rc = do_passive_init(adapter); in __ibmvnic_reset()
2463 netif_carrier_on(adapter->netdev); in __ibmvnic_reset()
2464 } else if (adapter->force_reset_recovery) { in __ibmvnic_reset()
2469 adapter->failover_pending = false; in __ibmvnic_reset()
2472 if (adapter->wait_for_reset) { in __ibmvnic_reset()
2474 adapter->force_reset_recovery = false; in __ibmvnic_reset()
2475 rc = do_hard_reset(adapter, rwi, reset_state); in __ibmvnic_reset()
2478 adapter->force_reset_recovery = false; in __ibmvnic_reset()
2479 rc = do_hard_reset(adapter, rwi, reset_state); in __ibmvnic_reset()
2484 netdev_dbg(adapter->netdev, in __ibmvnic_reset()
2486 adapter_state_to_string(adapter->state)); in __ibmvnic_reset()
2491 rc = do_reset(adapter, rwi, reset_state); in __ibmvnic_reset()
2494 adapter->last_reset_time = jiffies; in __ibmvnic_reset()
2497 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc); in __ibmvnic_reset()
2499 rwi = get_next_rwi(adapter); in __ibmvnic_reset()
2508 * the adapter would be in an undefined state. So retry the in __ibmvnic_reset()
2518 adapter->force_reset_recovery = true; in __ibmvnic_reset()
2521 if (adapter->wait_for_reset) { in __ibmvnic_reset()
2522 adapter->reset_done_rc = rc; in __ibmvnic_reset()
2523 complete(&adapter->reset_done); in __ibmvnic_reset()
2526 clear_bit_unlock(0, &adapter->resetting); in __ibmvnic_reset()
2528 netdev_dbg(adapter->netdev, in __ibmvnic_reset()
2530 adapter_state_to_string(adapter->state), in __ibmvnic_reset()
2531 adapter->force_reset_recovery, in __ibmvnic_reset()
2532 adapter->wait_for_reset); in __ibmvnic_reset()
2537 struct ibmvnic_adapter *adapter; in __ibmvnic_delayed_reset() local
2539 adapter = container_of(work, struct ibmvnic_adapter, in __ibmvnic_delayed_reset()
2541 __ibmvnic_reset(&adapter->ibmvnic_reset); in __ibmvnic_delayed_reset()
2544 static int ibmvnic_reset(struct ibmvnic_adapter *adapter, in ibmvnic_reset() argument
2549 struct net_device *netdev = adapter->netdev; in ibmvnic_reset()
2553 spin_lock_irqsave(&adapter->rwi_lock, flags); in ibmvnic_reset()
2560 if (adapter->state == VNIC_REMOVING || in ibmvnic_reset()
2561 adapter->state == VNIC_REMOVED || in ibmvnic_reset()
2562 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) { in ibmvnic_reset()
2564 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); in ibmvnic_reset()
2568 if (adapter->state == VNIC_PROBING) { in ibmvnic_reset()
2569 netdev_warn(netdev, "Adapter reset during probe\n"); in ibmvnic_reset()
2570 adapter->init_done_rc = EAGAIN; in ibmvnic_reset()
2575 list_for_each_entry(tmp, &adapter->rwi_list, list) { in ibmvnic_reset()
2592 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) { in ibmvnic_reset()
2593 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) in ibmvnic_reset()
2597 list_add_tail(&rwi->list, &adapter->rwi_list); in ibmvnic_reset()
2598 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n", in ibmvnic_reset()
2600 queue_work(system_long_wq, &adapter->ibmvnic_reset); in ibmvnic_reset()
2605 spin_unlock_irqrestore(&adapter->rwi_lock, flags); in ibmvnic_reset()
2615 struct ibmvnic_adapter *adapter = netdev_priv(dev); in ibmvnic_tx_timeout() local
2617 if (test_bit(0, &adapter->resetting)) { in ibmvnic_tx_timeout()
2618 netdev_err(adapter->netdev, in ibmvnic_tx_timeout()
2619 "Adapter is resetting, skip timeout reset\n"); in ibmvnic_tx_timeout()
2625 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) { in ibmvnic_tx_timeout()
2629 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT); in ibmvnic_tx_timeout()
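The ibmvnic_tx_timeout() hits show a two-gate rate limit: skip if a reset is already running, and skip if the last reset was within one watchdog period. Reconstructed (the netdev_dbg on the early-out path and the txqueue parameter are assumptions; the signature depends on kernel version):

static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        struct ibmvnic_adapter *adapter = netdev_priv(dev);

        if (test_bit(0, &adapter->resetting)) {
                netdev_err(adapter->netdev,
                           "Adapter is resetting, skip timeout reset\n");
                return;
        }

        /* Don't queue another reset within one watchdog period of the last */
        if (time_before(jiffies,
                        adapter->last_reset_time + dev->watchdog_timeo)) {
                netdev_dbg(dev, "Not yet time to tx timeout.\n");
                return;
        }

        ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}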
2632 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, in remove_buff_from_pool() argument
2635 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; in remove_buff_from_pool()
2648 struct ibmvnic_adapter *adapter; in ibmvnic_poll() local
2654 adapter = netdev_priv(netdev); in ibmvnic_poll()
2655 scrq_num = (int)(napi - adapter->napi); in ibmvnic_poll()
2657 rx_scrq = adapter->rx_scrq[scrq_num]; in ibmvnic_poll()
2668 if (unlikely(test_bit(0, &adapter->resetting) && in ibmvnic_poll()
2669 adapter->reset_reason != VNIC_RESET_NON_FATAL)) { in ibmvnic_poll()
2670 enable_scrq_irq(adapter, rx_scrq); in ibmvnic_poll()
2675 if (!pending_scrq(adapter, rx_scrq)) in ibmvnic_poll()
2677 next = ibmvnic_next_scrq(adapter, rx_scrq); in ibmvnic_poll()
2687 remove_buff_from_pool(adapter, rx_buff); in ibmvnic_poll()
2692 remove_buff_from_pool(adapter, rx_buff); in ibmvnic_poll()
2708 if (adapter->rx_vlan_header_insertion && in ibmvnic_poll()
2715 remove_buff_from_pool(adapter, rx_buff); in ibmvnic_poll()
2730 adapter->rx_stats_buffers[scrq_num].packets++; in ibmvnic_poll()
2731 adapter->rx_stats_buffers[scrq_num].bytes += length; in ibmvnic_poll()
2735 if (adapter->state != VNIC_CLOSING && in ibmvnic_poll()
2736 ((atomic_read(&adapter->rx_pool[scrq_num].available) < in ibmvnic_poll()
2737 adapter->req_rx_add_entries_per_subcrq / 2) || in ibmvnic_poll()
2739 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); in ibmvnic_poll()
2742 enable_scrq_irq(adapter, rx_scrq); in ibmvnic_poll()
2743 if (pending_scrq(adapter, rx_scrq)) { in ibmvnic_poll()
2745 disable_scrq_irq(adapter, rx_scrq); in ibmvnic_poll()
2754 static int wait_for_reset(struct ibmvnic_adapter *adapter) in wait_for_reset() argument
2758 adapter->fallback.mtu = adapter->req_mtu; in wait_for_reset()
2759 adapter->fallback.rx_queues = adapter->req_rx_queues; in wait_for_reset()
2760 adapter->fallback.tx_queues = adapter->req_tx_queues; in wait_for_reset()
2761 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; in wait_for_reset()
2762 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; in wait_for_reset()
2764 reinit_completion(&adapter->reset_done); in wait_for_reset()
2765 adapter->wait_for_reset = true; in wait_for_reset()
2766 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); in wait_for_reset()
2772 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000); in wait_for_reset()
2779 if (adapter->reset_done_rc) { in wait_for_reset()
2781 adapter->desired.mtu = adapter->fallback.mtu; in wait_for_reset()
2782 adapter->desired.rx_queues = adapter->fallback.rx_queues; in wait_for_reset()
2783 adapter->desired.tx_queues = adapter->fallback.tx_queues; in wait_for_reset()
2784 adapter->desired.rx_entries = adapter->fallback.rx_entries; in wait_for_reset()
2785 adapter->desired.tx_entries = adapter->fallback.tx_entries; in wait_for_reset()
2787 reinit_completion(&adapter->reset_done); in wait_for_reset()
2788 adapter->wait_for_reset = true; in wait_for_reset()
2789 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); in wait_for_reset()
2794 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, in wait_for_reset()
2802 adapter->wait_for_reset = false; in wait_for_reset()
2809 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_change_mtu() local
2811 adapter->desired.mtu = new_mtu + ETH_HLEN; in ibmvnic_change_mtu()
2813 return wait_for_reset(adapter); in ibmvnic_change_mtu()
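The three ibmvnic_change_mtu() hits are the complete function: record the desired MTU (plus the Ethernet header, since the firmware capability counts it) and trigger a CHANGE_PARAM reset that renegotiates the buffers:

static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        adapter->desired.mtu = new_mtu + ETH_HLEN;

        return wait_for_reset(adapter);
}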
2850 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_get_link_ksettings() local
2853 rc = send_query_phys_parms(adapter); in ibmvnic_get_link_ksettings()
2855 adapter->speed = SPEED_UNKNOWN; in ibmvnic_get_link_ksettings()
2856 adapter->duplex = DUPLEX_UNKNOWN; in ibmvnic_get_link_ksettings()
2858 cmd->base.speed = adapter->speed; in ibmvnic_get_link_ksettings()
2859 cmd->base.duplex = adapter->duplex; in ibmvnic_get_link_ksettings()
2870 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_get_drvinfo() local
2874 strscpy(info->fw_version, adapter->fw_version, in ibmvnic_get_drvinfo()
2880 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_get_msglevel() local
2882 return adapter->msg_enable; in ibmvnic_get_msglevel()
2887 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_set_msglevel() local
2889 adapter->msg_enable = data; in ibmvnic_set_msglevel()
2894 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_get_link() local
2899 return adapter->logical_link_state; in ibmvnic_get_link()
2905 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_get_ringparam() local
2907 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { in ibmvnic_get_ringparam()
2908 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; in ibmvnic_get_ringparam()
2909 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; in ibmvnic_get_ringparam()
2916 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; in ibmvnic_get_ringparam()
2917 ring->tx_pending = adapter->req_tx_entries_per_subcrq; in ibmvnic_get_ringparam()
2925 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_set_ringparam() local
2929 adapter->desired.rx_entries = ring->rx_pending; in ibmvnic_set_ringparam()
2930 adapter->desired.tx_entries = ring->tx_pending; in ibmvnic_set_ringparam()
2932 ret = wait_for_reset(adapter); in ibmvnic_set_ringparam()
2935 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending || in ibmvnic_set_ringparam()
2936 adapter->req_tx_entries_per_subcrq != ring->tx_pending)) in ibmvnic_set_ringparam()
2940 adapter->req_rx_add_entries_per_subcrq, in ibmvnic_set_ringparam()
2941 adapter->req_tx_entries_per_subcrq); in ibmvnic_set_ringparam()
2948 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_get_channels() local
2950 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) { in ibmvnic_get_channels()
2951 channels->max_rx = adapter->max_rx_queues; in ibmvnic_get_channels()
2952 channels->max_tx = adapter->max_tx_queues; in ibmvnic_get_channels()
2960 channels->rx_count = adapter->req_rx_queues; in ibmvnic_get_channels()
2961 channels->tx_count = adapter->req_tx_queues; in ibmvnic_get_channels()
2969 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_set_channels() local
2973 adapter->desired.rx_queues = channels->rx_count; in ibmvnic_set_channels()
2974 adapter->desired.tx_queues = channels->tx_count; in ibmvnic_set_channels()
2976 ret = wait_for_reset(adapter); in ibmvnic_set_channels()
2979 (adapter->req_rx_queues != channels->rx_count || in ibmvnic_set_channels()
2980 adapter->req_tx_queues != channels->tx_count)) in ibmvnic_set_channels()
2984 adapter->req_rx_queues, adapter->req_tx_queues); in ibmvnic_set_channels()
2990 struct ibmvnic_adapter *adapter = netdev_priv(dev); in ibmvnic_get_strings() local
2999 for (i = 0; i < adapter->req_tx_queues; i++) { in ibmvnic_get_strings()
3011 for (i = 0; i < adapter->req_rx_queues; i++) { in ibmvnic_get_strings()
3035 struct ibmvnic_adapter *adapter = netdev_priv(dev); in ibmvnic_get_sset_count() local
3040 adapter->req_tx_queues * NUM_TX_STATS + in ibmvnic_get_sset_count()
3041 adapter->req_rx_queues * NUM_RX_STATS; in ibmvnic_get_sset_count()
3052 struct ibmvnic_adapter *adapter = netdev_priv(dev); in ibmvnic_get_ethtool_stats() local
3060 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); in ibmvnic_get_ethtool_stats()
3065 reinit_completion(&adapter->stats_done); in ibmvnic_get_ethtool_stats()
3066 rc = ibmvnic_send_crq(adapter, &crq); in ibmvnic_get_ethtool_stats()
3069 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000); in ibmvnic_get_ethtool_stats()
3075 (adapter, ibmvnic_stats[i].offset)); in ibmvnic_get_ethtool_stats()
3077 for (j = 0; j < adapter->req_tx_queues; j++) { in ibmvnic_get_ethtool_stats()
3078 data[i] = adapter->tx_stats_buffers[j].packets; in ibmvnic_get_ethtool_stats()
3080 data[i] = adapter->tx_stats_buffers[j].bytes; in ibmvnic_get_ethtool_stats()
3082 data[i] = adapter->tx_stats_buffers[j].dropped_packets; in ibmvnic_get_ethtool_stats()
3086 for (j = 0; j < adapter->req_rx_queues; j++) { in ibmvnic_get_ethtool_stats()
3087 data[i] = adapter->rx_stats_buffers[j].packets; in ibmvnic_get_ethtool_stats()
3089 data[i] = adapter->rx_stats_buffers[j].bytes; in ibmvnic_get_ethtool_stats()
3091 data[i] = adapter->rx_stats_buffers[j].interrupts; in ibmvnic_get_ethtool_stats()
3098 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_get_priv_flags() local
3100 return adapter->priv_flags; in ibmvnic_get_priv_flags()
3105 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_set_priv_flags() local
3109 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES; in ibmvnic_set_priv_flags()
3111 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES; in ibmvnic_set_priv_flags()
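Likewise, the ibmvnic_set_priv_flags() hits give essentially the whole body: the single "use server maxes" private flag is set or cleared with the usual bitmask toggle:

static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
{
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);

        if (flags & IBMVNIC_USE_SERVER_MAXES)
                adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
        else
                adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;

        return 0;
}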
3135 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, in reset_one_sub_crq_queue() argument
3141 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n"); in reset_one_sub_crq_queue()
3157 netdev_dbg(adapter->netdev, "Invalid scrq reset\n"); in reset_one_sub_crq_queue()
3161 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, in reset_one_sub_crq_queue()
3166 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) in reset_sub_crq_queues() argument
3170 if (!adapter->tx_scrq || !adapter->rx_scrq) in reset_sub_crq_queues()
3173 for (i = 0; i < adapter->req_tx_queues; i++) { in reset_sub_crq_queues()
3174 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); in reset_sub_crq_queues()
3175 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); in reset_sub_crq_queues()
3180 for (i = 0; i < adapter->req_rx_queues; i++) { in reset_sub_crq_queues()
3181 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); in reset_sub_crq_queues()
3182 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); in reset_sub_crq_queues()
3190 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, in release_sub_crq_queue() argument
3194 struct device *dev = &adapter->vdev->dev; in release_sub_crq_queue()
3197 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); in release_sub_crq_queue()
3203 adapter->vdev->unit_address, in release_sub_crq_queue()
3208 netdev_err(adapter->netdev, in release_sub_crq_queue()
3226 *adapter) in init_sub_crq_queue()
3228 struct device *dev = &adapter->vdev->dev; in init_sub_crq_queue()
3250 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, in init_sub_crq_queue()
3254 rc = ibmvnic_reset_crq(adapter); in init_sub_crq_queue()
3257 dev_warn(dev, "Partner adapter not ready, waiting.\n"); in init_sub_crq_queue()
3263 scrq->adapter = adapter; in init_sub_crq_queue()
3278 netdev_dbg(adapter->netdev, in init_sub_crq_queue()
3287 adapter->vdev->unit_address, in init_sub_crq_queue()
3301 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free) in release_sub_crqs() argument
3305 if (adapter->tx_scrq) { in release_sub_crqs()
3306 for (i = 0; i < adapter->num_active_tx_scrqs; i++) { in release_sub_crqs()
3307 if (!adapter->tx_scrq[i]) in release_sub_crqs()
3310 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", in release_sub_crqs()
3312 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]); in release_sub_crqs()
3313 if (adapter->tx_scrq[i]->irq) { in release_sub_crqs()
3314 free_irq(adapter->tx_scrq[i]->irq, in release_sub_crqs()
3315 adapter->tx_scrq[i]); in release_sub_crqs()
3316 irq_dispose_mapping(adapter->tx_scrq[i]->irq); in release_sub_crqs()
3317 adapter->tx_scrq[i]->irq = 0; in release_sub_crqs()
3320 release_sub_crq_queue(adapter, adapter->tx_scrq[i], in release_sub_crqs()
3324 kfree(adapter->tx_scrq); in release_sub_crqs()
3325 adapter->tx_scrq = NULL; in release_sub_crqs()
3326 adapter->num_active_tx_scrqs = 0; in release_sub_crqs()
3329 if (adapter->rx_scrq) { in release_sub_crqs()
3330 for (i = 0; i < adapter->num_active_rx_scrqs; i++) { in release_sub_crqs()
3331 if (!adapter->rx_scrq[i]) in release_sub_crqs()
3334 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", in release_sub_crqs()
3336 if (adapter->rx_scrq[i]->irq) { in release_sub_crqs()
3337 free_irq(adapter->rx_scrq[i]->irq, in release_sub_crqs()
3338 adapter->rx_scrq[i]); in release_sub_crqs()
3339 irq_dispose_mapping(adapter->rx_scrq[i]->irq); in release_sub_crqs()
3340 adapter->rx_scrq[i]->irq = 0; in release_sub_crqs()
3343 release_sub_crq_queue(adapter, adapter->rx_scrq[i], in release_sub_crqs()
3347 kfree(adapter->rx_scrq); in release_sub_crqs()
3348 adapter->rx_scrq = NULL; in release_sub_crqs()
3349 adapter->num_active_rx_scrqs = 0; in release_sub_crqs()
3353 static int disable_scrq_irq(struct ibmvnic_adapter *adapter, in disable_scrq_irq() argument
3356 struct device *dev = &adapter->vdev->dev; in disable_scrq_irq()
3359 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, in disable_scrq_irq()
3367 static int enable_scrq_irq(struct ibmvnic_adapter *adapter, in enable_scrq_irq() argument
3370 struct device *dev = &adapter->vdev->dev; in enable_scrq_irq()
3378 if (test_bit(0, &adapter->resetting) && in enable_scrq_irq()
3379 adapter->reset_reason == VNIC_RESET_MOBILITY) { in enable_scrq_irq()
3391 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, in enable_scrq_irq()
3399 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, in ibmvnic_complete_tx() argument
3402 struct device *dev = &adapter->vdev->dev; in ibmvnic_complete_tx()
3411 while (pending_scrq(adapter, scrq)) { in ibmvnic_complete_tx()
3417 next = ibmvnic_next_scrq(adapter, scrq); in ibmvnic_complete_tx()
3421 tx_pool = &adapter->tso_pool[pool]; in ibmvnic_complete_tx()
3424 tx_pool = &adapter->tx_pool[pool]; in ibmvnic_complete_tx()
3441 netdev_warn(adapter->netdev, in ibmvnic_complete_tx()
3452 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); in ibmvnic_complete_tx()
3456 (adapter->req_tx_entries_per_subcrq / 2) && in ibmvnic_complete_tx()
3457 __netif_subqueue_stopped(adapter->netdev, in ibmvnic_complete_tx()
3459 netif_wake_subqueue(adapter->netdev, scrq->pool_index); in ibmvnic_complete_tx()
3460 netdev_dbg(adapter->netdev, "Started queue %d\n", in ibmvnic_complete_tx()
3465 enable_scrq_irq(adapter, scrq); in ibmvnic_complete_tx()
3467 if (pending_scrq(adapter, scrq)) { in ibmvnic_complete_tx()
3468 disable_scrq_irq(adapter, scrq); in ibmvnic_complete_tx()
3478 struct ibmvnic_adapter *adapter = scrq->adapter; in ibmvnic_interrupt_tx() local
3480 disable_scrq_irq(adapter, scrq); in ibmvnic_interrupt_tx()
3481 ibmvnic_complete_tx(adapter, scrq); in ibmvnic_interrupt_tx()
3489 struct ibmvnic_adapter *adapter = scrq->adapter; in ibmvnic_interrupt_rx() local
3494 if (unlikely(adapter->state != VNIC_OPEN)) in ibmvnic_interrupt_rx()
3497 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; in ibmvnic_interrupt_rx()
3499 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { in ibmvnic_interrupt_rx()
3500 disable_scrq_irq(adapter, scrq); in ibmvnic_interrupt_rx()
3501 __napi_schedule(&adapter->napi[scrq->scrq_num]); in ibmvnic_interrupt_rx()
3507 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) in init_sub_crq_irqs() argument
3509 struct device *dev = &adapter->vdev->dev; in init_sub_crq_irqs()
3514 for (i = 0; i < adapter->req_tx_queues; i++) { in init_sub_crq_irqs()
3515 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n", in init_sub_crq_irqs()
3517 scrq = adapter->tx_scrq[i]; in init_sub_crq_irqs()
3527 adapter->vdev->unit_address, i); in init_sub_crq_irqs()
3539 for (i = 0; i < adapter->req_rx_queues; i++) { in init_sub_crq_irqs()
3540 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n", in init_sub_crq_irqs()
3542 scrq = adapter->rx_scrq[i]; in init_sub_crq_irqs()
3550 adapter->vdev->unit_address, i); in init_sub_crq_irqs()
3564 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); in init_sub_crq_irqs()
3565 irq_dispose_mapping(adapter->rx_scrq[j]->irq); in init_sub_crq_irqs()
3567 i = adapter->req_tx_queues; in init_sub_crq_irqs()
3570 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); in init_sub_crq_irqs()
3571 irq_dispose_mapping(adapter->tx_scrq[j]->irq); in init_sub_crq_irqs()
3573 release_sub_crqs(adapter, 1); in init_sub_crq_irqs()
3577 static int init_sub_crqs(struct ibmvnic_adapter *adapter) in init_sub_crqs() argument
3579 struct device *dev = &adapter->vdev->dev; in init_sub_crqs()
3586 total_queues = adapter->req_tx_queues + adapter->req_rx_queues; in init_sub_crqs()
3593 allqueues[i] = init_sub_crq_queue(adapter); in init_sub_crqs()
3603 adapter->min_tx_queues + adapter->min_rx_queues) { in init_sub_crqs()
3610 netdev_dbg(adapter->netdev, "Reducing number of queues\n"); in init_sub_crqs()
3613 if (adapter->req_rx_queues > adapter->min_rx_queues) in init_sub_crqs()
3614 adapter->req_rx_queues--; in init_sub_crqs()
3619 if (adapter->req_tx_queues > adapter->min_tx_queues) in init_sub_crqs()
3620 adapter->req_tx_queues--; in init_sub_crqs()
3627 adapter->tx_scrq = kcalloc(adapter->req_tx_queues, in init_sub_crqs()
3628 sizeof(*adapter->tx_scrq), GFP_KERNEL); in init_sub_crqs()
3629 if (!adapter->tx_scrq) in init_sub_crqs()
3632 for (i = 0; i < adapter->req_tx_queues; i++) { in init_sub_crqs()
3633 adapter->tx_scrq[i] = allqueues[i]; in init_sub_crqs()
3634 adapter->tx_scrq[i]->pool_index = i; in init_sub_crqs()
3635 adapter->num_active_tx_scrqs++; in init_sub_crqs()
3638 adapter->rx_scrq = kcalloc(adapter->req_rx_queues, in init_sub_crqs()
3639 sizeof(*adapter->rx_scrq), GFP_KERNEL); in init_sub_crqs()
3640 if (!adapter->rx_scrq) in init_sub_crqs()
3643 for (i = 0; i < adapter->req_rx_queues; i++) { in init_sub_crqs()
3644 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; in init_sub_crqs()
3645 adapter->rx_scrq[i]->scrq_num = i; in init_sub_crqs()
3646 adapter->num_active_rx_scrqs++; in init_sub_crqs()
3653 kfree(adapter->tx_scrq); in init_sub_crqs()
3654 adapter->tx_scrq = NULL; in init_sub_crqs()
3657 release_sub_crq_queue(adapter, allqueues[i], 1); in init_sub_crqs()
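When fewer sub-CRQs come back than the sum of tx and rx requests, the function above trims req_rx_queues and req_tx_queues alternately, never dropping below the advertised minimums, until the total fits. A small self-contained model of that trimming loop (the numbers are made up):

/* Trim tx/rx queue requests alternately down to what was allocated,
 * respecting per-direction minimums. Values are illustrative. */
#include <stdio.h>

int main(void)
{
	int allocated = 6;                 /* queues actually granted */
	int req_tx = 5, req_rx = 5;        /* what we asked for */
	const int min_tx = 1, min_rx = 1;

	while (req_tx + req_rx > allocated) {
		/* give up one rx queue, then one tx queue, in turns */
		if (req_rx > min_rx)
			req_rx--;
		if (req_tx + req_rx <= allocated)
			break;
		if (req_tx > min_tx)
			req_tx--;
		if (req_tx == min_tx && req_rx == min_rx)
			break;             /* cannot trim any further */
	}
	printf("using %d tx + %d rx queues\n", req_tx, req_rx);
	return 0;
}

Alternating the decrements keeps the tx/rx split roughly balanced instead of starving one direction first.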
3662 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) in send_request_cap() argument
3664 struct device *dev = &adapter->vdev->dev; in send_request_cap()
3672 if (adapter->min_tx_entries_per_subcrq > entries_page || in send_request_cap()
3673 adapter->min_rx_add_entries_per_subcrq > entries_page) { in send_request_cap()
3678 if (adapter->desired.mtu) in send_request_cap()
3679 adapter->req_mtu = adapter->desired.mtu; in send_request_cap()
3681 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; in send_request_cap()
3683 if (!adapter->desired.tx_entries) in send_request_cap()
3684 adapter->desired.tx_entries = in send_request_cap()
3685 adapter->max_tx_entries_per_subcrq; in send_request_cap()
3686 if (!adapter->desired.rx_entries) in send_request_cap()
3687 adapter->desired.rx_entries = in send_request_cap()
3688 adapter->max_rx_add_entries_per_subcrq; in send_request_cap()
3691 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); in send_request_cap()
3693 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * in send_request_cap()
3694 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) { in send_request_cap()
3695 adapter->desired.tx_entries = max_entries; in send_request_cap()
3698 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * in send_request_cap()
3699 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) { in send_request_cap()
3700 adapter->desired.rx_entries = max_entries; in send_request_cap()
3703 if (adapter->desired.tx_entries) in send_request_cap()
3704 adapter->req_tx_entries_per_subcrq = in send_request_cap()
3705 adapter->desired.tx_entries; in send_request_cap()
3707 adapter->req_tx_entries_per_subcrq = in send_request_cap()
3708 adapter->max_tx_entries_per_subcrq; in send_request_cap()
3710 if (adapter->desired.rx_entries) in send_request_cap()
3711 adapter->req_rx_add_entries_per_subcrq = in send_request_cap()
3712 adapter->desired.rx_entries; in send_request_cap()
3714 adapter->req_rx_add_entries_per_subcrq = in send_request_cap()
3715 adapter->max_rx_add_entries_per_subcrq; in send_request_cap()
3717 if (adapter->desired.tx_queues) in send_request_cap()
3718 adapter->req_tx_queues = in send_request_cap()
3719 adapter->desired.tx_queues; in send_request_cap()
3721 adapter->req_tx_queues = in send_request_cap()
3722 adapter->opt_tx_comp_sub_queues; in send_request_cap()
3724 if (adapter->desired.rx_queues) in send_request_cap()
3725 adapter->req_rx_queues = in send_request_cap()
3726 adapter->desired.rx_queues; in send_request_cap()
3728 adapter->req_rx_queues = in send_request_cap()
3729 adapter->opt_rx_comp_queues; in send_request_cap()
3731 adapter->req_rx_add_queues = adapter->max_rx_add_queues; in send_request_cap()
3739 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); in send_request_cap()
3740 atomic_inc(&adapter->running_cap_crqs); in send_request_cap()
3741 ibmvnic_send_crq(adapter, &crq); in send_request_cap()
3744 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); in send_request_cap()
3745 atomic_inc(&adapter->running_cap_crqs); in send_request_cap()
3746 ibmvnic_send_crq(adapter, &crq); in send_request_cap()
3749 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); in send_request_cap()
3750 atomic_inc(&adapter->running_cap_crqs); in send_request_cap()
3751 ibmvnic_send_crq(adapter, &crq); in send_request_cap()
3756 cpu_to_be64(adapter->req_tx_entries_per_subcrq); in send_request_cap()
3757 atomic_inc(&adapter->running_cap_crqs); in send_request_cap()
3758 ibmvnic_send_crq(adapter, &crq); in send_request_cap()
3763 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); in send_request_cap()
3764 atomic_inc(&adapter->running_cap_crqs); in send_request_cap()
3765 ibmvnic_send_crq(adapter, &crq); in send_request_cap()
3768 crq.request_capability.number = cpu_to_be64(adapter->req_mtu); in send_request_cap()
3769 atomic_inc(&adapter->running_cap_crqs); in send_request_cap()
3770 ibmvnic_send_crq(adapter, &crq); in send_request_cap()
3772 if (adapter->netdev->flags & IFF_PROMISC) { in send_request_cap()
3773 if (adapter->promisc_supported) { in send_request_cap()
3777 atomic_inc(&adapter->running_cap_crqs); in send_request_cap()
3778 ibmvnic_send_crq(adapter, &crq); in send_request_cap()
3784 atomic_inc(&adapter->running_cap_crqs); in send_request_cap()
3785 ibmvnic_send_crq(adapter, &crq); in send_request_cap()
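Each REQUEST_CAPABILITY CRQ above bumps running_cap_crqs before it is sent; the response handler decrements the counter and only advances to the next phase (send_query_ip_offload()) once it drains to zero. A toy version of that in-flight accounting with a C11 atomic (names illustrative):

/* In-flight request accounting: act only when the last reply lands. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int running_cap_crqs;

static void send_one_request(const char *what)
{
	atomic_fetch_add(&running_cap_crqs, 1);
	printf("sent %s\n", what);
}

static void handle_one_response(void)
{
	/* fetch_sub returns the old value: old == 1 means now zero */
	if (atomic_fetch_sub(&running_cap_crqs, 1) == 1)
		printf("all capability replies in, next phase\n");
}

int main(void)
{
	send_one_request("req_tx_queues");
	send_one_request("req_rx_queues");
	send_one_request("req_mtu");
	handle_one_response();
	handle_one_response();
	handle_one_response();   /* counter hits zero here */
	return 0;
}

Incrementing before the send matters: if the reply could race in ahead of the increment, the counter could touch zero while requests were still outstanding.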
3789 static int pending_scrq(struct ibmvnic_adapter *adapter, in pending_scrq() argument
3805 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter, in ibmvnic_next_scrq() argument
3829 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter) in ibmvnic_next_crq() argument
3831 struct ibmvnic_crq_queue *queue = &adapter->crq; in ibmvnic_next_crq()
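pending_scrq() and ibmvnic_next_crq() walk a ring whose entries carry a valid flag in their first byte: the consumer takes an entry only if the flag is set, advances, and wraps at the end; the slot is handed back by clearing the flag after handling. A self-contained model of that single-producer ring (the flag value and entry layout are assumptions):

/* Valid-flag ring buffer, modeled on a CRQ-style queue. */
#include <stdio.h>

#define RING_LEN 4
#define ENTRY_VALID 0x80    /* assumed marker in the first byte */

struct entry { unsigned char first; unsigned char payload; };

static struct entry ring[RING_LEN];
static int cur;             /* consumer cursor */

static struct entry *next_entry(void)
{
	struct entry *e = &ring[cur];

	if (!(e->first & ENTRY_VALID))
		return NULL;              /* nothing pending */
	if (++cur == RING_LEN)
		cur = 0;                  /* wrap */
	return e;
}

int main(void)
{
	struct entry *e;

	ring[0] = (struct entry){ ENTRY_VALID, 'a' };
	ring[1] = (struct entry){ ENTRY_VALID, 'b' };

	while ((e = next_entry()) != NULL) {
		printf("got %c\n", e->payload);
		e->first = 0;             /* hand the slot back */
	}
	return 0;
}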
3850 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n", in print_subcrq_error()
3855 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n", in print_subcrq_error()
3864 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, in send_subcrq_indirect() argument
3867 unsigned int ua = adapter->vdev->unit_address; in send_subcrq_indirect()
3868 struct device *dev = &adapter->vdev->dev; in send_subcrq_indirect()
3883 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, in ibmvnic_send_crq() argument
3886 unsigned int ua = adapter->vdev->unit_address; in ibmvnic_send_crq()
3887 struct device *dev = &adapter->vdev->dev; in ibmvnic_send_crq()
3891 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n", in ibmvnic_send_crq()
3895 if (!adapter->crq.active && in ibmvnic_send_crq()
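A CRQ command is a small fixed-size block handed to the hypervisor as two 64-bit words, which is why the debug line above prints the request as two %016lx values. A userspace illustration of viewing a 16-byte command as two big-endian words; the exact field layout here is assumed, and be64toh()/htobe64() come from glibc's endian.h:

/* View a 16-byte command as the two 64-bit words an hcall would take. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

union crq {
	struct { uint8_t first, cmd; uint8_t pad[14]; } generic;
	uint64_t words[2];
};

int main(void)
{
	union crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = 0x80;     /* "valid command" marker, assumed */
	crq.generic.cmd = 0x01;

	printf("Sending CRQ: %016llx %016llx\n",
	       (unsigned long long)be64toh(crq.words[0]),
	       (unsigned long long)be64toh(crq.words[1]));
	return 0;
}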
3920 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) in ibmvnic_send_crq_init() argument
3922 struct device *dev = &adapter->vdev->dev; in ibmvnic_send_crq_init()
3930 netdev_dbg(adapter->netdev, "Sending CRQ init\n"); in ibmvnic_send_crq_init()
3933 rc = ibmvnic_send_crq(adapter, &crq); in ibmvnic_send_crq_init()
3955 static int vnic_client_data_len(struct ibmvnic_adapter *adapter) in vnic_client_data_len() argument
3966 len += strlen(adapter->netdev->name) + 1; in vnic_client_data_len()
3971 static void vnic_add_client_data(struct ibmvnic_adapter *adapter, in vnic_add_client_data() argument
3993 len = strlen(adapter->netdev->name) + 1; in vnic_add_client_data()
3995 strscpy(vlcd->name, adapter->netdev->name, len); in vnic_add_client_data()
3998 static int send_login(struct ibmvnic_adapter *adapter) in send_login() argument
4002 struct device *dev = &adapter->vdev->dev; in send_login()
4015 if (!adapter->tx_scrq || !adapter->rx_scrq) { in send_login()
4016 netdev_err(adapter->netdev, in send_login()
4021 release_login_buffer(adapter); in send_login()
4022 release_login_rsp_buffer(adapter); in send_login()
4024 client_data_len = vnic_client_data_len(adapter); in send_login()
4028 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) + in send_login()
4043 sizeof(u64) * adapter->req_tx_queues + in send_login()
4044 sizeof(u64) * adapter->req_rx_queues + in send_login()
4045 sizeof(u64) * adapter->req_rx_queues + in send_login()
4059 adapter->login_buf = login_buffer; in send_login()
4060 adapter->login_buf_token = buffer_token; in send_login()
4061 adapter->login_buf_sz = buffer_size; in send_login()
4062 adapter->login_rsp_buf = login_rsp_buffer; in send_login()
4063 adapter->login_rsp_buf_token = rsp_buffer_token; in send_login()
4064 adapter->login_rsp_buf_sz = rsp_buffer_size; in send_login()
4068 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues); in send_login()
4071 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues); in send_login()
4074 sizeof(u64) * adapter->req_tx_queues); in send_login()
4082 sizeof(u64) * adapter->req_tx_queues); in send_login()
4084 for (i = 0; i < adapter->req_tx_queues; i++) { in send_login()
4085 if (adapter->tx_scrq[i]) { in send_login()
4087 cpu_to_be64(adapter->tx_scrq[i]->crq_num); in send_login()
4091 for (i = 0; i < adapter->req_rx_queues; i++) { in send_login()
4092 if (adapter->rx_scrq[i]) { in send_login()
4094 cpu_to_be64(adapter->rx_scrq[i]->crq_num); in send_login()
4100 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues)); in send_login()
4105 vnic_add_client_data(adapter, vlcd); in send_login()
4107 netdev_dbg(adapter->netdev, "Login Buffer:\n"); in send_login()
4108 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { in send_login()
4109 netdev_dbg(adapter->netdev, "%016lx\n", in send_login()
4110 ((unsigned long *)(adapter->login_buf))[i]); in send_login()
4119 adapter->login_pending = true; in send_login()
4120 rc = ibmvnic_send_crq(adapter, &crq); in send_login()
4122 adapter->login_pending = false; in send_login()
4123 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc); in send_login()
4131 adapter->login_rsp_buf = NULL; in send_login()
4136 adapter->login_buf = NULL; in send_login()
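The login buffer built above is a single allocation: a fixed header followed by u64 arrays (tx handles, rx handles, client data) whose byte offsets are stored big-endian in the header, which is how the response parser later locates them. A compact userspace model of laying out and reading back such a buffer; the struct and field names are invented, and htobe32()/be64toh() come from endian.h:

/* One allocation: header plus trailing arrays addressed by byte offsets. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct hdr {
	uint32_t len;
	uint32_t num_tx;          /* entries in the tx handle array */
	uint32_t off_tx;          /* byte offset of that array */
	uint32_t reserved;        /* keep the trailing array 8-byte aligned */
};

int main(void)
{
	uint32_t num_tx = 3;
	size_t size = sizeof(struct hdr) + num_tx * sizeof(uint64_t);
	struct hdr *h = calloc(1, size);
	uint64_t *tx;

	h->len = htobe32((uint32_t)size);
	h->num_tx = htobe32(num_tx);
	h->off_tx = htobe32(sizeof(struct hdr));

	/* locate the array through the stored offset, as a parser would */
	tx = (uint64_t *)((char *)h + be32toh(h->off_tx));
	for (uint32_t i = 0; i < num_tx; i++)
		tx[i] = htobe64(0x1000 + i);   /* pretend queue handles */

	printf("buffer %zu bytes, first handle %#llx\n", size,
	       (unsigned long long)be64toh(tx[0]));
	free(h);
	return 0;
}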
4141 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, in send_request_map() argument
4152 return ibmvnic_send_crq(adapter, &crq); in send_request_map()
4155 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) in send_request_unmap() argument
4163 return ibmvnic_send_crq(adapter, &crq); in send_request_unmap()
4166 static void send_query_map(struct ibmvnic_adapter *adapter) in send_query_map() argument
4173 ibmvnic_send_crq(adapter, &crq); in send_query_map()
4177 static void send_query_cap(struct ibmvnic_adapter *adapter) in send_query_cap() argument
4181 atomic_set(&adapter->running_cap_crqs, 0); in send_query_cap()
4187 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4188 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4191 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4192 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4195 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4196 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4199 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4200 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4203 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4204 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4207 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4208 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4212 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4213 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4217 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4218 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4222 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4223 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4227 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4228 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4231 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4232 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4235 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4236 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4239 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4240 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4243 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4244 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4247 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4248 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4251 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4252 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4255 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4256 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4259 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4260 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4263 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4264 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4267 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4268 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4271 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4272 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4276 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4277 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4281 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4282 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4286 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4287 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4290 atomic_inc(&adapter->running_cap_crqs); in send_query_cap()
4291 ibmvnic_send_crq(adapter, &crq); in send_query_cap()
4294 static void send_query_ip_offload(struct ibmvnic_adapter *adapter) in send_query_ip_offload() argument
4297 struct device *dev = &adapter->vdev->dev; in send_query_ip_offload()
4300 adapter->ip_offload_tok = in send_query_ip_offload()
4302 &adapter->ip_offload_buf, in send_query_ip_offload()
4306 if (dma_mapping_error(dev, adapter->ip_offload_tok)) { in send_query_ip_offload()
4317 cpu_to_be32(adapter->ip_offload_tok); in send_query_ip_offload()
4319 ibmvnic_send_crq(adapter, &crq); in send_query_ip_offload()
4322 static void send_control_ip_offload(struct ibmvnic_adapter *adapter) in send_control_ip_offload() argument
4324 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl; in send_control_ip_offload()
4325 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; in send_control_ip_offload()
4326 struct device *dev = &adapter->vdev->dev; in send_control_ip_offload()
4330 adapter->ip_offload_ctrl_tok = in send_control_ip_offload()
4333 sizeof(adapter->ip_offload_ctrl), in send_control_ip_offload()
4336 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) { in send_control_ip_offload()
4341 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); in send_control_ip_offload()
4356 if (adapter->state != VNIC_PROBING) { in send_control_ip_offload()
4357 old_hw_features = adapter->netdev->hw_features; in send_control_ip_offload()
4358 adapter->netdev->hw_features = 0; in send_control_ip_offload()
4361 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; in send_control_ip_offload()
4364 adapter->netdev->hw_features |= NETIF_F_IP_CSUM; in send_control_ip_offload()
4367 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM; in send_control_ip_offload()
4369 if ((adapter->netdev->features & in send_control_ip_offload()
4371 adapter->netdev->hw_features |= NETIF_F_RXCSUM; in send_control_ip_offload()
4374 adapter->netdev->hw_features |= NETIF_F_TSO; in send_control_ip_offload()
4376 adapter->netdev->hw_features |= NETIF_F_TSO6; in send_control_ip_offload()
4378 if (adapter->state == VNIC_PROBING) { in send_control_ip_offload()
4379 adapter->netdev->features |= adapter->netdev->hw_features; in send_control_ip_offload()
4380 } else if (old_hw_features != adapter->netdev->hw_features) { in send_control_ip_offload()
4384 adapter->netdev->features &= adapter->netdev->hw_features; in send_control_ip_offload()
4386 tmp = (old_hw_features ^ adapter->netdev->hw_features) & in send_control_ip_offload()
4387 adapter->netdev->hw_features; in send_control_ip_offload()
4388 adapter->netdev->features |= in send_control_ip_offload()
4389 tmp & adapter->netdev->wanted_features; in send_control_ip_offload()
4396 cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); in send_control_ip_offload()
4397 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok); in send_control_ip_offload()
4398 ibmvnic_send_crq(adapter, &crq); in send_control_ip_offload()
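The feature fix-up above is plain bitmask algebra: hw_features is rebuilt from what firmware reported, active features the hardware no longer offers are dropped, and bits that just became available are re-enabled only if user space wanted them. The same algebra in a standalone snippet (the flag values are invented):

/* Reconcile active features against a refreshed capability mask. */
#include <stdio.h>

#define F_CSUM  0x1UL
#define F_TSO   0x2UL
#define F_GRO   0x4UL

int main(void)
{
	unsigned long old_hw = F_CSUM | F_TSO;
	unsigned long hw = F_CSUM | F_GRO;     /* firmware's new answer */
	unsigned long features = F_CSUM | F_TSO;
	unsigned long wanted = F_CSUM | F_GRO; /* what user space asked for */
	unsigned long tmp;

	features &= hw;                        /* drop what vanished (TSO) */
	tmp = (old_hw ^ hw) & hw;              /* bits that just appeared */
	features |= tmp & wanted;              /* enable them if wanted */

	printf("features=%#lx (GRO on, TSO off)\n", features);
	return 0;
}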
4402 struct ibmvnic_adapter *adapter) in handle_vpd_size_rsp() argument
4404 struct device *dev = &adapter->vdev->dev; in handle_vpd_size_rsp()
4409 complete(&adapter->fw_done); in handle_vpd_size_rsp()
4413 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len); in handle_vpd_size_rsp()
4414 complete(&adapter->fw_done); in handle_vpd_size_rsp()
4418 struct ibmvnic_adapter *adapter) in handle_vpd_rsp() argument
4420 struct device *dev = &adapter->vdev->dev; in handle_vpd_rsp()
4424 memset(adapter->fw_version, 0, 32); in handle_vpd_rsp()
4426 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len, in handle_vpd_rsp()
4438 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len); in handle_vpd_rsp()
4445 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) { in handle_vpd_rsp()
4452 /* copy firmware version string from vpd into adapter */ in handle_vpd_rsp()
4454 (adapter->vpd->buff + adapter->vpd->len)) { in handle_vpd_rsp()
4455 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len); in handle_vpd_rsp()
4461 if (adapter->fw_version[0] == '\0') in handle_vpd_rsp()
4462 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version)); in handle_vpd_rsp()
4463 complete(&adapter->fw_done); in handle_vpd_rsp()
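handle_vpd_rsp() scans the VPD blob for the "RM" keyword with a bounded search, reads a one-byte length, and checks that the version string fits inside the buffer before copying it. A hedged userspace equivalent using GNU memmem() in place of the kernel's strnstr(); the buffer contents are made up:

/* Bounded tag lookup in a binary blob: find "RM", read len byte, copy. */
#define _GNU_SOURCE           /* for memmem() */
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* fake VPD: ... 'R''M' <len> <len bytes of version> ... */
	const char vpd[] = { 'x', 'R', 'M', 4, 'F', 'W', '1', '0', 'y' };
	size_t vpd_len = sizeof(vpd);
	char fw_version[32] = "N/A";
	const char *substr = memmem(vpd, vpd_len, "RM", 2);

	if (substr && substr + 3 <= vpd + vpd_len) {
		unsigned char fw_level_len = substr[2];

		/* copy only if the whole string fits inside the blob */
		if (substr + 3 + fw_level_len <= vpd + vpd_len &&
		    fw_level_len < sizeof(fw_version)) {
			memcpy(fw_version, substr + 3, fw_level_len);
			fw_version[fw_level_len] = '\0';
		}
	}
	printf("firmware: %s\n", fw_version);
	return 0;
}

Every comparison is against the end of the blob rather than a NUL terminator, since VPD is binary data and need not be NUL-terminated.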
4466 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) in handle_query_ip_offload_rsp() argument
4468 struct device *dev = &adapter->vdev->dev; in handle_query_ip_offload_rsp()
4469 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; in handle_query_ip_offload_rsp()
4472 dma_unmap_single(dev, adapter->ip_offload_tok, in handle_query_ip_offload_rsp()
4473 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE); in handle_query_ip_offload_rsp()
4475 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n"); in handle_query_ip_offload_rsp()
4476 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++) in handle_query_ip_offload_rsp()
4477 netdev_dbg(adapter->netdev, "%016lx\n", in handle_query_ip_offload_rsp()
4480 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); in handle_query_ip_offload_rsp()
4481 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); in handle_query_ip_offload_rsp()
4482 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n", in handle_query_ip_offload_rsp()
4484 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n", in handle_query_ip_offload_rsp()
4486 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n", in handle_query_ip_offload_rsp()
4488 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n", in handle_query_ip_offload_rsp()
4490 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n", in handle_query_ip_offload_rsp()
4492 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n", in handle_query_ip_offload_rsp()
4494 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n", in handle_query_ip_offload_rsp()
4496 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n", in handle_query_ip_offload_rsp()
4498 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n", in handle_query_ip_offload_rsp()
4500 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n", in handle_query_ip_offload_rsp()
4502 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n", in handle_query_ip_offload_rsp()
4504 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n", in handle_query_ip_offload_rsp()
4506 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n", in handle_query_ip_offload_rsp()
4508 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n", in handle_query_ip_offload_rsp()
4510 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n", in handle_query_ip_offload_rsp()
4512 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n", in handle_query_ip_offload_rsp()
4514 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n", in handle_query_ip_offload_rsp()
4516 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", in handle_query_ip_offload_rsp()
4519 send_control_ip_offload(adapter); in handle_query_ip_offload_rsp()
4526 return "adapter problem"; in ibmvnic_fw_err_cause()
4545 struct ibmvnic_adapter *adapter) in handle_error_indication() argument
4547 struct device *dev = &adapter->vdev->dev; in handle_error_indication()
4559 ibmvnic_reset(adapter, VNIC_RESET_FATAL); in handle_error_indication()
4561 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL); in handle_error_indication()
4565 struct ibmvnic_adapter *adapter) in handle_change_mac_rsp() argument
4567 struct net_device *netdev = adapter->netdev; in handle_change_mac_rsp()
4568 struct device *dev = &adapter->vdev->dev; in handle_change_mac_rsp()
4581 ether_addr_copy(adapter->mac_addr, in handle_change_mac_rsp()
4584 complete(&adapter->fw_done); in handle_change_mac_rsp()
4589 struct ibmvnic_adapter *adapter) in handle_request_cap_rsp() argument
4591 struct device *dev = &adapter->vdev->dev; in handle_request_cap_rsp()
4595 atomic_dec(&adapter->running_cap_crqs); in handle_request_cap_rsp()
4598 req_value = &adapter->req_tx_queues; in handle_request_cap_rsp()
4602 req_value = &adapter->req_rx_queues; in handle_request_cap_rsp()
4606 req_value = &adapter->req_rx_add_queues; in handle_request_cap_rsp()
4610 req_value = &adapter->req_tx_entries_per_subcrq; in handle_request_cap_rsp()
4614 req_value = &adapter->req_rx_add_entries_per_subcrq; in handle_request_cap_rsp()
4618 req_value = &adapter->req_mtu; in handle_request_cap_rsp()
4622 req_value = &adapter->promisc; in handle_request_cap_rsp()
4644 *req_value = adapter->fallback.mtu; in handle_request_cap_rsp()
4650 send_request_cap(adapter, 1); in handle_request_cap_rsp()
4659 if (atomic_read(&adapter->running_cap_crqs) == 0) { in handle_request_cap_rsp()
4660 adapter->wait_capability = false; in handle_request_cap_rsp()
4661 send_query_ip_offload(adapter); in handle_request_cap_rsp()
4666 struct ibmvnic_adapter *adapter) in handle_login_rsp() argument
4668 struct device *dev = &adapter->vdev->dev; in handle_login_rsp()
4669 struct net_device *netdev = adapter->netdev; in handle_login_rsp()
4670 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; in handle_login_rsp()
4671 struct ibmvnic_login_buffer *login = adapter->login_buf; in handle_login_rsp()
4682 if (!adapter->login_pending) { in handle_login_rsp()
4686 adapter->login_pending = false; in handle_login_rsp()
4688 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, in handle_login_rsp()
4690 dma_unmap_single(dev, adapter->login_rsp_buf_token, in handle_login_rsp()
4691 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); in handle_login_rsp()
4698 adapter->init_done_rc = login_rsp_crq->generic.rc.code; in handle_login_rsp()
4699 complete(&adapter->init_done); in handle_login_rsp()
4703 if (adapter->failover_pending) { in handle_login_rsp()
4704 adapter->init_done_rc = -EAGAIN; in handle_login_rsp()
4706 complete(&adapter->init_done); in handle_login_rsp()
4711 netdev->mtu = adapter->req_mtu - ETH_HLEN; in handle_login_rsp()
4713 netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); in handle_login_rsp()
4714 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { in handle_login_rsp()
4715 netdev_dbg(adapter->netdev, "%016lx\n", in handle_login_rsp()
4716 ((unsigned long *)(adapter->login_rsp_buf))[i]); in handle_login_rsp()
4722 adapter->req_rx_add_queues != in handle_login_rsp()
4725 ibmvnic_reset(adapter, VNIC_RESET_FATAL); in handle_login_rsp()
4728 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + in handle_login_rsp()
4729 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); in handle_login_rsp()
4733 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]); in handle_login_rsp()
4735 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); in handle_login_rsp()
4736 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); in handle_login_rsp()
4738 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + in handle_login_rsp()
4739 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); in handle_login_rsp()
4740 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + in handle_login_rsp()
4741 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs)); in handle_login_rsp()
4744 adapter->tx_scrq[i]->handle = tx_handle_array[i]; in handle_login_rsp()
4747 adapter->rx_scrq[i]->handle = rx_handle_array[i]; in handle_login_rsp()
4749 adapter->num_active_tx_scrqs = num_tx_pools; in handle_login_rsp()
4750 adapter->num_active_rx_scrqs = num_rx_pools; in handle_login_rsp()
4751 release_login_rsp_buffer(adapter); in handle_login_rsp()
4752 release_login_buffer(adapter); in handle_login_rsp()
4753 complete(&adapter->init_done); in handle_login_rsp()
4759 struct ibmvnic_adapter *adapter) in handle_request_unmap_rsp() argument
4761 struct device *dev = &adapter->vdev->dev; in handle_request_unmap_rsp()
4770 struct ibmvnic_adapter *adapter) in handle_query_map_rsp() argument
4772 struct net_device *netdev = adapter->netdev; in handle_query_map_rsp()
4773 struct device *dev = &adapter->vdev->dev; in handle_query_map_rsp()
4787 struct ibmvnic_adapter *adapter) in handle_query_cap_rsp() argument
4789 struct net_device *netdev = adapter->netdev; in handle_query_cap_rsp()
4790 struct device *dev = &adapter->vdev->dev; in handle_query_cap_rsp()
4793 atomic_dec(&adapter->running_cap_crqs); in handle_query_cap_rsp()
4795 atomic_read(&adapter->running_cap_crqs)); in handle_query_cap_rsp()
4804 adapter->min_tx_queues = in handle_query_cap_rsp()
4807 adapter->min_tx_queues); in handle_query_cap_rsp()
4810 adapter->min_rx_queues = in handle_query_cap_rsp()
4813 adapter->min_rx_queues); in handle_query_cap_rsp()
4816 adapter->min_rx_add_queues = in handle_query_cap_rsp()
4819 adapter->min_rx_add_queues); in handle_query_cap_rsp()
4822 adapter->max_tx_queues = in handle_query_cap_rsp()
4825 adapter->max_tx_queues); in handle_query_cap_rsp()
4828 adapter->max_rx_queues = in handle_query_cap_rsp()
4831 adapter->max_rx_queues); in handle_query_cap_rsp()
4834 adapter->max_rx_add_queues = in handle_query_cap_rsp()
4837 adapter->max_rx_add_queues); in handle_query_cap_rsp()
4840 adapter->min_tx_entries_per_subcrq = in handle_query_cap_rsp()
4843 adapter->min_tx_entries_per_subcrq); in handle_query_cap_rsp()
4846 adapter->min_rx_add_entries_per_subcrq = in handle_query_cap_rsp()
4849 adapter->min_rx_add_entries_per_subcrq); in handle_query_cap_rsp()
4852 adapter->max_tx_entries_per_subcrq = in handle_query_cap_rsp()
4855 adapter->max_tx_entries_per_subcrq); in handle_query_cap_rsp()
4858 adapter->max_rx_add_entries_per_subcrq = in handle_query_cap_rsp()
4861 adapter->max_rx_add_entries_per_subcrq); in handle_query_cap_rsp()
4864 adapter->tcp_ip_offload = in handle_query_cap_rsp()
4867 adapter->tcp_ip_offload); in handle_query_cap_rsp()
4870 adapter->promisc_supported = in handle_query_cap_rsp()
4873 adapter->promisc_supported); in handle_query_cap_rsp()
4876 adapter->min_mtu = be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
4877 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; in handle_query_cap_rsp()
4878 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); in handle_query_cap_rsp()
4881 adapter->max_mtu = be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
4882 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; in handle_query_cap_rsp()
4883 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); in handle_query_cap_rsp()
4886 adapter->max_multicast_filters = in handle_query_cap_rsp()
4889 adapter->max_multicast_filters); in handle_query_cap_rsp()
4892 adapter->vlan_header_insertion = in handle_query_cap_rsp()
4894 if (adapter->vlan_header_insertion) in handle_query_cap_rsp()
4897 adapter->vlan_header_insertion); in handle_query_cap_rsp()
4900 adapter->rx_vlan_header_insertion = in handle_query_cap_rsp()
4903 adapter->rx_vlan_header_insertion); in handle_query_cap_rsp()
4906 adapter->max_tx_sg_entries = in handle_query_cap_rsp()
4909 adapter->max_tx_sg_entries); in handle_query_cap_rsp()
4912 adapter->rx_sg_supported = in handle_query_cap_rsp()
4915 adapter->rx_sg_supported); in handle_query_cap_rsp()
4918 adapter->opt_tx_comp_sub_queues = in handle_query_cap_rsp()
4921 adapter->opt_tx_comp_sub_queues); in handle_query_cap_rsp()
4924 adapter->opt_rx_comp_queues = in handle_query_cap_rsp()
4927 adapter->opt_rx_comp_queues); in handle_query_cap_rsp()
4930 adapter->opt_rx_bufadd_q_per_rx_comp_q = in handle_query_cap_rsp()
4933 adapter->opt_rx_bufadd_q_per_rx_comp_q); in handle_query_cap_rsp()
4936 adapter->opt_tx_entries_per_subcrq = in handle_query_cap_rsp()
4939 adapter->opt_tx_entries_per_subcrq); in handle_query_cap_rsp()
4942 adapter->opt_rxba_entries_per_subcrq = in handle_query_cap_rsp()
4945 adapter->opt_rxba_entries_per_subcrq); in handle_query_cap_rsp()
4948 adapter->tx_rx_desc_req = crq->query_capability.number; in handle_query_cap_rsp()
4950 adapter->tx_rx_desc_req); in handle_query_cap_rsp()
4959 if (atomic_read(&adapter->running_cap_crqs) == 0) { in handle_query_cap_rsp()
4960 adapter->wait_capability = false; in handle_query_cap_rsp()
4961 send_request_cap(adapter, 0); in handle_query_cap_rsp()
4965 static int send_query_phys_parms(struct ibmvnic_adapter *adapter) in send_query_phys_parms() argument
4974 mutex_lock(&adapter->fw_lock); in send_query_phys_parms()
4975 adapter->fw_done_rc = 0; in send_query_phys_parms()
4976 reinit_completion(&adapter->fw_done); in send_query_phys_parms()
4978 rc = ibmvnic_send_crq(adapter, &crq); in send_query_phys_parms()
4980 mutex_unlock(&adapter->fw_lock); in send_query_phys_parms()
4984 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); in send_query_phys_parms()
4986 mutex_unlock(&adapter->fw_lock); in send_query_phys_parms()
4990 mutex_unlock(&adapter->fw_lock); in send_query_phys_parms()
4991 return adapter->fw_done_rc ? -EIO : 0; in send_query_phys_parms()
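send_query_phys_parms() shows the driver's synchronous-command idiom: take fw_lock, rearm the completion, send the CRQ, wait with a timeout, and surface fw_done_rc to the caller. A userspace approximation with pthreads, where a second thread plays the part of the firmware reply (compile with -lpthread):

/* Send-and-wait idiom: rearm a completion, fire a request, wait w/ timeout. */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done, done_rc;

static void *firmware(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	done = 1;                      /* cf. complete(&adapter->fw_done) */
	done_rc = 0;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec deadline;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 10;         /* cf. the 10000 ms timeout */

	pthread_mutex_lock(&lock);
	done = 0;                      /* cf. reinit_completion() */
	pthread_create(&t, NULL, firmware, NULL);   /* "send the CRQ" */
	while (!done && rc == 0)
		rc = pthread_cond_timedwait(&cond, &lock, &deadline);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);

	printf(rc ? "timed out\n" : "reply rc=%d\n", done_rc);
	return 0;
}

Rearming the completion before sending is the crucial ordering: a stale "done" from an earlier command must not satisfy this wait.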
4995 struct ibmvnic_adapter *adapter) in handle_query_phys_parms_rsp() argument
4997 struct net_device *netdev = adapter->netdev; in handle_query_phys_parms_rsp()
5008 adapter->speed = SPEED_10; in handle_query_phys_parms_rsp()
5011 adapter->speed = SPEED_100; in handle_query_phys_parms_rsp()
5014 adapter->speed = SPEED_1000; in handle_query_phys_parms_rsp()
5017 adapter->speed = SPEED_10000; in handle_query_phys_parms_rsp()
5020 adapter->speed = SPEED_25000; in handle_query_phys_parms_rsp()
5023 adapter->speed = SPEED_40000; in handle_query_phys_parms_rsp()
5026 adapter->speed = SPEED_50000; in handle_query_phys_parms_rsp()
5029 adapter->speed = SPEED_100000; in handle_query_phys_parms_rsp()
5032 adapter->speed = SPEED_200000; in handle_query_phys_parms_rsp()
5037 adapter->speed = SPEED_UNKNOWN; in handle_query_phys_parms_rsp()
5040 adapter->duplex = DUPLEX_FULL; in handle_query_phys_parms_rsp()
5042 adapter->duplex = DUPLEX_HALF; in handle_query_phys_parms_rsp()
5044 adapter->duplex = DUPLEX_UNKNOWN; in handle_query_phys_parms_rsp()
5050 struct ibmvnic_adapter *adapter) in ibmvnic_handle_crq() argument
5053 struct net_device *netdev = adapter->netdev; in ibmvnic_handle_crq()
5054 struct device *dev = &adapter->vdev->dev; in ibmvnic_handle_crq()
5066 adapter->from_passive_init = true; in ibmvnic_handle_crq()
5070 adapter->login_pending = false; in ibmvnic_handle_crq()
5072 if (!completion_done(&adapter->init_done)) { in ibmvnic_handle_crq()
5073 complete(&adapter->init_done); in ibmvnic_handle_crq()
5074 adapter->init_done_rc = -EIO; in ibmvnic_handle_crq()
5077 if (adapter->state == VNIC_DOWN) in ibmvnic_handle_crq()
5078 rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT); in ibmvnic_handle_crq()
5080 rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); in ibmvnic_handle_crq()
5084 * reset either because the adapter was still in ibmvnic_handle_crq()
5089 * is already scheduled or the adapter is in ibmvnic_handle_crq()
5095 adapter->failover_pending = false; in ibmvnic_handle_crq()
5100 adapter->crq.active = true; in ibmvnic_handle_crq()
5101 send_version_xchg(adapter); in ibmvnic_handle_crq()
5109 adapter->crq.active = false; in ibmvnic_handle_crq()
5113 if (!completion_done(&adapter->fw_done)) { in ibmvnic_handle_crq()
5114 adapter->fw_done_rc = -EIO; in ibmvnic_handle_crq()
5115 complete(&adapter->fw_done); in ibmvnic_handle_crq()
5117 if (!completion_done(&adapter->stats_done)) in ibmvnic_handle_crq()
5118 complete(&adapter->stats_done); in ibmvnic_handle_crq()
5119 if (test_bit(0, &adapter->resetting)) in ibmvnic_handle_crq()
5120 adapter->force_reset_recovery = true; in ibmvnic_handle_crq()
5122 dev_info(dev, "Migrated, re-enabling adapter\n"); in ibmvnic_handle_crq()
5123 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY); in ibmvnic_handle_crq()
5126 adapter->failover_pending = true; in ibmvnic_handle_crq()
5128 /* The adapter lost the connection */ in ibmvnic_handle_crq()
5129 dev_err(dev, "Virtual Adapter failed (rc=%d)\n", in ibmvnic_handle_crq()
5131 ibmvnic_reset(adapter, VNIC_RESET_FATAL); in ibmvnic_handle_crq()
5153 send_query_cap(adapter); in ibmvnic_handle_crq()
5156 handle_query_cap_rsp(crq, adapter); in ibmvnic_handle_crq()
5159 handle_query_map_rsp(crq, adapter); in ibmvnic_handle_crq()
5162 adapter->fw_done_rc = crq->request_map_rsp.rc.code; in ibmvnic_handle_crq()
5163 complete(&adapter->fw_done); in ibmvnic_handle_crq()
5166 handle_request_unmap_rsp(crq, adapter); in ibmvnic_handle_crq()
5169 handle_request_cap_rsp(crq, adapter); in ibmvnic_handle_crq()
5173 handle_login_rsp(crq, adapter); in ibmvnic_handle_crq()
5180 adapter->logical_link_state = in ibmvnic_handle_crq()
5182 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code; in ibmvnic_handle_crq()
5183 complete(&adapter->init_done); in ibmvnic_handle_crq()
5187 adapter->phys_link_state = in ibmvnic_handle_crq()
5189 adapter->logical_link_state = in ibmvnic_handle_crq()
5191 if (adapter->phys_link_state && adapter->logical_link_state) in ibmvnic_handle_crq()
5198 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter); in ibmvnic_handle_crq()
5202 handle_error_indication(crq, adapter); in ibmvnic_handle_crq()
5206 complete(&adapter->stats_done); in ibmvnic_handle_crq()
5210 handle_query_ip_offload_rsp(adapter); in ibmvnic_handle_crq()
5217 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok, in ibmvnic_handle_crq()
5218 sizeof(adapter->ip_offload_ctrl), in ibmvnic_handle_crq()
5220 complete(&adapter->init_done); in ibmvnic_handle_crq()
5224 complete(&adapter->fw_done); in ibmvnic_handle_crq()
5227 handle_vpd_size_rsp(crq, adapter); in ibmvnic_handle_crq()
5230 handle_vpd_rsp(crq, adapter); in ibmvnic_handle_crq()
5233 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter); in ibmvnic_handle_crq()
5234 complete(&adapter->fw_done); in ibmvnic_handle_crq()
5244 struct ibmvnic_adapter *adapter = instance; in ibmvnic_interrupt() local
5246 tasklet_schedule(&adapter->tasklet); in ibmvnic_interrupt()
5252 struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet); in ibmvnic_tasklet() local
5253 struct ibmvnic_crq_queue *queue = &adapter->crq; in ibmvnic_tasklet()
5261 while ((crq = ibmvnic_next_crq(adapter)) != NULL) { in ibmvnic_tasklet()
5268 ibmvnic_handle_crq(crq, adapter); in ibmvnic_tasklet()
5275 if (!adapter->wait_capability) in ibmvnic_tasklet()
5281 if (atomic_read(&adapter->running_cap_crqs) != 0) in ibmvnic_tasklet()
5282 adapter->wait_capability = true; in ibmvnic_tasklet()
5286 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter) in ibmvnic_reenable_crq_queue() argument
5288 struct vio_dev *vdev = adapter->vdev; in ibmvnic_reenable_crq_queue()
5296 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc); in ibmvnic_reenable_crq_queue()
5301 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter) in ibmvnic_reset_crq() argument
5303 struct ibmvnic_crq_queue *crq = &adapter->crq; in ibmvnic_reset_crq()
5304 struct device *dev = &adapter->vdev->dev; in ibmvnic_reset_crq()
5305 struct vio_dev *vdev = adapter->vdev; in ibmvnic_reset_crq()
5326 /* Adapter is good, but other end is not ready */ in ibmvnic_reset_crq()
5327 dev_warn(dev, "Partner adapter not ready\n"); in ibmvnic_reset_crq()
5334 static void release_crq_queue(struct ibmvnic_adapter *adapter) in release_crq_queue() argument
5336 struct ibmvnic_crq_queue *crq = &adapter->crq; in release_crq_queue()
5337 struct vio_dev *vdev = adapter->vdev; in release_crq_queue()
5343 netdev_dbg(adapter->netdev, "Releasing CRQ\n"); in release_crq_queue()
5344 free_irq(vdev->irq, adapter); in release_crq_queue()
5345 tasklet_kill(&adapter->tasklet); in release_crq_queue()
5357 static int init_crq_queue(struct ibmvnic_adapter *adapter) in init_crq_queue() argument
5359 struct ibmvnic_crq_queue *crq = &adapter->crq; in init_crq_queue()
5360 struct device *dev = &adapter->vdev->dev; in init_crq_queue()
5361 struct vio_dev *vdev = adapter->vdev; in init_crq_queue()
5384 rc = ibmvnic_reset_crq(adapter); in init_crq_queue()
5388 dev_warn(dev, "Partner adapter not ready\n"); in init_crq_queue()
5390 dev_warn(dev, "Error %d opening adapter\n", rc); in init_crq_queue()
5396 tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet); in init_crq_queue()
5398 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq); in init_crq_queue()
5400 adapter->vdev->unit_address); in init_crq_queue()
5401 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter); in init_crq_queue()
5420 tasklet_kill(&adapter->tasklet); in init_crq_queue()
5432 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset) in ibmvnic_reset_init() argument
5434 struct device *dev = &adapter->vdev->dev; in ibmvnic_reset_init()
5436 u64 old_num_rx_queues = adapter->req_rx_queues; in ibmvnic_reset_init()
5437 u64 old_num_tx_queues = adapter->req_tx_queues; in ibmvnic_reset_init()
5440 adapter->from_passive_init = false; in ibmvnic_reset_init()
5443 reinit_completion(&adapter->init_done); in ibmvnic_reset_init()
5445 adapter->init_done_rc = 0; in ibmvnic_reset_init()
5446 rc = ibmvnic_send_crq_init(adapter); in ibmvnic_reset_init()
5452 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { in ibmvnic_reset_init()
5457 if (adapter->init_done_rc) { in ibmvnic_reset_init()
5458 release_crq_queue(adapter); in ibmvnic_reset_init()
5459 return adapter->init_done_rc; in ibmvnic_reset_init()
5462 if (adapter->from_passive_init) { in ibmvnic_reset_init()
5463 adapter->state = VNIC_OPEN; in ibmvnic_reset_init()
5464 adapter->from_passive_init = false; in ibmvnic_reset_init()
5469 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset && in ibmvnic_reset_init()
5470 adapter->reset_reason != VNIC_RESET_MOBILITY) { in ibmvnic_reset_init()
5471 if (adapter->req_rx_queues != old_num_rx_queues || in ibmvnic_reset_init()
5472 adapter->req_tx_queues != old_num_tx_queues) { in ibmvnic_reset_init()
5473 release_sub_crqs(adapter, 0); in ibmvnic_reset_init()
5474 rc = init_sub_crqs(adapter); in ibmvnic_reset_init()
5476 rc = reset_sub_crq_queues(adapter); in ibmvnic_reset_init()
5479 rc = init_sub_crqs(adapter); in ibmvnic_reset_init()
5484 release_crq_queue(adapter); in ibmvnic_reset_init()
5488 rc = init_sub_crq_irqs(adapter); in ibmvnic_reset_init()
5491 release_crq_queue(adapter); in ibmvnic_reset_init()
5501 struct ibmvnic_adapter *adapter; in ibmvnic_probe() local
5524 adapter = netdev_priv(netdev); in ibmvnic_probe()
5525 adapter->state = VNIC_PROBING; in ibmvnic_probe()
5527 adapter->vdev = dev; in ibmvnic_probe()
5528 adapter->netdev = netdev; in ibmvnic_probe()
5529 adapter->login_pending = false; in ibmvnic_probe()
5531 ether_addr_copy(adapter->mac_addr, mac_addr_p); in ibmvnic_probe()
5532 ether_addr_copy(netdev->dev_addr, adapter->mac_addr); in ibmvnic_probe()
5538 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); in ibmvnic_probe()
5539 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset, in ibmvnic_probe()
5541 INIT_LIST_HEAD(&adapter->rwi_list); in ibmvnic_probe()
5542 spin_lock_init(&adapter->rwi_lock); in ibmvnic_probe()
5543 spin_lock_init(&adapter->state_lock); in ibmvnic_probe()
5544 mutex_init(&adapter->fw_lock); in ibmvnic_probe()
5545 init_completion(&adapter->init_done); in ibmvnic_probe()
5546 init_completion(&adapter->fw_done); in ibmvnic_probe()
5547 init_completion(&adapter->reset_done); in ibmvnic_probe()
5548 init_completion(&adapter->stats_done); in ibmvnic_probe()
5549 clear_bit(0, &adapter->resetting); in ibmvnic_probe()
5553 rc = init_crq_queue(adapter); in ibmvnic_probe()
5560 rc = ibmvnic_reset_init(adapter, false); in ibmvnic_probe()
5571 rc = init_stats_buffers(adapter); in ibmvnic_probe()
5575 rc = init_stats_token(adapter); in ibmvnic_probe()
5592 adapter->state = VNIC_PROBED; in ibmvnic_probe()
5593 netdev->mtu = adapter->req_mtu - ETH_HLEN; in ibmvnic_probe()
5594 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; in ibmvnic_probe()
5595 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; in ibmvnic_probe()
5597 adapter->state = VNIC_DOWN; in ibmvnic_probe()
5600 adapter->wait_for_reset = false; in ibmvnic_probe()
5601 adapter->last_reset_time = jiffies; in ibmvnic_probe()
5608 release_stats_token(adapter); in ibmvnic_probe()
5611 release_stats_buffers(adapter); in ibmvnic_probe()
5614 release_sub_crqs(adapter, 1); in ibmvnic_probe()
5615 release_crq_queue(adapter); in ibmvnic_probe()
5616 mutex_destroy(&adapter->fw_lock); in ibmvnic_probe()
5625 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_remove() local
5628 spin_lock_irqsave(&adapter->state_lock, flags); in ibmvnic_remove()
5637 spin_lock(&adapter->rwi_lock); in ibmvnic_remove()
5638 adapter->state = VNIC_REMOVING; in ibmvnic_remove()
5639 spin_unlock(&adapter->rwi_lock); in ibmvnic_remove()
5641 spin_unlock_irqrestore(&adapter->state_lock, flags); in ibmvnic_remove()
5643 flush_work(&adapter->ibmvnic_reset); in ibmvnic_remove()
5644 flush_delayed_work(&adapter->ibmvnic_delayed_reset); in ibmvnic_remove()
5649 release_resources(adapter); in ibmvnic_remove()
5650 release_sub_crqs(adapter, 1); in ibmvnic_remove()
5651 release_crq_queue(adapter); in ibmvnic_remove()
5653 release_stats_token(adapter); in ibmvnic_remove()
5654 release_stats_buffers(adapter); in ibmvnic_remove()
5656 adapter->state = VNIC_REMOVED; in ibmvnic_remove()
5659 mutex_destroy(&adapter->fw_lock); in ibmvnic_remove()
5669 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in failover_store() local
5677 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address, in failover_store()
5688 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, in failover_store()
5697 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); in failover_store()
5706 struct ibmvnic_adapter *adapter; in ibmvnic_get_desired_dma() local
5717 adapter = netdev_priv(netdev); in ibmvnic_get_desired_dma()
5722 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++) in ibmvnic_get_desired_dma()
5725 for (i = 0; i < adapter->num_active_rx_pools; i++) in ibmvnic_get_desired_dma()
5726 ret += adapter->rx_pool[i].size * in ibmvnic_get_desired_dma()
5727 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl); in ibmvnic_get_desired_dma()
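ibmvnic_get_desired_dma() rounds every rx buffer up to the IOMMU page size before summing, since the IOMMU maps whole pages regardless of buffer size. The round-up itself is the usual power-of-two trick; a tiny standalone check, with a 4 KiB page size assumed for illustration:

/* Round a size up to the next multiple of a power-of-two page size. */
#include <stdio.h>

#define PAGE_SZ 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SZ - 1) & ~(PAGE_SZ - 1))

int main(void)
{
	unsigned long sizes[] = { 1, 4096, 4097, 9000 };

	for (int i = 0; i < 4; i++)
		printf("%5lu -> %lu\n", sizes[i], PAGE_ALIGN(sizes[i]));
	return 0;
}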
5735 struct ibmvnic_adapter *adapter = netdev_priv(netdev); in ibmvnic_resume() local
5737 if (adapter->state != VNIC_OPEN) in ibmvnic_resume()
5740 tasklet_schedule(&adapter->tasklet); in ibmvnic_resume()