Lines Matching +full:rx +full:- +full:eq

1 // SPDX-License-Identifier: GPL-2.0-or-later
11 * Jan-Bernd Themann <themann@de.ibm.com>
46 static int msg_level = -1;
66 "[2^x - 1], x = [7..14]. Default = "
69 "[2^x - 1], x = [7..14]. Default = "
72 "[2^x - 1], x = [7..14]. Default = "
75 "[2^x - 1], x = [7..14]. Default = "
99 .compatible = "IBM,lhea-ethernet",
137 if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags)) in ehea_schedule_port_reset()
138 schedule_work(&port->reset_task); in ehea_schedule_port_reset()
158 struct ehea_port *port = adapter->port[k]; in ehea_update_firmware_handles()
160 if (!port || (port->state != EHEA_PORT_UP)) in ehea_update_firmware_handles()
164 num_portres += port->num_def_qps; in ehea_update_firmware_handles()
184 struct ehea_port *port = adapter->port[k]; in ehea_update_firmware_handles()
186 if (!port || (port->state != EHEA_PORT_UP) || in ehea_update_firmware_handles()
190 for (l = 0; l < port->num_def_qps; l++) { in ehea_update_firmware_handles()
191 struct ehea_port_res *pr = &port->port_res[l]; in ehea_update_firmware_handles()
193 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
194 arr[i++].fwh = pr->qp->fw_handle; in ehea_update_firmware_handles()
195 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
196 arr[i++].fwh = pr->send_cq->fw_handle; in ehea_update_firmware_handles()
197 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
198 arr[i++].fwh = pr->recv_cq->fw_handle; in ehea_update_firmware_handles()
199 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
200 arr[i++].fwh = pr->eq->fw_handle; in ehea_update_firmware_handles()
201 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
202 arr[i++].fwh = pr->send_mr.handle; in ehea_update_firmware_handles()
203 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
204 arr[i++].fwh = pr->recv_mr.handle; in ehea_update_firmware_handles()
206 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
207 arr[i++].fwh = port->qp_eq->fw_handle; in ehea_update_firmware_handles()
208 num_ports--; in ehea_update_firmware_handles()
211 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
212 arr[i++].fwh = adapter->neq->fw_handle; in ehea_update_firmware_handles()
214 if (adapter->mr.handle) { in ehea_update_firmware_handles()
215 arr[i].adh = adapter->handle; in ehea_update_firmware_handles()
216 arr[i++].fwh = adapter->mr.handle; in ehea_update_firmware_handles()
218 num_adapters--; in ehea_update_firmware_handles()
244 struct ehea_port *port = adapter->port[k]; in ehea_update_bcmc_registrations()
246 if (!port || (port->state != EHEA_PORT_UP)) in ehea_update_bcmc_registrations()
251 list_for_each_entry(mc_entry, &port->mc_list->list, list) in ehea_update_bcmc_registrations()
264 struct ehea_port *port = adapter->port[k]; in ehea_update_bcmc_registrations()
266 if (!port || (port->state != EHEA_PORT_UP)) in ehea_update_bcmc_registrations()
272 arr[i].adh = adapter->handle; in ehea_update_bcmc_registrations()
273 arr[i].port_id = port->logical_port_id; in ehea_update_bcmc_registrations()
276 arr[i++].macaddr = port->mac_addr; in ehea_update_bcmc_registrations()
278 arr[i].adh = adapter->handle; in ehea_update_bcmc_registrations()
279 arr[i].port_id = port->logical_port_id; in ehea_update_bcmc_registrations()
282 arr[i++].macaddr = port->mac_addr; in ehea_update_bcmc_registrations()
283 num_registrations -= 2; in ehea_update_bcmc_registrations()
286 &port->mc_list->list, list) { in ehea_update_bcmc_registrations()
290 arr[i].adh = adapter->handle; in ehea_update_bcmc_registrations()
291 arr[i].port_id = port->logical_port_id; in ehea_update_bcmc_registrations()
294 if (mc_entry->macaddr == 0) in ehea_update_bcmc_registrations()
296 arr[i++].macaddr = mc_entry->macaddr; in ehea_update_bcmc_registrations()
298 arr[i].adh = adapter->handle; in ehea_update_bcmc_registrations()
299 arr[i].port_id = port->logical_port_id; in ehea_update_bcmc_registrations()
302 if (mc_entry->macaddr == 0) in ehea_update_bcmc_registrations()
304 arr[i++].macaddr = mc_entry->macaddr; in ehea_update_bcmc_registrations()
305 num_registrations -= 2; in ehea_update_bcmc_registrations()
325 for (i = 0; i < port->num_def_qps; i++) { in ehea_get_stats64()
326 rx_packets += port->port_res[i].rx_packets; in ehea_get_stats64()
327 rx_bytes += port->port_res[i].rx_bytes; in ehea_get_stats64()
330 for (i = 0; i < port->num_def_qps; i++) { in ehea_get_stats64()
331 tx_packets += port->port_res[i].tx_packets; in ehea_get_stats64()
332 tx_bytes += port->port_res[i].tx_bytes; in ehea_get_stats64()
335 stats->tx_packets = tx_packets; in ehea_get_stats64()
336 stats->rx_bytes = rx_bytes; in ehea_get_stats64()
337 stats->tx_bytes = tx_bytes; in ehea_get_stats64()
338 stats->rx_packets = rx_packets; in ehea_get_stats64()
340 stats->multicast = port->stats.multicast; in ehea_get_stats64()
341 stats->rx_errors = port->stats.rx_errors; in ehea_get_stats64()
348 struct net_device *dev = port->netdev; in ehea_update_stats()
349 struct rtnl_link_stats64 *stats = &port->stats; in ehea_update_stats()
359 hret = ehea_h_query_ehea_port(port->adapter->handle, in ehea_update_stats()
360 port->logical_port_id, in ehea_update_stats()
370 stats->multicast = cb2->rxmcp; in ehea_update_stats()
371 stats->rx_errors = cb2->rxuerr; in ehea_update_stats()
376 schedule_delayed_work(&port->stats_work, in ehea_update_stats()
382 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_refill_rq1()
383 struct net_device *dev = pr->port->netdev; in ehea_refill_rq1()
384 int max_index_mask = pr->rq1_skba.len - 1; in ehea_refill_rq1()
385 int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes; in ehea_refill_rq1()
389 pr->rq1_skba.os_skbs = 0; in ehea_refill_rq1()
393 pr->rq1_skba.index = index; in ehea_refill_rq1()
394 pr->rq1_skba.os_skbs = fill_wqes; in ehea_refill_rq1()
403 pr->rq1_skba.os_skbs = fill_wqes - i; in ehea_refill_rq1()
407 index--; in ehea_refill_rq1()
416 ehea_update_rq1a(pr->qp, adder); in ehea_refill_rq1()
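ehea_refill_rq1 advances through the RQ1 skb ring with a bit mask instead of a modulo: max_index_mask = pr->rq1_skba.len - 1 wraps correctly only because the ring length is a power of two (see the queue sizing above). The idiom in isolation, as a sketch with illustrative names:

    /* Illustrative: advance a ring index when the ring length is a power of two. */
    static inline unsigned int ring_next(unsigned int index, unsigned int len)
    {
            return (index + 1) & (len - 1);         /* assumes len == 2^n */
    }

The same masking recurs for the send ring in ehea_start_xmit (line 2058).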
421 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_init_fill_rq1()
422 struct net_device *dev = pr->port->netdev; in ehea_init_fill_rq1()
425 if (nr_rq1a > pr->rq1_skba.len) { in ehea_init_fill_rq1()
436 ehea_update_rq1a(pr->qp, i - 1); in ehea_init_fill_rq1()
443 struct net_device *dev = pr->port->netdev; in ehea_refill_rq_def()
444 struct ehea_qp *qp = pr->qp; in ehea_refill_rq_def()
445 struct sk_buff **skb_arr = q_skba->arr; in ehea_refill_rq_def()
451 fill_wqes = q_skba->os_skbs + num_wqes; in ehea_refill_rq_def()
452 q_skba->os_skbs = 0; in ehea_refill_rq_def()
455 q_skba->os_skbs = fill_wqes; in ehea_refill_rq_def()
459 index = q_skba->index; in ehea_refill_rq_def()
460 max_index_mask = q_skba->len - 1; in ehea_refill_rq_def()
467 q_skba->os_skbs = fill_wqes - i; in ehea_refill_rq_def()
468 if (q_skba->os_skbs == q_skba->len - 2) { in ehea_refill_rq_def()
469 netdev_info(pr->port->netdev, in ehea_refill_rq_def()
470 "rq%i ran dry - no mem for skb\n", in ehea_refill_rq_def()
472 ret = -ENOMEM; in ehea_refill_rq_def()
478 tmp_addr = ehea_map_vaddr(skb->data); in ehea_refill_rq_def()
479 if (tmp_addr == -1) { in ehea_refill_rq_def()
481 q_skba->os_skbs = fill_wqes - i; in ehea_refill_rq_def()
487 rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type) in ehea_refill_rq_def()
489 rwqe->sg_list[0].l_key = pr->recv_mr.lkey; in ehea_refill_rq_def()
490 rwqe->sg_list[0].vaddr = tmp_addr; in ehea_refill_rq_def()
491 rwqe->sg_list[0].len = packet_size; in ehea_refill_rq_def()
492 rwqe->data_segments = 1; in ehea_refill_rq_def()
499 q_skba->index = index; in ehea_refill_rq_def()
506 ehea_update_rq2a(pr->qp, adder); in ehea_refill_rq_def()
508 ehea_update_rq3a(pr->qp, adder); in ehea_refill_rq_def()
516 return ehea_refill_rq_def(pr, &pr->rq2_skba, 2, in ehea_refill_rq2()
524 return ehea_refill_rq_def(pr, &pr->rq3_skba, 3, in ehea_refill_rq3()
531 *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5; in ehea_check_cqe()
532 if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0) in ehea_check_cqe()
534 if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) && in ehea_check_cqe()
535 (cqe->header_length == 0)) in ehea_check_cqe()
537 return -EINVAL; in ehea_check_cqe()
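ehea_check_cqe decodes the completion by masking and shifting: the receive-queue number sits in the CQE type field under EHEA_CQE_TYPE_RQ, shifted down by 5, and a TCP-checksum error with a zero header length is deliberately tolerated rather than returned as -EINVAL. The general bit-field pattern as a standalone sketch (names illustrative):

    #include <stdint.h>

    /* Illustrative: extract a bit-field by masking first, then shifting to bit 0. */
    static inline unsigned int field_get(uint32_t word, uint32_t mask,
                                         unsigned int shift)
    {
            return (word & mask) >> shift;
    }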
544 int length = cqe->num_bytes_transfered - 4; /* remove CRC */ in ehea_fill_skb()
547 skb->protocol = eth_type_trans(skb, dev); in ehea_fill_skb()
551 if (cqe->status & EHEA_CQE_BLIND_CKSUM) { in ehea_fill_skb()
552 skb->ip_summed = CHECKSUM_COMPLETE; in ehea_fill_skb()
553 skb->csum = csum_unfold(~cqe->inet_checksum_value); in ehea_fill_skb()
555 skb->ip_summed = CHECKSUM_UNNECESSARY; in ehea_fill_skb()
557 skb_record_rx_queue(skb, pr - &pr->port->port_res[0]); in ehea_fill_skb()
564 int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); in get_skb_by_index()
570 x &= (arr_len - 1); in get_skb_by_index()
577 pref = (skb_array[x]->data); in get_skb_by_index()
597 x &= (arr_len - 1); in get_skb_by_index_ll()
604 pref = (skb_array[x]->data); in get_skb_by_index_ll()
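Both get_skb_by_index variants look one slot ahead: the index recovered from the wr_id is bumped, masked back into the ring, and the next skb's data is prefetched so its cache lines are warm when that completion is handled. A sketch of the idiom under those assumptions (function name illustrative):

    #include <linux/prefetch.h>
    #include <linux/skbuff.h>

    /* Illustrative: prefetch the skb data one slot ahead in a power-of-two ring. */
    static inline void prefetch_next_skb(struct sk_buff **skb_array,
                                         int arr_len, int skb_index)
    {
            int x = (skb_index + 1) & (arr_len - 1);

            if (skb_array[x])
                    prefetch(skb_array[x]->data);
    }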
620 if (cqe->status & EHEA_CQE_STAT_ERR_TCP) in ehea_treat_poll_error()
621 pr->p_stats.err_tcp_cksum++; in ehea_treat_poll_error()
622 if (cqe->status & EHEA_CQE_STAT_ERR_IP) in ehea_treat_poll_error()
623 pr->p_stats.err_ip_cksum++; in ehea_treat_poll_error()
624 if (cqe->status & EHEA_CQE_STAT_ERR_CRC) in ehea_treat_poll_error()
625 pr->p_stats.err_frame_crc++; in ehea_treat_poll_error()
629 skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe); in ehea_treat_poll_error()
633 skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe); in ehea_treat_poll_error()
637 if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) { in ehea_treat_poll_error()
638 if (netif_msg_rx_err(pr->port)) { in ehea_treat_poll_error()
640 pr->qp->init_attr.qp_nr); in ehea_treat_poll_error()
643 ehea_schedule_port_reset(pr->port); in ehea_treat_poll_error()
654 struct ehea_port *port = pr->port; in ehea_proc_rwqes()
655 struct ehea_qp *qp = pr->qp; in ehea_proc_rwqes()
658 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; in ehea_proc_rwqes()
659 struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr; in ehea_proc_rwqes()
660 struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr; in ehea_proc_rwqes()
661 int skb_arr_rq1_len = pr->rq1_skba.len; in ehea_proc_rwqes()
662 int skb_arr_rq2_len = pr->rq2_skba.len; in ehea_proc_rwqes()
663 int skb_arr_rq3_len = pr->rq3_skba.len; in ehea_proc_rwqes()
697 cqe->num_bytes_transfered - 4); in ehea_proc_rwqes()
723 processed_bytes += skb->len; in ehea_proc_rwqes()
725 if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) in ehea_proc_rwqes()
727 cqe->vlan_tag); in ehea_proc_rwqes()
729 napi_gro_receive(&pr->napi, skb); in ehea_proc_rwqes()
731 pr->p_stats.poll_receive_errors++; in ehea_proc_rwqes()
741 pr->rx_packets += processed; in ehea_proc_rwqes()
742 pr->rx_bytes += processed_bytes; in ehea_proc_rwqes()
757 for (i = 0; i < port->num_def_qps; i++) { in reset_sq_restart_flag()
758 struct ehea_port_res *pr = &port->port_res[i]; in reset_sq_restart_flag()
759 pr->sq_restart_flag = 0; in reset_sq_restart_flag()
761 wake_up(&port->restart_wq); in reset_sq_restart_flag()
770 for (i = 0; i < port->num_def_qps; i++) { in check_sqs()
771 struct ehea_port_res *pr = &port->port_res[i]; in check_sqs()
773 swqe = ehea_get_swqe(pr->qp, &swqe_index); in check_sqs()
775 atomic_dec(&pr->swqe_avail); in check_sqs()
777 swqe->tx_control |= EHEA_SWQE_PURGE; in check_sqs()
778 swqe->wr_id = SWQE_RESTART_CHECK; in check_sqs()
779 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; in check_sqs()
780 swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT; in check_sqs()
781 swqe->immediate_data_length = 80; in check_sqs()
783 ehea_post_swqe(pr->qp, swqe); in check_sqs()
785 ret = wait_event_timeout(port->restart_wq, in check_sqs()
786 pr->sq_restart_flag == 0, in check_sqs()
791 ehea_schedule_port_reset(pr->port); in check_sqs()
801 struct ehea_cq *send_cq = pr->send_cq; in ehea_proc_cqes()
807 struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev, in ehea_proc_cqes()
808 pr - &pr->port->port_res[0]); in ehea_proc_cqes()
817 if (cqe->wr_id == SWQE_RESTART_CHECK) { in ehea_proc_cqes()
818 pr->sq_restart_flag = 1; in ehea_proc_cqes()
823 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { in ehea_proc_cqes()
825 cqe->status); in ehea_proc_cqes()
827 if (netif_msg_tx_err(pr->port)) in ehea_proc_cqes()
830 if (cqe->status & EHEA_CQE_STAT_RESET_MASK) { in ehea_proc_cqes()
832 ehea_schedule_port_reset(pr->port); in ehea_proc_cqes()
837 if (netif_msg_tx_done(pr->port)) in ehea_proc_cqes()
840 if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id) in ehea_proc_cqes()
843 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); in ehea_proc_cqes()
844 skb = pr->sq_skba.arr[index]; in ehea_proc_cqes()
846 pr->sq_skba.arr[index] = NULL; in ehea_proc_cqes()
849 swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); in ehea_proc_cqes()
850 quota--; in ehea_proc_cqes()
856 atomic_add(swqe_av, &pr->swqe_avail); in ehea_proc_cqes()
859 (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) { in ehea_proc_cqes()
862 (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th)) in ehea_proc_cqes()
867 wake_up(&pr->port->swqe_avail_wq); in ehea_proc_cqes()
878 struct net_device *dev = pr->port->netdev; in ehea_poll()
882 int rx = 0; in ehea_poll() local
885 rx += ehea_proc_rwqes(dev, pr, budget - rx); in ehea_poll()
887 while (rx != budget) { in ehea_poll()
889 ehea_reset_cq_ep(pr->recv_cq); in ehea_poll()
890 ehea_reset_cq_ep(pr->send_cq); in ehea_poll()
891 ehea_reset_cq_n1(pr->recv_cq); in ehea_poll()
892 ehea_reset_cq_n1(pr->send_cq); in ehea_poll()
894 cqe = ehea_poll_rq1(pr->qp, &wqe_index); in ehea_poll()
895 cqe_skb = ehea_poll_cq(pr->send_cq); in ehea_poll()
898 return rx; in ehea_poll()
901 return rx; in ehea_poll()
904 rx += ehea_proc_rwqes(dev, pr, budget - rx); in ehea_poll()
907 return rx; in ehea_poll()
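ehea_poll follows the canonical NAPI contract: consume receive work up to budget and, only when the budget was not exhausted, complete NAPI, re-arm the CQ event notifications (the ehea_reset_cq_* calls), then poll once more so an event that fired during the re-arm window is not lost. A generic sketch of that shape, with device specifics behind illustrative stand-ins (process_rx, rearm_notifications, rx_work_pending):

    /* Illustrative NAPI poll loop with the re-arm race check. */
    static int my_poll(struct napi_struct *napi, int budget)
    {
            int done = process_rx(napi, budget);

            while (done < budget) {
                    napi_complete(napi);
                    rearm_notifications(napi);        /* re-enable events */
                    if (!rx_work_pending(napi))       /* nothing raced in */
                            return done;
                    if (!napi_reschedule(napi))       /* another context owns it */
                            return done;
                    done += process_rx(napi, budget - done);
            }
            return done;
    }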
914 napi_schedule(&pr->napi); in ehea_recv_irq_handler()
928 eqe = ehea_poll_eq(port->qp_eq); in ehea_qp_aff_irq_handler()
931 qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry); in ehea_qp_aff_irq_handler()
933 eqe->entry, qp_token); in ehea_qp_aff_irq_handler()
935 qp = port->port_res[qp_token].qp; in ehea_qp_aff_irq_handler()
937 resource_type = ehea_error_data(port->adapter, qp->fw_handle, in ehea_qp_aff_irq_handler()
945 reset_port = 1; /* Reset in case of CQ or EQ error */ in ehea_qp_aff_irq_handler()
947 eqe = ehea_poll_eq(port->qp_eq); in ehea_qp_aff_irq_handler()
964 if (adapter->port[i]) in ehea_get_port()
965 if (adapter->port[i]->logical_port_id == logical_port) in ehea_get_port()
966 return adapter->port[i]; in ehea_get_port()
980 ret = -ENOMEM; in ehea_sense_port_attr()
984 hret = ehea_h_query_ehea_port(port->adapter->handle, in ehea_sense_port_attr()
985 port->logical_port_id, H_PORT_CB0, in ehea_sense_port_attr()
989 ret = -EIO; in ehea_sense_port_attr()
994 port->mac_addr = cb0->port_mac_addr << 16; in ehea_sense_port_attr()
996 if (!is_valid_ether_addr((u8 *)&port->mac_addr)) { in ehea_sense_port_attr()
997 ret = -EADDRNOTAVAIL; in ehea_sense_port_attr()
1002 switch (cb0->port_speed) { in ehea_sense_port_attr()
1004 port->port_speed = EHEA_SPEED_10M; in ehea_sense_port_attr()
1005 port->full_duplex = 0; in ehea_sense_port_attr()
1008 port->port_speed = EHEA_SPEED_10M; in ehea_sense_port_attr()
1009 port->full_duplex = 1; in ehea_sense_port_attr()
1012 port->port_speed = EHEA_SPEED_100M; in ehea_sense_port_attr()
1013 port->full_duplex = 0; in ehea_sense_port_attr()
1016 port->port_speed = EHEA_SPEED_100M; in ehea_sense_port_attr()
1017 port->full_duplex = 1; in ehea_sense_port_attr()
1020 port->port_speed = EHEA_SPEED_1G; in ehea_sense_port_attr()
1021 port->full_duplex = 1; in ehea_sense_port_attr()
1024 port->port_speed = EHEA_SPEED_10G; in ehea_sense_port_attr()
1025 port->full_duplex = 1; in ehea_sense_port_attr()
1028 port->port_speed = 0; in ehea_sense_port_attr()
1029 port->full_duplex = 0; in ehea_sense_port_attr()
1033 port->autoneg = 1; in ehea_sense_port_attr()
1034 port->num_mcs = cb0->num_default_qps; in ehea_sense_port_attr()
1038 port->num_def_qps = cb0->num_default_qps; in ehea_sense_port_attr()
1040 port->num_def_qps = 1; in ehea_sense_port_attr()
1042 if (!port->num_def_qps) { in ehea_sense_port_attr()
1043 ret = -EINVAL; in ehea_sense_port_attr()
1065 ret = -ENOMEM; in ehea_set_portspeed()
1069 cb4->port_speed = port_speed; in ehea_set_portspeed()
1071 netif_carrier_off(port->netdev); in ehea_set_portspeed()
1073 hret = ehea_h_modify_ehea_port(port->adapter->handle, in ehea_set_portspeed()
1074 port->logical_port_id, in ehea_set_portspeed()
1077 port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0; in ehea_set_portspeed()
1079 hret = ehea_h_query_ehea_port(port->adapter->handle, in ehea_set_portspeed()
1080 port->logical_port_id, in ehea_set_portspeed()
1084 switch (cb4->port_speed) { in ehea_set_portspeed()
1086 port->port_speed = EHEA_SPEED_10M; in ehea_set_portspeed()
1087 port->full_duplex = 0; in ehea_set_portspeed()
1090 port->port_speed = EHEA_SPEED_10M; in ehea_set_portspeed()
1091 port->full_duplex = 1; in ehea_set_portspeed()
1094 port->port_speed = EHEA_SPEED_100M; in ehea_set_portspeed()
1095 port->full_duplex = 0; in ehea_set_portspeed()
1098 port->port_speed = EHEA_SPEED_100M; in ehea_set_portspeed()
1099 port->full_duplex = 1; in ehea_set_portspeed()
1102 port->port_speed = EHEA_SPEED_1G; in ehea_set_portspeed()
1103 port->full_duplex = 1; in ehea_set_portspeed()
1106 port->port_speed = EHEA_SPEED_10G; in ehea_set_portspeed()
1107 port->full_duplex = 1; in ehea_set_portspeed()
1110 port->port_speed = 0; in ehea_set_portspeed()
1111 port->full_duplex = 0; in ehea_set_portspeed()
1116 ret = -EIO; in ehea_set_portspeed()
1121 ret = -EPERM; in ehea_set_portspeed()
1123 ret = -EIO; in ehea_set_portspeed()
1127 if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP)) in ehea_set_portspeed()
1128 netif_carrier_on(port->netdev); in ehea_set_portspeed()
1150 dev = port->netdev; in ehea_parse_eqe()
1165 port->port_speed, in ehea_parse_eqe()
1166 port->full_duplex == 1 ? in ehea_parse_eqe()
1181 port->phy_link = EHEA_PHY_LINK_UP; in ehea_parse_eqe()
1187 port->phy_link = EHEA_PHY_LINK_DOWN; in ehea_parse_eqe()
1222 eqe = ehea_poll_eq(adapter->neq); in ehea_neq_tasklet()
1226 pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry); in ehea_neq_tasklet()
1227 ehea_parse_eqe(adapter, eqe->entry); in ehea_neq_tasklet()
1228 eqe = ehea_poll_eq(adapter->neq); in ehea_neq_tasklet()
1236 ehea_h_reset_events(adapter->handle, in ehea_neq_tasklet()
1237 adapter->neq->fw_handle, event_mask); in ehea_neq_tasklet()
1243 tasklet_hi_schedule(&adapter->neq_tasklet); in ehea_interrupt_neq()
1251 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; in ehea_fill_port_res()
1253 ehea_init_fill_rq1(pr, pr->rq1_skba.len); in ehea_fill_port_res()
1255 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); in ehea_fill_port_res()
1257 ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1); in ehea_fill_port_res()
1269 snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff", in ehea_reg_interrupts()
1270 dev->name); in ehea_reg_interrupts()
1272 ret = ibmebus_request_irq(port->qp_eq->attr.ist1, in ehea_reg_interrupts()
1274 0, port->int_aff_name, port); in ehea_reg_interrupts()
1277 port->qp_eq->attr.ist1); in ehea_reg_interrupts()
1283 port->qp_eq->attr.ist1); in ehea_reg_interrupts()
1286 for (i = 0; i < port->num_def_qps; i++) { in ehea_reg_interrupts()
1287 pr = &port->port_res[i]; in ehea_reg_interrupts()
1288 snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1, in ehea_reg_interrupts()
1289 "%s-queue%d", dev->name, i); in ehea_reg_interrupts()
1290 ret = ibmebus_request_irq(pr->eq->attr.ist1, in ehea_reg_interrupts()
1292 0, pr->int_send_name, pr); in ehea_reg_interrupts()
1295 i, pr->eq->attr.ist1); in ehea_reg_interrupts()
1300 pr->eq->attr.ist1, i); in ehea_reg_interrupts()
1307 while (--i >= 0) { in ehea_reg_interrupts()
1308 u32 ist = port->port_res[i].eq->attr.ist1; in ehea_reg_interrupts()
1309 ibmebus_free_irq(ist, &port->port_res[i]); in ehea_reg_interrupts()
1313 ibmebus_free_irq(port->qp_eq->attr.ist1, port); in ehea_reg_interrupts()
1314 i = port->num_def_qps; in ehea_reg_interrupts()
1328 for (i = 0; i < port->num_def_qps; i++) { in ehea_free_interrupts()
1329 pr = &port->port_res[i]; in ehea_free_interrupts()
1330 ibmebus_free_irq(pr->eq->attr.ist1, pr); in ehea_free_interrupts()
1333 i, pr->eq->attr.ist1); in ehea_free_interrupts()
1337 ibmebus_free_irq(port->qp_eq->attr.ist1, port); in ehea_free_interrupts()
1340 port->qp_eq->attr.ist1); in ehea_free_interrupts()
1349 ret = -ENOMEM; in ehea_configure_port()
1354 cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1) in ehea_configure_port()
1362 for (i = 0; i < port->num_mcs; i++) in ehea_configure_port()
1364 cb0->default_qpn_arr[i] = in ehea_configure_port()
1365 port->port_res[i].qp->init_attr.qp_nr; in ehea_configure_port()
1367 cb0->default_qpn_arr[i] = in ehea_configure_port()
1368 port->port_res[0].qp->init_attr.qp_nr; in ehea_configure_port()
1376 hret = ehea_h_modify_ehea_port(port->adapter->handle, in ehea_configure_port()
1377 port->logical_port_id, in ehea_configure_port()
1379 ret = -EIO; in ehea_configure_port()
1394 struct ehea_adapter *adapter = pr->port->adapter; in ehea_gen_smrs()
1396 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr); in ehea_gen_smrs()
1400 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr); in ehea_gen_smrs()
1407 ehea_rem_mr(&pr->send_mr); in ehea_gen_smrs()
1410 return -EIO; in ehea_gen_smrs()
1415 if ((ehea_rem_mr(&pr->send_mr)) || in ehea_rem_smrs()
1416 (ehea_rem_mr(&pr->recv_mr))) in ehea_rem_smrs()
1417 return -EIO; in ehea_rem_smrs()
1426 q_skba->arr = vzalloc(arr_size); in ehea_init_q_skba()
1427 if (!q_skba->arr) in ehea_init_q_skba()
1428 return -ENOMEM; in ehea_init_q_skba()
1430 q_skba->len = max_q_entries; in ehea_init_q_skba()
1431 q_skba->index = 0; in ehea_init_q_skba()
1432 q_skba->os_skbs = 0; in ehea_init_q_skba()
1440 struct ehea_adapter *adapter = port->adapter; in ehea_init_port_res()
1443 int ret = -EIO; in ehea_init_port_res()
1446 tx_bytes = pr->tx_bytes; in ehea_init_port_res()
1447 tx_packets = pr->tx_packets; in ehea_init_port_res()
1448 rx_bytes = pr->rx_bytes; in ehea_init_port_res()
1449 rx_packets = pr->rx_packets; in ehea_init_port_res()
1453 pr->tx_bytes = tx_bytes; in ehea_init_port_res()
1454 pr->tx_packets = tx_packets; in ehea_init_port_res()
1455 pr->rx_bytes = rx_bytes; in ehea_init_port_res()
1456 pr->rx_packets = rx_packets; in ehea_init_port_res()
1458 pr->port = port; in ehea_init_port_res()
1460 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); in ehea_init_port_res()
1461 if (!pr->eq) { in ehea_init_port_res()
1462 pr_err("create_eq failed (eq)\n"); in ehea_init_port_res()
1466 pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq, in ehea_init_port_res()
1467 pr->eq->fw_handle, in ehea_init_port_res()
1468 port->logical_port_id); in ehea_init_port_res()
1469 if (!pr->recv_cq) { in ehea_init_port_res()
1474 pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq, in ehea_init_port_res()
1475 pr->eq->fw_handle, in ehea_init_port_res()
1476 port->logical_port_id); in ehea_init_port_res()
1477 if (!pr->send_cq) { in ehea_init_port_res()
1484 pr->send_cq->attr.act_nr_of_cqes, in ehea_init_port_res()
1485 pr->recv_cq->attr.act_nr_of_cqes); in ehea_init_port_res()
1489 ret = -ENOMEM; in ehea_init_port_res()
1494 init_attr->low_lat_rq1 = 1; in ehea_init_port_res()
1495 init_attr->signalingtype = 1; /* generate CQE if specified in WQE */ in ehea_init_port_res()
1496 init_attr->rq_count = 3; in ehea_init_port_res()
1497 init_attr->qp_token = queue_token; in ehea_init_port_res()
1498 init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq; in ehea_init_port_res()
1499 init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1; in ehea_init_port_res()
1500 init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2; in ehea_init_port_res()
1501 init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3; in ehea_init_port_res()
1502 init_attr->wqe_size_enc_sq = EHEA_SG_SQ; in ehea_init_port_res()
1503 init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1; in ehea_init_port_res()
1504 init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2; in ehea_init_port_res()
1505 init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3; in ehea_init_port_res()
1506 init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD; in ehea_init_port_res()
1507 init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD; in ehea_init_port_res()
1508 init_attr->port_nr = port->logical_port_id; in ehea_init_port_res()
1509 init_attr->send_cq_handle = pr->send_cq->fw_handle; in ehea_init_port_res()
1510 init_attr->recv_cq_handle = pr->recv_cq->fw_handle; in ehea_init_port_res()
1511 init_attr->aff_eq_handle = port->qp_eq->fw_handle; in ehea_init_port_res()
1513 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); in ehea_init_port_res()
1514 if (!pr->qp) { in ehea_init_port_res()
1516 ret = -EIO; in ehea_init_port_res()
1522 init_attr->qp_nr, in ehea_init_port_res()
1523 init_attr->act_nr_send_wqes, in ehea_init_port_res()
1524 init_attr->act_nr_rwqes_rq1, in ehea_init_port_res()
1525 init_attr->act_nr_rwqes_rq2, in ehea_init_port_res()
1526 init_attr->act_nr_rwqes_rq3); in ehea_init_port_res()
1528 pr->sq_skba_size = init_attr->act_nr_send_wqes + 1; in ehea_init_port_res()
1530 ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size); in ehea_init_port_res()
1531 ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1); in ehea_init_port_res()
1532 ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1); in ehea_init_port_res()
1533 ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1); in ehea_init_port_res()
1537 pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10; in ehea_init_port_res()
1539 ret = -EIO; in ehea_init_port_res()
1543 atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1); in ehea_init_port_res()
1547 netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64); in ehea_init_port_res()
1554 vfree(pr->sq_skba.arr); in ehea_init_port_res()
1555 vfree(pr->rq1_skba.arr); in ehea_init_port_res()
1556 vfree(pr->rq2_skba.arr); in ehea_init_port_res()
1557 vfree(pr->rq3_skba.arr); in ehea_init_port_res()
1558 ehea_destroy_qp(pr->qp); in ehea_init_port_res()
1559 ehea_destroy_cq(pr->send_cq); in ehea_init_port_res()
1560 ehea_destroy_cq(pr->recv_cq); in ehea_init_port_res()
1561 ehea_destroy_eq(pr->eq); in ehea_init_port_res()
1570 if (pr->qp) in ehea_clean_portres()
1571 netif_napi_del(&pr->napi); in ehea_clean_portres()
1573 ret = ehea_destroy_qp(pr->qp); in ehea_clean_portres()
1576 ehea_destroy_cq(pr->send_cq); in ehea_clean_portres()
1577 ehea_destroy_cq(pr->recv_cq); in ehea_clean_portres()
1578 ehea_destroy_eq(pr->eq); in ehea_clean_portres()
1580 for (i = 0; i < pr->rq1_skba.len; i++) in ehea_clean_portres()
1581 dev_kfree_skb(pr->rq1_skba.arr[i]); in ehea_clean_portres()
1583 for (i = 0; i < pr->rq2_skba.len; i++) in ehea_clean_portres()
1584 dev_kfree_skb(pr->rq2_skba.arr[i]); in ehea_clean_portres()
1586 for (i = 0; i < pr->rq3_skba.len; i++) in ehea_clean_portres()
1587 dev_kfree_skb(pr->rq3_skba.arr[i]); in ehea_clean_portres()
1589 for (i = 0; i < pr->sq_skba.len; i++) in ehea_clean_portres()
1590 dev_kfree_skb(pr->sq_skba.arr[i]); in ehea_clean_portres()
1592 vfree(pr->rq1_skba.arr); in ehea_clean_portres()
1593 vfree(pr->rq2_skba.arr); in ehea_clean_portres()
1594 vfree(pr->rq3_skba.arr); in ehea_clean_portres()
1595 vfree(pr->sq_skba.arr); in ehea_clean_portres()
1605 u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; in write_swqe2_immediate()
1606 struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry; in write_swqe2_immediate()
1609 swqe->descriptors = 0; in write_swqe2_immediate()
1612 swqe->tx_control |= EHEA_SWQE_TSO; in write_swqe2_immediate()
1613 swqe->mss = skb_shinfo(skb)->gso_size; in write_swqe2_immediate()
1623 swqe->immediate_data_length = immediate_len; in write_swqe2_immediate()
1626 sg1entry->l_key = lkey; in write_swqe2_immediate()
1627 sg1entry->len = skb_data_size - immediate_len; in write_swqe2_immediate()
1628 sg1entry->vaddr = in write_swqe2_immediate()
1629 ehea_map_vaddr(skb->data + immediate_len); in write_swqe2_immediate()
1630 swqe->descriptors++; in write_swqe2_immediate()
1634 swqe->immediate_data_length = skb_data_size; in write_swqe2_immediate()
1645 nfrags = skb_shinfo(skb)->nr_frags; in write_swqe2_data()
1646 sg1entry = &swqe->u.immdata_desc.sg_entry; in write_swqe2_data()
1647 sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list; in write_swqe2_data()
1654 if (swqe->descriptors == 0) { in write_swqe2_data()
1656 frag = &skb_shinfo(skb)->frags[0]; in write_swqe2_data()
1659 sg1entry->l_key = lkey; in write_swqe2_data()
1660 sg1entry->len = skb_frag_size(frag); in write_swqe2_data()
1661 sg1entry->vaddr = in write_swqe2_data()
1663 swqe->descriptors++; in write_swqe2_data()
1669 frag = &skb_shinfo(skb)->frags[i]; in write_swqe2_data()
1670 sgentry = &sg_list[i - sg1entry_contains_frag_data]; in write_swqe2_data()
1672 sgentry->l_key = lkey; in write_swqe2_data()
1673 sgentry->len = skb_frag_size(frag); in write_swqe2_data()
1674 sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag)); in write_swqe2_data()
1675 swqe->descriptors++; in write_swqe2_data()
1688 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, in ehea_broadcast_reg_helper()
1689 port->logical_port_id, in ehea_broadcast_reg_helper()
1690 reg_type, port->mac_addr, 0, hcallid); in ehea_broadcast_reg_helper()
1694 ret = -EIO; in ehea_broadcast_reg_helper()
1700 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, in ehea_broadcast_reg_helper()
1701 port->logical_port_id, in ehea_broadcast_reg_helper()
1702 reg_type, port->mac_addr, 0, hcallid); in ehea_broadcast_reg_helper()
1706 ret = -EIO; in ehea_broadcast_reg_helper()
1720 if (!is_valid_ether_addr(mac_addr->sa_data)) { in ehea_set_mac_addr()
1721 ret = -EADDRNOTAVAIL; in ehea_set_mac_addr()
1728 ret = -ENOMEM; in ehea_set_mac_addr()
1732 memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN); in ehea_set_mac_addr()
1734 cb0->port_mac_addr = cb0->port_mac_addr >> 16; in ehea_set_mac_addr()
1736 hret = ehea_h_modify_ehea_port(port->adapter->handle, in ehea_set_mac_addr()
1737 port->logical_port_id, H_PORT_CB0, in ehea_set_mac_addr()
1740 ret = -EIO; in ehea_set_mac_addr()
1744 memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); in ehea_set_mac_addr()
1747 if (port->state == EHEA_PORT_UP) { in ehea_set_mac_addr()
1753 port->mac_addr = cb0->port_mac_addr << 16; in ehea_set_mac_addr()
1756 if (port->state == EHEA_PORT_UP) { in ehea_set_mac_addr()
1788 if (enable == port->promisc) in ehea_promiscuous()
1798 cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0; in ehea_promiscuous()
1800 hret = ehea_h_modify_ehea_port(port->adapter->handle, in ehea_promiscuous()
1801 port->logical_port_id, in ehea_promiscuous()
1808 port->promisc = enable; in ehea_promiscuous()
1823 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, in ehea_multicast_reg_helper()
1824 port->logical_port_id, in ehea_multicast_reg_helper()
1833 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, in ehea_multicast_reg_helper()
1834 port->logical_port_id, in ehea_multicast_reg_helper()
1843 struct ehea_mc_list *mc_entry = port->mc_list; in ehea_drop_multicast_list()
1849 list_for_each_safe(pos, temp, &(port->mc_list->list)) { in ehea_drop_multicast_list()
1852 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr, in ehea_drop_multicast_list()
1856 ret = -EIO; in ehea_drop_multicast_list()
1870 if (!port->allmulti) { in ehea_allmulti()
1876 port->allmulti = 1; in ehea_allmulti()
1886 port->allmulti = 0; in ehea_allmulti()
1903 INIT_LIST_HEAD(&ehea_mcl_entry->list); in ehea_add_multicast_entry()
1905 memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN); in ehea_add_multicast_entry()
1907 hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr, in ehea_add_multicast_entry()
1910 list_add(&ehea_mcl_entry->list, &port->mc_list->list); in ehea_add_multicast_entry()
1923 ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC)); in ehea_set_multicast_list()
1925 if (dev->flags & IFF_ALLMULTI) { in ehea_set_multicast_list()
1940 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) { in ehea_set_multicast_list()
1942 port->adapter->max_mc_mac); in ehea_set_multicast_list()
1947 ehea_add_multicast_entry(port, ha->addr); in ehea_set_multicast_list()
1956 swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC; in xmit_common()
1961 if (skb->ip_summed == CHECKSUM_PARTIAL) in xmit_common()
1962 swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM; in xmit_common()
1964 swqe->ip_start = skb_network_offset(skb); in xmit_common()
1965 swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1; in xmit_common()
1967 switch (ip_hdr(skb)->protocol) { in xmit_common()
1969 if (skb->ip_summed == CHECKSUM_PARTIAL) in xmit_common()
1970 swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM; in xmit_common()
1972 swqe->tcp_offset = swqe->ip_end + 1 + in xmit_common()
1977 if (skb->ip_summed == CHECKSUM_PARTIAL) in xmit_common()
1978 swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM; in xmit_common()
1980 swqe->tcp_offset = swqe->ip_end + 1 + in xmit_common()
1989 swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT; in ehea_xmit2()
1999 u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0]; in ehea_xmit3()
2003 if (!skb->data_len) in ehea_xmit3()
2004 skb_copy_from_linear_data(skb, imm_data, skb->len); in ehea_xmit3()
2006 skb_copy_bits(skb, 0, imm_data, skb->len); in ehea_xmit3()
2008 swqe->immediate_data_length = skb->len; in ehea_xmit3()
2021 pr = &port->port_res[skb_get_queue_mapping(skb)]; in ehea_start_xmit()
2024 swqe = ehea_get_swqe(pr->qp, &swqe_index); in ehea_start_xmit()
2026 atomic_dec(&pr->swqe_avail); in ehea_start_xmit()
2029 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT; in ehea_start_xmit()
2030 swqe->vlan_tag = skb_vlan_tag_get(skb); in ehea_start_xmit()
2033 pr->tx_packets++; in ehea_start_xmit()
2034 pr->tx_bytes += skb->len; in ehea_start_xmit()
2036 if (skb->len <= SWQE3_MAX_IMM) { in ehea_start_xmit()
2037 u32 sig_iv = port->sig_comp_iv; in ehea_start_xmit()
2038 u32 swqe_num = pr->swqe_id_counter; in ehea_start_xmit()
2040 swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE) in ehea_start_xmit()
2042 if (pr->swqe_ll_count >= (sig_iv - 1)) { in ehea_start_xmit()
2043 swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL, in ehea_start_xmit()
2045 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; in ehea_start_xmit()
2046 pr->swqe_ll_count = 0; in ehea_start_xmit()
2048 pr->swqe_ll_count += 1; in ehea_start_xmit()
2050 swqe->wr_id = in ehea_start_xmit()
2052 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter) in ehea_start_xmit()
2054 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index); in ehea_start_xmit()
2055 pr->sq_skba.arr[pr->sq_skba.index] = skb; in ehea_start_xmit()
2057 pr->sq_skba.index++; in ehea_start_xmit()
2058 pr->sq_skba.index &= (pr->sq_skba.len - 1); in ehea_start_xmit()
2060 lkey = pr->send_mr.lkey; in ehea_start_xmit()
2062 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; in ehea_start_xmit()
2064 pr->swqe_id_counter += 1; in ehea_start_xmit()
2067 "post swqe on QP %d\n", pr->qp->init_attr.qp_nr); in ehea_start_xmit()
2073 swqe->tx_control |= EHEA_SWQE_PURGE; in ehea_start_xmit()
2076 ehea_post_swqe(pr->qp, swqe); in ehea_start_xmit()
2078 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { in ehea_start_xmit()
2079 pr->p_stats.queue_stopped++; in ehea_start_xmit()
2089 struct ehea_adapter *adapter = port->adapter; in ehea_vlan_rx_add_vid()
2098 err = -ENOMEM; in ehea_vlan_rx_add_vid()
2102 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, in ehea_vlan_rx_add_vid()
2106 err = -EINVAL; in ehea_vlan_rx_add_vid()
2111 cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F))); in ehea_vlan_rx_add_vid()
2113 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, in ehea_vlan_rx_add_vid()
2117 err = -EINVAL; in ehea_vlan_rx_add_vid()
2127 struct ehea_adapter *adapter = port->adapter; in ehea_vlan_rx_kill_vid()
2136 err = -ENOMEM; in ehea_vlan_rx_kill_vid()
2140 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, in ehea_vlan_rx_kill_vid()
2144 err = -EINVAL; in ehea_vlan_rx_kill_vid()
2149 cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F))); in ehea_vlan_rx_kill_vid()
2151 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, in ehea_vlan_rx_kill_vid()
2155 err = -EINVAL; in ehea_vlan_rx_kill_vid()
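Both VLAN callbacks treat the hardware filter as a 4096-bit map held in u64 words: the word index (computed just above the lines shown) is vid / 64, and bits within a word run MSB-first, hence 0x8000000000000000 >> (vid & 0x3F). The mapping as a standalone sketch (function name illustrative):

    #include <stdint.h>

    /* Illustrative: set the filter bit for a 12-bit VLAN id, MSB-first per word. */
    static void vlan_filter_set(uint64_t filter[64], unsigned int vid)
    {
            filter[vid / 64] |= 0x8000000000000000ULL >> (vid & 0x3F);
    }

The kill path at line 2149 clears the same bit with &= ~(...).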
2164 int ret = -EIO; in ehea_activate_qp()
2172 ret = -ENOMEM; in ehea_activate_qp()
2176 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_activate_qp()
2183 cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED; in ehea_activate_qp()
2184 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_activate_qp()
2192 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_activate_qp()
2199 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED; in ehea_activate_qp()
2200 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_activate_qp()
2208 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_activate_qp()
2215 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND; in ehea_activate_qp()
2216 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_activate_qp()
2224 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_activate_qp()
2243 port->qp_eq = ehea_create_eq(port->adapter, eq_type, in ehea_port_res_setup()
2245 if (!port->qp_eq) { in ehea_port_res_setup()
2246 ret = -EINVAL; in ehea_port_res_setup()
2266 ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i); in ehea_port_res_setup()
2271 ret = ehea_init_port_res(port, &port->port_res[i], in ehea_port_res_setup()
2280 while (--i >= 0) in ehea_port_res_setup()
2281 ehea_clean_portres(port, &port->port_res[i]); in ehea_port_res_setup()
2284 ehea_destroy_eq(port->qp_eq); in ehea_port_res_setup()
2293 for (i = 0; i < port->num_def_qps; i++) in ehea_clean_all_portres()
2294 ret |= ehea_clean_portres(port, &port->port_res[i]); in ehea_clean_all_portres()
2296 ret |= ehea_destroy_eq(port->qp_eq); in ehea_clean_all_portres()
2303 if (adapter->active_ports) in ehea_remove_adapter_mr()
2306 ehea_rem_mr(&adapter->mr); in ehea_remove_adapter_mr()
2311 if (adapter->active_ports) in ehea_add_adapter_mr()
2314 return ehea_reg_kernel_mr(adapter, &adapter->mr); in ehea_add_adapter_mr()
2322 if (port->state == EHEA_PORT_UP) in ehea_up()
2325 ret = ehea_port_res_setup(port, port->num_def_qps); in ehea_up()
2344 for (i = 0; i < port->num_def_qps; i++) { in ehea_up()
2345 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp); in ehea_up()
2352 for (i = 0; i < port->num_def_qps; i++) { in ehea_up()
2353 ret = ehea_fill_port_res(&port->port_res[i]); in ehea_up()
2362 ret = -EIO; in ehea_up()
2366 port->state = EHEA_PORT_UP; in ehea_up()
2390 for (i = 0; i < port->num_def_qps; i++) in port_napi_disable()
2391 napi_disable(&port->port_res[i].napi); in port_napi_disable()
2398 for (i = 0; i < port->num_def_qps; i++) in port_napi_enable()
2399 napi_enable(&port->port_res[i].napi); in port_napi_enable()
2407 mutex_lock(&port->port_lock); in ehea_open()
2419 mutex_unlock(&port->port_lock); in ehea_open()
2420 schedule_delayed_work(&port->stats_work, in ehea_open()
2431 if (port->state == EHEA_PORT_DOWN) in ehea_down()
2440 port->state = EHEA_PORT_DOWN; in ehea_down()
2460 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags); in ehea_stop()
2461 cancel_work_sync(&port->reset_task); in ehea_stop()
2462 cancel_delayed_work_sync(&port->stats_work); in ehea_stop()
2463 mutex_lock(&port->port_lock); in ehea_stop()
2467 mutex_unlock(&port->port_lock); in ehea_stop()
2468 clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags); in ehea_stop()
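ehea_stop closes a race with the reset worker: setting __EHEA_DISABLE_PORT_RESET first makes ehea_schedule_port_reset (line 137) a no-op, cancel_work_sync() then waits out a reset already in flight, and the bit is cleared only after the port is down. The gate-then-cancel idiom in general form, sketched with an illustrative flag name:

    /* Illustrative shutdown against a self-rescheduling worker. */
    set_bit(__MY_DISABLE_RESET, &port->flags);  /* new reset requests become no-ops */
    cancel_work_sync(&port->reset_task);        /* drain a reset already running */
    mutex_lock(&port->port_lock);
    /* ... take the port down ... */
    mutex_unlock(&port->port_lock);
    clear_bit(__MY_DISABLE_RESET, &port->flags);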
2480 for (i = 0; i < init_attr->act_nr_send_wqes; i++) { in ehea_purge_sq()
2482 swqe->tx_control |= EHEA_SWQE_PURGE; in ehea_purge_sq()
2490 for (i = 0; i < port->num_def_qps; i++) { in ehea_flush_sq()
2491 struct ehea_port_res *pr = &port->port_res[i]; in ehea_flush_sq()
2492 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count; in ehea_flush_sq()
2495 ret = wait_event_timeout(port->swqe_avail_wq, in ehea_flush_sq()
2496 atomic_read(&pr->swqe_avail) >= swqe_max, in ehea_flush_sq()
2509 struct ehea_adapter *adapter = port->adapter; in ehea_stop_qps()
2511 int ret = -EIO; in ehea_stop_qps()
2520 ret = -ENOMEM; in ehea_stop_qps()
2524 for (i = 0; i < (port->num_def_qps); i++) { in ehea_stop_qps()
2525 struct ehea_port_res *pr = &port->port_res[i]; in ehea_stop_qps()
2526 struct ehea_qp *qp = pr->qp; in ehea_stop_qps()
2532 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_stop_qps()
2540 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8; in ehea_stop_qps()
2541 cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED; in ehea_stop_qps()
2543 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_stop_qps()
2552 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_stop_qps()
2580 struct sk_buff **skba_rq2 = pr->rq2_skba.arr; in ehea_update_rqs()
2581 struct sk_buff **skba_rq3 = pr->rq3_skba.arr; in ehea_update_rqs()
2583 u32 lkey = pr->recv_mr.lkey; in ehea_update_rqs()
2589 for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) { in ehea_update_rqs()
2591 rwqe->sg_list[0].l_key = lkey; in ehea_update_rqs()
2592 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id); in ehea_update_rqs()
2595 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); in ehea_update_rqs()
2598 for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) { in ehea_update_rqs()
2600 rwqe->sg_list[0].l_key = lkey; in ehea_update_rqs()
2601 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id); in ehea_update_rqs()
2604 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); in ehea_update_rqs()
2611 struct ehea_adapter *adapter = port->adapter; in ehea_restart_qps()
2622 return -ENOMEM; in ehea_restart_qps()
2624 for (i = 0; i < (port->num_def_qps); i++) { in ehea_restart_qps()
2625 struct ehea_port_res *pr = &port->port_res[i]; in ehea_restart_qps()
2626 struct ehea_qp *qp = pr->qp; in ehea_restart_qps()
2637 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_restart_qps()
2642 ret = -EFAULT; in ehea_restart_qps()
2646 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8; in ehea_restart_qps()
2647 cb0->qp_ctl_reg |= H_QP_CR_ENABLED; in ehea_restart_qps()
2649 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_restart_qps()
2655 ret = -EFAULT; in ehea_restart_qps()
2659 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, in ehea_restart_qps()
2664 ret = -EFAULT; in ehea_restart_qps()
2669 ehea_refill_rq1(pr, pr->rq1_skba.index, 0); in ehea_restart_qps()
2684 struct net_device *dev = port->netdev; in ehea_reset_port()
2687 port->resets++; in ehea_reset_port()
2688 mutex_lock(&port->port_lock); in ehea_reset_port()
2707 mutex_unlock(&port->port_lock); in ehea_reset_port()
2716 pr_info("LPAR memory changed - re-initializing driver\n"); in ehea_rereg_mrs()
2719 if (adapter->active_ports) { in ehea_rereg_mrs()
2722 struct ehea_port *port = adapter->port[i]; in ehea_rereg_mrs()
2728 dev = port->netdev; in ehea_rereg_mrs()
2730 if (dev->flags & IFF_UP) { in ehea_rereg_mrs()
2731 mutex_lock(&port->port_lock); in ehea_rereg_mrs()
2736 mutex_unlock(&port->port_lock); in ehea_rereg_mrs()
2740 mutex_unlock(&port->port_lock); in ehea_rereg_mrs()
2746 ret = ehea_rem_mr(&adapter->mr); in ehea_rereg_mrs()
2748 pr_err("unregister MR failed - driver inoperable!\n"); in ehea_rereg_mrs()
2756 if (adapter->active_ports) { in ehea_rereg_mrs()
2758 ret = ehea_reg_kernel_mr(adapter, &adapter->mr); in ehea_rereg_mrs()
2760 pr_err("register MR failed - driver inoperable!\n"); in ehea_rereg_mrs()
2766 struct ehea_port *port = adapter->port[i]; in ehea_rereg_mrs()
2769 struct net_device *dev = port->netdev; in ehea_rereg_mrs()
2771 if (dev->flags & IFF_UP) { in ehea_rereg_mrs()
2772 mutex_lock(&port->port_lock); in ehea_rereg_mrs()
2781 mutex_unlock(&port->port_lock); in ehea_rereg_mrs()
2786 pr_info("re-initializing driver complete\n"); in ehea_rereg_mrs()
2808 ret = -ENOMEM; in ehea_sense_adapter_attr()
2812 hret = ehea_h_query_ehea(adapter->handle, cb); in ehea_sense_adapter_attr()
2815 ret = -EIO; in ehea_sense_adapter_attr()
2819 adapter->max_mc_mac = cb->max_mc_mac - 1; in ehea_sense_adapter_attr()
2840 ret = -ENOMEM; in ehea_get_jumboframe_status()
2843 hret = ehea_h_query_ehea_port(port->adapter->handle, in ehea_get_jumboframe_status()
2844 port->logical_port_id, in ehea_get_jumboframe_status()
2848 if (cb4->jumbo_frame) in ehea_get_jumboframe_status()
2851 cb4->jumbo_frame = 1; in ehea_get_jumboframe_status()
2852 hret = ehea_h_modify_ehea_port(port->adapter-> in ehea_get_jumboframe_status()
2854 port-> in ehea_get_jumboframe_status()
2863 ret = -EINVAL; in ehea_get_jumboframe_status()
2875 return sprintf(buf, "%d", port->logical_port_id); in log_port_id_show()
2883 of_node_put(port->ofdev.dev.of_node); in logical_port_release()
2891 port->ofdev.dev.of_node = of_node_get(dn); in ehea_register_port()
2892 port->ofdev.dev.parent = &port->adapter->ofdev->dev; in ehea_register_port()
2893 port->ofdev.dev.bus = &ibmebus_bus_type; in ehea_register_port()
2895 dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++); in ehea_register_port()
2896 port->ofdev.dev.release = logical_port_release; in ehea_register_port()
2898 ret = of_device_register(&port->ofdev); in ehea_register_port()
2904 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id); in ehea_register_port()
2910 return &port->ofdev.dev; in ehea_register_port()
2913 of_device_unregister(&port->ofdev); in ehea_register_port()
2920 device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id); in ehea_unregister_port()
2921 of_device_unregister(&port->ofdev); in ehea_unregister_port()
2951 ret = -ENOMEM; in ehea_setup_single_port()
2957 mutex_init(&port->port_lock); in ehea_setup_single_port()
2958 port->state = EHEA_PORT_DOWN; in ehea_setup_single_port()
2959 port->sig_comp_iv = sq_entries / 10; in ehea_setup_single_port()
2961 port->adapter = adapter; in ehea_setup_single_port()
2962 port->netdev = dev; in ehea_setup_single_port()
2963 port->logical_port_id = logical_port_id; in ehea_setup_single_port()
2965 port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT); in ehea_setup_single_port()
2967 port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL); in ehea_setup_single_port()
2968 if (!port->mc_list) { in ehea_setup_single_port()
2969 ret = -ENOMEM; in ehea_setup_single_port()
2973 INIT_LIST_HEAD(&port->mc_list->list); in ehea_setup_single_port()
2979 netif_set_real_num_rx_queues(dev, port->num_def_qps); in ehea_setup_single_port()
2980 netif_set_real_num_tx_queues(dev, port->num_def_qps); in ehea_setup_single_port()
2989 memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); in ehea_setup_single_port()
2991 dev->netdev_ops = &ehea_netdev_ops; in ehea_setup_single_port()
2994 dev->hw_features = NETIF_F_SG | NETIF_F_TSO | in ehea_setup_single_port()
2996 dev->features = NETIF_F_SG | NETIF_F_TSO | in ehea_setup_single_port()
3000 dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA | in ehea_setup_single_port()
3002 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; in ehea_setup_single_port()
3004 /* MTU range: 68 - 9022 */ in ehea_setup_single_port()
3005 dev->min_mtu = ETH_MIN_MTU; in ehea_setup_single_port()
3006 dev->max_mtu = EHEA_MAX_PACKET_SIZE; in ehea_setup_single_port()
3008 INIT_WORK(&port->reset_task, ehea_reset_port); in ehea_setup_single_port()
3009 INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats); in ehea_setup_single_port()
3011 init_waitqueue_head(&port->swqe_avail_wq); in ehea_setup_single_port()
3012 init_waitqueue_head(&port->restart_wq); in ehea_setup_single_port()
3027 adapter->active_ports++; in ehea_setup_single_port()
3035 kfree(port->mc_list); in ehea_setup_single_port()
3048 struct ehea_adapter *adapter = port->adapter; in ehea_shutdown_single_port()
3050 cancel_work_sync(&port->reset_task); in ehea_shutdown_single_port()
3051 cancel_delayed_work_sync(&port->stats_work); in ehea_shutdown_single_port()
3052 unregister_netdev(port->netdev); in ehea_shutdown_single_port()
3054 kfree(port->mc_list); in ehea_shutdown_single_port()
3055 free_netdev(port->netdev); in ehea_shutdown_single_port()
3056 adapter->active_ports--; in ehea_shutdown_single_port()
3067 lhea_dn = adapter->ofdev->dev.of_node; in ehea_setup_ports()
3070 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", in ehea_setup_ports()
3080 return -EIO; in ehea_setup_ports()
3083 adapter->port[i] = ehea_setup_single_port(adapter, in ehea_setup_ports()
3086 if (adapter->port[i]) in ehea_setup_ports()
3087 netdev_info(adapter->port[i]->netdev, in ehea_setup_ports()
3104 lhea_dn = adapter->ofdev->dev.of_node; in ehea_get_eth_dn()
3107 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", in ehea_get_eth_dn()
3133 netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n", in probe_port_store()
3135 return -EINVAL; in probe_port_store()
3142 return -EINVAL; in probe_port_store()
3148 return -EIO; in probe_port_store()
3157 if (!adapter->port[i]) { in probe_port_store()
3158 adapter->port[i] = port; in probe_port_store()
3162 netdev_info(port->netdev, "added: (logical port id=%d)\n", in probe_port_store()
3166 return -EIO; in probe_port_store()
3186 netdev_info(port->netdev, "removed: (logical port id=%d)\n", in remove_port_store()
3192 if (adapter->port[i] == port) { in remove_port_store()
3193 adapter->port[i] = NULL; in remove_port_store()
3199 return -EINVAL; in remove_port_store()
3212 int ret = device_create_file(&dev->dev, &dev_attr_probe_port); in ehea_create_device_sysfs()
3216 ret = device_create_file(&dev->dev, &dev_attr_remove_port); in ehea_create_device_sysfs()
3223 device_remove_file(&dev->dev, &dev_attr_probe_port); in ehea_remove_device_sysfs()
3224 device_remove_file(&dev->dev, &dev_attr_remove_port); in ehea_remove_device_sysfs()
3252 fallthrough; /* re-add canceled memory block */ in ehea_mem_notifier()
3257 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) in ehea_mem_notifier()
3265 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) in ehea_mem_notifier()
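The memory notifier keeps the driver's section busmap in step with LPAR memory hotplug: online events add the pfn range, offline events remove it, and both ultimately feed the MR re-registration performed by ehea_rereg_mrs (line 2716 onward). A skeleton of such a callback under those assumptions (my_add_sect_bmap/my_rem_sect_bmap stand in for the driver's ehea_add_sect_bmap/ehea_rem_sect_bmap):

    #include <linux/memory.h>
    #include <linux/notifier.h>

    /* Illustrative memory-hotplug notifier; the bmap helpers are stand-ins. */
    static int my_mem_notifier(struct notifier_block *nb,
                               unsigned long action, void *data)
    {
            struct memory_notify *arg = data;

            switch (action) {
            case MEM_ONLINE:
                    if (my_add_sect_bmap(arg->start_pfn, arg->nr_pages))
                            return NOTIFY_BAD;
                    break;
            case MEM_OFFLINE:
                    if (my_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
                            return NOTIFY_BAD;
                    break;
            }
            return NOTIFY_OK;
    }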
3373 if (!dev || !dev->dev.of_node) { in ehea_probe_adapter()
3375 return -EINVAL; in ehea_probe_adapter()
3378 adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL); in ehea_probe_adapter()
3380 ret = -ENOMEM; in ehea_probe_adapter()
3381 dev_err(&dev->dev, "no mem for ehea_adapter\n"); in ehea_probe_adapter()
3385 list_add(&adapter->list, &adapter_list); in ehea_probe_adapter()
3387 adapter->ofdev = dev; in ehea_probe_adapter()
3389 adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle", in ehea_probe_adapter()
3392 adapter->handle = *adapter_handle; in ehea_probe_adapter()
3394 if (!adapter->handle) { in ehea_probe_adapter()
3395 dev_err(&dev->dev, "failed getting handle for adapter" in ehea_probe_adapter()
3396 " '%pOF'\n", dev->dev.of_node); in ehea_probe_adapter()
3397 ret = -ENODEV; in ehea_probe_adapter()
3401 adapter->pd = EHEA_PD_ID; in ehea_probe_adapter()
3410 dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret); in ehea_probe_adapter()
3414 adapter->neq = ehea_create_eq(adapter, in ehea_probe_adapter()
3416 if (!adapter->neq) { in ehea_probe_adapter()
3417 ret = -EIO; in ehea_probe_adapter()
3418 dev_err(&dev->dev, "NEQ creation failed\n"); in ehea_probe_adapter()
3422 tasklet_setup(&adapter->neq_tasklet, ehea_neq_tasklet); in ehea_probe_adapter()
3430 dev_err(&dev->dev, "setup_ports failed\n"); in ehea_probe_adapter()
3434 ret = ibmebus_request_irq(adapter->neq->attr.ist1, in ehea_probe_adapter()
3438 dev_err(&dev->dev, "requesting NEQ IRQ failed\n"); in ehea_probe_adapter()
3443 tasklet_hi_schedule(&adapter->neq_tasklet); in ehea_probe_adapter()
3450 if (adapter->port[i]) { in ehea_probe_adapter()
3451 ehea_shutdown_single_port(adapter->port[i]); in ehea_probe_adapter()
3452 adapter->port[i] = NULL; in ehea_probe_adapter()
3459 ehea_destroy_eq(adapter->neq); in ehea_probe_adapter()
3462 list_del(&adapter->list); in ehea_probe_adapter()
3476 if (adapter->port[i]) { in ehea_remove()
3477 ehea_shutdown_single_port(adapter->port[i]); in ehea_remove()
3478 adapter->port[i] = NULL; in ehea_remove()
3483 ibmebus_free_irq(adapter->neq->attr.ist1, adapter); in ehea_remove()
3484 tasklet_kill(&adapter->neq_tasklet); in ehea_remove()
3486 ehea_destroy_eq(adapter->neq); in ehea_remove()
3488 list_del(&adapter->list); in ehea_remove()
3502 ret = -EINVAL; in check_module_parm()
3507 ret = -EINVAL; in check_module_parm()
3512 ret = -EINVAL; in check_module_parm()
3517 ret = -EINVAL; in check_module_parm()