Lines Matching +full:unlock +full:- +full:keys
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
43 const int *keys, const u64 *data) in fun_port_write_cmds() argument
55 return -EINVAL; in fun_port_write_cmds()
61 fp->netdev->dev_port); in fun_port_write_cmds()
64 FUN_ADMIN_WRITE48_REQ_INIT(keys[i], data[i]); in fun_port_write_cmds()
66 return fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, in fun_port_write_cmds()
77 const int *keys, u64 *data) in fun_port_read_cmds() argument
91 return -EINVAL; in fun_port_read_cmds()
97 fp->netdev->dev_port); in fun_port_read_cmds()
99 cmd.req.u.read.read48[i] = FUN_ADMIN_READ48_REQ_INIT(keys[i]); in fun_port_read_cmds()
101 rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, in fun_port_read_cmds()
107 data[i] = FUN_ADMIN_READ48_RSP_DATA_G(r48rsp->key_to_data); in fun_port_read_cmds()
108 dev_dbg(fp->fdev->dev, in fun_port_read_cmds()
110 fp->lport, r48rsp->key_to_data, keys[i], data[i], in fun_port_read_cmds()
111 FUN_ADMIN_READ48_RSP_RET_G(r48rsp->key_to_data)); in fun_port_read_cmds()
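The two helpers above move port attributes as 48-bit key/value pairs over the admin queue, taking parallel keys[]/data[] arrays; fun_get_port_attributes() later in this listing calls them exactly that way. A minimal usage sketch, assuming a key name such as FUN_ADMIN_PORT_KEY_MTU exists in the driver's HCI header (illustrative, not taken from the listing):

	/* illustrative helper; FUN_ADMIN_PORT_KEY_MTU is an assumed key name */
	static int example_read_mtu(struct funeth_priv *fp, u64 *mtu)
	{
		static const int keys[] = { FUN_ADMIN_PORT_KEY_MTU };
		u64 data[ARRAY_SIZE(keys)];
		int rc;

		rc = fun_port_read_cmds(fp, ARRAY_SIZE(keys), keys, data);
		if (!rc)
			*mtu = data[0];
		return rc;
	}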
126 int speed = fp->link_speed; in fun_report_link()
129 if (fp->link_speed >= SPEED_1000) { in fun_report_link()
134 if (fp->active_fec & FUN_PORT_FEC_RS) in fun_report_link()
135 fec = ", RS-FEC"; in fun_report_link()
136 else if (fp->active_fec & FUN_PORT_FEC_FC) in fun_report_link()
137 fec = ", BASER-FEC"; in fun_report_link()
139 if ((fp->active_fc & FUN_PORT_CAP_PAUSE_MASK) == FUN_PORT_CAP_PAUSE_MASK) in fun_report_link()
141 else if (fp->active_fc & FUN_PORT_CAP_RX_PAUSE) in fun_report_link()
143 else if (fp->active_fc & FUN_PORT_CAP_TX_PAUSE) in fun_report_link()
146 netdev_info(netdev, "Link up at %d %cb/s full-duplex%s%s\n", in fun_report_link()
180 unsigned int table_len = fp->indir_table_nentries; in fun_config_rss()
182 struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs); in fun_config_rss()
194 if (op != FUN_ADMIN_SUBOP_CREATE && fp->rss_hw_id == FUN_HCI_ID_INVALID) in fun_config_rss()
195 return -EINVAL; in fun_config_rss()
202 FUN_ADMIN_RSS_CREATE_REQ_INIT(op, flags, fp->rss_hw_id, in fun_config_rss()
203 dev->dev_port, algo, in fun_config_rss()
208 fun_dataop_gl_init(&cmd.gl, 0, 0, len, fp->rss_dma_addr); in fun_config_rss()
211 memcpy(fp->rss_cfg, key, FUN_ETH_RSS_MAX_KEY_SIZE); in fun_config_rss()
212 indir_tab = fp->rss_cfg + FUN_ETH_RSS_MAX_KEY_SIZE; in fun_config_rss()
214 *indir_tab++ = cpu_to_be32(rxqs[*qtable++]->hw_cqid); in fun_config_rss()
216 rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, in fun_config_rss()
219 fp->rss_hw_id = be32_to_cpu(cmd.rsp.id); in fun_config_rss()
228 if (fp->rss_hw_id != FUN_HCI_ID_INVALID) { in fun_destroy_rss()
229 fun_res_destroy(fp->fdev, FUN_ADMIN_OP_RSS, 0, fp->rss_hw_id); in fun_destroy_rss()
230 fp->rss_hw_id = FUN_HCI_ID_INVALID; in fun_destroy_rss()
239 cpumask_copy(&p->affinity_mask, mask); in fun_irq_aff_notify()
246 /* Allocate an IRQ structure, assign an MSI-X index and initial affinity to it,
260 return ERR_PTR(-ENOMEM); in fun_alloc_qirq()
262 res = fun_reserve_irqs(fp->fdev, 1, &irq->irq_idx); in fun_alloc_qirq()
266 res = xa_insert(&fp->irqs, idx + xa_idx_offset, irq, GFP_KERNEL); in fun_alloc_qirq()
270 irq->irq = pci_irq_vector(fp->pdev, irq->irq_idx); in fun_alloc_qirq()
271 cpumask_set_cpu(cpu, &irq->affinity_mask); in fun_alloc_qirq()
272 irq->aff_notify.notify = fun_irq_aff_notify; in fun_alloc_qirq()
273 irq->aff_notify.release = fun_irq_aff_release; in fun_alloc_qirq()
274 irq->state = FUN_IRQ_INIT; in fun_alloc_qirq()
278 fun_release_irqs(fp->fdev, 1, &irq->irq_idx); in fun_alloc_qirq()
286 netif_napi_del(&irq->napi); in fun_free_qirq()
287 fun_release_irqs(fp->fdev, 1, &irq->irq_idx); in fun_free_qirq()
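fun_alloc_qirq() above reserves a device MSI-X slot, stores the IRQ structure in the fp->irqs XArray, and maps the slot to a Linux IRQ number with pci_irq_vector(); fun_free_qirq() releases it again. A minimal sketch of the index-to-IRQ step, assuming the vectors were set up earlier with pci_alloc_irq_vectors() (names not in the listing are illustrative):

	/* illustrative fragment: valid only after a successful
	 * pci_alloc_irq_vectors(pdev, min, max, PCI_IRQ_MSIX)
	 */
	int linux_irq = pci_irq_vector(pdev, msix_index);

	if (linux_irq < 0)
		return linux_irq;	/* no vector at that index */
	irq->irq = linux_irq;		/* usable with request_irq() later */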
299 xa_for_each(&fp->irqs, idx, irq) { in fun_prune_queue_irqs()
300 if (irq->txq || irq->rxq) /* skip those in use */ in fun_prune_queue_irqs()
303 xa_erase(&fp->irqs, idx); in fun_prune_queue_irqs()
306 if (idx < fp->rx_irq_ofst) in fun_prune_queue_irqs()
307 fp->num_tx_irqs--; in fun_prune_queue_irqs()
309 fp->num_rx_irqs--; in fun_prune_queue_irqs()
323 int node = dev_to_node(&fp->pdev->dev); in fun_alloc_queue_irqs()
327 for (i = fp->num_tx_irqs; i < ntx; i++) { in fun_alloc_queue_irqs()
332 fp->num_tx_irqs++; in fun_alloc_queue_irqs()
333 netif_napi_add_tx(dev, &irq->napi, fun_txq_napi_poll); in fun_alloc_queue_irqs()
336 for (i = fp->num_rx_irqs; i < nrx; i++) { in fun_alloc_queue_irqs()
337 irq = fun_alloc_qirq(fp, i, node, fp->rx_irq_ofst); in fun_alloc_queue_irqs()
341 fp->num_rx_irqs++; in fun_alloc_queue_irqs()
342 netif_napi_add(dev, &irq->napi, fun_rxq_napi_poll); in fun_alloc_queue_irqs()
368 err = funeth_txq_create(dev, i, depth, xa_load(&fp->irqs, i), in alloc_txqs()
397 xa_load(&fp->irqs, i + fp->rx_irq_ofst), in alloc_rxqs()
429 return ERR_PTR(-ENOMEM); in alloc_xdpqs()
444 struct funeth_txq **xdpqs = qset->xdpqs; in fun_free_rings()
445 struct funeth_rxq **rxqs = qset->rxqs; in fun_free_rings()
451 rxqs = rtnl_dereference(fp->rxqs); in fun_free_rings()
452 xdpqs = rtnl_dereference(fp->xdpqs); in fun_free_rings()
453 qset->txqs = fp->txqs; in fun_free_rings()
454 qset->nrxqs = netdev->real_num_rx_queues; in fun_free_rings()
455 qset->ntxqs = netdev->real_num_tx_queues; in fun_free_rings()
456 qset->nxdpqs = fp->num_xdpqs; in fun_free_rings()
461 if (rxqs == rtnl_dereference(fp->rxqs)) { in fun_free_rings()
462 rcu_assign_pointer(fp->rxqs, NULL); in fun_free_rings()
463 rcu_assign_pointer(fp->xdpqs, NULL); in fun_free_rings()
465 fp->txqs = NULL; in fun_free_rings()
468 free_rxqs(rxqs, qset->nrxqs, qset->rxq_start, qset->state); in fun_free_rings()
469 free_txqs(qset->txqs, qset->ntxqs, qset->txq_start, qset->state); in fun_free_rings()
470 free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, qset->state); in fun_free_rings()
471 if (qset->state == FUN_QSTATE_DESTROYED) in fun_free_rings()
475 qset->rxqs = rxqs; in fun_free_rings()
476 qset->xdpqs = xdpqs; in fun_free_rings()
485 err = fun_alloc_queue_irqs(netdev, qset->ntxqs, qset->nrxqs); in fun_alloc_rings()
489 rxqs = kcalloc(qset->ntxqs + qset->nrxqs, sizeof(*rxqs), GFP_KERNEL); in fun_alloc_rings()
491 return -ENOMEM; in fun_alloc_rings()
493 if (qset->nxdpqs) { in fun_alloc_rings()
494 xdpqs = alloc_xdpqs(netdev, qset->nxdpqs, qset->sq_depth, in fun_alloc_rings()
495 qset->xdpq_start, qset->state); in fun_alloc_rings()
502 txqs = (struct funeth_txq **)&rxqs[qset->nrxqs]; in fun_alloc_rings()
503 err = alloc_txqs(netdev, txqs, qset->ntxqs, qset->sq_depth, in fun_alloc_rings()
504 qset->txq_start, qset->state); in fun_alloc_rings()
508 err = alloc_rxqs(netdev, rxqs, qset->nrxqs, qset->cq_depth, in fun_alloc_rings()
509 qset->rq_depth, qset->rxq_start, qset->state); in fun_alloc_rings()
513 qset->rxqs = rxqs; in fun_alloc_rings()
514 qset->txqs = txqs; in fun_alloc_rings()
515 qset->xdpqs = xdpqs; in fun_alloc_rings()
519 free_txqs(txqs, qset->ntxqs, qset->txq_start, FUN_QSTATE_DESTROYED); in fun_alloc_rings()
521 free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, FUN_QSTATE_DESTROYED); in fun_alloc_rings()
535 for (i = 0; i < qset->nrxqs; i++) { in fun_advance_ring_state()
536 err = fun_rxq_create_dev(qset->rxqs[i], in fun_advance_ring_state()
537 xa_load(&fp->irqs, in fun_advance_ring_state()
538 i + fp->rx_irq_ofst)); in fun_advance_ring_state()
543 for (i = 0; i < qset->ntxqs; i++) { in fun_advance_ring_state()
544 err = fun_txq_create_dev(qset->txqs[i], xa_load(&fp->irqs, i)); in fun_advance_ring_state()
549 for (i = 0; i < qset->nxdpqs; i++) { in fun_advance_ring_state()
550 err = fun_txq_create_dev(qset->xdpqs[i], NULL); in fun_advance_ring_state()
571 if (fp->lport != INVALID_LPORT) in fun_port_create()
578 netdev->dev_port); in fun_port_create()
580 rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp, in fun_port_create()
584 fp->lport = be16_to_cpu(cmd.rsp.u.create.lport); in fun_port_create()
592 if (fp->lport == INVALID_LPORT) in fun_port_destroy()
595 fp->lport = INVALID_LPORT; in fun_port_destroy()
596 return fun_res_destroy(fp->fdev, FUN_ADMIN_OP_PORT, 0, in fun_port_destroy()
597 netdev->dev_port); in fun_port_destroy()
613 0, fp->netdev->dev_port); in fun_eth_create()
615 rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp, in fun_eth_create()
627 fp->netdev->dev_port, in fun_vi_create()
628 fp->netdev->dev_port) in fun_vi_create()
631 return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0); in fun_vi_create()
643 rc = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_EPSQ, sqid, in fun_create_and_bind_tx()
646 fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, ethid); in fun_create_and_bind_tx()
657 if (p->rxq) { in fun_queue_irq_handler()
658 prefetch(p->rxq->next_cqe_info); in fun_queue_irq_handler()
659 p->rxq->irq_cnt++; in fun_queue_irq_handler()
661 napi_schedule_irqoff(&p->napi); in fun_queue_irq_handler()
674 xa_for_each(&fp->irqs, idx, p) { in fun_enable_irqs()
675 if (p->txq) { in fun_enable_irqs()
677 qidx = p->txq->qidx; in fun_enable_irqs()
678 } else if (p->rxq) { in fun_enable_irqs()
680 qidx = p->rxq->qidx; in fun_enable_irqs()
685 if (p->state != FUN_IRQ_INIT) in fun_enable_irqs()
688 snprintf(p->name, sizeof(p->name) - 1, "%s-%s-%u", dev->name, in fun_enable_irqs()
690 err = request_irq(p->irq, fun_queue_irq_handler, 0, p->name, p); in fun_enable_irqs()
693 p->irq, err); in fun_enable_irqs()
696 p->state = FUN_IRQ_REQUESTED; in fun_enable_irqs()
699 xa_for_each(&fp->irqs, idx, p) { in fun_enable_irqs()
700 if (p->state != FUN_IRQ_REQUESTED) in fun_enable_irqs()
702 irq_set_affinity_notifier(p->irq, &p->aff_notify); in fun_enable_irqs()
703 irq_set_affinity_and_hint(p->irq, &p->affinity_mask); in fun_enable_irqs()
704 napi_enable(&p->napi); in fun_enable_irqs()
705 p->state = FUN_IRQ_ENABLED; in fun_enable_irqs()
711 last = idx - 1; in fun_enable_irqs()
712 xa_for_each_range(&fp->irqs, idx, p, 0, last) in fun_enable_irqs()
713 if (p->state == FUN_IRQ_REQUESTED) { in fun_enable_irqs()
714 free_irq(p->irq, p); in fun_enable_irqs()
715 p->state = FUN_IRQ_INIT; in fun_enable_irqs()
723 napi_disable(&irq->napi); in fun_disable_one_irq()
724 irq_set_affinity_notifier(irq->irq, NULL); in fun_disable_one_irq()
725 irq_update_affinity_hint(irq->irq, NULL); in fun_disable_one_irq()
726 free_irq(irq->irq, irq); in fun_disable_one_irq()
727 irq->state = FUN_IRQ_INIT; in fun_disable_one_irq()
736 xa_for_each(&fp->irqs, idx, p) in fun_disable_irqs()
737 if (p->state == FUN_IRQ_ENABLED) in fun_disable_irqs()
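The enable/disable fragments above attach an affinity notifier and an affinity hint to every queue IRQ before enabling NAPI, and detach both on teardown. A minimal sketch of the notifier half of that pattern, using the generic kernel API rather than this driver's structures (all names here are illustrative):

	#include <linux/interrupt.h>

	struct my_irq {
		struct irq_affinity_notify aff_notify;
		cpumask_t affinity_mask;
		int irq;
	};

	/* invoked when the IRQ's CPU affinity changes, e.g. via /proc/irq */
	static void my_aff_notify(struct irq_affinity_notify *n, const cpumask_t *mask)
	{
		struct my_irq *p = container_of(n, struct my_irq, aff_notify);

		cpumask_copy(&p->affinity_mask, mask);
	}

	static void my_aff_release(struct kref *ref)
	{
		/* nothing extra to free in this sketch */
	}

	static void my_irq_enable(struct my_irq *p)
	{
		p->aff_notify.notify = my_aff_notify;
		p->aff_notify.release = my_aff_release;
		irq_set_affinity_notifier(p->irq, &p->aff_notify);
		irq_set_affinity_and_hint(p->irq, &p->affinity_mask);
	}

On teardown the driver does the reverse, as shown in fun_disable_one_irq(): clear the notifier and hint with NULL, then free_irq().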
748 if (!rcu_access_pointer(fp->rxqs)) in fun_down()
752 if (fp->txqs[0]->init_state >= FUN_QSTATE_INIT_FULL) { in fun_down()
761 fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port); in fun_down()
778 lower_32_bits(fp->stats_dma_addr), in fun_up()
779 upper_32_bits(fp->stats_dma_addr), in fun_up()
786 if (qset->rxqs[0]->init_state < FUN_QSTATE_INIT_FULL) { in fun_up()
796 fp->txqs = qset->txqs; in fun_up()
797 rcu_assign_pointer(fp->rxqs, qset->rxqs); in fun_up()
798 rcu_assign_pointer(fp->xdpqs, qset->xdpqs); in fun_up()
804 if (fp->rss_cfg) { in fun_up()
805 err = fun_config_rss(dev, fp->hash_algo, fp->rss_key, in fun_up()
806 fp->indir_table, FUN_ADMIN_SUBOP_CREATE); in fun_up()
808 /* The non-RSS case has only 1 queue. */ in fun_up()
809 err = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_VI, dev->dev_port, in fun_up()
811 qset->rxqs[0]->hw_cqid); in fun_up()
828 fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port); in fun_up()
838 .nrxqs = netdev->real_num_rx_queues, in funeth_open()
839 .ntxqs = netdev->real_num_tx_queues, in funeth_open()
840 .nxdpqs = fp->num_xdpqs, in funeth_open()
841 .cq_depth = fp->cq_depth, in funeth_open()
842 .rq_depth = fp->rq_depth, in funeth_open()
843 .sq_depth = fp->sq_depth, in funeth_open()
877 stats->tx_packets = fp->tx_packets; in fun_get_stats64()
878 stats->tx_bytes = fp->tx_bytes; in fun_get_stats64()
879 stats->tx_dropped = fp->tx_dropped; in fun_get_stats64()
881 stats->rx_packets = fp->rx_packets; in fun_get_stats64()
882 stats->rx_bytes = fp->rx_bytes; in fun_get_stats64()
883 stats->rx_dropped = fp->rx_dropped; in fun_get_stats64()
886 rxqs = rcu_dereference(fp->rxqs); in fun_get_stats64()
888 goto unlock; in fun_get_stats64()
890 for (i = 0; i < netdev->real_num_tx_queues; i++) { in fun_get_stats64()
893 FUN_QSTAT_READ(fp->txqs[i], start, txs); in fun_get_stats64()
894 stats->tx_packets += txs.tx_pkts; in fun_get_stats64()
895 stats->tx_bytes += txs.tx_bytes; in fun_get_stats64()
896 stats->tx_dropped += txs.tx_map_err; in fun_get_stats64()
899 for (i = 0; i < netdev->real_num_rx_queues; i++) { in fun_get_stats64()
903 stats->rx_packets += rxs.rx_pkts; in fun_get_stats64()
904 stats->rx_bytes += rxs.rx_bytes; in fun_get_stats64()
905 stats->rx_dropped += rxs.rx_map_err + rxs.rx_mem_drops; in fun_get_stats64()
908 xdpqs = rcu_dereference(fp->xdpqs); in fun_get_stats64()
910 goto unlock; in fun_get_stats64()
912 for (i = 0; i < fp->num_xdpqs; i++) { in fun_get_stats64()
916 stats->tx_packets += txs.tx_pkts; in fun_get_stats64()
917 stats->tx_bytes += txs.tx_bytes; in fun_get_stats64()
919 unlock: in fun_get_stats64()
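The stats path above snapshots per-queue counters under RCU; FUN_QSTAT_READ() presumably wraps the standard u64_stats sequence loop so 64-bit counters read tear-free on 32-bit machines. A minimal sketch of that generic pattern, with illustrative structure and field names:

	#include <linux/u64_stats_sync.h>

	struct q_stats {
		struct u64_stats_sync syncp;	/* writer-side sequence counter */
		u64 pkts;
		u64 bytes;
	};

	static void read_q_stats(const struct q_stats *s, u64 *pkts, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			*pkts  = s->pkts;
			*bytes = s->bytes;
		} while (u64_stats_fetch_retry(&s->syncp, start));
	}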
930 netdev->mtu = new_mtu; in fun_change_mtu()
940 if (!is_valid_ether_addr(saddr->sa_data)) in fun_set_macaddr()
941 return -EADDRNOTAVAIL; in fun_set_macaddr()
943 if (ether_addr_equal(netdev->dev_addr, saddr->sa_data)) in fun_set_macaddr()
947 ether_addr_to_u64(saddr->sa_data)); in fun_set_macaddr()
949 eth_hw_addr_set(netdev, saddr->sa_data); in fun_set_macaddr()
955 static const int keys[] = { in fun_get_port_attributes() local
964 u64 data[ARRAY_SIZE(keys)]; in fun_get_port_attributes()
968 rc = fun_port_read_cmds(fp, ARRAY_SIZE(keys), keys, data); in fun_get_port_attributes()
972 for (i = 0; i < ARRAY_SIZE(keys); i++) { in fun_get_port_attributes()
973 switch (keys[i]) { in fun_get_port_attributes()
984 return -EINVAL; in fun_get_port_attributes()
989 fp->port_caps = data[i]; in fun_get_port_attributes()
993 fp->advertising = data[i]; in fun_get_port_attributes()
997 netdev->mtu = data[i]; in fun_get_port_attributes()
1002 if (!(fp->port_caps & FUN_PORT_CAP_VPORT)) { in fun_get_port_attributes()
1008 fp->lane_attrs = data[0]; in fun_get_port_attributes()
1011 if (netdev->addr_assign_type == NET_ADDR_RANDOM) in fun_get_port_attributes()
1013 ether_addr_to_u64(netdev->dev_addr)); in fun_get_port_attributes()
1021 return copy_to_user(ifr->ifr_data, &fp->hwtstamp_cfg, in fun_hwtstamp_get()
1022 sizeof(fp->hwtstamp_cfg)) ? -EFAULT : 0; in fun_hwtstamp_get()
1030 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) in fun_hwtstamp_set()
1031 return -EFAULT; in fun_hwtstamp_set()
1057 return -ERANGE; in fun_hwtstamp_set()
1060 fp->hwtstamp_cfg = cfg; in fun_hwtstamp_set()
1061 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; in fun_hwtstamp_set()
1072 return -EOPNOTSUPP; in fun_ioctl()
1085 xdpqs = alloc_xdpqs(dev, nqs, fp->sq_depth, 0, FUN_QSTATE_INIT_FULL); in fun_enter_xdp()
1089 rxqs = rtnl_dereference(fp->rxqs); in fun_enter_xdp()
1090 for (i = 0; i < dev->real_num_rx_queues; i++) { in fun_enter_xdp()
1096 fp->num_xdpqs = nqs; in fun_enter_xdp()
1097 rcu_assign_pointer(fp->xdpqs, xdpqs); in fun_enter_xdp()
1100 while (i--) in fun_enter_xdp()
1107 /* Set the queues for non-XDP operation. */
1115 xdpqs = rtnl_dereference(fp->xdpqs); in fun_end_xdp()
1116 rcu_assign_pointer(fp->xdpqs, NULL); in fun_end_xdp()
1120 free_xdpqs(xdpqs, fp->num_xdpqs, 0, FUN_QSTATE_DESTROYED); in fun_end_xdp()
1121 fp->num_xdpqs = 0; in fun_end_xdp()
1123 rxqs = rtnl_dereference(fp->rxqs); in fun_end_xdp()
1124 for (i = 0; i < dev->real_num_rx_queues; i++) in fun_end_xdp()
1129 (PAGE_SIZE - FUN_XDP_HEADROOM - VLAN_ETH_HLEN - FUN_RX_TAILROOM)
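XDP_MAX_MTU above caps the MTU so that a received frame plus the headroom reserved for the XDP program and the driver's tailroom still fits in a single page. Purely for illustration (the real constant values live in the driver headers): with 4 KiB pages, 192 bytes of XDP headroom, VLAN_ETH_HLEN of 18 and, say, 320 bytes of tailroom, the cap would be 4096 - 192 - 18 - 320 = 3566 bytes.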
1133 struct bpf_prog *old_prog, *prog = xdp->prog; in fun_xdp_setup()
1138 if (prog && dev->mtu > XDP_MAX_MTU) { in fun_xdp_setup()
1139 netdev_err(dev, "device MTU %u too large for XDP\n", dev->mtu); in fun_xdp_setup()
1140 NL_SET_ERR_MSG_MOD(xdp->extack, in fun_xdp_setup()
1142 return -EINVAL; in fun_xdp_setup()
1146 fp->num_xdpqs = prog ? num_online_cpus() : 0; in fun_xdp_setup()
1147 } else if (prog && !fp->xdp_prog) { in fun_xdp_setup()
1150 NL_SET_ERR_MSG_MOD(xdp->extack, in fun_xdp_setup()
1154 } else if (!prog && fp->xdp_prog) { in fun_xdp_setup()
1157 struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs); in fun_xdp_setup()
1159 for (i = 0; i < dev->real_num_rx_queues; i++) in fun_xdp_setup()
1160 WRITE_ONCE(rxqs[i]->xdp_prog, prog); in fun_xdp_setup()
1163 dev->max_mtu = prog ? XDP_MAX_MTU : FUN_MAX_MTU; in fun_xdp_setup()
1164 old_prog = xchg(&fp->xdp_prog, prog); in fun_xdp_setup()
1173 switch (xdp->command) { in fun_xdp()
1177 return -EINVAL; in fun_xdp()
1185 return &fp->dl_port; in fun_get_devlink_port()
1190 if (ed->num_vports) in fun_init_vports()
1191 return -EINVAL; in fun_init_vports()
1193 ed->vport_info = kvcalloc(n, sizeof(*ed->vport_info), GFP_KERNEL); in fun_init_vports()
1194 if (!ed->vport_info) in fun_init_vports()
1195 return -ENOMEM; in fun_init_vports()
1196 ed->num_vports = n; in fun_init_vports()
1202 kvfree(ed->vport_info); in fun_free_vports()
1203 ed->vport_info = NULL; in fun_free_vports()
1204 ed->num_vports = 0; in fun_free_vports()
1210 if (!ed->vport_info || vport >= ed->num_vports) in fun_get_vport()
1213 return ed->vport_info + vport; in fun_get_vport()
1220 struct fun_dev *fdev = fp->fdev; in fun_set_vf_mac()
1223 int rc = -EINVAL; in fun_set_vf_mac()
1226 return -EINVAL; in fun_set_vf_mac()
1228 mutex_lock(&ed->state_mutex); in fun_set_vf_mac()
1231 goto unlock; in fun_set_vf_mac()
1237 ether_addr_copy(vi->mac, mac); in fun_set_vf_mac()
1238 unlock: in fun_set_vf_mac()
1239 mutex_unlock(&ed->state_mutex); in fun_set_vf_mac()
1248 struct fun_dev *fdev = fp->fdev; in fun_set_vf_vlan()
1251 int rc = -EINVAL; in fun_set_vf_vlan()
1254 return -EINVAL; in fun_set_vf_vlan()
1257 return -EINVAL; in fun_set_vf_vlan()
1259 mutex_lock(&ed->state_mutex); in fun_set_vf_vlan()
1262 goto unlock; in fun_set_vf_vlan()
1268 vi->vlan = vlan; in fun_set_vf_vlan()
1269 vi->qos = qos; in fun_set_vf_vlan()
1270 vi->vlan_proto = vlan_proto; in fun_set_vf_vlan()
1272 unlock: in fun_set_vf_vlan()
1273 mutex_unlock(&ed->state_mutex); in fun_set_vf_vlan()
1282 struct fun_dev *fdev = fp->fdev; in fun_set_vf_rate()
1285 int rc = -EINVAL; in fun_set_vf_rate()
1288 return -EINVAL; in fun_set_vf_rate()
1290 mutex_lock(&ed->state_mutex); in fun_set_vf_rate()
1293 goto unlock; in fun_set_vf_rate()
1298 vi->max_rate = max_tx_rate; in fun_set_vf_rate()
1299 unlock: in fun_set_vf_rate()
1300 mutex_unlock(&ed->state_mutex); in fun_set_vf_rate()
1308 struct fun_ethdev *ed = to_fun_ethdev(fp->fdev); in fun_get_vf_config()
1311 mutex_lock(&ed->state_mutex); in fun_get_vf_config()
1314 goto unlock; in fun_get_vf_config()
1317 ivi->vf = vf; in fun_get_vf_config()
1318 ether_addr_copy(ivi->mac, vi->mac); in fun_get_vf_config()
1319 ivi->vlan = vi->vlan; in fun_get_vf_config()
1320 ivi->qos = vi->qos; in fun_get_vf_config()
1321 ivi->vlan_proto = vi->vlan_proto; in fun_get_vf_config()
1322 ivi->max_tx_rate = vi->max_rate; in fun_get_vf_config()
1323 ivi->spoofchk = vi->spoofchk; in fun_get_vf_config()
1324 unlock: in fun_get_vf_config()
1325 mutex_unlock(&ed->state_mutex); in fun_get_vf_config()
1326 return vi ? 0 : -EINVAL; in fun_get_vf_config()
1334 xa_destroy(&fp->irqs); in fun_uninit()
1368 for (i = 0; i < fp->indir_table_nentries; i++) in fun_dflt_rss_indir()
1369 fp->indir_table[i] = ethtool_rxfh_indir_default(i, nrx); in fun_dflt_rss_indir()
1381 if (!fp->rss_cfg) in fun_reset_rss_indir()
1387 fp->indir_table_nentries = rounddown(FUN_ETH_RSS_MAX_INDIR_ENT, nrx); in fun_reset_rss_indir()
1404 if (!fp->rss_cfg) in fun_rss_set_qnum()
1408 for (i = 0; i < fp->indir_table_nentries; i++) in fun_rss_set_qnum()
1409 if (fp->indir_table[i] >= nrx) in fun_rss_set_qnum()
1412 if (i >= fp->indir_table_nentries) in fun_rss_set_qnum()
1416 memcpy(old_lut, fp->indir_table, sizeof(old_lut)); in fun_rss_set_qnum()
1417 oldsz = fp->indir_table_nentries; in fun_rss_set_qnum()
1420 err = fun_config_rss(dev, fp->hash_algo, fp->rss_key, in fun_rss_set_qnum()
1421 fp->indir_table, FUN_ADMIN_SUBOP_MODIFY); in fun_rss_set_qnum()
1425 memcpy(fp->indir_table, old_lut, sizeof(old_lut)); in fun_rss_set_qnum()
1426 fp->indir_table_nentries = oldsz; in fun_rss_set_qnum()
1438 size_t size = sizeof(fp->rss_key) + sizeof(fp->indir_table); in fun_init_rss()
1440 fp->rss_hw_id = FUN_HCI_ID_INVALID; in fun_init_rss()
1441 if (!(fp->port_caps & FUN_PORT_CAP_OFFLOADS)) in fun_init_rss()
1444 fp->rss_cfg = dma_alloc_coherent(&fp->pdev->dev, size, in fun_init_rss()
1445 &fp->rss_dma_addr, GFP_KERNEL); in fun_init_rss()
1446 if (!fp->rss_cfg) in fun_init_rss()
1447 return -ENOMEM; in fun_init_rss()
1449 fp->hash_algo = FUN_ETH_RSS_ALG_TOEPLITZ; in fun_init_rss()
1450 netdev_rss_key_fill(fp->rss_key, sizeof(fp->rss_key)); in fun_init_rss()
1451 fun_reset_rss_indir(dev, dev->real_num_rx_queues); in fun_init_rss()
1457 if (fp->rss_cfg) { in fun_free_rss()
1458 dma_free_coherent(&fp->pdev->dev, in fun_free_rss()
1459 sizeof(fp->rss_key) + sizeof(fp->indir_table), in fun_free_rss()
1460 fp->rss_cfg, fp->rss_dma_addr); in fun_free_rss()
1461 fp->rss_cfg = NULL; in fun_free_rss()
1469 if (nrx != netdev->real_num_rx_queues) { in fun_set_ring_count()
1479 if (!(fp->port_caps & FUN_PORT_CAP_STATS)) in fun_init_stats_area()
1485 fp->stats = dma_alloc_coherent(&fp->pdev->dev, nstats * sizeof(u64), in fun_init_stats_area()
1486 &fp->stats_dma_addr, GFP_KERNEL); in fun_init_stats_area()
1487 if (!fp->stats) in fun_init_stats_area()
1488 return -ENOMEM; in fun_init_stats_area()
1496 if (fp->stats) { in fun_free_stats_area()
1498 dma_free_coherent(&fp->pdev->dev, nstats * sizeof(u64), in fun_free_stats_area()
1499 fp->stats, fp->stats_dma_addr); in fun_free_stats_area()
1500 fp->stats = NULL; in fun_free_stats_area()
1507 struct devlink *dl = priv_to_devlink(fp->fdev); in fun_dl_port_register()
1511 if (fp->port_caps & FUN_PORT_CAP_VPORT) { in fun_dl_port_register()
1513 idx = fp->lport; in fun_dl_port_register()
1515 idx = netdev->dev_port; in fun_dl_port_register()
1517 attrs.lanes = fp->lane_attrs & 7; in fun_dl_port_register()
1518 if (fp->lane_attrs & FUN_PORT_LANE_SPLIT) { in fun_dl_port_register()
1520 attrs.phys.port_number = fp->lport & ~3; in fun_dl_port_register()
1521 attrs.phys.split_subport_number = fp->lport & 3; in fun_dl_port_register()
1523 attrs.phys.port_number = fp->lport; in fun_dl_port_register()
1527 devlink_port_attrs_set(&fp->dl_port, &attrs); in fun_dl_port_register()
1529 return devlink_port_register(dl, &fp->dl_port, idx); in fun_dl_port_register()
1538 if (ed->num_ports > 1 || is_kdump_kernel()) { in fun_max_qs()
1544 neth = fun_get_res_count(&ed->fdev, FUN_ADMIN_OP_ETH); in fun_max_qs()
1551 * - At least 1 Rx and 1 Tx queue. in fun_max_qs()
1552 * - At most 1 Rx/Tx queue per core. in fun_max_qs()
1553 * - Each Rx/Tx queue needs 1 SQ. in fun_max_qs()
1555 *ntx = min(ed->nsqs_per_port - 1, num_online_cpus()); in fun_max_qs()
1568 ntx = min(dev->num_tx_queues, FUN_DFLT_QUEUES); in fun_queue_defaults()
1569 nrx = min(dev->num_rx_queues, FUN_DFLT_QUEUES); in fun_queue_defaults()
1572 nrx = min(nrx, nsqs - ntx); in fun_queue_defaults()
1575 ntx = min(ntx, nsqs - nrx); in fun_queue_defaults()
1594 newqs->nrxqs = dev->real_num_rx_queues; in fun_replace_queues()
1595 newqs->ntxqs = dev->real_num_tx_queues; in fun_replace_queues()
1596 newqs->nxdpqs = fp->num_xdpqs; in fun_replace_queues()
1597 newqs->state = FUN_QSTATE_INIT_SW; in fun_replace_queues()
1615 newqs->state = FUN_QSTATE_DESTROYED; in fun_replace_queues()
1628 unsigned int keep_tx = min(dev->real_num_tx_queues, ntx); in fun_change_num_queues()
1629 unsigned int keep_rx = min(dev->real_num_rx_queues, nrx); in fun_change_num_queues()
1632 .rxqs = rtnl_dereference(fp->rxqs), in fun_change_num_queues()
1633 .txqs = fp->txqs, in fun_change_num_queues()
1634 .nrxqs = dev->real_num_rx_queues, in fun_change_num_queues()
1635 .ntxqs = dev->real_num_tx_queues, in fun_change_num_queues()
1645 .cq_depth = fp->cq_depth, in fun_change_num_queues()
1646 .rq_depth = fp->rq_depth, in fun_change_num_queues()
1647 .sq_depth = fp->sq_depth, in fun_change_num_queues()
1662 memcpy(newqs.txqs, fp->txqs, keep_tx * sizeof(*fp->txqs)); in fun_change_num_queues()
1664 if (nrx < dev->real_num_rx_queues) { in fun_change_num_queues()
1669 for (i = nrx; i < dev->real_num_rx_queues; i++) in fun_change_num_queues()
1670 fun_disable_one_irq(container_of(oldqs.rxqs[i]->napi, in fun_change_num_queues()
1676 if (ntx < dev->real_num_tx_queues) in fun_change_num_queues()
1679 rcu_assign_pointer(fp->rxqs, newqs.rxqs); in fun_change_num_queues()
1680 fp->txqs = newqs.txqs; in fun_change_num_queues()
1683 if (ntx > dev->real_num_tx_queues) in fun_change_num_queues()
1686 if (nrx > dev->real_num_rx_queues) { in fun_change_num_queues()
1693 fun_disable_one_irq(oldqs.txqs[i]->irq); in fun_change_num_queues()
1701 fun_disable_one_irq(newqs.txqs[i]->irq); in fun_change_num_queues()
1712 struct fun_dev *fdev = &ed->fdev; in fun_create_netdev()
1724 rc = -ENOMEM; in fun_create_netdev()
1728 netdev->dev_port = portid; in fun_create_netdev()
1729 fun_queue_defaults(netdev, ed->nsqs_per_port); in fun_create_netdev()
1732 fp->fdev = fdev; in fun_create_netdev()
1733 fp->pdev = to_pci_dev(fdev->dev); in fun_create_netdev()
1734 fp->netdev = netdev; in fun_create_netdev()
1735 xa_init(&fp->irqs); in fun_create_netdev()
1736 fp->rx_irq_ofst = ntx; in fun_create_netdev()
1737 seqcount_init(&fp->link_seq); in fun_create_netdev()
1739 fp->lport = INVALID_LPORT; in fun_create_netdev()
1762 SET_NETDEV_DEV(netdev, fdev->dev); in fun_create_netdev()
1763 netdev->netdev_ops = &fun_netdev_ops; in fun_create_netdev()
1765 netdev->hw_features = NETIF_F_SG | NETIF_F_RXHASH | NETIF_F_RXCSUM; in fun_create_netdev()
1766 if (fp->port_caps & FUN_PORT_CAP_OFFLOADS) in fun_create_netdev()
1767 netdev->hw_features |= NETIF_F_HW_CSUM | TSO_FLAGS; in fun_create_netdev()
1768 if (fp->port_caps & FUN_PORT_CAP_ENCAP_OFFLOADS) in fun_create_netdev()
1769 netdev->hw_features |= GSO_ENCAP_FLAGS; in fun_create_netdev()
1771 netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA; in fun_create_netdev()
1772 netdev->vlan_features = netdev->features & VLAN_FEAT; in fun_create_netdev()
1773 netdev->mpls_features = netdev->vlan_features; in fun_create_netdev()
1774 netdev->hw_enc_features = netdev->hw_features; in fun_create_netdev()
1776 netdev->min_mtu = ETH_MIN_MTU; in fun_create_netdev()
1777 netdev->max_mtu = FUN_MAX_MTU; in fun_create_netdev()
1782 fp->sq_depth = min(SQ_DEPTH, fdev->q_depth); in fun_create_netdev()
1783 fp->cq_depth = min(CQ_DEPTH, fdev->q_depth); in fun_create_netdev()
1784 fp->rq_depth = min_t(unsigned int, RQ_DEPTH, fdev->q_depth); in fun_create_netdev()
1785 fp->rx_coal_usec = CQ_INTCOAL_USEC; in fun_create_netdev()
1786 fp->rx_coal_count = CQ_INTCOAL_NPKT; in fun_create_netdev()
1787 fp->tx_coal_usec = SQ_INTCOAL_USEC; in fun_create_netdev()
1788 fp->tx_coal_count = SQ_INTCOAL_NPKT; in fun_create_netdev()
1789 fp->cq_irq_db = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count); in fun_create_netdev()
1795 fp->ktls_id = FUN_HCI_ID_INVALID; in fun_create_netdev()
1799 ed->netdevs[portid] = netdev; in fun_create_netdev()
1804 devlink_port_type_eth_set(&fp->dl_port, netdev); in fun_create_netdev()
1809 ed->netdevs[portid] = NULL; in fun_create_netdev()
1811 devlink_port_unregister(&fp->dl_port); in fun_create_netdev()
1821 dev_err(fdev->dev, "couldn't allocate port %u, error %d", portid, rc); in fun_create_netdev()
1830 devlink_port_type_clear(&fp->dl_port); in fun_destroy_netdev()
1832 devlink_port_unregister(&fp->dl_port); in fun_destroy_netdev()
1842 struct fun_dev *fd = &ed->fdev; in fun_create_ports()
1846 ed->nsqs_per_port = min(fd->num_irqs - 1, in fun_create_ports()
1847 fd->kern_end_qid - 2) / nports; in fun_create_ports()
1848 if (ed->nsqs_per_port < 2) { in fun_create_ports()
1849 dev_err(fd->dev, "Too few SQs for %u ports", nports); in fun_create_ports()
1850 return -EINVAL; in fun_create_ports()
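The per-port SQ budget computed above is min(num_irqs - 1, kern_end_qid - 2) / nports, and each port must end up with at least 2. A worked example with made-up numbers: 65 device IRQs and a kern_end_qid of 130 spread over 4 ports gives min(64, 128) / 4 = 16 SQs per port, which passes the check.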
1853 ed->netdevs = kcalloc(nports, sizeof(*ed->netdevs), GFP_KERNEL); in fun_create_ports()
1854 if (!ed->netdevs) in fun_create_ports()
1855 return -ENOMEM; in fun_create_ports()
1857 ed->num_ports = nports; in fun_create_ports()
1868 fun_destroy_netdev(ed->netdevs[--i]); in fun_create_ports()
1869 kfree(ed->netdevs); in fun_create_ports()
1870 ed->netdevs = NULL; in fun_create_ports()
1871 ed->num_ports = 0; in fun_create_ports()
1879 for (i = 0; i < ed->num_ports; i++) in fun_destroy_ports()
1880 fun_destroy_netdev(ed->netdevs[i]); in fun_destroy_ports()
1882 kfree(ed->netdevs); in fun_destroy_ports()
1883 ed->netdevs = NULL; in fun_destroy_ports()
1884 ed->num_ports = 0; in fun_destroy_ports()
1890 unsigned int port_idx = be16_to_cpu(notif->id); in fun_update_link_state()
1894 if (port_idx >= ed->num_ports) in fun_update_link_state()
1897 netdev = ed->netdevs[port_idx]; in fun_update_link_state()
1900 write_seqcount_begin(&fp->link_seq); in fun_update_link_state()
1901 fp->link_speed = be32_to_cpu(notif->speed) * 10; /* units of 10 Mbps -> Mbps */ in fun_update_link_state()
1902 fp->active_fc = notif->flow_ctrl; in fun_update_link_state()
1903 fp->active_fec = notif->fec; in fun_update_link_state()
1904 fp->xcvr_type = notif->xcvr_type; in fun_update_link_state()
1905 fp->link_down_reason = notif->link_down_reason; in fun_update_link_state()
1906 fp->lp_advertising = be64_to_cpu(notif->lp_advertising); in fun_update_link_state()
1908 if ((notif->link_state | notif->missed_events) & FUN_PORT_FLAG_MAC_DOWN) in fun_update_link_state()
1910 if (notif->link_state & FUN_PORT_FLAG_MAC_UP) in fun_update_link_state()
1913 write_seqcount_end(&fp->link_seq); in fun_update_link_state()
1920 u8 op = ((struct fun_admin_rsp_common *)entry)->op; in fun_event_cb()
1925 if (rsp->subop == FUN_ADMIN_SUBOP_NOTIFY) { in fun_event_cb()
1927 } else if (rsp->subop == FUN_ADMIN_SUBOP_RES_COUNT) { in fun_event_cb()
1930 if (r->count.data) in fun_event_cb()
1931 set_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags); in fun_event_cb()
1933 set_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags); in fun_event_cb()
1936 dev_info(fdev->dev, "adminq event unexpected op %u subop %u", in fun_event_cb()
1937 op, rsp->subop); in fun_event_cb()
1940 dev_info(fdev->dev, "adminq event unexpected op %u", op); in fun_event_cb()
1950 if (test_and_clear_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags)) in fun_service_cb()
1953 if (!test_and_clear_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags)) in fun_service_cb()
1957 if (rc < 0 || rc == ed->num_ports) in fun_service_cb()
1960 if (ed->num_ports) in fun_service_cb()
1974 dev_warn(&pdev->dev, in funeth_sriov_configure()
1975 "Cannot disable SR-IOV while VFs are assigned\n"); in funeth_sriov_configure()
1976 return -EPERM; in funeth_sriov_configure()
1979 mutex_lock(&ed->state_mutex); in funeth_sriov_configure()
1981 mutex_unlock(&ed->state_mutex); in funeth_sriov_configure()
1990 mutex_lock(&ed->state_mutex); in funeth_sriov_configure()
1992 mutex_unlock(&ed->state_mutex); in funeth_sriov_configure()
2018 devlink = fun_devlink_alloc(&pdev->dev); in funeth_probe()
2020 dev_err(&pdev->dev, "devlink alloc failed\n"); in funeth_probe()
2021 return -ENOMEM; in funeth_probe()
2025 mutex_init(&ed->state_mutex); in funeth_probe()
2027 fdev = &ed->fdev; in funeth_probe()
2045 mutex_destroy(&ed->state_mutex); in funeth_probe()
2067 mutex_destroy(&ed->state_mutex); in funeth_remove()