Lines Matching +full:num +full:- +full:ports
7 * Copyright (c) 2003-2016 Cavium, Inc.
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
68 /* Max Txqs: Half for each of the two ports :max_iq/2 */
74 /* Max Rxqs: Half for each of the two ports :max_oq/2 */
80 /* Num of desc for rx rings */
83 /* Num of desc for tx rings */
97 /* Max Txqs: Half for each of the two ports :max_iq/2 */
103 /* Max Rxqs: Half for each of the two ports :max_oq/2 */
109 /* Num of desc for rx rings */
112 /* Num of desc for tx rings */
176 /* Max Txqs: Half for each of the two ports :max_iq/2 */
182 /* Max Rxqs: Half for each of the two ports :max_oq/2 */
188 /* Num of desc for rx rings */
191 /* Num of desc for tx rings */
205 /* Max Txqs: Half for each of the two ports :max_iq/2 */
211 /* Max Rxqs: Half for each of the two ports :max_oq/2 */
217 /* Num of desc for rx rings */
220 /* Num of desc for tx rings */
234 /* Max Txqs: Half for each of the two ports :max_iq/2 */
240 /* Max Rxqs: Half for each of the two ports :max_oq/2 */
246 /* Num of desc for rx rings */
249 /* Num of desc for tx rings */
263 /* Max Txqs: Half for each of the two ports :max_iq/2 */
269 /* Max Rxqs: Half for each of the two ports :max_oq/2 */
275 /* Num of desc for rx rings */
278 /* Num of desc for tx rings */
342 /* Max Txqs: Half for each of the two ports :max_iq/2 */
348 /* Max Rxqs: Half for each of the two ports :max_oq/2 */
354 /* Num of desc for rx rings */
357 /* Num of desc for tx rings */
371 /* Max Txqs: Half for each of the two ports :max_iq/2 */
377 /* Max Rxqs: Half for each of the two ports :max_oq/2 */
383 /* Num of desc for rx rings */
386 /* Num of desc for tx rings */
445 /* Max Txqs: Half for each of the two ports :max_iq/2 */
451 /* Max Rxqs: Half for each of the two ports :max_oq/2 */
457 /* Num of desc for rx rings */
460 /* Num of desc for tx rings */
474 /* Max Txqs: Half for each of the two ports :max_iq/2 */
480 /* Max Rxqs: Half for each of the two ports :max_oq/2 */
486 /* Num of desc for rx rings */
489 /* Num of desc for tx rings */
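The repeated comments above come from the driver's per-chip default configuration blocks; each one divides the chip-wide queue counts evenly between the two physical ports and sets the descriptor counts per ring. A minimal userspace sketch of that split, assuming illustrative limits and hypothetical names (the driver's real defaults live in its per-chip octeon_config structures):

#include <stdio.h>

#define MAX_IQ    64            /* assumed chip-wide input-queue limit  */
#define MAX_OQ    64            /* assumed chip-wide output-queue limit */
#define NUM_PORTS 2

struct port_queue_cfg {
	unsigned int max_txqs;  /* per-port Tx queues: MAX_IQ / NUM_PORTS */
	unsigned int max_rxqs;  /* per-port Rx queues: MAX_OQ / NUM_PORTS */
	unsigned int tx_descs;  /* descriptors per Tx ring                */
	unsigned int rx_descs;  /* descriptors per Rx ring                */
};

int main(void)
{
	struct port_queue_cfg cfg = {
		.max_txqs = MAX_IQ / NUM_PORTS,
		.max_rxqs = MAX_OQ / NUM_PORTS,
		.tx_descs = 512,   /* illustrative ring sizes only */
		.rx_descs = 512,
	};

	printf("per-port: %u txqs, %u rxqs\n", cfg.max_txqs, cfg.max_rxqs);
	return 0;
}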
531 "BEGIN", "PCI-ENABLE-DONE", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
532 "IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
533 "DROQ-INIT-DONE", "MBOX-SETUP-DONE", "MSIX-ALLOC-VECTOR-DONE",
534 "INTR-SET-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
535 "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
554 if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1)) in oct_set_config_info()
572 u32 oct_id = oct->octeon_id; in __retrieve_octeon_config_info()
577 if (oct->chip_id == OCTEON_CN66XX) { in __retrieve_octeon_config_info()
579 } else if ((oct->chip_id == OCTEON_CN68XX) && in __retrieve_octeon_config_info()
582 } else if ((oct->chip_id == OCTEON_CN68XX) && in __retrieve_octeon_config_info()
585 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { in __retrieve_octeon_config_info()
587 } else if (oct->chip_id == OCTEON_CN23XX_VF_VID) { in __retrieve_octeon_config_info()
599 switch (oct->chip_id) { in __verify_octeon_config_info()
622 dev_err(&oct->pci_dev->dev, "Configuration verification failed\n"); in oct_get_config_info()
641 return oct_dev_app_str[app_mode - CVM_DRV_APP_START]; in get_oct_app_string()
642 return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START]; in get_oct_app_string()
650 if (oct->io_qmask.oq & BIT_ULL(i)) in octeon_free_device_mem()
651 vfree(oct->droq[i]); in octeon_free_device_mem()
655 if (oct->io_qmask.iq & BIT_ULL(i)) in octeon_free_device_mem()
656 vfree(oct->instr_queue[i]); in octeon_free_device_mem()
659 i = oct->octeon_id; in octeon_free_device_mem()
663 octeon_device_count--; in octeon_free_device_mem()
693 configsize += (8 - (configsize & 0x7)); in octeon_allocate_device_mem()
697 octdevsize += (8 - (octdevsize & 0x7)); in octeon_allocate_device_mem()
700 priv_size += (8 - (priv_size & 0x7)); in octeon_allocate_device_mem()
710 oct->priv = (void *)(buf + octdevsize); in octeon_allocate_device_mem()
711 oct->chip = (void *)(buf + octdevsize + priv_size); in octeon_allocate_device_mem()
712 oct->dispatch.dlist = (struct octeon_dispatch *) in octeon_allocate_device_mem()
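The allocate_device_mem fragment above carves a single allocation into the device struct, the driver-private area, the chip-specific struct and the dispatch list, rounding each piece up to an 8-byte boundary. A userspace sketch of that carve-up, assuming calloc in place of vzalloc; note that the fragment's "size += 8 - (size & 7)" form adds a full 8 bytes even when the size is already aligned, whereas the usual round-up below does not:

#include <stdlib.h>
#include <stdint.h>

#define ALIGN8(x) (((x) + 7u) & ~7u)

void *carve_regions(size_t dev_sz, size_t priv_sz, size_t chip_sz, size_t dlist_sz,
		    void **priv, void **chip, void **dlist)
{
	size_t a_dev = ALIGN8(dev_sz), a_priv = ALIGN8(priv_sz), a_chip = ALIGN8(chip_sz);
	uint8_t *buf = calloc(1, a_dev + a_priv + a_chip + dlist_sz);

	if (!buf)
		return NULL;
	*priv  = buf + a_dev;                   /* driver-private area follows */
	*chip  = buf + a_dev + a_priv;          /* chip-specific struct next   */
	*dlist = buf + a_dev + a_priv + a_chip; /* dispatch list at the tail   */
	return buf;  /* device struct sits at the front; free buf exactly once */
}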
742 spin_lock_init(&oct->pci_win_lock); in octeon_allocate_device()
743 spin_lock_init(&oct->mem_access_lock); in octeon_allocate_device()
745 oct->octeon_id = oct_idx; in octeon_allocate_device()
746 snprintf(oct->device_name, sizeof(oct->device_name), in octeon_allocate_device()
747 "LiquidIO%d", (oct->octeon_id)); in octeon_allocate_device()
753 * @param octeon_dev - pointer to the octeon device structure.
754 * @param bus - PCIe bus #
755 * @param dev - PCIe device #
756 * @param func - PCIe function #
757 * @param is_pf - TRUE for PF, FALSE for VF
765 oct->loc.bus = bus; in octeon_register_device()
766 oct->loc.dev = dev; in octeon_register_device()
767 oct->loc.func = func; in octeon_register_device()
769 oct->adapter_refcount = &adapter_refcounts[oct->octeon_id]; in octeon_register_device()
770 atomic_set(oct->adapter_refcount, 0); in octeon_register_device()
772 /* Like the reference count, the f/w state is shared 'per-adapter' */ in octeon_register_device()
773 oct->adapter_fw_state = &adapter_fw_states[oct->octeon_id]; in octeon_register_device()
774 atomic_set(oct->adapter_fw_state, FW_NEEDS_TO_BE_LOADED); in octeon_register_device()
777 for (idx = (int)oct->octeon_id - 1; idx >= 0; idx--) { in octeon_register_device()
779 dev_err(&oct->pci_dev->dev, in octeon_register_device()
783 atomic_inc(oct->adapter_refcount); in octeon_register_device()
789 if ((octeon_device[idx]->loc.bus == bus) && in octeon_register_device()
790 (octeon_device[idx]->loc.dev == dev)) { in octeon_register_device()
791 oct->adapter_refcount = in octeon_register_device()
792 octeon_device[idx]->adapter_refcount; in octeon_register_device()
793 oct->adapter_fw_state = in octeon_register_device()
794 octeon_device[idx]->adapter_fw_state; in octeon_register_device()
800 atomic_inc(oct->adapter_refcount); in octeon_register_device()
801 refcount = atomic_read(oct->adapter_refcount); in octeon_register_device()
803 dev_dbg(&oct->pci_dev->dev, "%s: %02x:%02x:%d refcount %u", __func__, in octeon_register_device()
804 oct->loc.bus, oct->loc.dev, oct->loc.func, refcount); in octeon_register_device()
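octeon_register_device(), shown above, makes PCI functions that sit on the same bus/device share one adapter reference count (and, in the driver, one firmware state). A hedged userspace sketch of that sharing, with a plain int counter and a fixed-size table standing in for the driver's atomics and device array:

#include <stddef.h>

#define MAX_DEVS 16

struct dev {
	int bus, slot, func;
	int *adapter_refcount;   /* points at own counter or an earlier sibling's */
	int own_refcount;
};

static struct dev *devs[MAX_DEVS];

void register_dev(struct dev *d, int idx)
{
	int i;

	d->adapter_refcount = &d->own_refcount;
	devs[idx] = d;

	/* Walk earlier registrations; a match on bus+slot means same adapter. */
	for (i = idx - 1; i >= 0; i--) {
		if (devs[i] && devs[i]->bus == d->bus && devs[i]->slot == d->slot) {
			d->adapter_refcount = devs[i]->adapter_refcount;
			break;
		}
	}
	(*d->adapter_refcount)++;   /* every function bumps the shared count */
}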
809 /** Deregister a device at de-initialization time.
810 * @param octeon_dev - pointer to the octeon device structure.
817 atomic_dec(oct->adapter_refcount); in octeon_deregister_device()
818 refcount = atomic_read(oct->adapter_refcount); in octeon_deregister_device()
820 dev_dbg(&oct->pci_dev->dev, "%s: %04d:%02d:%d refcount %u", __func__, in octeon_deregister_device()
821 oct->loc.bus, oct->loc.dev, oct->loc.func, refcount); in octeon_deregister_device()
836 oct->ioq_vector = vzalloc(size); in octeon_allocate_ioq_vector()
837 if (!oct->ioq_vector) in octeon_allocate_ioq_vector()
838 return -1; in octeon_allocate_ioq_vector()
840 ioq_vector = &oct->ioq_vector[i]; in octeon_allocate_ioq_vector()
841 ioq_vector->oct_dev = oct; in octeon_allocate_ioq_vector()
842 ioq_vector->iq_index = i; in octeon_allocate_ioq_vector()
843 ioq_vector->droq_index = i; in octeon_allocate_ioq_vector()
844 ioq_vector->mbox = oct->mbox[i]; in octeon_allocate_ioq_vector()
847 cpumask_set_cpu(cpu_num, &ioq_vector->affinity_mask); in octeon_allocate_ioq_vector()
849 if (oct->chip_id == OCTEON_CN23XX_PF_VID) in octeon_allocate_ioq_vector()
850 ioq_vector->ioq_num = i + oct->sriov_info.pf_srn; in octeon_allocate_ioq_vector()
852 ioq_vector->ioq_num = i; in octeon_allocate_ioq_vector()
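The ioq_vector fragment above allocates one descriptor per queue pair and pins each to a CPU. A sketch of the same shape, with cpumask handling reduced to a single round-robin CPU index and sysconf() standing in for the kernel's CPU accounting:

#include <stdlib.h>
#include <unistd.h>

struct ioq_vector {
	void *oct_dev;           /* back-pointer to the owning device */
	unsigned int iq_index;   /* Tx queue this vector services     */
	unsigned int droq_index; /* Rx queue this vector services     */
	int cpu;                 /* CPU the interrupt is pinned to    */
};

struct ioq_vector *alloc_ioq_vectors(void *dev, unsigned int num_ioqs)
{
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	struct ioq_vector *v = calloc(num_ioqs, sizeof(*v));
	unsigned int i;

	if (!v)
		return NULL;
	if (ncpus < 1)
		ncpus = 1;
	for (i = 0; i < num_ioqs; i++) {
		v[i].oct_dev = dev;
		v[i].iq_index = i;       /* queue pair i: one Tx and one Rx queue */
		v[i].droq_index = i;
		v[i].cpu = (int)(i % (unsigned long)ncpus);  /* round-robin affinity */
	}
	return v;
}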
861 vfree(oct->ioq_vector); in octeon_free_ioq_vector()
870 int numa_node = dev_to_node(&oct->pci_dev->dev); in octeon_setup_instr_queues()
880 oct->num_iqs = 0; in octeon_setup_instr_queues()
882 oct->instr_queue[0] = vzalloc_node(sizeof(*oct->instr_queue[0]), in octeon_setup_instr_queues()
884 if (!oct->instr_queue[0]) in octeon_setup_instr_queues()
885 oct->instr_queue[0] = in octeon_setup_instr_queues()
887 if (!oct->instr_queue[0]) in octeon_setup_instr_queues()
889 memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue)); in octeon_setup_instr_queues()
890 oct->instr_queue[0]->q_index = 0; in octeon_setup_instr_queues()
891 oct->instr_queue[0]->app_ctx = (void *)(size_t)0; in octeon_setup_instr_queues()
892 oct->instr_queue[0]->ifidx = 0; in octeon_setup_instr_queues()
895 txpciq.s.pkind = oct->pfvf_hsword.pkind; in octeon_setup_instr_queues()
900 vfree(oct->instr_queue[0]); in octeon_setup_instr_queues()
901 oct->instr_queue[0] = NULL; in octeon_setup_instr_queues()
905 oct->num_iqs++; in octeon_setup_instr_queues()
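The instruction-queue setup above first tries a NUMA-node-local allocation (vzalloc_node) and only falls back to a plain vzalloc if that fails. A generic sketch of the same preferred-then-fallback pattern; alloc_node_local() is a stand-in, not a real API:

#include <stdlib.h>
#include <string.h>

static void *alloc_node_local(size_t size, int node)
{
	(void)size;
	(void)node;    /* stand-in: pretend the node-local allocation failed */
	return NULL;
}

void *alloc_queue(size_t size, int numa_node)
{
	void *q = alloc_node_local(size, numa_node);  /* preferred: local node */

	if (!q)
		q = malloc(size);                     /* fallback: any node     */
	if (q)
		memset(q, 0, size);                   /* zero, as vzalloc does  */
	return q;
}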
914 int numa_node = dev_to_node(&oct->pci_dev->dev); in octeon_setup_output_queues()
928 oct->num_oqs = 0; in octeon_setup_output_queues()
929 oct->droq[0] = vzalloc_node(sizeof(*oct->droq[0]), numa_node); in octeon_setup_output_queues()
930 if (!oct->droq[0]) in octeon_setup_output_queues()
931 oct->droq[0] = vzalloc(sizeof(*oct->droq[0])); in octeon_setup_output_queues()
932 if (!oct->droq[0]) in octeon_setup_output_queues()
936 vfree(oct->droq[oq_no]); in octeon_setup_output_queues()
937 oct->droq[oq_no] = NULL; in octeon_setup_output_queues()
940 oct->num_oqs++; in octeon_setup_output_queues()
952 } else if (oct->chip_id == OCTEON_CN23XX_VF_VID) { in octeon_set_io_queues_off()
959 for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) { in octeon_set_io_queues_off()
968 loop--; in octeon_set_io_queues_off()
971 dev_err(&oct->pci_dev->dev, in octeon_set_io_queues_off()
974 return -1; in octeon_set_io_queues_off()
985 dev_err(&oct->pci_dev->dev, in octeon_set_io_queues_off()
987 return -1; in octeon_set_io_queues_off()
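The set_io_queues_off fragment polls a register until it reads back the expected value, giving up after a fixed number of tries. A hedged sketch of that bounded-poll shape; read_reg() is a stand-in for the real register read:

#include <stdio.h>

static unsigned int read_reg(void)
{
	return 0;   /* stand-in register read: always reports "quiescent" */
}

int wait_for_quiet(void)
{
	int loop = 1000;   /* bound the wait so a wedged device cannot hang us */

	while (read_reg() != 0 && loop)
		loop--;
	if (!loop) {
		fprintf(stderr, "queues did not go quiet\n");
		return -1;     /* same -1 error convention as the fragment */
	}
	return 0;
}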
1017 oct->dispatch.count = 0; in octeon_init_dispatch_list()
1020 oct->dispatch.dlist[i].opcode = 0; in octeon_init_dispatch_list()
1021 INIT_LIST_HEAD(&oct->dispatch.dlist[i].list); in octeon_init_dispatch_list()
1027 spin_lock_init(&oct->dispatch.lock); in octeon_init_dispatch_list()
1039 spin_lock_bh(&oct->dispatch.lock); in octeon_delete_dispatch_list()
1044 dispatch = &oct->dispatch.dlist[i].list; in octeon_delete_dispatch_list()
1045 while (dispatch->next != dispatch) { in octeon_delete_dispatch_list()
1046 temp = dispatch->next; in octeon_delete_dispatch_list()
1050 oct->dispatch.dlist[i].opcode = 0; in octeon_delete_dispatch_list()
1053 oct->dispatch.count = 0; in octeon_delete_dispatch_list()
1055 spin_unlock_bh(&oct->dispatch.lock); in octeon_delete_dispatch_list()
1074 spin_lock_bh(&octeon_dev->dispatch.lock); in octeon_get_dispatch()
1076 if (octeon_dev->dispatch.count == 0) { in octeon_get_dispatch()
1077 spin_unlock_bh(&octeon_dev->dispatch.lock); in octeon_get_dispatch()
1081 if (!(octeon_dev->dispatch.dlist[idx].opcode)) { in octeon_get_dispatch()
1082 spin_unlock_bh(&octeon_dev->dispatch.lock); in octeon_get_dispatch()
1086 if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) { in octeon_get_dispatch()
1087 fn = octeon_dev->dispatch.dlist[idx].dispatch_fn; in octeon_get_dispatch()
1090 &octeon_dev->dispatch.dlist[idx].list) { in octeon_get_dispatch()
1091 if (((struct octeon_dispatch *)dispatch)->opcode == in octeon_get_dispatch()
1094 dispatch)->dispatch_fn; in octeon_get_dispatch()
1100 spin_unlock_bh(&octeon_dev->dispatch.lock); in octeon_get_dispatch()
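octeon_get_dispatch(), shown above, combines opcode and subcode into one key, indexes a primary slot, and walks an overflow list hanging off that slot when the key collides. A sketch of the same two-level lookup, with list handling reduced to a simple singly linked chain and the slot count chosen arbitrarily:

#include <stddef.h>
#include <stdint.h>

#define DISPATCH_SLOTS 64u

typedef void (*dispatch_fn_t)(void *pkt, void *arg);

struct dispatch_ent {
	uint32_t opcode;              /* combined opcode+subcode key       */
	dispatch_fn_t fn;
	struct dispatch_ent *next;    /* overflow chain for colliding keys */
};

static struct dispatch_ent dtable[DISPATCH_SLOTS];

dispatch_fn_t get_dispatch(uint32_t combined_opcode)
{
	uint32_t idx = combined_opcode & (DISPATCH_SLOTS - 1);
	struct dispatch_ent *e;

	if (!dtable[idx].opcode)                    /* slot empty: nothing registered */
		return NULL;
	if (dtable[idx].opcode == combined_opcode)
		return dtable[idx].fn;              /* fast path: primary slot hit    */
	for (e = dtable[idx].next; e; e = e->next)  /* slow path: collision list      */
		if (e->opcode == combined_opcode)
			return e->fn;
	return NULL;
}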
1106 * octeon_id - id of the octeon device.
1107 * opcode - opcode for which driver should call the registered function
1108 * subcode - subcode for which driver should call the registered function
1109 * fn - The function to call when a packet with "opcode" arrives in
1111 * fn_arg - The argument to be passed when calling function "fn".
1133 spin_lock_bh(&oct->dispatch.lock); in octeon_register_dispatch_fn()
1135 if (oct->dispatch.dlist[idx].opcode == 0) { in octeon_register_dispatch_fn()
1136 oct->dispatch.dlist[idx].opcode = combined_opcode; in octeon_register_dispatch_fn()
1137 oct->dispatch.dlist[idx].dispatch_fn = fn; in octeon_register_dispatch_fn()
1138 oct->dispatch.dlist[idx].arg = fn_arg; in octeon_register_dispatch_fn()
1139 oct->dispatch.count++; in octeon_register_dispatch_fn()
1140 spin_unlock_bh(&oct->dispatch.lock); in octeon_register_dispatch_fn()
1144 spin_unlock_bh(&oct->dispatch.lock); in octeon_register_dispatch_fn()
1153 dev_dbg(&oct->pci_dev->dev, in octeon_register_dispatch_fn()
1159 dispatch->opcode = combined_opcode; in octeon_register_dispatch_fn()
1160 dispatch->dispatch_fn = fn; in octeon_register_dispatch_fn()
1161 dispatch->arg = fn_arg; in octeon_register_dispatch_fn()
1166 spin_lock_bh(&oct->dispatch.lock); in octeon_register_dispatch_fn()
1167 list_add(&dispatch->list, &oct->dispatch.dlist[idx].list); in octeon_register_dispatch_fn()
1168 oct->dispatch.count++; in octeon_register_dispatch_fn()
1169 spin_unlock_bh(&oct->dispatch.lock); in octeon_register_dispatch_fn()
1176 dev_err(&oct->pci_dev->dev, in octeon_register_dispatch_fn()
1190 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; in octeon_core_drv_init()
1201 if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) { in octeon_core_drv_init()
1202 dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n", in octeon_core_drv_init()
1203 atomic_read(&oct->status)); in octeon_core_drv_init()
1209 (u32)recv_pkt->rh.r_core_drv_init.app_mode), in octeon_core_drv_init()
1210 sizeof(app_name) - 1); in octeon_core_drv_init()
1211 oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode; in octeon_core_drv_init()
1212 if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) { in octeon_core_drv_init()
1213 oct->fw_info.max_nic_ports = in octeon_core_drv_init()
1214 (u32)recv_pkt->rh.r_core_drv_init.max_nic_ports; in octeon_core_drv_init()
1215 oct->fw_info.num_gmx_ports = in octeon_core_drv_init()
1216 (u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports; in octeon_core_drv_init()
1219 if (oct->fw_info.max_nic_ports < num_nic_ports) { in octeon_core_drv_init()
1220 dev_err(&oct->pci_dev->dev, in octeon_core_drv_init()
1221 "Config has more ports than firmware allows (%d > %d).\n", in octeon_core_drv_init()
1222 num_nic_ports, oct->fw_info.max_nic_ports); in octeon_core_drv_init()
1225 oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags; in octeon_core_drv_init()
1226 oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode; in octeon_core_drv_init()
1227 oct->pfvf_hsword.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode; in octeon_core_drv_init()
1229 oct->pfvf_hsword.pkind = recv_pkt->rh.r_core_drv_init.pkind; in octeon_core_drv_init()
1231 for (i = 0; i < oct->num_iqs; i++) in octeon_core_drv_init()
1232 oct->instr_queue[i]->txpciq.s.pkind = oct->pfvf_hsword.pkind; in octeon_core_drv_init()
1234 atomic_set(&oct->status, OCT_DEV_CORE_OK); in octeon_core_drv_init()
1236 cs = &core_setup[oct->octeon_id]; in octeon_core_drv_init()
1238 if (recv_pkt->buffer_size[0] != (sizeof(*cs) + OCT_DROQ_INFO_SIZE)) { in octeon_core_drv_init()
1239 dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n", in octeon_core_drv_init()
1241 recv_pkt->buffer_size[0]); in octeon_core_drv_init()
1245 recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE, sizeof(*cs)); in octeon_core_drv_init()
1247 strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME); in octeon_core_drv_init()
1248 strncpy(oct->boardinfo.serial_number, cs->board_serial_number, in octeon_core_drv_init()
1253 oct->boardinfo.major = cs->board_rev_major; in octeon_core_drv_init()
1254 oct->boardinfo.minor = cs->board_rev_minor; in octeon_core_drv_init()
1256 dev_info(&oct->pci_dev->dev, in octeon_core_drv_init()
1258 app_name, CVM_CAST64(cs->corefreq)); in octeon_core_drv_init()
1261 for (i = 0; i < recv_pkt->buffer_count; i++) in octeon_core_drv_init()
1262 recv_buffer_free(recv_pkt->buffer_ptr[i]); in octeon_core_drv_init()
1271 (oct->io_qmask.iq & BIT_ULL(q_no))) in octeon_get_tx_qsize()
1272 return oct->instr_queue[q_no]->max_count; in octeon_get_tx_qsize()
1274 return -1; in octeon_get_tx_qsize()
1280 (oct->io_qmask.oq & BIT_ULL(q_no))) in octeon_get_rx_qsize()
1281 return oct->droq[q_no]->max_count; in octeon_get_rx_qsize()
1282 return -1; in octeon_get_rx_qsize()
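The qsize helpers above consult a 64-bit mask of active queues before touching the per-queue structure, returning -1 for an out-of-range or inactive queue. A small sketch of that guard; BIT_ULL() is spelled out explicitly since this is plain userspace C:

#include <stdint.h>

#define MAX_QUEUES 64
#define BIT_ULL(n) (1ULL << (n))

struct queue { int max_count; };

struct dev_queues {
	uint64_t active_mask;           /* bit q set => queue q was created */
	struct queue *q[MAX_QUEUES];
};

int get_queue_size(const struct dev_queues *d, unsigned int q_no)
{
	if (q_no >= MAX_QUEUES || !(d->active_mask & BIT_ULL(q_no)))
		return -1;              /* matches the driver's -1 on bad/idle queue */
	return d->q[q_no]->max_count;
}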
1307 /* scratch register address is same in all the OCT-II and CN70XX models */
1311 * @param octeon_id - The id for which the octeon device pointer is required.
1329 spin_lock_irqsave(&oct->pci_win_lock, flags); in lio_pci_readq()
1335 if ((oct->chip_id == OCTEON_CN66XX) || in lio_pci_readq()
1336 (oct->chip_id == OCTEON_CN68XX) || in lio_pci_readq()
1337 (oct->chip_id == OCTEON_CN23XX_PF_VID)) in lio_pci_readq()
1339 writel(addrhi, oct->reg_list.pci_win_rd_addr_hi); in lio_pci_readq()
1342 readl(oct->reg_list.pci_win_rd_addr_hi); in lio_pci_readq()
1344 writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo); in lio_pci_readq()
1345 readl(oct->reg_list.pci_win_rd_addr_lo); in lio_pci_readq()
1347 val64 = readq(oct->reg_list.pci_win_rd_data); in lio_pci_readq()
1349 spin_unlock_irqrestore(&oct->pci_win_lock, flags); in lio_pci_readq()
1360 spin_lock_irqsave(&oct->pci_win_lock, flags); in lio_pci_writeq()
1362 writeq(addr, oct->reg_list.pci_win_wr_addr); in lio_pci_writeq()
1365 writel(val >> 32, oct->reg_list.pci_win_wr_data_hi); in lio_pci_writeq()
1367 readl(oct->reg_list.pci_win_wr_data_hi); in lio_pci_writeq()
1369 writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo); in lio_pci_writeq()
1371 spin_unlock_irqrestore(&oct->pci_win_lock, flags); in lio_pci_writeq()
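lio_pci_readq()/lio_pci_writeq(), shown above, access registers through an address window: the 64-bit target address is programmed in two 32-bit halves, each write is read back so the posted PCI write is flushed before the data register is touched, and the whole sequence is serialized under one lock. A userspace sketch of that pattern, with a struct of volatile fields standing in for a mapped BAR and a pthread mutex (initialized by the owner) standing in for the spinlock:

#include <stdint.h>
#include <pthread.h>

struct pci_window {
	volatile uint32_t rd_addr_lo, rd_addr_hi;
	volatile uint64_t rd_data;
	pthread_mutex_t lock;
};

uint64_t window_readq(struct pci_window *w, uint64_t addr)
{
	uint64_t val;

	pthread_mutex_lock(&w->lock);   /* one shared window, so serialize users */
	w->rd_addr_hi = (uint32_t)(addr >> 32);
	(void)w->rd_addr_hi;            /* read back: flush the posted write      */
	w->rd_addr_lo = (uint32_t)addr;
	(void)w->rd_addr_lo;            /* flush again before touching the data   */
	val = w->rd_data;
	pthread_mutex_unlock(&w->lock);
	return val;
}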
1415 * @param dev - octeon device pointer passed as a void *.
1425 return octeon_dev->octeon_id; in lio_get_device_id()
1426 return -1; in lio_get_device_id()
1437 pkts_pend = (u32)atomic_read(&droq->pkts_pending); in lio_enable_irq()
1438 writel(droq->pkt_count - pkts_pend, droq->pkts_sent_reg); in lio_enable_irq()
1439 droq->pkt_count = pkts_pend; in lio_enable_irq()
1440 oct = droq->oct_dev; in lio_enable_irq()
1443 spin_lock_bh(&iq->lock); in lio_enable_irq()
1444 writel(iq->pkts_processed, iq->inst_cnt_reg); in lio_enable_irq()
1445 iq->pkt_in_done -= iq->pkts_processed; in lio_enable_irq()
1446 iq->pkts_processed = 0; in lio_enable_irq()
1448 spin_unlock_bh(&iq->lock); in lio_enable_irq()
1449 oct = iq->oct_dev; in lio_enable_irq()
1456 writeq(CN23XX_INTR_RESEND, droq->pkts_sent_reg); in lio_enable_irq()
1459 instr_cnt = readq(iq->inst_cnt_reg); in lio_enable_irq()
1462 iq->inst_cnt_reg); in lio_enable_irq()
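lio_enable_irq(), shown above, re-arms interrupts by acknowledging the packets already handled (writing them back to the hardware counter), keeping only the still-pending count locally, and finally poking a resend bit so the NIC raises the interrupt again if work remains. A sketch of the Rx side of that bookkeeping, with the hardware counter modelled as a volatile field:

#include <stdint.h>

struct rxq {
	volatile uint32_t pkts_sent_reg;   /* write: acknowledge handled packets */
	uint32_t pkt_count;                /* packets seen since the last ack    */
	uint32_t pkts_pending;             /* packets not yet processed          */
};

void rearm_rx_irq(struct rxq *q)
{
	uint32_t pending = q->pkts_pending;

	/* Ack everything handled so far; keep only the pending count locally. */
	q->pkts_sent_reg = q->pkt_count - pending;
	q->pkt_count = pending;
}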