Lines Matching +full:0 +full:xcd00

46 module_param(use_threaded_interrupts, int, 0);
61 "this size. Use 0 to disable SGLs.");
81 if (ret != 0 || n > num_possible_cpus()) in io_queue_count_set()
207 #define NVMEQ_ENABLED 0
230 int npages; /* In the PRP list. 0 means small pool in use */
248 return 0; in nvme_dbbuf_dma_alloc()
265 return 0; in nvme_dbbuf_dma_alloc()
319 if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { in nvme_dbbuf_set()
399 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_admin_init_hctx()
401 WARN_ON(hctx_idx != 0); in nvme_admin_init_hctx()
402 WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); in nvme_admin_init_hctx()
405 return 0; in nvme_admin_init_hctx()
416 return 0; in nvme_init_hctx()
424 int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0; in nvme_init_request()
432 return 0; in nvme_init_request()
441 return 0; in queue_irq_offset()
450 for (i = 0, qoff = 0; i < set->nr_maps; i++) { in nvme_pci_map_queues()
472 return 0; in nvme_pci_map_queues()
484 next_tail = 0; in nvme_write_sq_db()
508 nvmeq->sq_tail = 0; in nvme_submit_cmd()
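Both resets above are the submission-queue tail wrapping around its ring: nvme_write_sq_db() and nvme_submit_cmd() bump the tail by one slot per command and wrap it to 0 at q_depth. A toy model of that bookkeeping (names and the 4-entry depth are illustrative):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Advance a ring index by one slot, wrapping to 0 at q_depth. */
    static uint16_t sq_advance(uint16_t tail, uint16_t q_depth)
    {
        if (++tail == q_depth)
            tail = 0;
        return tail;
    }

    int main(void)
    {
        uint16_t tail = 0;

        for (int i = 0; i < 9; i++) {
            printf("cmd %d submitted at tail %u\n", i, tail);
            tail = sq_advance(tail, 4);
        }
        assert(tail == 1); /* 9 commands into a 4-deep ring: 9 % 4 */
        return 0;
    }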
553 for (i = 0; i < iod->npages; i++) { in nvme_free_prps()
569 for (i = 0; i < iod->npages; i++) { in nvme_free_sgls()
602 if (iod->npages == 0) in nvme_unmap_data()
603 dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0], in nvme_unmap_data()
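This npages == 0 test pairs with the comment at line 230: zero means the whole descriptor list fit in a single object from the 256-byte small pool (created at line 2649), while a positive count means full controller pages were chained. A rough, hypothetical helper showing when a transfer's PRP list would still fit the small pool; it assumes a 4 KiB controller page and glosses over the fact that the first PRP travels in the command itself:

    #include <stdint.h>
    #include <stdio.h>

    #define CTRL_PAGE_SIZE 4096u /* assumed NVME_CTRL_PAGE_SIZE */
    #define SMALL_POOL_SZ   256u /* small-pool object size, per line 2649 */

    /* Number of 8-byte PRP entries needed to map xfer_len bytes. */
    static unsigned prp_entries(uint64_t xfer_len)
    {
        return (unsigned)((xfer_len + CTRL_PAGE_SIZE - 1) / CTRL_PAGE_SIZE);
    }

    int main(void)
    {
        uint64_t lens[] = { 8192, 64 * 1024, 1024 * 1024 };

        for (int i = 0; i < 3; i++) {
            unsigned n = prp_entries(lens[i]);

            printf("%8llu bytes: %3u entries -> %s pool\n",
                   (unsigned long long)lens[i], n,
                   n * 8 <= SMALL_POOL_SZ ? "small" : "page-sized");
        }
        return 0;
    }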
642 if (length <= 0) { in nvme_pci_setup_prps()
643 iod->first_dma = 0; in nvme_pci_setup_prps()
664 iod->npages = 0; in nvme_pci_setup_prps()
676 list[0] = prp_list; in nvme_pci_setup_prps()
678 i = 0; in nvme_pci_setup_prps()
686 prp_list[0] = old_prp_list[i - 1]; in nvme_pci_setup_prps()
694 if (length <= 0) in nvme_pci_setup_prps()
696 if (dma_len > 0) in nvme_pci_setup_prps()
698 if (unlikely(dma_len < 0)) in nvme_pci_setup_prps()
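Lines 642 through 698 are the PRP walk; the pivotal step is at line 686, where a filled PRP page is chained to a fresh one: the old page's last slot is overwritten with the new page's DMA address, and the data entry it displaced moves to slot 0 of the new page. A user-space model of just that hand-off, assuming a 4 KiB controller page (512 eight-byte entries); the names and the fake DMA address are illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ENTRIES_PER_PAGE 512u /* 4096 / 8 */

    /* Chain old_page to new_page, carrying over the displaced entry. */
    static void chain_new_page(uint64_t *old_page, uint64_t new_page_dma,
                               uint64_t *new_page)
    {
        new_page[0] = old_page[ENTRIES_PER_PAGE - 1];  /* displaced data entry */
        old_page[ENTRIES_PER_PAGE - 1] = new_page_dma; /* old tail now chains */
    }

    int main(void)
    {
        uint64_t *a = calloc(ENTRIES_PER_PAGE, sizeof(*a));
        uint64_t *b = calloc(ENTRIES_PER_PAGE, sizeof(*b));

        if (!a || !b)
            return 1;
        for (unsigned i = 0; i < ENTRIES_PER_PAGE; i++)
            a[i] = 0x1000u + i * 0x1000u; /* fake PRP entries */

        chain_new_page(a, 0xdeadb000u, b);
        printf("old tail -> 0x%llx (chain pointer), new[0] = 0x%llx\n",
               (unsigned long long)a[ENTRIES_PER_PAGE - 1],
               (unsigned long long)b[0]);
        free(a);
        free(b);
        return 0;
    }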
747 int i = 0; in nvme_pci_setup_sgls()
759 iod->npages = 0; in nvme_pci_setup_sgls()
771 nvme_pci_iod_list(req)[0] = sg_list; in nvme_pci_setup_sgls()
785 i = 0; in nvme_pci_setup_sgls()
793 } while (--entries > 0); in nvme_pci_setup_sgls()
809 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); in nvme_setup_prp_simple()
826 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); in nvme_setup_sgl_simple()
860 iod->dma_len = 0; in nvme_map_data()
900 rq_dma_dir(req), 0); in nvme_map_metadata()
921 iod->aborted = 0; in nvme_queue_rq()
923 iod->nents = 0; in nvme_queue_rq()
994 return nvmeq->dev->admin_tagset.tags[0]; in nvme_queue_tagset()
1034 nvmeq->cq_head = 0; in nvme_update_cq_head()
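The reset above is the completion-queue head wrapping; each wrap also flips the queue's phase bit (initialized to 1 in nvme_init_queue()), so entries left over from the previous lap no longer match the expected phase and are not consumed twice. A small model of that head-and-phase bookkeeping:

    #include <stdint.h>
    #include <stdio.h>

    struct cq_state {
        uint16_t head;
        uint8_t  phase; /* expected phase bit; starts at 1 on a fresh queue */
    };

    /* Consume one CQE: bump the head, flip the phase on wraparound. */
    static void cq_advance(struct cq_state *cq, uint16_t q_depth)
    {
        if (++cq->head == q_depth) {
            cq->head = 0;
            cq->phase ^= 1;
        }
    }

    int main(void)
    {
        struct cq_state cq = { .head = 0, .phase = 1 };

        for (int i = 0; i < 10; i++)
            cq_advance(&cq, 4);
        printf("10 completions on a 4-deep CQ: head=%u phase=%u\n",
               cq.head, cq.phase);
        return 0;
    }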
1043 int found = 0; in nvme_process_cq()
1100 return 0; in nvme_poll()
1112 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_pci_submit_async_event()
1127 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_delete_queue()
1150 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_alloc_cq()
1179 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in adapter_alloc_sq()
1198 "Abort status: 0x%x", nvme_req(req)->status); in abort_endio()
1238 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", in nvme_warn_reset()
1242 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", in nvme_warn_reset()
1326 if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { in nvme_timeout()
1348 blk_execute_rq_nowait(NULL, abort_req, 0, abort_endio); in nvme_timeout()
1401 return 0; in nvme_suspend_queue()
1408 for (i = dev->ctrl.queue_count - 1; i > 0; i--) in nvme_suspend_io_queues()
1414 struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_disable_admin_queue()
1434 for (i = dev->ctrl.queue_count - 1; i > 0; i--) { in nvme_reap_pending_cqes()
1478 return 0; in nvme_alloc_sq_cmds()
1489 return 0; in nvme_alloc_sq_cmds()
1497 return 0; in nvme_alloc_queue()
1512 nvmeq->cq_head = 0; in nvme_alloc_queue()
1518 return 0; in nvme_alloc_queue()
1545 nvmeq->sq_tail = 0; in nvme_init_queue()
1546 nvmeq->last_sq_tail = 0; in nvme_init_queue()
1547 nvmeq->cq_head = 0; in nvme_init_queue()
1550 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); in nvme_init_queue()
1575 return 0; in nvme_setup_io_queues_trylock()
1582 u16 vector = 0; in nvme_create_queue()
1591 vector = dev->num_vecs == 1 ? 0 : qid; in nvme_create_queue()
1600 if (result < 0) in nvme_create_queue()
1613 if (result < 0) in nvme_create_queue()
1693 return 0; in nvme_alloc_admin_tags()
1706 return 0; in nvme_remap_bar()
1707 if (size > pci_resource_len(pdev, 0)) in nvme_remap_bar()
1711 dev->bar = ioremap(pci_resource_start(pdev, 0), size); in nvme_remap_bar()
1713 dev->bar_mapped_size = 0; in nvme_remap_bar()
1719 return 0; in nvme_remap_bar()
1728 result = nvme_remap_bar(dev, db_bar_size(dev, 0)); in nvme_pci_configure_admin_queue()
1729 if (result < 0) in nvme_pci_configure_admin_queue()
1732 dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? in nvme_pci_configure_admin_queue()
1733 NVME_CAP_NSSRC(dev->ctrl.cap) : 0; in nvme_pci_configure_admin_queue()
1740 if (result < 0) in nvme_pci_configure_admin_queue()
1743 result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); in nvme_pci_configure_admin_queue()
1749 nvmeq = &dev->queues[0]; in nvme_pci_configure_admin_queue()
1761 nvmeq->cq_vector = 0; in nvme_pci_configure_admin_queue()
1762 nvme_init_queue(nvmeq, 0); in nvme_pci_configure_admin_queue()
1776 int ret = 0; in nvme_create_io_queues()
1807 return ret >= 0 ? 0 : ret; in nvme_create_io_queues()
1895 ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); in nvme_set_host_mem()
1910 for (i = 0; i < dev->nr_host_mem_descs; i++) { in nvme_free_host_mem()
1925 dev->nr_host_mem_descs = 0; in nvme_free_host_mem()
1934 int i = 0; in __nvme_alloc_host_mem()
1954 for (size = 0; size < preferred && i < max_entries; size += len) { in __nvme_alloc_host_mem()
1976 return 0; in __nvme_alloc_host_mem()
1979 while (--i >= 0) { in __nvme_alloc_host_mem()
2006 return 0; in nvme_alloc_host_mem()
2028 return 0; in nvme_setup_host_mem()
2045 return 0; /* controller must work without HMB */ in nvme_setup_host_mem()
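The comment at line 2045 is the whole host-memory-buffer contract: allocation is best effort, and failure must never take the controller down. The allocation path starts at the controller's preferred size and halves the chunk size until an allocation sticks or the minimum becomes unreachable. A self-contained sketch of that policy; fake_alloc() is a stand-in for the real DMA allocation, and the size limits are invented for the demo:

    #include <stdint.h>
    #include <stdio.h>

    /* Pretend the system can only satisfy chunks of 1 MiB or less. */
    static int fake_alloc(uint64_t chunk)
    {
        return chunk <= (1u << 20);
    }

    /* Halve the chunk size until an allocation succeeds or min is passed. */
    static uint64_t alloc_hmb(uint64_t preferred, uint64_t min_chunk)
    {
        for (uint64_t chunk = preferred; chunk >= min_chunk; chunk /= 2)
            if (fake_alloc(chunk))
                return chunk;
        return 0; /* no HMB; the controller must still work */
    }

    int main(void)
    {
        uint64_t got = alloc_hmb(32ull << 20, 4096);

        printf("HMB chunk obtained: %llu bytes\n", (unsigned long long)got);
        return 0;
    }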
2102 if (strtobool(buf, &new) < 0) in hmb_store()
2111 ret = nvme_set_host_mem(ndev, 0); in hmb_store()
2116 if (ret < 0) in hmb_store()
2134 return 0; in nvme_pci_attrs_are_visible()
2137 return 0; in nvme_pci_attrs_are_visible()
2169 * If only one interrupt is available or 'write_queues' == 0, combine in nvme_calc_irq_sets()
2172 * If 'write_queues' > 0, ensure it leaves room for at least one read in nvme_calc_irq_sets()
2177 nr_read_queues = 0; in nvme_calc_irq_sets()
2179 nr_read_queues = 0; in nvme_calc_irq_sets()
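Lines 2169 through 2179 implement the split that comment describes. A compact model of the same three cases (everything combined onto default queues, at least one read queue preserved, or the remainder handed to reads); the function and parameter names are illustrative and mirror the shape of nvme_calc_irq_sets() rather than its exact code:

    #include <stdio.h>

    static void split_irqs(unsigned nrirqs, unsigned write_queues,
                           unsigned *nr_default, unsigned *nr_read)
    {
        if (nrirqs <= 1 || write_queues == 0)
            *nr_read = 0; /* combined: every vector serves a default queue */
        else if (write_queues >= nrirqs)
            *nr_read = 1; /* leave room for at least one read queue */
        else
            *nr_read = nrirqs - write_queues;
        *nr_default = nrirqs - *nr_read;
    }

    int main(void)
    {
        unsigned d, r;

        split_irqs(8, 3, &d, &r);
        printf("8 vectors, write_queues=3 -> %u default, %u read\n", d, r);
        split_irqs(1, 3, &d, &r);
        printf("1 vector,  write_queues=3 -> %u default, %u read\n", d, r);
        return 0;
    }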
2215 dev->io_queues[HCTX_TYPE_READ] = 0; in nvme_setup_irqs()
2248 struct nvme_queue *adminq = &dev->queues[0]; in nvme_setup_io_queues()
2263 if (result < 0) in nvme_setup_io_queues()
2266 if (nr_io_queues == 0) in nvme_setup_io_queues()
2267 return 0; in nvme_setup_io_queues()
2280 pci_free_irq(pdev, 0, adminq); in nvme_setup_io_queues()
2285 if (result > 0) in nvme_setup_io_queues()
2306 pci_free_irq(pdev, 0, adminq); in nvme_setup_io_queues()
2315 if (result <= 0) { in nvme_setup_io_queues()
2353 return 0; in nvme_setup_io_queues()
2396 return 0; in nvme_delete_queue()
2401 int nr_queues = dev->online_queues - 1, sent = 0; in __nvme_disable_io_queues()
2406 while (nr_queues > 0) { in __nvme_disable_io_queues()
2417 if (timeout == 0) in __nvme_disable_io_queues()
2497 if (result < 0) in nvme_pci_enable()
2504 dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ in nvme_pci_enable()
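The "0's based" remark at line 2504 is the NVMe convention that CAP.MQES reports the maximum queue size minus one, so the driver derives q_depth as MQES + 1 and then stores sqsize back in zero-based form. A worked example, relying on the spec's placement of MQES in CAP bits 15:0:

    #include <stdint.h>
    #include <stdio.h>

    /* Extract MQES from the controller capabilities register (bits 15:0). */
    static uint32_t cap_mqes(uint64_t cap)
    {
        return cap & 0xffff;
    }

    int main(void)
    {
        uint64_t cap = 0x3ff;                 /* MQES = 1023 */
        uint32_t q_depth = cap_mqes(cap) + 1; /* natural count: 1024 slots */
        uint32_t sqsize  = q_depth - 1;       /* back to 0's based: 1023 */

        printf("MQES=%u -> q_depth=%u, sqsize=%u\n", cap_mqes(cap), q_depth, sqsize);
        return 0;
    }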
2522 if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { in nvme_pci_enable()
2528 (pdev->device == 0xa821 || pdev->device == 0xa822) && in nvme_pci_enable()
2529 NVME_CAP_MQES(dev->ctrl.cap) == 0) { in nvme_pci_enable()
2551 return 0; in nvme_pci_enable()
2604 if (!dead && dev->ctrl.queue_count > 0) { in nvme_dev_disable()
2609 nvme_suspend_queue(&dev->queues[0]); in nvme_dev_disable()
2636 return 0; in nvme_disable_prepare_reset()
2643 NVME_CTRL_PAGE_SIZE, 0); in nvme_setup_prp_pools()
2649 256, 256, 0); in nvme_setup_prp_pools()
2654 return 0; in nvme_setup_prp_pools()
2745 dma_set_max_seg_size(dev->dev, 0xffffffff); in nvme_reset_work()
2791 if (result < 0) in nvme_reset_work()
2855 return 0; in nvme_pci_reg_read32()
2861 return 0; in nvme_pci_reg_write32()
2867 return 0; in nvme_pci_reg_read64()
2900 return 0; in nvme_dev_map()
2908 if (pdev->vendor == 0x144d && pdev->device == 0xa802) { in check_vendor_combination_bug()
2921 } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { in check_vendor_combination_bug()
2932 } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 || in check_vendor_combination_bug()
2933 pdev->device == 0xa808 || pdev->device == 0xa809)) || in check_vendor_combination_bug()
2934 (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) { in check_vendor_combination_bug()
2946 return 0; in check_vendor_combination_bug()
3034 return 0; in nvme_probe()
3109 nvme_free_queues(dev, 0); in nvme_remove()
3118 return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps); in nvme_get_power_state()
3123 return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL); in nvme_set_power_state()
3132 nvme_set_power_state(ctrl, ndev->last_ps) != 0) in nvme_resume()
3137 return 0; in nvme_resume()
3182 ret = nvme_set_host_mem(ndev, 0); in nvme_suspend()
3183 if (ret < 0) in nvme_suspend()
3188 if (ret < 0) in nvme_suspend()
3199 if (ret < 0) in nvme_suspend()
3211 ctrl->npss = 0; in nvme_suspend()
3295 { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */
3298 { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */
3301 { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */
3304 { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */
3307 { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
3312 { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
3314 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
3317 { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
3319 { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
3322 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
3324 { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
3326 { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
3328 { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
3330 { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
3334 { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
3336 { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
3339 { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
3341 { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
3344 { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
3346 { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
3348 { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
3350 { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */
3352 { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
3354 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
3356 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
3358 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
3360 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
3362 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
3364 { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
3366 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
3368 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
3369 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
3375 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
3376 { 0, }