drivers/scsi/vmw_pvscsi.c — annotated excerpts from the VMware PVSCSI driver, grouped by function.
The file header identifies the driver:

    /*
     * Linux driver for VMware's para-virtualized SCSI HBA.
     *
     * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
     *
     * (GPL v2 notice, ending with the Free Software Foundation's address:
     * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.)
     */

The per-command context structure, struct pvscsi_ctx, is documented as the handle that gives the driver a "1-to-1 mapping [of] completions back to requests": every request carries a context value, and the completion echoes it back.
Module parameters (the first three splice in a stringified default constant, hence the trailing open quote):

    MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
    MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
    MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
    MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
    MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
    MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
    MODULE_PARM_DESC(use_req_threshold, "Use driver-based request coalescing if configured - (default=1)");
pvscsi_dev() returns the underlying struct device of the PCI adapter:

    return &(adapter->dev->dev);
pvscsi_find_context() does a linear scan of the context map to recover the context for a given SCSI command:

    end = &adapter->cmd_map[adapter->req_depth];
    for (ctx = adapter->cmd_map; ctx < end; ctx++)
        if (ctx->cmd == cmd)
            return ctx;
pvscsi_acquire_context() pops a free context off the pool, or fails if the pool is empty:

    if (list_empty(&adapter->cmd_pool))
        return NULL;

    ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
    ctx->cmd = cmd;
    list_del(&ctx->list);
pvscsi_release_context() clears the slot and returns it to the pool:

    ctx->cmd = NULL;
    ctx->abort_cmp = NULL;
    list_add(&ctx->list, &adapter->cmd_pool);
pvscsi_map_context() converts a context pointer into the ID handed to the device. Since ctx always points to an entry in the cmd_map array, index + 1 yields a handle that is always a non-zero integer:

    return ctx - adapter->cmd_map + 1;

pvscsi_get_context() is the inverse:

    return &adapter->cmd_map[context - 1];
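The same 1-based handle scheme, as a minimal self-contained sketch (names are hypothetical, not the driver's):

    #include <stdint.h>

    struct ctx { void *cmd; };

    /* Handle 0 is reserved for "no context", so array index i maps
     * to handle i + 1. */
    static uint64_t ctx_to_handle(const struct ctx *map, const struct ctx *c)
    {
        return (uint64_t)(c - map) + 1;
    }

    static struct ctx *handle_to_ctx(struct ctx *map, uint64_t handle)
    {
        return &map[handle - 1];
    }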
pvscsi_reg_write() and pvscsi_reg_read() are thin MMIO accessors:

    writel(val, adapter->mmioBase + offset);
    ...
    return readl(adapter->mmioBase + offset);

pvscsi_unmask_intr() enables completion interrupts, plus message-ring interrupts when the msg ring is in use:

    if (adapter->use_msg)
        intr_bits |= PVSCSI_INTR_MSG_MASK;
pvscsi_abort_cmd() builds the abort request from the command's target:

    cmd.target = ctx->cmd->device->id;
pvscsi_kick_io() implements the request-coalescing threshold: for read/write I/O the doorbell is rung only once the number of posted-but-unconsumed requests reaches the device-configured threshold, or when coalescing is off:

    struct PVSCSIRingsState *s = adapter->rings_state;

    if (!adapter->use_req_threshold ||
        s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold)
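The decision in isolation, as a hedged sketch (field names modeled on the fragment above; kick_hw() stands in for the doorbell MMIO write):

    #include <stdint.h>

    struct rings_state {
        uint32_t req_prod_idx;       /* advanced by the driver */
        uint32_t req_cons_idx;       /* advanced by the device */
        uint32_t req_call_threshold; /* device-configured batch size */
    };

    static void kick_hw(void) { /* doorbell write would go here */ }

    /* Trade a little latency for fewer VM exits: only kick once enough
     * requests have accumulated, unless coalescing is disabled. */
    static void maybe_kick(const struct rings_state *s, int use_threshold)
    {
        if (!use_threshold ||
            s->req_prod_idx - s->req_cons_idx >= s->req_call_threshold)
            kick_hw();
    }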
pvscsi_create_sg() fills the driver's own SG element array, starting at:

    sge = &ctx->sgl->sge[0];
pvscsi_map_buffers() DMA-maps the command's data. A multi-segment scatterlist is flattened into the driver's preallocated SG list, which itself must then be mapped; a single segment is passed by address; a flat buffer (no scatterlist) is mapped directly:

    e->dataLen = bufflen;
    e->dataAddr = 0;
    ...
    if (segs == -ENOMEM) {
        ...
        return -ENOMEM;
    }
    ...
    e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
    ctx->sglPA = dma_map_single(&adapter->dev->dev,
                                ctx->sgl, SGL_SIZE, DMA_TO_DEVICE);
    if (dma_mapping_error(&adapter->dev->dev, ctx->sglPA)) {
        ...
        ctx->sglPA = 0;
        return -ENOMEM;
    }
    e->dataAddr = ctx->sglPA;
    ...
    e->dataAddr = sg_dma_address(sg);    /* single-segment case */
    ...
    ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen,
                                 cmd->sc_data_direction);
    if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) {
        ...
        return -ENOMEM;
    }
    e->dataAddr = ctx->dataPA;
pvscsi_patch_sense() clears the first sense byte, since the device emulation does not reliably do so:

    if (cmd->sense_buffer)
        cmd->sense_buffer[0] = 0;
pvscsi_unmap_buffers() undoes the mappings in reverse:

    cmd = ctx->cmd;
    ...
    if (ctx->sglPA) {
        dma_unmap_single(&adapter->dev->dev, ctx->sglPA,
                         SGL_SIZE, DMA_TO_DEVICE);
        ctx->sglPA = 0;
    }
    ...
    dma_unmap_single(&adapter->dev->dev, ctx->dataPA,
                     bufflen, cmd->sc_data_direction);

    if (cmd->sense_buffer)
        dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
                         SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
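The mapping and unmapping calls above follow the standard kernel DMA-API shape. A minimal sketch of that pattern (the device and buffer are placeholders, not this driver's):

    #include <linux/dma-mapping.h>

    static int example_dma_roundtrip(struct device *dev, void *buf, size_t len)
    {
        dma_addr_t pa = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, pa))
            return -ENOMEM;    /* never hand a bad handle to the device */

        /* ... point the device at pa and wait for it to finish ... */

        dma_unmap_single(dev, pa, len, DMA_TO_DEVICE);
        return 0;
    }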
pvscsi_allocate_rings() allocates the shared state page plus the request, completion, and (optionally) message rings from coherent DMA memory; every region must be page-aligned because the device is told about whole pages:

    adapter->rings_state = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
                                              &adapter->ringStatePA, GFP_KERNEL);
    if (!adapter->rings_state)
        return -ENOMEM;

    adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
                             pvscsi_ring_pages);
    adapter->req_depth = adapter->req_pages
                            * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
    adapter->req_ring = dma_alloc_coherent(&adapter->dev->dev,
            adapter->req_pages * PAGE_SIZE, &adapter->reqRingPA,
            GFP_KERNEL);
    if (!adapter->req_ring)
        return -ENOMEM;

    adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
                             pvscsi_ring_pages);
    adapter->cmp_ring = dma_alloc_coherent(&adapter->dev->dev,
            adapter->cmp_pages * PAGE_SIZE, &adapter->cmpRingPA,
            GFP_KERNEL);
    if (!adapter->cmp_ring)
        return -ENOMEM;

    BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
    BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
    BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));

    if (!adapter->use_msg)
        return 0;

    adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
                             pvscsi_msg_ring_pages);
    adapter->msg_ring = dma_alloc_coherent(&adapter->dev->dev,
            adapter->msg_pages * PAGE_SIZE, &adapter->msgRingPA,
            GFP_KERNEL);
    if (!adapter->msg_ring)
        return -ENOMEM;
    BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
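Ring depth falls out of the page math. A worked example, hedged on the descriptor size (the PVSCSI request descriptor is padded to 128 bytes, and PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE is the per-page count derived from it):

    /* With 4 KiB pages, 128-byte request descriptors, ring_pages = 8:
     *   entries/page = 4096 / 128 = 32
     *   req_depth    = 8 * 32     = 256 outstanding commands
     */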
pvscsi_setup_all_rings() zeroes the rings and hands the hypervisor the physical page number (PPN) of every ring page via a setup command:

    cmd.ringsStatePPN   = adapter->ringStatePA >> PAGE_SHIFT;
    cmd.reqRingNumPages = adapter->req_pages;
    cmd.cmpRingNumPages = adapter->cmp_pages;

    base = adapter->reqRingPA;
    for (i = 0; i < adapter->req_pages; i++) {
        cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
        base += PAGE_SIZE;
    }

    base = adapter->cmpRingPA;
    for (i = 0; i < adapter->cmp_pages; i++) {
        cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
        base += PAGE_SIZE;
    }

    memset(adapter->rings_state, 0, PAGE_SIZE);
    memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
    memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);
    ...
    if (adapter->use_msg) {
        ...
        cmd_msg.numPages = adapter->msg_pages;

        base = adapter->msgRingPA;
        for (i = 0; i < adapter->msg_pages; i++) {
            cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
            base += PAGE_SIZE;
        }
        memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);
        ...
    }
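The PPN loops reduce to one tiny helper. A sketch under the usual x86 assumption of 4 KiB pages (names hypothetical):

    #include <stdint.h>

    #define EX_PAGE_SHIFT 12
    #define EX_PAGE_SIZE  (1ul << EX_PAGE_SHIFT)

    /* Convert a physically contiguous, page-aligned region into the
     * per-page PPN list a paravirtual device expects. */
    static void fill_ppns(uint64_t base_pa, unsigned int npages, uint64_t *ppns)
    {
        for (unsigned int i = 0; i < npages; i++) {
            ppns[i] = base_pa >> EX_PAGE_SHIFT;
            base_pa += EX_PAGE_SIZE;
        }
    }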
pvscsi_change_queue_depth() caps the depth at 1 for devices without tagged queuing:

    if (!sdev->tagged_supported)
        qdepth = 1;
pvscsi_complete_request() translates the device's completion status pair (hostStatus, scsiStatus) into the midlayer's cmd->result encoding. If the command was marked for abort, the completion is swallowed and the abort handler is signalled instead:

    u32 btstat = e->hostStatus;
    u32 sdstat = e->scsiStatus;

    ctx = pvscsi_get_context(adapter, e->context);
    cmd = ctx->cmd;
    abort_cmp = ctx->abort_cmp;
    ...
    cmd->result = 0;
    if (sdstat != SAM_STAT_GOOD && /* transport fine, device unhappy */ ...) {
        if (sdstat == SAM_STAT_COMMAND_TERMINATED)
            cmd->result = (DID_RESET << 16);
        else {
            cmd->result = (DID_OK << 16) | sdstat;
            if (sdstat == SAM_STAT_CHECK_CONDITION &&
                cmd->sense_buffer)
                cmd->result |= (DRIVER_SENSE << 24);
        }
    } else switch (btstat) {
    case BTSTAT_SUCCESS:
        ...
        cmd->result = (DID_OK << 16);
        break;
    case BTSTAT_DATARUN:
    case BTSTAT_DATA_UNDERRUN:
        /* report the residual before flagging the error */
        scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
        cmd->result = (DID_ERROR << 16);
        break;
    case BTSTAT_SELTIMEO:
        /* Our emulation returns this for non-connected devs */
        cmd->result = (DID_BAD_TARGET << 16);
        break;
    ...
        cmd->result = (DRIVER_INVALID << 24);
        ...
        cmd->result |= (DID_ERROR << 16);
        break;
    case BTSTAT_BUSRESET:
        cmd->result = (DID_RESET << 16);
        break;
    case BTSTAT_ABORTQUEUE:
        cmd->result = (DID_BUS_BUSY << 16);
        break;
    case BTSTAT_SCSIPARITY:
        cmd->result = (DID_PARITY << 16);
        break;
    default:
        cmd->result = (DID_ERROR << 16);
    }

    dev_dbg(&cmd->device->sdev_gendev,
            "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
            cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);

    cmd->scsi_done(cmd);
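The shifts place each status in its own byte of the old-style SCSI result word: SCSI status in byte 0, host byte at bit 16, driver byte at bit 24. A worked value:

    /* CHECK CONDITION with valid sense data, transport fine:
     *   (DRIVER_SENSE << 24) | (DID_OK << 16) | SAM_STAT_CHECK_CONDITION
     * = (0x08 << 24)         | (0x00 << 16)   | 0x02
     * = 0x08000002
     */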
pvscsi_process_completion_ring() drains the completion ring. Indices are free-running; a slot is addressed by masking the consumer index with the ring size, a power of two given by cmpNumEntriesLog2. Compiler barriers keep the slot read between the index check and the index publish:

    struct PVSCSIRingsState *s = adapter->rings_state;
    struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
    u32 cmp_entries = s->cmpNumEntriesLog2;

    while (s->cmpConsIdx != s->cmpProdIdx) {
        struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
                                              MASK(cmp_entries));
        /*
         * Since the device emulation advances s->cmpProdIdx only after
         * filling the slot, the entry is stable once the indices differ.
         */
        barrier();
        pvscsi_complete_request(adapter, e);
        /*
         * Keep the compiler from reordering the write
         * to s->cmpConsIdx before the read of (*e) inside
         * pvscsi_complete_request().
         */
        barrier();
        s->cmpConsIdx++;
    }
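This is a classic single-producer (device) / single-consumer (driver) queue. A compact userspace sketch of the same discipline (names hypothetical; a callback stands in for request completion):

    #include <stdint.h>

    #define RING_LOG2 5
    #define MASK(n)   ((1u << (n)) - 1)   /* same masking idiom as the driver */

    struct cmp_ring {
        volatile uint32_t prod;           /* advanced by the producer only */
        volatile uint32_t cons;           /* advanced by the consumer only */
        int slots[1 << RING_LOG2];
    };

    static void consume_all(struct cmp_ring *r, void (*handle)(int))
    {
        while (r->cons != r->prod) {
            /* prod is advanced after the slot is filled, so the slot
             * is stable once cons != prod is observed. */
            int e = r->slots[r->cons & MASK(RING_LOG2)];

            handle(e);
            r->cons++;                    /* hand the slot back */
        }
    }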
pvscsi_queue_ring() fills the next request slot and publishes it by advancing the producer index:

    s = adapter->rings_state;
    sdev = cmd->device;
    req_entries = s->reqNumEntriesLog2;

    /*
     * If this condition holds, we might have room on the request ring
     * but not on the completion ring for the response.
     * However, we have already ruled out this possibility - we would not
     * have allocated a context if it were true, since there is only one
     * context per request-ring entry. Check anyway; it would be a serious bug.
     */
    if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
        scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
                    "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
                    s->reqProdIdx, s->cmpConsIdx);
        return -1;
    }

    e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));

    e->bus    = sdev->channel;
    e->target = sdev->id;
    memset(e->lun, 0, sizeof(e->lun));
    e->lun[1] = sdev->lun;

    if (cmd->sense_buffer) {
        ctx->sensePA = dma_map_single(&adapter->dev->dev,
                                      cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
                                      DMA_FROM_DEVICE);
        if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
            ...
            ctx->sensePA = 0;
            return -ENOMEM;
        }
        e->senseAddr = ctx->sensePA;
        e->senseLen = SCSI_SENSE_BUFFERSIZE;
    } else {
        e->senseLen  = 0;
        e->senseAddr = 0;
    }

    e->cdbLen   = cmd->cmd_len;
    e->vcpuHint = smp_processor_id();
    memcpy(e->cdb, cmd->cmnd, e->cdbLen);

    e->tag = SIMPLE_QUEUE_TAG;

    if (cmd->sc_data_direction == DMA_FROM_DEVICE)
        e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
    else if (cmd->sc_data_direction == DMA_TO_DEVICE)
        e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
    else if (cmd->sc_data_direction == DMA_NONE)
        e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
    else
        e->flags = 0;

    if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) {
        if (cmd->sense_buffer) {
            dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
                             SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
            ctx->sensePA = 0;
        }
        return -ENOMEM;
    }

    e->context = pvscsi_map_context(adapter, ctx);

    barrier();    /* the descriptor must be fully written before publishing */

    s->reqProdIdx++;
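The ring-full test works because the indices are free-running unsigned integers: subtraction is wraparound-safe, so prod - cons is always the count of outstanding entries. A worked example:

    u32 prod = 0xfffffffeu;        /* producer about to wrap */
    u32 cons = 0xfffffff0u;
    u32 outstanding = prod - cons; /* 14 */

    prod += 4;                     /* wraps to 0x00000002 */
    outstanding = prod - cons;     /* still correct: 18 */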
pvscsi_queue_lck() is the queuecommand body: take the adapter lock, grab a context, fill a ring slot, and kick the device after dropping the lock:

    struct Scsi_Host *host = cmd->device->host;
    struct pvscsi_adapter *adapter = shost_priv(host);
    ...
    spin_lock_irqsave(&adapter->hw_lock, flags);

    ctx = pvscsi_acquire_context(adapter, cmd);
    if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
        if (ctx)
            pvscsi_release_context(adapter, ctx);
        spin_unlock_irqrestore(&adapter->hw_lock, flags);
        return SCSI_MLQUEUE_HOST_BUSY;
    }

    cmd->scsi_done = done;
    op = cmd->cmnd[0];

    dev_dbg(&cmd->device->sdev_gendev,
            "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op);

    spin_unlock_irqrestore(&adapter->hw_lock, flags);

    pvscsi_kick_io(adapter, op);
The next excerpts are from pvscsi_abort(); the cross-referencer misattributed them to the DEF_SCSI_QCMD() wrapper macro that immediately precedes it. The handler polls the completion ring first, then issues the abort and waits, bounded, for the device to acknowledge:

    struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
    ...
    scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
                adapter->host->host_no, cmd);

    spin_lock_irqsave(&adapter->hw_lock, flags);

    /*
     * Poll the completion ring first - we might be trying to abort
     * a command that is waiting to be dispatched on the completion ring.
     */
    pvscsi_process_completion_ring(adapter);
    ...
    ctx->abort_cmp = &abort_cmp;

    pvscsi_abort_cmd(adapter, ctx);
    spin_unlock_irqrestore(&adapter->hw_lock, flags);
    /* Wait for 2 secs for the completion. */
    done = wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
    spin_lock_irqsave(&adapter->hw_lock, flags);

    if (!done) {
        /* Abort failed; unmark so the normal completion is delivered. */
        ctx->abort_cmp = NULL;
        ...
    }
    ...
    cmd->result = (DID_ABORT << 16);
    cmd->scsi_done(cmd);

out:
    spin_unlock_irqrestore(&adapter->hw_lock, flags);
pvscsi_reset_all() force-completes every in-flight command. Its header comment warns that this is only safe when the completion ring will not be consulted again, because it destroys the 1-1 mapping between the context field passed to the emulation and our request structures:

    for (i = 0; i < adapter->req_depth; i++) {
        struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
        struct scsi_cmnd *cmd = ctx->cmd;

        if (cmd) {
            ...
            cmd->result = (DID_RESET << 16);
            cmd->scsi_done(cmd);
        }
    }
pvscsi_host_reset() quiesces the message workqueue, resets the adapter, and rebuilds the rings. Note the ordering subtlety the source calls out: completions are processed after the reset, which closes a race where completions get posted between draining the ring and issuing the reset; the backend does not touch the ring memory after reset, so the immediately pre-reset completion-ring state is still valid:

    struct Scsi_Host *host = cmd->device->host;
    ...
    spin_lock_irqsave(&adapter->hw_lock, flags);

    use_msg = adapter->use_msg;

    if (use_msg) {
        adapter->use_msg = false;
        spin_unlock_irqrestore(&adapter->hw_lock, flags);

        /* The ISR can no longer queue work, so flushing is safe. */
        flush_workqueue(adapter->workqueue);
        spin_lock_irqsave(&adapter->hw_lock, flags);
    }
    ...
    pvscsi_process_completion_ring(adapter);    /* AFTER the adapter reset */
    pvscsi_reset_all(adapter);

    adapter->use_msg = use_msg;
    ...
    spin_unlock_irqrestore(&adapter->hw_lock, flags);
pvscsi_bus_reset() holds the lock across the whole flush/reset/drain sequence so no new request can slip in mid-reset:

    struct Scsi_Host *host = cmd->device->host;
    ...
    spin_lock_irqsave(&adapter->hw_lock, flags);

    pvscsi_process_request_ring(adapter);
    ll_bus_reset(adapter);
    pvscsi_process_completion_ring(adapter);

    spin_unlock_irqrestore(&adapter->hw_lock, flags);
pvscsi_device_reset() is the same pattern, targeted at a single device:

    struct Scsi_Host *host = cmd->device->host;
    ...
    scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
                host->host_no, cmd->device->id);
    ...
    spin_lock_irqsave(&adapter->hw_lock, flags);

    pvscsi_process_request_ring(adapter);
    ll_device_reset(adapter, cmd->device->id);
    pvscsi_process_completion_ring(adapter);

    spin_unlock_irqrestore(&adapter->hw_lock, flags);
1009 "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev, in pvscsi_info()
1010 adapter->req_pages, adapter->cmp_pages, adapter->msg_pages, in pvscsi_info()
1022 .this_id = -1,
pvscsi_process_msg() handles asynchronous device-status messages (hot add/remove). Both branches log the event, look the device up, and then add or remove it through the SCSI midlayer:

    struct PVSCSIRingsState *s = adapter->rings_state;
    struct Scsi_Host *host = adapter->host;

    printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n",
           e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);
    ...
    if (e->type == PVSCSI_MSG_DEV_ADDED) {
        ...
        /* "device added at scsi%u:%u:%u" */
        ...    desc->bus, desc->target, desc->lun[1]);
        ...
        sdev = scsi_device_lookup(host, desc->bus, desc->target,
                                  desc->lun[1]);
        if (sdev)
            scsi_device_put(sdev);    /* already known */
        else
            scsi_add_device(adapter->host, desc->bus,
                            desc->target, desc->lun[1]);
        ...
    } else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
        ...
        /* "device removed at scsi%u:%u:%u" */
        ...    desc->bus, desc->target, desc->lun[1]);
        ...
        sdev = scsi_device_lookup(host, desc->bus, desc->target,
                                  desc->lun[1]);
        if (sdev) {
            scsi_remove_device(sdev);
            scsi_device_put(sdev);
        } else
            /* "failed to lookup scsi%u:%u:%u" */
            ...    desc->bus, desc->target, desc->lun[1]);
        ...
    }
pvscsi_msg_pending() is a cheap index comparison:

    struct PVSCSIRingsState *s = adapter->rings_state;

    return s->msgProdIdx != s->msgConsIdx;
pvscsi_process_msg_ring() consumes the message ring with the same masked, free-running-index discipline as the completion ring:

    struct PVSCSIRingsState *s = adapter->rings_state;
    struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
    u32 msg_entries = s->msgNumEntriesLog2;

    while (pvscsi_msg_pending(adapter)) {
        struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
                                              MASK(msg_entries));
        barrier();
        pvscsi_process_msg(adapter, e);
        barrier();
        s->msgConsIdx++;
    }
pvscsi_setup_msg_workqueue() probes for msg-ring support first; a COMMAND_STATUS readback of -1 means the emulation does not implement the command, in which case the driver simply runs without the msg ring:

    if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
        return 0;

    snprintf(name, sizeof(name),
             "vmw_pvscsi_wq_%u", adapter->host->host_no);

    adapter->workqueue = create_singlethread_workqueue(name);
    if (!adapter->workqueue) {
        ...
        return 0;
    }
    INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);
pvscsi_setup_req_threshold() uses the same -1 readback probe to detect whether the device supports the request-call threshold:

    if (val == -1) {
pvscsi_isr() drains completions under the lock and defers message processing to the workqueue, since device add/remove may sleep and therefore cannot run in interrupt context:

    spin_lock_irqsave(&adapter->hw_lock, flags);
    pvscsi_process_completion_ring(adapter);
    if (adapter->use_msg && pvscsi_msg_pending(adapter))
        queue_work(adapter->workqueue, &adapter->work);
    spin_unlock_irqrestore(&adapter->hw_lock, flags);
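The defer-to-process-context pattern, reduced to a hedged sketch (type and handler names are hypothetical, not the driver's; assume INIT_WORK(&a->work, msg_handler) ran at setup):

    #include <linux/interrupt.h>
    #include <linux/workqueue.h>

    struct my_adapter {                 /* hypothetical */
        struct workqueue_struct *wq;
        struct work_struct work;
    };

    /* Runs in process context, so sleeping calls such as
     * scsi_add_device() are permitted here. */
    static void msg_handler(struct work_struct *work)
    {
        /* drain the message ring, add/remove devices ... */
    }

    static irqreturn_t my_isr(int irq, void *devp)
    {
        struct my_adapter *a = devp;

        queue_work(a->wq, &a->work);    /* cheap; no sleeping in an ISR */
        return IRQ_HANDLED;
    }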
pvscsi_free_sgls() releases the per-context SG pages:

    struct pvscsi_ctx *ctx = adapter->cmd_map;
    unsigned i;

    for (i = 0; i < adapter->req_depth; ++i, ++ctx)
        free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
pvscsi_shutdown_intr():

    free_irq(pci_irq_vector(adapter->dev, 0), adapter);
    pci_free_irq_vectors(adapter->dev);
pvscsi_release_resources() tears everything down in reverse order of setup:

    if (adapter->workqueue)
        destroy_workqueue(adapter->workqueue);

    if (adapter->mmioBase)
        pci_iounmap(adapter->dev, adapter->mmioBase);

    pci_release_regions(adapter->dev);

    if (adapter->cmd_map) {
        pvscsi_free_sgls(adapter);
        kfree(adapter->cmd_map);
    }

    if (adapter->rings_state)
        dma_free_coherent(&adapter->dev->dev, PAGE_SIZE,
                          adapter->rings_state, adapter->ringStatePA);

    if (adapter->req_ring)
        dma_free_coherent(&adapter->dev->dev,
                          adapter->req_pages * PAGE_SIZE,
                          adapter->req_ring, adapter->reqRingPA);

    if (adapter->cmp_ring)
        dma_free_coherent(&adapter->dev->dev,
                          adapter->cmp_pages * PAGE_SIZE,
                          adapter->cmp_ring, adapter->cmpRingPA);

    if (adapter->msg_ring)
        dma_free_coherent(&adapter->dev->dev,
                          adapter->msg_pages * PAGE_SIZE,
                          adapter->msg_ring, adapter->msgRingPA);
pvscsi_allocate_sg() statically preallocates every context's SG list at init time. The source's rationale: dynamic allocation can fail, and a SCSI driver cannot go deep into the memory allocator on the I/O path; nor can it fail an I/O for want of an allocation, since that very I/O could be trying to free memory:

    ctx = adapter->cmd_map;
    ...
    for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
        ctx->sgl = (void *)__get_free_pages(GFP_KERNEL,
                                            get_order(SGL_SIZE));
        ctx->sglPA = 0;
        BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
        if (!ctx->sgl) {
            for (; i >= 0; --i, --ctx) {
                free_pages((unsigned long)ctx->sgl,
                           get_order(SGL_SIZE));
                ctx->sgl = NULL;
            }
            return -ENOMEM;
        }
    }
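The underlying pattern, preallocate every per-command buffer up front so the I/O path never allocates, in a standalone sketch (hypothetical names):

    #include <stdlib.h>

    struct slot { void *sgl; };

    /* Allocate one buffer per slot at init time; fail init, not I/O. */
    static int prealloc(struct slot *slots, int n, size_t sz)
    {
        for (int i = 0; i < n; i++) {
            slots[i].sgl = malloc(sz);
            if (!slots[i].sgl) {
                while (i--)
                    free(slots[i].sgl);
                return -1;
            }
        }
        return 0;
    }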
pvscsi_get_max_targets() asks the device for its controller config page. The reply header is pre-seeded with failure codes, so a hypervisor that ignores the command is indistinguishable from one that rejects it:

    config_page = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
                                     &configPagePA, GFP_KERNEL);
    ...
    header->hostStatus = BTSTAT_INVPARAM;
    header->scsiStatus = SDSTAT_CHECK;
    ...
    if (header->hostStatus == BTSTAT_SUCCESS &&
        header->scsiStatus == SDSTAT_GOOD) {
        ...
        numPhys = config->numPhys;
    } else
        /* warn, reporting hostStatus/scsiStatus: */
        ...    header->hostStatus, header->scsiStatus);
    dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, config_page,
                      configPagePA);
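The pre-seeding generalizes into a useful defensive idiom: initialize the reply to an error before issuing the command, so a device that never writes the reply reads as a failure. A standalone sketch with illustrative status codes (not PVSCSI's actual values):

    #include <stdint.h>

    struct reply { uint32_t host_status; };

    enum { STAT_OK = 0, STAT_FAILED = 1 };   /* illustrative only */

    static int issue_config_cmd(struct reply *r)
    {
        r->host_status = STAT_FAILED;   /* assume failure up front */

        /* ... post the command; a cooperating device overwrites *r ... */

        return r->host_status == STAT_OK ? 0 : -1;
    }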
pvscsi_probe() ties it all together. DMA configuration tries 64-bit first and falls back to 32-bit:

    error = -ENODEV;
    ...
    if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
        printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
    } else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
        printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
    }
    ...

A temporary adapter struct is used just long enough to map the registers and query the target count before the real Scsi_Host exists:

    adapter->dev = pdev;
    adapter->rev = pdev->revision;
    ...
    adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);

    if (!adapter->mmioBase) {
        ...
    }
    ...
    adapter->dev = pdev;
    adapter->host = host;
    /* carried over from the temporary adapter: */
    adapter->rev = adapter_temp.rev;
    adapter->mmioBase = adapter_temp.mmioBase;

    spin_lock_init(&adapter->hw_lock);
    host->max_channel = 0;
    host->max_lun = 1;
    host->max_cmd_len = 16;
    host->max_id = max_id;
    ...
    adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);
    ...

The context map is sized to the ring depth, so "context pool empty" and "ring full" coincide by construction:

    adapter->cmd_map = kcalloc(adapter->req_depth,
                               sizeof(struct pvscsi_ctx), GFP_KERNEL);
    if (!adapter->cmd_map) {
        ...
        error = -ENOMEM;
        ...
    }

    INIT_LIST_HEAD(&adapter->cmd_pool);
    for (i = 0; i < adapter->req_depth; i++) {
        struct pvscsi_ctx *ctx = adapter->cmd_map + i;
        list_add(&ctx->list, &adapter->cmd_pool);
    }
    ...

Interrupts and coalescing come last before host registration:

    error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag);
    ...
    adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
    printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n",
           adapter->use_req_threshold ? "en" : "dis");

    if (adapter->dev->msix_enabled || adapter->dev->msi_enabled) {
        /* "using MSI" / "using MSI-X": */
        ...    adapter->dev->msix_enabled ? "-X" : "");
        ...
    }
    ...
    error = scsi_add_host(host, &pdev->dev);
    ...
    dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
             adapter->rev, host->host_no);
Finally, __pvscsi_shutdown() flushes any outstanding message work before quiescing:

    if (adapter->workqueue)
        flush_workqueue(adapter->workqueue);

and pvscsi_init() announces the module on load:

    pr_info("%s - version %s\n", ...);