Lines Matching refs:queue_group

61 	struct pqi_queue_group *queue_group, enum pqi_io_path path,
68 unsigned int cdb_length, struct pqi_queue_group *queue_group,
2327 struct pqi_queue_group *queue_group) in pqi_raid_bypass_submit_scsi_cmd() argument
2649 cdb, cdb_length, queue_group, encryption_info_ptr, true); in pqi_raid_bypass_submit_scsi_cmd()
2968 struct pqi_queue_group *queue_group) in pqi_process_io_intr() argument
2978 oq_ci = queue_group->oq_ci_copy; in pqi_process_io_intr()
2981 oq_pi = readl(queue_group->oq_pi); in pqi_process_io_intr()
2986 response = queue_group->oq_element_array + in pqi_process_io_intr()
3045 queue_group->oq_ci_copy = oq_ci; in pqi_process_io_intr()
3046 writel(oq_ci, queue_group->oq_ci); in pqi_process_io_intr()
3072 struct pqi_queue_group *queue_group; in pqi_send_event_ack() local
3074 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; in pqi_send_event_ack()
3075 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); in pqi_send_event_ack()
3078 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); in pqi_send_event_ack()
3080 iq_pi = queue_group->iq_pi_copy[RAID_PATH]; in pqi_send_event_ack()
3081 iq_ci = readl(queue_group->iq_ci[RAID_PATH]); in pqi_send_event_ack()
3088 &queue_group->submit_lock[RAID_PATH], flags); in pqi_send_event_ack()
3094 next_element = queue_group->iq_element_array[RAID_PATH] + in pqi_send_event_ack()
3100 queue_group->iq_pi_copy[RAID_PATH] = iq_pi; in pqi_send_event_ack()
3106 writel(iq_pi, queue_group->iq_pi[RAID_PATH]); in pqi_send_event_ack()
3108 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); in pqi_send_event_ack()
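pqi_send_event_ack() shows the basic inbound-queue submit sequence on the RAID path: stamp the group's oq_id into the IU header so the response comes back on this group's outbound queue (3075), take the per-path submit lock (3078), check for a free element by comparing the cached producer index with the consumer index the controller exposes (3080/3081), copy the IU into the next element (3094), advance the cached producer index (3100), and ring the doorbell (3106). The early unlock at 3088 is the queue-full case, where the driver drops the lock and retries. A condensed sketch that fails fast instead, reusing the conventions above (usual kernel headers assumed); the sizes and struct layout are again illustrative:

#define DEMO_IQ_NUM_ELEMENTS    128     /* illustrative only */
#define DEMO_IQ_ELEMENT_LENGTH  64      /* illustrative only */

struct demo_iq {
        spinlock_t submit_lock;         /* protects the ring tail and request_list */
        struct list_head request_list;  /* requests parked while the ring is full */
        u16 iq_id;                      /* queue id the controller knows this ring by */
        void *iq_element_array;         /* host memory the driver copies IUs into */
        dma_addr_t iq_element_array_bus_addr;
        void __iomem *iq_pi;            /* producer index doorbell in BAR space */
        u16 iq_pi_copy;                 /* cached producer index */
        void __iomem *iq_ci;            /* consumer index the controller advances */
        dma_addr_t iq_ci_bus_addr;
};

static int demo_submit_single_iu(struct demo_iq *q, const void *iu, size_t iu_length)
{
        unsigned long flags;
        u16 iq_pi, iq_ci;
        void *next_element;

        if (iu_length > DEMO_IQ_ELEMENT_LENGTH)
                return -EINVAL;         /* this sketch only handles one-element IUs */

        spin_lock_irqsave(&q->submit_lock, flags);

        iq_pi = q->iq_pi_copy;
        iq_ci = readl(q->iq_ci);

        /* full? one slot is kept free so full and empty are distinguishable */
        if (((iq_pi + 1) % DEMO_IQ_NUM_ELEMENTS) == iq_ci) {
                spin_unlock_irqrestore(&q->submit_lock, flags);
                return -EBUSY;
        }

        next_element = q->iq_element_array + (iq_pi * DEMO_IQ_ELEMENT_LENGTH);
        memcpy(next_element, iu, iu_length);

        iq_pi = (iq_pi + 1) % DEMO_IQ_NUM_ELEMENTS;
        q->iq_pi_copy = iq_pi;
        writel(iq_pi, q->iq_pi);        /* doorbell: new work is available */

        spin_unlock_irqrestore(&q->submit_lock, flags);

        return 0;
}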
3502 struct pqi_queue_group *queue_group; in pqi_irq_handler() local
3505 queue_group = data; in pqi_irq_handler()
3506 ctrl_info = queue_group->ctrl_info; in pqi_irq_handler()
3511 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); in pqi_irq_handler()
3519 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); in pqi_irq_handler()
3520 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); in pqi_irq_handler()
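pqi_irq_handler() ties the two halves together. Each queue group has its own interrupt vector, and the queue group pointer is the dev_id cookie handed back to the handler (3505); the handler drains the group's outbound queue (3511) and then calls pqi_start_io() for both paths so that requests parked on the submit lists while the rings were full get pushed out (3519/3520). A sketch using the same demo_* types; demo_start_io() itself is sketched further down, after the pqi_start_io() matches:

enum demo_io_path { DEMO_RAID_PATH = 0, DEMO_AIO_PATH = 1, DEMO_NUM_PATHS = 2 };

struct demo_queue_group {
        u16 oq_id;                      /* stamped into every IU this group submits */
        struct demo_oq oq;
        struct demo_iq iq[DEMO_NUM_PATHS];
};

struct demo_io_request;                 /* defined with the pqi_start_io() sketch below */
static void demo_start_io(struct demo_queue_group *queue_group,
        enum demo_io_path path, struct demo_io_request *io_request);

/* registered with request_irq(vector, demo_irq_handler, 0, name, queue_group),
 * one vector per queue group */
static irqreturn_t demo_irq_handler(int irq, void *data)
{
        struct demo_queue_group *queue_group = data;    /* the dev_id cookie */

        demo_process_io_intr(&queue_group->oq);

        /* push out anything parked on the submit lists while the rings were busy */
        demo_start_io(queue_group, DEMO_RAID_PATH, NULL);
        demo_start_io(queue_group, DEMO_AIO_PATH, NULL);

        return IRQ_HANDLED;
}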
3598 struct pqi_queue_group *queue_group; in pqi_alloc_operational_queues() local
3654 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
3655 queue_group->iq_element_array[RAID_PATH] = element_array; in pqi_alloc_operational_queues()
3656 queue_group->iq_element_array_bus_addr[RAID_PATH] = in pqi_alloc_operational_queues()
3662 queue_group->iq_element_array[AIO_PATH] = element_array; in pqi_alloc_operational_queues()
3663 queue_group->iq_element_array_bus_addr[AIO_PATH] = in pqi_alloc_operational_queues()
3672 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
3673 queue_group->oq_element_array = element_array; in pqi_alloc_operational_queues()
3674 queue_group->oq_element_array_bus_addr = in pqi_alloc_operational_queues()
3693 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
3694 queue_group->iq_ci[RAID_PATH] = next_queue_index; in pqi_alloc_operational_queues()
3695 queue_group->iq_ci_bus_addr[RAID_PATH] = in pqi_alloc_operational_queues()
3702 queue_group->iq_ci[AIO_PATH] = next_queue_index; in pqi_alloc_operational_queues()
3703 queue_group->iq_ci_bus_addr[AIO_PATH] = in pqi_alloc_operational_queues()
3710 queue_group->oq_pi = next_queue_index; in pqi_alloc_operational_queues()
3711 queue_group->oq_pi_bus_addr = in pqi_alloc_operational_queues()
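pqi_alloc_operational_queues() carves DMA-coherent memory for all of this: an element_array cursor hands each group its RAID-path and AIO-path inbound element arrays and its outbound element array (3655-3674), and a next_queue_index cursor hands out the small index words the controller DMAs its iq_ci and oq_pi values into (3694-3711), recording a bus address next to every virtual pointer. A stripped-down, single-group version of that carving; it ignores the alignment the real code applies and reuses the illustrative sizes from the earlier sketches (<linux/dma-mapping.h> and friends assumed):

static int demo_alloc_operational_queues(struct device *dev,
        struct demo_queue_group *queue_group)
{
        size_t element_bytes = 2 * DEMO_IQ_NUM_ELEMENTS * DEMO_IQ_ELEMENT_LENGTH +
                DEMO_OQ_NUM_ELEMENTS * DEMO_OQ_ELEMENT_LENGTH;
        size_t index_bytes = 3 * sizeof(u32);   /* iq_ci x2 + oq_pi */
        unsigned int path;
        void *virt;
        dma_addr_t bus;

        /* a real version must also remember the region's base address and size
         * so it can be freed with dma_free_coherent() later */
        virt = dma_alloc_coherent(dev, element_bytes + index_bytes, &bus, GFP_KERNEL);
        if (!virt)
                return -ENOMEM;

        /* inbound element arrays, one per path */
        for (path = DEMO_RAID_PATH; path < DEMO_NUM_PATHS; path++) {
                queue_group->iq[path].iq_element_array = virt;
                queue_group->iq[path].iq_element_array_bus_addr = bus;
                virt += DEMO_IQ_NUM_ELEMENTS * DEMO_IQ_ELEMENT_LENGTH;
                bus += DEMO_IQ_NUM_ELEMENTS * DEMO_IQ_ELEMENT_LENGTH;
        }

        /* outbound element array */
        queue_group->oq.oq_element_array = virt;
        queue_group->oq.oq_element_array_bus_addr = bus;
        virt += DEMO_OQ_NUM_ELEMENTS * DEMO_OQ_ELEMENT_LENGTH;
        bus += DEMO_OQ_NUM_ELEMENTS * DEMO_OQ_ELEMENT_LENGTH;

        /* index words the controller DMAs into; the driver reads them with
         * readl(), hence the __iomem casts here */
        for (path = DEMO_RAID_PATH; path < DEMO_NUM_PATHS; path++) {
                queue_group->iq[path].iq_ci = (void __iomem *)virt;
                queue_group->iq[path].iq_ci_bus_addr = bus;
                virt += sizeof(u32);
                bus += sizeof(u32);
        }
        queue_group->oq.oq_pi = (void __iomem *)virt;
        queue_group->oq.oq_pi_bus_addr = bus;

        return 0;
}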
3940 struct pqi_queue_group *queue_group, enum pqi_io_path path, in pqi_start_io() argument
3954 spin_lock_irqsave(&queue_group->submit_lock[path], flags); in pqi_start_io()
3957 io_request->queue_group = queue_group; in pqi_start_io()
3959 &queue_group->request_list[path]); in pqi_start_io()
3962 iq_pi = queue_group->iq_pi_copy[path]; in pqi_start_io()
3965 &queue_group->request_list[path], request_list_entry) { in pqi_start_io()
3975 iq_ci = readl(queue_group->iq_ci[path]); in pqi_start_io()
3981 put_unaligned_le16(queue_group->oq_id, in pqi_start_io()
3984 next_element = queue_group->iq_element_array[path] + in pqi_start_io()
3996 memcpy(queue_group->iq_element_array[path], in pqi_start_io()
4007 if (iq_pi != queue_group->iq_pi_copy[path]) { in pqi_start_io()
4008 queue_group->iq_pi_copy[path] = iq_pi; in pqi_start_io()
4013 writel(iq_pi, queue_group->iq_pi[path]); in pqi_start_io()
4016 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); in pqi_start_io()
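pqi_start_io() is the generic submission engine behind all of the paths above and below. Under the per-path submit lock it appends the new request to the path's request_list (3959), then drains as much of the list into the inbound ring as the cached producer index and the controller's consumer index allow (3962-3984), stamping the group's oq_id into each IU so the completion returns on this group's outbound queue (3981), and rings the producer-index doorbell only if something was actually queued (4007-4013). The request also remembers its queue group (3957), which is what pqi_retry_raid_bypass() reads back later (5176). A sketch of that flow, restricted to single-element IUs; demo_iu_header and the other demo_* layouts are illustrative:

/* a bare-bones IU header; only the field the sketch touches is meaningful */
struct demo_iu_header {
        u8 iu_type;
        u8 reserved;
        __le16 iu_length;
        __le16 response_queue_id;       /* which outbound queue gets the response */
} __packed;

struct demo_io_request {
        struct list_head request_list_entry;
        void *iu;                       /* pre-built IU, at most one element here */
        size_t iu_length;
        struct demo_queue_group *queue_group;   /* remembered so a failed RAID bypass
                                                   can be retried on the same group */
};

static void demo_start_io(struct demo_queue_group *queue_group,
        enum demo_io_path path, struct demo_io_request *io_request)
{
        struct demo_iq *q = &queue_group->iq[path];
        struct demo_io_request *request, *next;
        unsigned long flags;
        u16 iq_pi;

        spin_lock_irqsave(&q->submit_lock, flags);

        /* new work always goes through the list, so ordering is preserved */
        if (io_request) {
                io_request->queue_group = queue_group;
                list_add_tail(&io_request->request_list_entry, &q->request_list);
        }

        iq_pi = q->iq_pi_copy;

        list_for_each_entry_safe(request, next, &q->request_list, request_list_entry) {
                u16 iq_ci = readl(q->iq_ci);
                void *next_element;

                /* stop when the ring is full (one slot kept free); whatever is
                 * left stays parked and the IRQ handler will call us again */
                if (((iq_pi + 1) % DEMO_IQ_NUM_ELEMENTS) == iq_ci)
                        break;

                list_del(&request->request_list_entry);

                /* route the completion back to this group's outbound queue */
                put_unaligned_le16(queue_group->oq_id,
                        &((struct demo_iu_header *)request->iu)->response_queue_id);

                next_element = q->iq_element_array + (iq_pi * DEMO_IQ_ELEMENT_LENGTH);
                memcpy(next_element, request->iu, request->iu_length);
                /* the real code handles IUs spanning several elements and splits
                 * the copy at the end of the ring (the memcpy at 3996) */

                iq_pi = (iq_pi + 1) % DEMO_IQ_NUM_ELEMENTS;
        }

        /* ring the doorbell only if something was actually queued */
        if (iq_pi != q->iq_pi_copy) {
                q->iq_pi_copy = iq_pi;
                writel(iq_pi, q->iq_pi);
        }

        spin_unlock_irqrestore(&q->submit_lock, flags);
}

Because everything funnels through request_list first, the interrupt handler can reuse the same routine with a NULL io_request to flush whatever was parked while the ring was full.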
4383 struct pqi_queue_group *queue_group; in pqi_create_queue_group() local
4387 queue_group = &ctrl_info->queue_groups[group_number]; in pqi_create_queue_group()
4398 put_unaligned_le16(queue_group->iq_id[RAID_PATH], in pqi_create_queue_group()
4401 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], in pqi_create_queue_group()
4403 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], in pqi_create_queue_group()
4419 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + in pqi_create_queue_group()
4433 put_unaligned_le16(queue_group->iq_id[AIO_PATH], in pqi_create_queue_group()
4435 put_unaligned_le64((u64)queue_group-> in pqi_create_queue_group()
4438 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], in pqi_create_queue_group()
4454 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + in pqi_create_queue_group()
4469 put_unaligned_le16(queue_group->iq_id[AIO_PATH], in pqi_create_queue_group()
4490 put_unaligned_le16(queue_group->oq_id, in pqi_create_queue_group()
4492 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, in pqi_create_queue_group()
4494 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, in pqi_create_queue_group()
4501 put_unaligned_le16(queue_group->int_msg_num, in pqi_create_queue_group()
4512 queue_group->oq_ci = ctrl_info->iomem_base + in pqi_create_queue_group()
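pqi_create_queue_group() then introduces each ring to the controller through admin requests: the create-IQ requests carry the queue id, the element array bus address, and the iq_ci bus address for the RAID and AIO paths (4398-4404, 4433-4438); the create-OQ request carries the oq_id, the element array and oq_pi bus addresses, and int_msg_num, i.e. which MSI-X vector this group's completions should raise (4490-4501); and the responses yield register offsets from which the doorbell pointers are computed off iomem_base (4419, 4454, 4512). A rough sketch of the inbound half; the parameter-block field names are illustrative, not the driver's real admin IU layout:

/* roughly what a create-operational-IQ admin request has to carry */
struct demo_create_iq_params {
        u16 iq_id;
        dma_addr_t element_array_addr;  /* where the driver will place IUs */
        dma_addr_t ci_addr;             /* where the controller posts its consumer index */
        u16 num_elements;
        u16 element_length;
};

static void demo_fill_create_iq(struct demo_create_iq_params *params,
        const struct demo_iq *q)
{
        params->iq_id = q->iq_id;
        params->element_array_addr = q->iq_element_array_bus_addr;
        params->ci_addr = q->iq_ci_bus_addr;
        params->num_elements = DEMO_IQ_NUM_ELEMENTS;
        params->element_length = DEMO_IQ_ELEMENT_LENGTH;
}

/* the admin response reports a register offset; the producer-index doorbell
 * the driver rings from then on lives at that offset inside the mapped BAR */
static void demo_set_iq_doorbell(struct demo_iq *q, void __iomem *iomem_base,
        u32 iq_pi_offset)
{
        q->iq_pi = iomem_base + iq_pi_offset;
}

The outbound queue follows the same shape, and its consumer-index doorbell (queue_group->oq_ci at 4512) is likewise derived from an offset in the response.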
4988 struct pqi_queue_group *queue_group) in pqi_raid_submit_scsi_cmd_with_io_request() argument
5070 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); in pqi_raid_submit_scsi_cmd_with_io_request()
5077 struct pqi_queue_group *queue_group) in pqi_raid_submit_scsi_cmd() argument
5084 device, scmd, queue_group); in pqi_raid_submit_scsi_cmd()
5164 struct pqi_queue_group *queue_group; in pqi_retry_raid_bypass() local
5176 queue_group = io_request->queue_group; in pqi_retry_raid_bypass()
5181 device, scmd, queue_group); in pqi_retry_raid_bypass()
5264 struct pqi_queue_group *queue_group) in pqi_aio_submit_scsi_cmd() argument
5267 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false); in pqi_aio_submit_scsi_cmd()
5272 unsigned int cdb_length, struct pqi_queue_group *queue_group, in pqi_aio_submit_io() argument
5335 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); in pqi_aio_submit_io()
5382 struct pqi_queue_group *queue_group; in pqi_scsi_queue_command() local
5417 queue_group = &ctrl_info->queue_groups[hw_queue]; in pqi_scsi_queue_command()
5424 scmd, queue_group); in pqi_scsi_queue_command()
5430 queue_group); in pqi_scsi_queue_command()
5434 queue_group); in pqi_scsi_queue_command()
5437 queue_group); in pqi_scsi_queue_command()
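pqi_scsi_queue_command() is where a SCSI command picks its group and its path: the command's hw_queue indexes ctrl_info->queue_groups (5417), and the command is then handed to the RAID bypass, RAID, or AIO submission routine (5424-5437). The shape of that dispatch, heavily condensed; demo_raid_bypass_submit(), demo_raid_submit(), and demo_aio_submit() are hypothetical stand-ins for the driver's routines, and the device flags are simplified:

#define DEMO_NUM_QUEUE_GROUPS   4       /* illustrative only */

struct demo_ctrl_info {
        struct demo_queue_group queue_groups[DEMO_NUM_QUEUE_GROUPS];
};

struct demo_device {
        bool is_logical_volume;         /* RAID volume presented by the firmware */
        bool raid_bypass_enabled;       /* firmware allows direct member-disk I/O */
        bool aio_enabled;               /* physical device reachable via the AIO path */
};

struct scsi_cmnd;                       /* only passed through in this sketch */

/* hypothetical stand-ins for the driver's real submission routines */
static int demo_raid_bypass_submit(struct demo_device *device,
        struct scsi_cmnd *scmd, struct demo_queue_group *queue_group)
{
        return -EAGAIN;                 /* pretend the bypass was not possible */
}

static int demo_raid_submit(struct demo_device *device, struct scsi_cmnd *scmd,
        struct demo_queue_group *queue_group)
{
        return 0;
}

static int demo_aio_submit(struct demo_device *device, struct scsi_cmnd *scmd,
        struct demo_queue_group *queue_group)
{
        return 0;
}

static int demo_queue_command(struct demo_ctrl_info *ctrl_info,
        struct demo_device *device, struct scsi_cmnd *scmd, u16 hw_queue)
{
        /* the hardware queue number picks the queue group (5417) */
        struct demo_queue_group *queue_group = &ctrl_info->queue_groups[hw_queue];

        if (device->is_logical_volume) {
                if (device->raid_bypass_enabled &&
                    demo_raid_bypass_submit(device, scmd, queue_group) == 0)
                        return 0;
                /* bypass unavailable or refused: use the firmware RAID stack */
                return demo_raid_submit(device, scmd, queue_group);
        }

        if (device->aio_enabled)
                return demo_aio_submit(device, scmd, queue_group);

        return demo_raid_submit(device, scmd, queue_group);
}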
5449 struct pqi_queue_group *queue_group) in pqi_wait_until_queued_io_drained() argument
5458 &queue_group->submit_lock[path], flags); in pqi_wait_until_queued_io_drained()
5460 list_empty(&queue_group->request_list[path]); in pqi_wait_until_queued_io_drained()
5462 &queue_group->submit_lock[path], flags); in pqi_wait_until_queued_io_drained()
5480 struct pqi_queue_group *queue_group; in pqi_wait_until_inbound_queues_empty() local
5485 queue_group = &ctrl_info->queue_groups[i]; in pqi_wait_until_inbound_queues_empty()
5487 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); in pqi_wait_until_inbound_queues_empty()
5492 iq_pi = queue_group->iq_pi_copy[path]; in pqi_wait_until_inbound_queues_empty()
5495 iq_ci = readl(queue_group->iq_ci[path]); in pqi_wait_until_inbound_queues_empty()
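pqi_wait_until_queued_io_drained() and pqi_wait_until_inbound_queues_empty() show what "quiesced" means for a queue group: the per-path request_list must be empty (checked under the submit lock, 5458-5462) and the controller's consumer index must have caught up with the cached producer index (5492-5495). A polling sketch of that check; a real implementation also needs the timeout and controller-offline handling this leaves out:

static bool demo_path_is_drained(struct demo_iq *q)
{
        unsigned long flags;
        bool list_is_empty;
        u16 iq_pi, iq_ci;

        spin_lock_irqsave(&q->submit_lock, flags);
        list_is_empty = list_empty(&q->request_list);
        spin_unlock_irqrestore(&q->submit_lock, flags);

        if (!list_is_empty)
                return false;           /* requests still waiting for ring space */

        iq_pi = q->iq_pi_copy;
        iq_ci = readl(q->iq_ci);

        return iq_ci == iq_pi;          /* controller has consumed everything posted */
}

static void demo_wait_until_inbound_queues_empty(struct demo_ctrl_info *ctrl_info)
{
        unsigned int i, path;

        for (i = 0; i < DEMO_NUM_QUEUE_GROUPS; i++) {
                struct demo_queue_group *queue_group = &ctrl_info->queue_groups[i];

                for (path = DEMO_RAID_PATH; path < DEMO_NUM_PATHS; path++)
                        while (!demo_path_is_drained(&queue_group->iq[path]))
                                usleep_range(1000, 2000);
        }
}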
5514 struct pqi_queue_group *queue_group; in pqi_fail_io_queued_for_device() local
5522 queue_group = &ctrl_info->queue_groups[i]; in pqi_fail_io_queued_for_device()
5526 &queue_group->submit_lock[path], flags); in pqi_fail_io_queued_for_device()
5529 &queue_group->request_list[path], in pqi_fail_io_queued_for_device()
5545 &queue_group->submit_lock[path], flags); in pqi_fail_io_queued_for_device()
5554 struct pqi_queue_group *queue_group; in pqi_fail_io_queued_for_all_devices() local
5561 queue_group = &ctrl_info->queue_groups[i]; in pqi_fail_io_queued_for_all_devices()
5564 spin_lock_irqsave(&queue_group->submit_lock[path], in pqi_fail_io_queued_for_all_devices()
5568 &queue_group->request_list[path], in pqi_fail_io_queued_for_all_devices()
5581 &queue_group->submit_lock[path], flags); in pqi_fail_io_queued_for_all_devices()
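Finally, pqi_fail_io_queued_for_device() and pqi_fail_io_queued_for_all_devices() walk the same per-path request lists under the submit lock and complete the parked requests with an error, either for one device or for everything (5522-5545, 5561-5581). A sketch of the "all devices" variant; demo_complete_request_with_error() is a hypothetical stand-in for the error completion:

static void demo_complete_request_with_error(struct demo_io_request *io_request)
{
        /* set an error status on the owning SCSI command and complete it */
}

static void demo_fail_all_queued_io(struct demo_queue_group *queue_group)
{
        struct demo_io_request *io_request, *next;
        unsigned long flags;
        unsigned int path;

        for (path = DEMO_RAID_PATH; path < DEMO_NUM_PATHS; path++) {
                struct demo_iq *q = &queue_group->iq[path];

                spin_lock_irqsave(&q->submit_lock, flags);

                list_for_each_entry_safe(io_request, next, &q->request_list,
                                request_list_entry) {
                        /* the per-device variant would first check that the
                         * request targets the device being failed */
                        list_del(&io_request->request_list_entry);
                        demo_complete_request_with_error(io_request);
                }

                spin_unlock_irqrestore(&q->submit_lock, flags);
        }
}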