Lines matching full:cb in the Linux kernel's Mylex DAC960 RAID driver (drivers/scsi/myrb.c). Each hit shows the source line number, the matching code, and the enclosing function; "argument" and "local" flag hits where cb is that function's parameter or a local variable.

90 static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)  in myrb_create_mempools()  argument
95 elem_size = cb->host->sg_tablesize * elem_align; in myrb_create_mempools()
96 cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev, in myrb_create_mempools()
98 if (cb->sg_pool == NULL) { in myrb_create_mempools()
99 shost_printk(KERN_ERR, cb->host, in myrb_create_mempools()
104 cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev, in myrb_create_mempools()
107 if (!cb->dcdb_pool) { in myrb_create_mempools()
108 dma_pool_destroy(cb->sg_pool); in myrb_create_mempools()
109 cb->sg_pool = NULL; in myrb_create_mempools()
110 shost_printk(KERN_ERR, cb->host, in myrb_create_mempools()
115 snprintf(cb->work_q_name, sizeof(cb->work_q_name), in myrb_create_mempools()
116 "myrb_wq_%d", cb->host->host_no); in myrb_create_mempools()
117 cb->work_q = create_singlethread_workqueue(cb->work_q_name); in myrb_create_mempools()
118 if (!cb->work_q) { in myrb_create_mempools()
119 dma_pool_destroy(cb->dcdb_pool); in myrb_create_mempools()
120 cb->dcdb_pool = NULL; in myrb_create_mempools()
121 dma_pool_destroy(cb->sg_pool); in myrb_create_mempools()
122 cb->sg_pool = NULL; in myrb_create_mempools()
123 shost_printk(KERN_ERR, cb->host, in myrb_create_mempools()
131 INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor); in myrb_create_mempools()
132 queue_delayed_work(cb->work_q, &cb->monitor_work, 1); in myrb_create_mempools()
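The myrb_create_mempools() hits above show an allocate-or-unwind setup: two DMA pools via dma_pool_create(), a single-threaded workqueue for the periodic monitor, and each failure path destroying whatever was already allocated before returning. A minimal, hedged sketch of that pattern; the names my_hba, my_setup_pools and my_monitor are illustrative, not the driver's:

```c
#include <linux/dmapool.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/* Illustrative container; the real driver keeps these fields in struct myrb_hba. */
struct my_hba {
	struct dma_pool *sg_pool;
	struct dma_pool *dcdb_pool;
	struct workqueue_struct *work_q;
	struct delayed_work monitor_work;
};

/* Stub monitor routine so the sketch is self-contained. */
static void my_monitor(struct work_struct *work)
{
}

/* Allocate two DMA pools and a workqueue; unwind in reverse order on failure. */
static bool my_setup_pools(struct pci_dev *pdev, struct my_hba *hba,
			   size_t sg_size, size_t dcdb_size)
{
	hba->sg_pool = dma_pool_create("my_sg", &pdev->dev, sg_size,
				       sizeof(u64), 0);
	if (!hba->sg_pool)
		return false;

	hba->dcdb_pool = dma_pool_create("my_dcdb", &pdev->dev, dcdb_size,
					 sizeof(u32), 0);
	if (!hba->dcdb_pool)
		goto fail_sg;

	hba->work_q = create_singlethread_workqueue("my_wq");
	if (!hba->work_q)
		goto fail_dcdb;

	/* Kick off the periodic monitor, as the matched lines do. */
	INIT_DELAYED_WORK(&hba->monitor_work, my_monitor);
	queue_delayed_work(hba->work_q, &hba->monitor_work, 1);
	return true;

fail_dcdb:
	dma_pool_destroy(hba->dcdb_pool);
	hba->dcdb_pool = NULL;
fail_sg:
	dma_pool_destroy(hba->sg_pool);
	hba->sg_pool = NULL;
	return false;
}
```
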
140 static void myrb_destroy_mempools(struct myrb_hba *cb) in myrb_destroy_mempools() argument
142 cancel_delayed_work_sync(&cb->monitor_work); in myrb_destroy_mempools()
143 destroy_workqueue(cb->work_q); in myrb_destroy_mempools()
145 dma_pool_destroy(cb->sg_pool); in myrb_destroy_mempools()
146 dma_pool_destroy(cb->dcdb_pool); in myrb_destroy_mempools()
163 static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk) in myrb_qcmd() argument
165 void __iomem *base = cb->io_base; in myrb_qcmd()
167 union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox; in myrb_qcmd()
169 cb->write_cmd_mbox(next_mbox, mbox); in myrb_qcmd()
170 if (cb->prev_cmd_mbox1->words[0] == 0 || in myrb_qcmd()
171 cb->prev_cmd_mbox2->words[0] == 0) in myrb_qcmd()
172 cb->get_cmd_mbox(base); in myrb_qcmd()
173 cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1; in myrb_qcmd()
174 cb->prev_cmd_mbox1 = next_mbox; in myrb_qcmd()
175 if (++next_mbox > cb->last_cmd_mbox) in myrb_qcmd()
176 next_mbox = cb->first_cmd_mbox; in myrb_qcmd()
177 cb->next_cmd_mbox = next_mbox; in myrb_qcmd()
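myrb_qcmd() writes the command into the next slot of a fixed-size command-mailbox ring, remembers the two most recently used slots, and wraps the cursor when it passes the last slot. A hedged sketch of just that ring bookkeeping; struct my_mbox, struct my_ring and my_ring_submit are stand-ins, not the driver's types:

```c
#include <linux/types.h>

/* Illustrative stand-ins for the driver's command-mailbox ring state. */
struct my_mbox {
	u32 words[4];
};

struct my_ring {
	struct my_mbox *first;	/* first slot of the ring */
	struct my_mbox *last;	/* last slot of the ring */
	struct my_mbox *next;	/* next slot to fill */
	struct my_mbox *prev1;	/* most recently submitted slot */
	struct my_mbox *prev2;	/* slot submitted before that */
};

/* Copy a command into the next slot and advance the cursor with wrap-around.
 * The real driver additionally prods the controller at this point when one
 * of the two previously used slots has been consumed (its words[0] reads 0). */
static void my_ring_submit(struct my_ring *ring, const struct my_mbox *cmd)
{
	struct my_mbox *slot = ring->next;

	*slot = *cmd;
	ring->prev2 = ring->prev1;
	ring->prev1 = slot;
	if (++slot > ring->last)
		slot = ring->first;
	ring->next = slot;
}
```
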
185 static unsigned short myrb_exec_cmd(struct myrb_hba *cb, in myrb_exec_cmd() argument
193 spin_lock_irqsave(&cb->queue_lock, flags); in myrb_exec_cmd()
194 cb->qcmd(cb, cmd_blk); in myrb_exec_cmd()
195 spin_unlock_irqrestore(&cb->queue_lock, flags); in myrb_exec_cmd()
206 static unsigned short myrb_exec_type3(struct myrb_hba *cb, in myrb_exec_type3() argument
209 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk; in myrb_exec_type3()
213 mutex_lock(&cb->dcmd_mutex); in myrb_exec_type3()
218 status = myrb_exec_cmd(cb, cmd_blk); in myrb_exec_type3()
219 mutex_unlock(&cb->dcmd_mutex); in myrb_exec_type3()
228 static unsigned short myrb_exec_type3D(struct myrb_hba *cb, in myrb_exec_type3D() argument
232 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk; in myrb_exec_type3D()
237 pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info, in myrb_exec_type3D()
240 if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr)) in myrb_exec_type3D()
243 mutex_lock(&cb->dcmd_mutex); in myrb_exec_type3D()
250 status = myrb_exec_cmd(cb, cmd_blk); in myrb_exec_type3D()
251 mutex_unlock(&cb->dcmd_mutex); in myrb_exec_type3D()
252 dma_unmap_single(&cb->pdev->dev, pdev_info_addr, in myrb_exec_type3D()
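myrb_exec_type3D() maps the caller's buffer with dma_map_single(), bails out if dma_mapping_error() reports a failure, and unmaps once the command has completed. A minimal sketch of that streaming-DMA pattern; my_issue_cmd() and the DMA_FROM_DEVICE direction are illustrative assumptions, not taken from the driver:

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/types.h>

/* Stand-in for the driver's mailbox submission helper. */
static int my_issue_cmd(struct pci_dev *pdev, dma_addr_t addr)
{
	/* The real driver fills a mailbox with 'addr' and waits for status. */
	return 0;
}

/* Map a buffer for the device, check the mapping, run the command, unmap. */
static int my_get_device_state(struct pci_dev *pdev, void *info, size_t len)
{
	dma_addr_t addr;
	int ret;

	addr = dma_map_single(&pdev->dev, info, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, addr))
		return -ENOMEM;

	ret = my_issue_cmd(pdev, addr);

	dma_unmap_single(&pdev->dev, addr, len, DMA_FROM_DEVICE);
	return ret;
}
```
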
279 * @cb: pointer to the hba structure
284 static void myrb_get_event(struct myrb_hba *cb, unsigned int event) in myrb_get_event() argument
286 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; in myrb_get_event()
292 ev_buf = dma_alloc_coherent(&cb->pdev->dev, in myrb_get_event()
305 status = myrb_exec_cmd(cb, cmd_blk); in myrb_get_event()
307 shost_printk(KERN_INFO, cb->host, in myrb_get_event()
320 shost_printk(KERN_CRIT, cb->host, in myrb_get_event()
325 shost_printk(KERN_CRIT, cb->host, in myrb_get_event()
331 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry), in myrb_get_event()
340 static void myrb_get_errtable(struct myrb_hba *cb) in myrb_get_errtable() argument
342 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; in myrb_get_errtable()
347 memcpy(&old_table, cb->err_table, sizeof(old_table)); in myrb_get_errtable()
352 mbox->type3.addr = cb->err_table_addr; in myrb_get_errtable()
353 status = myrb_exec_cmd(cb, cmd_blk); in myrb_get_errtable()
355 struct myrb_error_entry *table = cb->err_table; in myrb_get_errtable()
360 shost_for_each_device(sdev, cb->host) { in myrb_get_errtable()
361 if (sdev->channel >= myrb_logical_channel(cb->host)) in myrb_get_errtable()
387 static unsigned short myrb_get_ldev_info(struct myrb_hba *cb) in myrb_get_ldev_info() argument
390 int ldev_num, ldev_cnt = cb->enquiry->ldev_count; in myrb_get_ldev_info()
391 struct Scsi_Host *shost = cb->host; in myrb_get_ldev_info()
393 status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO, in myrb_get_ldev_info()
394 cb->ldev_info_addr); in myrb_get_ldev_info()
400 struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num; in myrb_get_ldev_info()
438 static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb, in myrb_get_rbld_progress() argument
441 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; in myrb_get_rbld_progress()
447 rbld_buf = dma_alloc_coherent(&cb->pdev->dev, in myrb_get_rbld_progress()
457 status = myrb_exec_cmd(cb, cmd_blk); in myrb_get_rbld_progress()
460 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress), in myrb_get_rbld_progress()
470 static void myrb_update_rbld_progress(struct myrb_hba *cb) in myrb_update_rbld_progress() argument
475 status = myrb_get_rbld_progress(cb, &rbld_buf); in myrb_update_rbld_progress()
477 cb->last_rbld_status == MYRB_STATUS_SUCCESS) in myrb_update_rbld_progress()
484 sdev = scsi_device_lookup(cb->host, in myrb_update_rbld_progress()
485 myrb_logical_channel(cb->host), in myrb_update_rbld_progress()
522 cb->last_rbld_status = status; in myrb_update_rbld_progress()
531 static void myrb_get_cc_progress(struct myrb_hba *cb) in myrb_get_cc_progress() argument
533 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; in myrb_get_cc_progress()
539 rbld_buf = dma_alloc_coherent(&cb->pdev->dev, in myrb_get_cc_progress()
543 cb->need_cc_status = true; in myrb_get_cc_progress()
550 status = myrb_exec_cmd(cb, cmd_blk); in myrb_get_cc_progress()
558 sdev = scsi_device_lookup(cb->host, in myrb_get_cc_progress()
559 myrb_logical_channel(cb->host), in myrb_get_cc_progress()
569 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress), in myrb_get_cc_progress()
578 static void myrb_bgi_control(struct myrb_hba *cb) in myrb_bgi_control() argument
580 struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk; in myrb_bgi_control()
587 bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status), in myrb_bgi_control()
590 shost_printk(KERN_ERR, cb->host, in myrb_bgi_control()
599 status = myrb_exec_cmd(cb, cmd_blk); in myrb_bgi_control()
600 last_bgi = &cb->bgi_status; in myrb_bgi_control()
601 sdev = scsi_device_lookup(cb->host, in myrb_bgi_control()
602 myrb_logical_channel(cb->host), in myrb_bgi_control()
639 memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status)); in myrb_bgi_control()
642 if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS) in myrb_bgi_control()
645 cb->bgi_status.status = MYRB_BGI_INVALID; in myrb_bgi_control()
648 if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS) in myrb_bgi_control()
653 cb->bgi_status.status = MYRB_BGI_INVALID; in myrb_bgi_control()
658 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status), in myrb_bgi_control()
669 static unsigned short myrb_hba_enquiry(struct myrb_hba *cb) in myrb_hba_enquiry() argument
674 memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry)); in myrb_hba_enquiry()
676 status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr); in myrb_hba_enquiry()
680 new = cb->enquiry; in myrb_hba_enquiry()
685 shost_printk(KERN_CRIT, cb->host, in myrb_hba_enquiry()
693 shost_printk(KERN_CRIT, cb->host, in myrb_hba_enquiry()
698 shost_printk(KERN_CRIT, cb->host, in myrb_hba_enquiry()
702 cb->new_ev_seq = new->ev_seq; in myrb_hba_enquiry()
703 cb->need_err_info = true; in myrb_hba_enquiry()
704 shost_printk(KERN_INFO, cb->host, in myrb_hba_enquiry()
706 cb->old_ev_seq, cb->new_ev_seq, in myrb_hba_enquiry()
714 shost_printk(KERN_INFO, cb->host, in myrb_hba_enquiry()
719 cb->need_ldev_info = true; in myrb_hba_enquiry()
723 time_after_eq(jiffies, cb->secondary_monitor_time in myrb_hba_enquiry()
725 cb->need_bgi_status = cb->bgi_status_supported; in myrb_hba_enquiry()
726 cb->secondary_monitor_time = jiffies; in myrb_hba_enquiry()
732 cb->need_rbld = true; in myrb_hba_enquiry()
733 cb->rbld_first = (new->ldev_critical < old.ldev_critical); in myrb_hba_enquiry()
738 shost_printk(KERN_INFO, cb->host, in myrb_hba_enquiry()
745 cb->need_cc_status = true; in myrb_hba_enquiry()
748 shost_printk(KERN_INFO, cb->host, in myrb_hba_enquiry()
752 shost_printk(KERN_INFO, cb->host, in myrb_hba_enquiry()
756 shost_printk(KERN_INFO, cb->host, in myrb_hba_enquiry()
760 shost_printk(KERN_INFO, cb->host, in myrb_hba_enquiry()
764 shost_printk(KERN_INFO, cb->host, in myrb_hba_enquiry()
769 cb->need_cc_status = true; in myrb_hba_enquiry()
779 static unsigned short myrb_set_pdev_state(struct myrb_hba *cb, in myrb_set_pdev_state() argument
782 struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk; in myrb_set_pdev_state()
786 mutex_lock(&cb->dcmd_mutex); in myrb_set_pdev_state()
792 status = myrb_exec_cmd(cb, cmd_blk); in myrb_set_pdev_state()
793 mutex_unlock(&cb->dcmd_mutex); in myrb_set_pdev_state()
806 static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn) in myrb_enable_mmio() argument
808 void __iomem *base = cb->io_base; in myrb_enable_mmio()
809 struct pci_dev *pdev = cb->pdev; in myrb_enable_mmio()
824 cb->enquiry = dma_alloc_coherent(&pdev->dev, in myrb_enable_mmio()
826 &cb->enquiry_addr, GFP_KERNEL); in myrb_enable_mmio()
827 if (!cb->enquiry) in myrb_enable_mmio()
832 cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size, in myrb_enable_mmio()
833 &cb->err_table_addr, GFP_KERNEL); in myrb_enable_mmio()
834 if (!cb->err_table) in myrb_enable_mmio()
838 cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size, in myrb_enable_mmio()
839 &cb->ldev_info_addr, GFP_KERNEL); in myrb_enable_mmio()
840 if (!cb->ldev_info_buf) in myrb_enable_mmio()
850 cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox); in myrb_enable_mmio()
851 cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev, in myrb_enable_mmio()
852 cb->cmd_mbox_size, in myrb_enable_mmio()
853 &cb->cmd_mbox_addr, in myrb_enable_mmio()
855 if (!cb->first_cmd_mbox) in myrb_enable_mmio()
858 cmd_mbox_mem = cb->first_cmd_mbox; in myrb_enable_mmio()
860 cb->last_cmd_mbox = cmd_mbox_mem; in myrb_enable_mmio()
861 cb->next_cmd_mbox = cb->first_cmd_mbox; in myrb_enable_mmio()
862 cb->prev_cmd_mbox1 = cb->last_cmd_mbox; in myrb_enable_mmio()
863 cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1; in myrb_enable_mmio()
866 cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT * in myrb_enable_mmio()
868 cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev, in myrb_enable_mmio()
869 cb->stat_mbox_size, in myrb_enable_mmio()
870 &cb->stat_mbox_addr, in myrb_enable_mmio()
872 if (!cb->first_stat_mbox) in myrb_enable_mmio()
875 stat_mbox_mem = cb->first_stat_mbox; in myrb_enable_mmio()
877 cb->last_stat_mbox = stat_mbox_mem; in myrb_enable_mmio()
878 cb->next_stat_mbox = cb->first_stat_mbox; in myrb_enable_mmio()
881 cb->dual_mode_interface = true; in myrb_enable_mmio()
885 mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr; in myrb_enable_mmio()
886 mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr; in myrb_enable_mmio()
890 cb->dual_mode_interface = false; in myrb_enable_mmio()
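myrb_enable_mmio() allocates the DMA-coherent enquiry, error-table, logical-device and mailbox buffers, then points first/last/next at the command and status rings. A hedged sketch of one such ring allocation; MY_MBOX_COUNT, struct my_cmd_mbox and my_alloc_mbox_ring are illustrative names and sizes, not the driver's:

```c
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/types.h>

#define MY_MBOX_COUNT 16	/* illustrative slot count */

struct my_cmd_mbox {
	u32 words[4];
};

/* Allocate a DMA-coherent mailbox ring and set up first/last/next pointers,
 * mirroring the bookkeeping visible in the hits above. */
static struct my_cmd_mbox *my_alloc_mbox_ring(struct pci_dev *pdev,
					      dma_addr_t *ring_addr,
					      struct my_cmd_mbox **last,
					      struct my_cmd_mbox **next)
{
	size_t size = MY_MBOX_COUNT * sizeof(struct my_cmd_mbox);
	struct my_cmd_mbox *ring;

	ring = dma_alloc_coherent(&pdev->dev, size, ring_addr, GFP_KERNEL);
	if (!ring)
		return NULL;

	*last = ring + MY_MBOX_COUNT - 1;
	*next = ring;
	return ring;
}
```
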
911 static int myrb_get_hba_config(struct myrb_hba *cb) in myrb_get_hba_config() argument
917 struct Scsi_Host *shost = cb->host; in myrb_get_hba_config()
918 struct pci_dev *pdev = cb->pdev; in myrb_get_hba_config()
926 shost_printk(KERN_ERR, cb->host, in myrb_get_hba_config()
933 shost_printk(KERN_ERR, cb->host, in myrb_get_hba_config()
939 mutex_lock(&cb->dma_mutex); in myrb_get_hba_config()
940 status = myrb_hba_enquiry(cb); in myrb_get_hba_config()
941 mutex_unlock(&cb->dma_mutex); in myrb_get_hba_config()
943 shost_printk(KERN_WARNING, cb->host, in myrb_get_hba_config()
948 status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr); in myrb_get_hba_config()
950 shost_printk(KERN_WARNING, cb->host, in myrb_get_hba_config()
955 status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr); in myrb_get_hba_config()
957 shost_printk(KERN_WARNING, cb->host, in myrb_get_hba_config()
962 status = myrb_get_ldev_info(cb); in myrb_get_hba_config()
964 shost_printk(KERN_WARNING, cb->host, in myrb_get_hba_config()
975 strcpy(cb->model_name, "DAC960PU"); in myrb_get_hba_config()
977 strcpy(cb->model_name, "DAC960PD"); in myrb_get_hba_config()
980 strcpy(cb->model_name, "DAC960PL"); in myrb_get_hba_config()
983 strcpy(cb->model_name, "DAC960PG"); in myrb_get_hba_config()
986 strcpy(cb->model_name, "DAC960PJ"); in myrb_get_hba_config()
989 strcpy(cb->model_name, "DAC960PR"); in myrb_get_hba_config()
992 strcpy(cb->model_name, "DAC960PT"); in myrb_get_hba_config()
995 strcpy(cb->model_name, "DAC960PTL0"); in myrb_get_hba_config()
998 strcpy(cb->model_name, "DAC960PRL"); in myrb_get_hba_config()
1001 strcpy(cb->model_name, "DAC960PTL1"); in myrb_get_hba_config()
1004 strcpy(cb->model_name, "eXtremeRAID 1100"); in myrb_get_hba_config()
1007 shost_printk(KERN_WARNING, cb->host, in myrb_get_hba_config()
1045 enquiry2->fw.major_version = cb->enquiry->fw_major_version; in myrb_get_hba_config()
1046 enquiry2->fw.minor_version = cb->enquiry->fw_minor_version; in myrb_get_hba_config()
1050 snprintf(cb->fw_version, sizeof(cb->fw_version), in myrb_get_hba_config()
1063 strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) { in myrb_get_hba_config()
1064 shost_printk(KERN_WARNING, cb->host, in myrb_get_hba_config()
1066 cb->fw_version); in myrb_get_hba_config()
1090 cb->bus_width = 32; in myrb_get_hba_config()
1092 cb->bus_width = 16; in myrb_get_hba_config()
1094 cb->bus_width = 8; in myrb_get_hba_config()
1095 cb->ldev_block_size = enquiry2->ldev_block_size; in myrb_get_hba_config()
1099 cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE); in myrb_get_hba_config()
1108 shost->can_queue = cb->enquiry->max_tcq; in myrb_get_hba_config()
1124 cb->stripe_size = config2->blocks_per_stripe * config2->block_factor in myrb_get_hba_config()
1126 cb->segment_size = config2->blocks_per_cacheline * config2->block_factor in myrb_get_hba_config()
1129 cb->ldev_geom_heads = 255; in myrb_get_hba_config()
1130 cb->ldev_geom_sectors = 63; in myrb_get_hba_config()
1132 cb->ldev_geom_heads = 128; in myrb_get_hba_config()
1133 cb->ldev_geom_sectors = 32; in myrb_get_hba_config()
1139 if ((cb->fw_version[0] == '4' && in myrb_get_hba_config()
1140 strcmp(cb->fw_version, "4.08") >= 0) || in myrb_get_hba_config()
1141 (cb->fw_version[0] == '5' && in myrb_get_hba_config()
1142 strcmp(cb->fw_version, "5.08") >= 0)) { in myrb_get_hba_config()
1143 cb->bgi_status_supported = true; in myrb_get_hba_config()
1144 myrb_bgi_control(cb); in myrb_get_hba_config()
1146 cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS; in myrb_get_hba_config()
1150 shost_printk(KERN_INFO, cb->host, in myrb_get_hba_config()
1151 "Configuring %s PCI RAID Controller\n", cb->model_name); in myrb_get_hba_config()
1152 shost_printk(KERN_INFO, cb->host, in myrb_get_hba_config()
1154 cb->fw_version, memsize); in myrb_get_hba_config()
1155 if (cb->io_addr == 0) in myrb_get_hba_config()
1156 shost_printk(KERN_INFO, cb->host, in myrb_get_hba_config()
1158 (unsigned long)cb->pci_addr, cb->irq); in myrb_get_hba_config()
1160 shost_printk(KERN_INFO, cb->host, in myrb_get_hba_config()
1162 (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr, in myrb_get_hba_config()
1163 cb->irq); in myrb_get_hba_config()
1164 shost_printk(KERN_INFO, cb->host, in myrb_get_hba_config()
1166 cb->host->can_queue, cb->host->max_sectors); in myrb_get_hba_config()
1167 shost_printk(KERN_INFO, cb->host, in myrb_get_hba_config()
1169 cb->host->can_queue, cb->host->sg_tablesize, in myrb_get_hba_config()
1171 shost_printk(KERN_INFO, cb->host, in myrb_get_hba_config()
1173 cb->stripe_size, cb->segment_size, in myrb_get_hba_config()
1174 cb->ldev_geom_heads, cb->ldev_geom_sectors, in myrb_get_hba_config()
1175 cb->safte_enabled ? in myrb_get_hba_config()
1177 shost_printk(KERN_INFO, cb->host, in myrb_get_hba_config()
1179 pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead, in myrb_get_hba_config()
1180 cb->host->max_id); in myrb_get_hba_config()
1182 shost_printk(KERN_INFO, cb->host, in myrb_get_hba_config()
1184 cb->enquiry->ldev_count, MYRB_MAX_LDEVS); in myrb_get_hba_config()
1198 static void myrb_unmap(struct myrb_hba *cb) in myrb_unmap() argument
1200 if (cb->ldev_info_buf) { in myrb_unmap()
1203 dma_free_coherent(&cb->pdev->dev, ldev_info_size, in myrb_unmap()
1204 cb->ldev_info_buf, cb->ldev_info_addr); in myrb_unmap()
1205 cb->ldev_info_buf = NULL; in myrb_unmap()
1207 if (cb->err_table) { in myrb_unmap()
1210 dma_free_coherent(&cb->pdev->dev, err_table_size, in myrb_unmap()
1211 cb->err_table, cb->err_table_addr); in myrb_unmap()
1212 cb->err_table = NULL; in myrb_unmap()
1214 if (cb->enquiry) { in myrb_unmap()
1215 dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry), in myrb_unmap()
1216 cb->enquiry, cb->enquiry_addr); in myrb_unmap()
1217 cb->enquiry = NULL; in myrb_unmap()
1219 if (cb->first_stat_mbox) { in myrb_unmap()
1220 dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size, in myrb_unmap()
1221 cb->first_stat_mbox, cb->stat_mbox_addr); in myrb_unmap()
1222 cb->first_stat_mbox = NULL; in myrb_unmap()
1224 if (cb->first_cmd_mbox) { in myrb_unmap()
1225 dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size, in myrb_unmap()
1226 cb->first_cmd_mbox, cb->cmd_mbox_addr); in myrb_unmap()
1227 cb->first_cmd_mbox = NULL; in myrb_unmap()
1234 static void myrb_cleanup(struct myrb_hba *cb) in myrb_cleanup() argument
1236 struct pci_dev *pdev = cb->pdev; in myrb_cleanup()
1239 myrb_unmap(cb); in myrb_cleanup()
1241 if (cb->mmio_base) { in myrb_cleanup()
1242 if (cb->disable_intr) in myrb_cleanup()
1243 cb->disable_intr(cb->io_base); in myrb_cleanup()
1244 iounmap(cb->mmio_base); in myrb_cleanup()
1246 if (cb->irq) in myrb_cleanup()
1247 free_irq(cb->irq, cb); in myrb_cleanup()
1248 if (cb->io_addr) in myrb_cleanup()
1249 release_region(cb->io_addr, 0x80); in myrb_cleanup()
1252 scsi_host_put(cb->host); in myrb_cleanup()
1258 struct myrb_hba *cb = shost_priv(shost); in myrb_host_reset() local
1260 cb->reset(cb->io_base); in myrb_host_reset()
1268 struct myrb_hba *cb = shost_priv(shost); in myrb_pthru_queuecommand() local
1279 dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr); in myrb_pthru_queuecommand()
1284 dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr); in myrb_pthru_queuecommand()
1333 spin_lock_irqsave(&cb->queue_lock, flags); in myrb_pthru_queuecommand()
1334 cb->qcmd(cb, cmd_blk); in myrb_pthru_queuecommand()
1335 spin_unlock_irqrestore(&cb->queue_lock, flags); in myrb_pthru_queuecommand()
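The myrb_pthru_queuecommand() hits show a per-command DCDB buffer taken from the dcdb pool with GFP_ATOMIC (the queuecommand path must not sleep) and the submission done under cb->queue_lock. A hedged sketch of that shape; struct my_cmd, my_submit and the error return are illustrative:

```c
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_cmd;	/* opaque illustrative command type */

/* Allocate a per-command DMA buffer from a pool in atomic context, then
 * submit the command under the controller's queue lock. */
static int my_queuecommand(struct dma_pool *dcdb_pool, spinlock_t *queue_lock,
			   struct my_cmd *cmd,
			   void (*my_submit)(struct my_cmd *, void *, dma_addr_t))
{
	unsigned long flags;
	dma_addr_t dcdb_addr;
	void *dcdb;

	dcdb = dma_pool_alloc(dcdb_pool, GFP_ATOMIC, &dcdb_addr);
	if (!dcdb)
		return -ENOMEM;	/* illustrative; a real queuecommand would report "busy" */

	spin_lock_irqsave(queue_lock, flags);
	my_submit(cmd, dcdb, dcdb_addr);
	spin_unlock_irqrestore(queue_lock, flags);
	return 0;
}
```
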
1339 static void myrb_inquiry(struct myrb_hba *cb, in myrb_inquiry() argument
1350 if (cb->bus_width > 16) in myrb_inquiry()
1352 if (cb->bus_width > 8) in myrb_inquiry()
1354 memcpy(&inq[16], cb->model_name, 16); in myrb_inquiry()
1355 memcpy(&inq[32], cb->fw_version, 1); in myrb_inquiry()
1356 memcpy(&inq[33], &cb->fw_version[2], 2); in myrb_inquiry()
1357 memcpy(&inq[35], &cb->fw_version[7], 1); in myrb_inquiry()
1363 myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd, in myrb_mode_sense() argument
1385 put_unaligned_be32(cb->ldev_block_size, &block_desc[5]); in myrb_mode_sense()
1391 if (cb->segment_size) { in myrb_mode_sense()
1393 put_unaligned_be16(cb->segment_size, &mode_pg[14]); in myrb_mode_sense()
1399 static void myrb_request_sense(struct myrb_hba *cb, in myrb_request_sense() argument
1407 static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd, in myrb_read_capacity() argument
1414 ldev_info->size, cb->ldev_block_size); in myrb_read_capacity()
1416 put_unaligned_be32(cb->ldev_block_size, &data[4]); in myrb_read_capacity()
1423 struct myrb_hba *cb = shost_priv(shost); in myrb_ldev_queuecommand() local
1453 myrb_inquiry(cb, scmd); in myrb_ldev_queuecommand()
1468 myrb_mode_sense(cb, scmd, ldev_info); in myrb_ldev_queuecommand()
1488 myrb_read_capacity(cb, scmd, ldev_info); in myrb_ldev_queuecommand()
1492 myrb_request_sense(cb, scmd); in myrb_ldev_queuecommand()
1575 hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr); in myrb_ldev_queuecommand()
1600 spin_lock_irqsave(&cb->queue_lock, flags); in myrb_ldev_queuecommand()
1601 cb->qcmd(cb, cmd_blk); in myrb_ldev_queuecommand()
1602 spin_unlock_irqrestore(&cb->queue_lock, flags); in myrb_ldev_queuecommand()
1625 struct myrb_hba *cb = shost_priv(sdev->host); in myrb_ldev_slave_alloc() local
1630 ldev_info = cb->ldev_info_buf + ldev_num; in myrb_ldev_slave_alloc()
1671 struct myrb_hba *cb = shost_priv(sdev->host); in myrb_pdev_slave_alloc() local
1682 status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE, in myrb_pdev_slave_alloc()
1753 struct myrb_hba *cb = shost_priv(sdev->host); in myrb_biosparam() local
1755 geom[0] = cb->ldev_geom_heads; in myrb_biosparam()
1756 geom[1] = cb->ldev_geom_sectors; in myrb_biosparam()
1766 struct myrb_hba *cb = shost_priv(sdev->host); in raid_state_show() local
1787 status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE, in raid_state_show()
1811 struct myrb_hba *cb = shost_priv(sdev->host); in raid_state_store() local
1841 status = myrb_set_pdev_state(cb, sdev, new_state); in raid_state_store()
1901 struct myrb_hba *cb = shost_priv(sdev->host); in rebuild_show() local
1908 status = myrb_get_rbld_progress(cb, &rbld_buf); in rebuild_show()
1923 struct myrb_hba *cb = shost_priv(sdev->host); in rebuild_store() local
1937 status = myrb_get_rbld_progress(cb, NULL); in rebuild_store()
1944 mutex_lock(&cb->dcmd_mutex); in rebuild_store()
1945 cmd_blk = &cb->dcmd_blk; in rebuild_store()
1952 status = myrb_exec_cmd(cb, cmd_blk); in rebuild_store()
1953 mutex_unlock(&cb->dcmd_mutex); in rebuild_store()
1955 struct pci_dev *pdev = cb->pdev; in rebuild_store()
1972 mutex_lock(&cb->dcmd_mutex); in rebuild_store()
1973 cmd_blk = &cb->dcmd_blk; in rebuild_store()
1980 status = myrb_exec_cmd(cb, cmd_blk); in rebuild_store()
1982 mutex_unlock(&cb->dcmd_mutex); in rebuild_store()
2028 struct myrb_hba *cb = shost_priv(sdev->host); in consistency_check_store() local
2044 status = myrb_get_rbld_progress(cb, &rbld_buf); in consistency_check_store()
2051 mutex_lock(&cb->dcmd_mutex); in consistency_check_store()
2052 cmd_blk = &cb->dcmd_blk; in consistency_check_store()
2060 status = myrb_exec_cmd(cb, cmd_blk); in consistency_check_store()
2061 mutex_unlock(&cb->dcmd_mutex); in consistency_check_store()
2063 struct pci_dev *pdev = cb->pdev; in consistency_check_store()
2079 mutex_lock(&cb->dcmd_mutex); in consistency_check_store()
2080 cmd_blk = &cb->dcmd_blk; in consistency_check_store()
2087 status = myrb_exec_cmd(cb, cmd_blk); in consistency_check_store()
2089 mutex_unlock(&cb->dcmd_mutex); in consistency_check_store()
2141 struct myrb_hba *cb = shost_priv(shost); in ctlr_num_show() local
2143 return snprintf(buf, 20, "%u\n", cb->ctlr_num); in ctlr_num_show()
2151 struct myrb_hba *cb = shost_priv(shost); in firmware_show() local
2153 return snprintf(buf, 16, "%s\n", cb->fw_version); in firmware_show()
2161 struct myrb_hba *cb = shost_priv(shost); in model_show() local
2163 return snprintf(buf, 16, "%s\n", cb->model_name); in model_show()
2171 struct myrb_hba *cb = shost_priv(shost); in flush_cache_store() local
2174 status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0); in flush_cache_store()
2240 struct myrb_hba *cb = shost_priv(sdev->host); in myrb_get_resync() local
2248 status = myrb_get_rbld_progress(cb, &rbld_buf); in myrb_get_resync()
2267 struct myrb_hba *cb = shost_priv(sdev->host); in myrb_get_state() local
2275 status = myrb_get_rbld_progress(cb, NULL); in myrb_get_state()
2302 static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk, in myrb_handle_scsi() argument
2314 dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb, in myrb_handle_scsi()
2319 dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr); in myrb_handle_scsi()
2372 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk) in myrb_handle_cmdblk() argument
2385 struct myrb_hba *cb = container_of(work, in myrb_monitor() local
2387 struct Scsi_Host *shost = cb->host; in myrb_monitor()
2392 if (cb->new_ev_seq > cb->old_ev_seq) { in myrb_monitor()
2393 int event = cb->old_ev_seq; in myrb_monitor()
2397 cb->new_ev_seq, event); in myrb_monitor()
2398 myrb_get_event(cb, event); in myrb_monitor()
2399 cb->old_ev_seq = event + 1; in myrb_monitor()
2401 } else if (cb->need_err_info) { in myrb_monitor()
2402 cb->need_err_info = false; in myrb_monitor()
2404 myrb_get_errtable(cb); in myrb_monitor()
2406 } else if (cb->need_rbld && cb->rbld_first) { in myrb_monitor()
2407 cb->need_rbld = false; in myrb_monitor()
2410 myrb_update_rbld_progress(cb); in myrb_monitor()
2412 } else if (cb->need_ldev_info) { in myrb_monitor()
2413 cb->need_ldev_info = false; in myrb_monitor()
2416 myrb_get_ldev_info(cb); in myrb_monitor()
2418 } else if (cb->need_rbld) { in myrb_monitor()
2419 cb->need_rbld = false; in myrb_monitor()
2422 myrb_update_rbld_progress(cb); in myrb_monitor()
2424 } else if (cb->need_cc_status) { in myrb_monitor()
2425 cb->need_cc_status = false; in myrb_monitor()
2428 myrb_get_cc_progress(cb); in myrb_monitor()
2430 } else if (cb->need_bgi_status) { in myrb_monitor()
2431 cb->need_bgi_status = false; in myrb_monitor()
2433 myrb_bgi_control(cb); in myrb_monitor()
2437 mutex_lock(&cb->dma_mutex); in myrb_monitor()
2438 myrb_hba_enquiry(cb); in myrb_monitor()
2439 mutex_unlock(&cb->dma_mutex); in myrb_monitor()
2440 if ((cb->new_ev_seq - cb->old_ev_seq > 0) || in myrb_monitor()
2441 cb->need_err_info || cb->need_rbld || in myrb_monitor()
2442 cb->need_ldev_info || cb->need_cc_status || in myrb_monitor()
2443 cb->need_bgi_status) { in myrb_monitor()
2450 cb->primary_monitor_time = jiffies; in myrb_monitor()
2451 queue_delayed_work(cb->work_q, &cb->monitor_work, interval); in myrb_monitor()
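myrb_monitor() is a self-rearming delayed work item: it recovers the HBA via container_of(), performs whichever status query is flagged, and re-queues itself on cb->work_q; teardown cancels it with cancel_delayed_work_sync() before destroying the workqueue (see myrb_destroy_mempools above). A minimal sketch of that rearm/cancel pattern with hypothetical names (my_monitor_ctx, my_monitor, my_monitor_stop) and an illustrative interval:

```c
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Illustrative self-rearming monitor context. */
struct my_monitor_ctx {
	struct workqueue_struct *work_q;
	struct delayed_work monitor_work;
	unsigned long last_run;
};

/* Work function: do the periodic checks, then re-queue itself. */
static void my_monitor(struct work_struct *work)
{
	struct my_monitor_ctx *ctx = container_of(work, struct my_monitor_ctx,
						  monitor_work.work);

	/* ...periodic status checks would go here... */

	ctx->last_run = jiffies;
	queue_delayed_work(ctx->work_q, &ctx->monitor_work,
			   msecs_to_jiffies(10000));	/* illustrative interval */
}

/* Teardown mirrors the destroy path in the hits above. */
static void my_monitor_stop(struct my_monitor_ctx *ctx)
{
	cancel_delayed_work_sync(&ctx->monitor_work);
	destroy_workqueue(ctx->work_q);
}
```
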
2462 static bool myrb_err_status(struct myrb_hba *cb, unsigned char error, in myrb_err_status() argument
2465 struct pci_dev *pdev = cb->pdev; in myrb_err_status()
2668 struct myrb_hba *cb, void __iomem *base) in DAC960_LA_hw_init() argument
2680 myrb_err_status(cb, error, parm0, parm1)) in DAC960_LA_hw_init()
2690 if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) { in DAC960_LA_hw_init()
2697 cb->qcmd = myrb_qcmd; in DAC960_LA_hw_init()
2698 cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox; in DAC960_LA_hw_init()
2699 if (cb->dual_mode_interface) in DAC960_LA_hw_init()
2700 cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd; in DAC960_LA_hw_init()
2702 cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd; in DAC960_LA_hw_init()
2703 cb->disable_intr = DAC960_LA_disable_intr; in DAC960_LA_hw_init()
2704 cb->reset = DAC960_LA_reset_ctrl; in DAC960_LA_hw_init()
2711 struct myrb_hba *cb = arg; in DAC960_LA_intr_handler() local
2712 void __iomem *base = cb->io_base; in DAC960_LA_intr_handler()
2716 spin_lock_irqsave(&cb->queue_lock, flags); in DAC960_LA_intr_handler()
2718 next_stat_mbox = cb->next_stat_mbox; in DAC960_LA_intr_handler()
2725 cmd_blk = &cb->dcmd_blk; in DAC960_LA_intr_handler()
2727 cmd_blk = &cb->mcmd_blk; in DAC960_LA_intr_handler()
2729 scmd = scsi_host_find_tag(cb->host, id - 3); in DAC960_LA_intr_handler()
2736 dev_err(&cb->pdev->dev, in DAC960_LA_intr_handler()
2740 if (++next_stat_mbox > cb->last_stat_mbox) in DAC960_LA_intr_handler()
2741 next_stat_mbox = cb->first_stat_mbox; in DAC960_LA_intr_handler()
2745 myrb_handle_cmdblk(cb, cmd_blk); in DAC960_LA_intr_handler()
2747 myrb_handle_scsi(cb, cmd_blk, scmd); in DAC960_LA_intr_handler()
2750 cb->next_stat_mbox = next_stat_mbox; in DAC960_LA_intr_handler()
2751 spin_unlock_irqrestore(&cb->queue_lock, flags); in DAC960_LA_intr_handler()
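DAC960_LA_intr_handler() drains the status-mailbox ring under cb->queue_lock: it reads each non-empty slot, clears it, wraps the cursor at last_stat_mbox, and completes the corresponding command block. A hedged sketch of that drain loop; struct my_stat_mbox, my_irq_ctx and the zero-means-empty convention are illustrative assumptions:

```c
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative status slot: zero means "empty" in this sketch. */
struct my_stat_mbox {
	u32 word;
};

struct my_irq_ctx {
	spinlock_t queue_lock;
	struct my_stat_mbox *first_stat_mbox;
	struct my_stat_mbox *last_stat_mbox;
	struct my_stat_mbox *next_stat_mbox;
};

/* Drain completed entries from the ring, wrapping the cursor at the end,
 * all under the controller's queue lock. */
static irqreturn_t my_intr_handler(int irq, void *arg)
{
	struct my_irq_ctx *ctx = arg;
	struct my_stat_mbox *mbox;
	unsigned long flags;

	spin_lock_irqsave(&ctx->queue_lock, flags);
	mbox = ctx->next_stat_mbox;
	while (mbox->word) {
		u32 word = mbox->word;

		mbox->word = 0;			/* hand the slot back */
		if (++mbox > ctx->last_stat_mbox)
			mbox = ctx->first_stat_mbox;
		/* ...look up and complete the command identified by 'word'... */
		(void)word;
	}
	ctx->next_stat_mbox = mbox;
	spin_unlock_irqrestore(&ctx->queue_lock, flags);
	return IRQ_HANDLED;
}
```
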
2916 struct myrb_hba *cb, void __iomem *base) in DAC960_PG_hw_init() argument
2928 myrb_err_status(cb, error, parm0, parm1)) in DAC960_PG_hw_init()
2938 if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) { in DAC960_PG_hw_init()
2945 cb->qcmd = myrb_qcmd; in DAC960_PG_hw_init()
2946 cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox; in DAC960_PG_hw_init()
2947 if (cb->dual_mode_interface) in DAC960_PG_hw_init()
2948 cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd; in DAC960_PG_hw_init()
2950 cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd; in DAC960_PG_hw_init()
2951 cb->disable_intr = DAC960_PG_disable_intr; in DAC960_PG_hw_init()
2952 cb->reset = DAC960_PG_reset_ctrl; in DAC960_PG_hw_init()
2959 struct myrb_hba *cb = arg; in DAC960_PG_intr_handler() local
2960 void __iomem *base = cb->io_base; in DAC960_PG_intr_handler()
2964 spin_lock_irqsave(&cb->queue_lock, flags); in DAC960_PG_intr_handler()
2966 next_stat_mbox = cb->next_stat_mbox; in DAC960_PG_intr_handler()
2973 cmd_blk = &cb->dcmd_blk; in DAC960_PG_intr_handler()
2975 cmd_blk = &cb->mcmd_blk; in DAC960_PG_intr_handler()
2977 scmd = scsi_host_find_tag(cb->host, id - 3); in DAC960_PG_intr_handler()
2984 dev_err(&cb->pdev->dev, in DAC960_PG_intr_handler()
2988 if (++next_stat_mbox > cb->last_stat_mbox) in DAC960_PG_intr_handler()
2989 next_stat_mbox = cb->first_stat_mbox; in DAC960_PG_intr_handler()
2992 myrb_handle_cmdblk(cb, cmd_blk); in DAC960_PG_intr_handler()
2994 myrb_handle_scsi(cb, cmd_blk, scmd); in DAC960_PG_intr_handler()
2996 cb->next_stat_mbox = next_stat_mbox; in DAC960_PG_intr_handler()
2997 spin_unlock_irqrestore(&cb->queue_lock, flags); in DAC960_PG_intr_handler()
3100 static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk) in DAC960_PD_qcmd() argument
3102 void __iomem *base = cb->io_base; in DAC960_PD_qcmd()
3112 struct myrb_hba *cb, void __iomem *base) in DAC960_PD_hw_init() argument
3117 if (!request_region(cb->io_addr, 0x80, "myrb")) { in DAC960_PD_hw_init()
3119 (unsigned long)cb->io_addr); in DAC960_PD_hw_init()
3129 myrb_err_status(cb, error, parm0, parm1)) in DAC960_PD_hw_init()
3139 if (!myrb_enable_mmio(cb, NULL)) { in DAC960_PD_hw_init()
3146 cb->qcmd = DAC960_PD_qcmd; in DAC960_PD_hw_init()
3147 cb->disable_intr = DAC960_PD_disable_intr; in DAC960_PD_hw_init()
3148 cb->reset = DAC960_PD_reset_ctrl; in DAC960_PD_hw_init()
3155 struct myrb_hba *cb = arg; in DAC960_PD_intr_handler() local
3156 void __iomem *base = cb->io_base; in DAC960_PD_intr_handler()
3159 spin_lock_irqsave(&cb->queue_lock, flags); in DAC960_PD_intr_handler()
3166 cmd_blk = &cb->dcmd_blk; in DAC960_PD_intr_handler()
3168 cmd_blk = &cb->mcmd_blk; in DAC960_PD_intr_handler()
3170 scmd = scsi_host_find_tag(cb->host, id - 3); in DAC960_PD_intr_handler()
3177 dev_err(&cb->pdev->dev, in DAC960_PD_intr_handler()
3184 myrb_handle_cmdblk(cb, cmd_blk); in DAC960_PD_intr_handler()
3186 myrb_handle_scsi(cb, cmd_blk, scmd); in DAC960_PD_intr_handler()
3188 spin_unlock_irqrestore(&cb->queue_lock, flags); in DAC960_PD_intr_handler()
3239 static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk) in DAC960_P_qcmd() argument
3241 void __iomem *base = cb->io_base; in DAC960_P_qcmd()
3278 struct myrb_hba *cb, void __iomem *base) in DAC960_P_hw_init() argument
3283 if (!request_region(cb->io_addr, 0x80, "myrb")) { in DAC960_P_hw_init()
3285 (unsigned long)cb->io_addr); in DAC960_P_hw_init()
3295 myrb_err_status(cb, error, parm0, parm1)) in DAC960_P_hw_init()
3305 if (!myrb_enable_mmio(cb, NULL)) { in DAC960_P_hw_init()
3312 cb->qcmd = DAC960_P_qcmd; in DAC960_P_hw_init()
3313 cb->disable_intr = DAC960_PD_disable_intr; in DAC960_P_hw_init()
3314 cb->reset = DAC960_PD_reset_ctrl; in DAC960_P_hw_init()
3321 struct myrb_hba *cb = arg; in DAC960_P_intr_handler() local
3322 void __iomem *base = cb->io_base; in DAC960_P_intr_handler()
3325 spin_lock_irqsave(&cb->queue_lock, flags); in DAC960_P_intr_handler()
3335 cmd_blk = &cb->dcmd_blk; in DAC960_P_intr_handler()
3337 cmd_blk = &cb->mcmd_blk; in DAC960_P_intr_handler()
3339 scmd = scsi_host_find_tag(cb->host, id - 3); in DAC960_P_intr_handler()
3346 dev_err(&cb->pdev->dev, in DAC960_P_intr_handler()
3360 myrb_translate_enquiry(cb->enquiry); in DAC960_P_intr_handler()
3382 myrb_handle_cmdblk(cb, cmd_blk); in DAC960_P_intr_handler()
3384 myrb_handle_scsi(cb, cmd_blk, scmd); in DAC960_P_intr_handler()
3386 spin_unlock_irqrestore(&cb->queue_lock, flags); in DAC960_P_intr_handler()
3404 struct myrb_hba *cb = NULL; in myrb_detect() local
3413 cb = shost_priv(shost); in myrb_detect()
3414 mutex_init(&cb->dcmd_mutex); in myrb_detect()
3415 mutex_init(&cb->dma_mutex); in myrb_detect()
3416 cb->pdev = pdev; in myrb_detect()
3417 cb->host = shost; in myrb_detect()
3427 cb->io_addr = pci_resource_start(pdev, 0); in myrb_detect()
3428 cb->pci_addr = pci_resource_start(pdev, 1); in myrb_detect()
3430 cb->pci_addr = pci_resource_start(pdev, 0); in myrb_detect()
3432 pci_set_drvdata(pdev, cb); in myrb_detect()
3433 spin_lock_init(&cb->queue_lock); in myrb_detect()
3436 cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size); in myrb_detect()
3437 if (cb->mmio_base == NULL) { in myrb_detect()
3443 cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK); in myrb_detect()
3444 if (privdata->hw_init(pdev, cb, cb->io_base)) in myrb_detect()
3447 if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) { in myrb_detect()
3452 cb->irq = pdev->irq; in myrb_detect()
3453 return cb; in myrb_detect()
3458 myrb_cleanup(cb); in myrb_detect()
3464 struct myrb_hba *cb; in myrb_probe() local
3467 cb = myrb_detect(dev, entry); in myrb_probe()
3468 if (!cb) in myrb_probe()
3471 ret = myrb_get_hba_config(cb); in myrb_probe()
3473 myrb_cleanup(cb); in myrb_probe()
3477 if (!myrb_create_mempools(dev, cb)) { in myrb_probe()
3482 ret = scsi_add_host(cb->host, &dev->dev); in myrb_probe()
3485 myrb_destroy_mempools(cb); in myrb_probe()
3488 scsi_scan_host(cb->host); in myrb_probe()
3491 myrb_cleanup(cb); in myrb_probe()
3498 struct myrb_hba *cb = pci_get_drvdata(pdev); in myrb_remove() local
3500 shost_printk(KERN_NOTICE, cb->host, "Flushing Cache..."); in myrb_remove()
3501 myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0); in myrb_remove()
3502 myrb_cleanup(cb); in myrb_remove()
3503 myrb_destroy_mempools(cb); in myrb_remove()