Lines matching: lock, latency, ns
1 // SPDX-License-Identifier: GPL-2.0-or-later
78 spinlock_t lock; member
84 /*----------------- Misc Utility Functions -------------------*/
89 tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) & in rsxx_addr8_to_laddr()
90 card->_stripe.upper_mask) | in rsxx_addr8_to_laddr()
91 ((addr8) & card->_stripe.lower_mask); in rsxx_addr8_to_laddr()
100 tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask; in rsxx_get_dma_tgt()
108 iowrite32(DMA_QUEUE_RESET, card->regmap + RESET); in rsxx_dma_queue_reset()
113 if (dma->sub_page.cnt) in get_dma_size()
114 return dma->sub_page.cnt << 9; in get_dma_size()
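For orientation, the two utility functions above split a byte address (addr8) into a DMA target index and a per-target logical address. The following standalone sketch reproduces that arithmetic with hypothetical stripe parameters (4 KiB stripes interleaved across 8 targets); the real _stripe masks and shifts are derived later in this listing by rsxx_dma_stripe_setup().

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stripe geometry mirroring what rsxx_dma_stripe_setup()
 * computes from stripe_size8 and n_targets further down in this listing. */
#define STRIPE_SIZE8	4096ULL			/* bytes per stripe       */
#define N_TARGETS	8ULL
#define LOWER_MASK	(STRIPE_SIZE8 - 1)	/* offset within a stripe */
#define UPPER_MASK	(~LOWER_MASK)		/* bits above the stripe  */
#define UPPER_SHIFT	3			/* ffs(N_TARGETS) - 1     */
#define TARGET_MASK	(N_TARGETS - 1)
#define TARGET_SHIFT	12			/* ffs(STRIPE_SIZE8) - 1  */

int main(void)
{
	uint64_t addr8 = 0x12345;	/* byte address on the card */

	/* rsxx_get_dma_tgt(): which DMA channel owns this stripe. */
	uint64_t tgt = (addr8 >> TARGET_SHIFT) & TARGET_MASK;

	/* rsxx_addr8_to_laddr(): drop the target-select bits, keep the
	 * offset within the stripe. */
	uint64_t laddr = ((addr8 >> UPPER_SHIFT) & UPPER_MASK) |
			 (addr8 & LOWER_MASK);

	printf("addr8 0x%llx -> target %llu, laddr 0x%llx\n",
	       (unsigned long long)addr8, (unsigned long long)tgt,
	       (unsigned long long)laddr);	/* target 2, laddr 0x2345 */
	return 0;
}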
120 /*----------------- DMA Tracker -------------------*/
125 trackers->list[tag].dma = dma; in set_tracker_dma()
131 return trackers->list[tag].dma; in get_tracker_dma()
138 spin_lock(&trackers->lock); in pop_tracker()
139 tag = trackers->head; in pop_tracker()
140 if (tag != -1) { in pop_tracker()
141 trackers->head = trackers->list[tag].next_tag; in pop_tracker()
142 trackers->list[tag].next_tag = -1; in pop_tracker()
144 spin_unlock(&trackers->lock); in pop_tracker()
151 spin_lock(&trackers->lock); in push_tracker()
152 trackers->list[tag].next_tag = trackers->head; in push_tracker()
153 trackers->head = tag; in push_tracker()
154 trackers->list[tag].dma = NULL; in push_tracker()
155 spin_unlock(&trackers->lock); in push_tracker()
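The tracker above is a spinlock-protected free list of command tags held in an array, with -1 terminating the list. A userspace sketch of the same pattern, using a pthread mutex in place of the spinlock and an illustrative eight-entry table (the real size is RSXX_MAX_OUTSTANDING_CMDS):

#include <pthread.h>
#include <stdio.h>

#define MAX_TAGS 8	/* illustrative stand-in for RSXX_MAX_OUTSTANDING_CMDS */

struct tracker {
	pthread_mutex_t lock;	/* the driver uses a spinlock here */
	int head;		/* first free tag, -1 when none are left */
	struct {
		int next_tag;	/* next free tag in the list */
		void *dma;	/* request currently owning this tag */
	} list[MAX_TAGS];
};

/* Take a free tag off the list head; returns -1 if the tracker is empty. */
static int pop_tracker(struct tracker *t)
{
	int tag;

	pthread_mutex_lock(&t->lock);
	tag = t->head;
	if (tag != -1) {
		t->head = t->list[tag].next_tag;
		t->list[tag].next_tag = -1;
	}
	pthread_mutex_unlock(&t->lock);
	return tag;
}

/* Return a tag to the list head and drop its request pointer. */
static void push_tracker(struct tracker *t, int tag)
{
	pthread_mutex_lock(&t->lock);
	t->list[tag].next_tag = t->head;
	t->head = tag;
	t->list[tag].dma = NULL;
	pthread_mutex_unlock(&t->lock);
}

int main(void)
{
	static struct tracker t;
	int i, tag;

	pthread_mutex_init(&t.lock, NULL);
	t.head = 0;
	for (i = 0; i < MAX_TAGS; i++)
		t.list[i].next_tag = i + 1;
	t.list[MAX_TAGS - 1].next_tag = -1;

	tag = pop_tracker(&t);		/* claim a tag for a new command */
	printf("got tag %d\n", tag);	/* prints: got tag 0 */
	push_tracker(&t, tag);		/* give it back on completion */
	return 0;
}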
159 /*----------------- Interrupt Coalescing -------------*/
162 * Interrupt Timer (64ns units) [15:0]
170 #define INTR_COAL_COUNT_MASK (((1 << INTR_COAL_COUNT_BITS) - 1) << \
175 static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency) in dma_intr_coal_val() argument
177 u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS; in dma_intr_coal_val()
193 if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE || in dma_intr_coal_auto_tune()
194 unlikely(card->eeh_state)) in dma_intr_coal_auto_tune()
197 for (i = 0; i < card->n_targets; i++) in dma_intr_coal_auto_tune()
198 q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth); in dma_intr_coal_auto_tune()
200 intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode, in dma_intr_coal_auto_tune()
202 card->config.data.intr_coal.latency); in dma_intr_coal_auto_tune()
203 iowrite32(intr_coal, card->regmap + INTR_COAL); in dma_intr_coal_auto_tune()
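Per the register-format comment above, the interrupt timer occupies bits [15:0] in 64 ns units, with a completion-count field packed above it. The sketch below builds a coalescing value accordingly; the count shift and width used here (16 and 9 bits) are assumptions, since the full INTR_COAL_* macros are not visible in this excerpt, and the mode handling of dma_intr_coal_val() is omitted.

#include <stdint.h>
#include <stdio.h>

#define COAL_LATENCY_UNITS_NS	64u		/* timer ticks are 64 ns    */
#define COAL_LATENCY_MASK	0x0000ffffu	/* timer field, bits [15:0] */
#define COAL_COUNT_SHIFT	16		/* assumed field position   */
#define COAL_COUNT_BITS		9		/* assumed field width      */
#define COAL_COUNT_MASK		(((1u << COAL_COUNT_BITS) - 1) << COAL_COUNT_SHIFT)

/* Pack an interrupt-coalescing value: raise the interrupt after `count`
 * completions or `latency_ns` nanoseconds, whichever comes first. */
static uint32_t coal_val(uint32_t count, uint32_t latency_ns)
{
	uint32_t latency_units = latency_ns / COAL_LATENCY_UNITS_NS;

	return ((count << COAL_COUNT_SHIFT) & COAL_COUNT_MASK) |
	       (latency_units & COAL_LATENCY_MASK);
}

int main(void)
{
	printf("INTR_COAL = 0x%08x\n", coal_val(32, 4096));	/* 0x00200040 */
	return 0;
}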
206 /*----------------- RSXX DMA Handling -------------------*/
209 if (dma->cmd != HW_CMD_BLK_DISCARD) { in rsxx_free_dma()
210 if (!dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) { in rsxx_free_dma()
211 dma_unmap_page(&ctrl->card->dev->dev, dma->dma_addr, in rsxx_free_dma()
213 dma->cmd == HW_CMD_BLK_WRITE ? in rsxx_free_dma()
227 ctrl->stats.dma_sw_err++; in rsxx_complete_dma()
229 ctrl->stats.dma_hw_fault++; in rsxx_complete_dma()
231 ctrl->stats.dma_cancelled++; in rsxx_complete_dma()
233 if (dma->cb) in rsxx_complete_dma()
234 dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0); in rsxx_complete_dma()
247 list_del(&dma->list); in rsxx_cleanup_dma_queue()
265 spin_lock_bh(&ctrl->queue_lock); in rsxx_requeue_dma()
266 ctrl->stats.sw_q_depth++; in rsxx_requeue_dma()
267 list_add(&dma->list, &ctrl->queue); in rsxx_requeue_dma()
268 spin_unlock_bh(&ctrl->queue_lock); in rsxx_requeue_dma()
278 dev_dbg(CARD_TO_DEV(ctrl->card), in rsxx_handle_dma_error()
280 dma->cmd, dma->laddr, hw_st); in rsxx_handle_dma_error()
283 ctrl->stats.crc_errors++; in rsxx_handle_dma_error()
285 ctrl->stats.hard_errors++; in rsxx_handle_dma_error()
287 ctrl->stats.soft_errors++; in rsxx_handle_dma_error()
289 switch (dma->cmd) { in rsxx_handle_dma_error()
292 if (ctrl->card->scrub_hard) { in rsxx_handle_dma_error()
293 dma->cmd = HW_CMD_BLK_RECON_READ; in rsxx_handle_dma_error()
295 ctrl->stats.reads_retried++; in rsxx_handle_dma_error()
298 ctrl->stats.reads_failed++; in rsxx_handle_dma_error()
302 ctrl->stats.reads_failed++; in rsxx_handle_dma_error()
310 ctrl->stats.reads_failed++; in rsxx_handle_dma_error()
316 ctrl->stats.writes_failed++; in rsxx_handle_dma_error()
321 ctrl->stats.discards_failed++; in rsxx_handle_dma_error()
325 dev_err(CARD_TO_DEV(ctrl->card), in rsxx_handle_dma_error()
328 dma->cmd, dma->laddr, hw_st); in rsxx_handle_dma_error()
345 if (atomic_read(&ctrl->stats.hw_q_depth) == 0 || in dma_engine_stalled()
346 unlikely(ctrl->card->eeh_state)) in dma_engine_stalled()
349 if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) { in dma_engine_stalled()
354 dev_warn(CARD_TO_DEV(ctrl->card), in dma_engine_stalled()
355 "SW_CMD_IDX write was lost, re-writing...\n"); in dma_engine_stalled()
356 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); in dma_engine_stalled()
357 mod_timer(&ctrl->activity_timer, in dma_engine_stalled()
360 dev_warn(CARD_TO_DEV(ctrl->card), in dma_engine_stalled()
362 ctrl->id); in dma_engine_stalled()
363 ctrl->card->dma_fault = 1; in dma_engine_stalled()
366 spin_lock(&ctrl->queue_lock); in dma_engine_stalled()
367 cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA); in dma_engine_stalled()
368 spin_unlock(&ctrl->queue_lock); in dma_engine_stalled()
373 dev_info(CARD_TO_DEV(ctrl->card), in dma_engine_stalled()
375 cnt, ctrl->id); in dma_engine_stalled()
387 hw_cmd_buf = ctrl->cmd.buf; in rsxx_issue_dmas()
389 if (unlikely(ctrl->card->halt) || in rsxx_issue_dmas()
390 unlikely(ctrl->card->eeh_state)) in rsxx_issue_dmas()
394 spin_lock_bh(&ctrl->queue_lock); in rsxx_issue_dmas()
395 if (list_empty(&ctrl->queue)) { in rsxx_issue_dmas()
396 spin_unlock_bh(&ctrl->queue_lock); in rsxx_issue_dmas()
399 spin_unlock_bh(&ctrl->queue_lock); in rsxx_issue_dmas()
401 tag = pop_tracker(ctrl->trackers); in rsxx_issue_dmas()
402 if (tag == -1) in rsxx_issue_dmas()
405 spin_lock_bh(&ctrl->queue_lock); in rsxx_issue_dmas()
406 dma = list_entry(ctrl->queue.next, struct rsxx_dma, list); in rsxx_issue_dmas()
407 list_del(&dma->list); in rsxx_issue_dmas()
408 ctrl->stats.sw_q_depth--; in rsxx_issue_dmas()
409 spin_unlock_bh(&ctrl->queue_lock); in rsxx_issue_dmas()
416 if (unlikely(ctrl->card->dma_fault)) { in rsxx_issue_dmas()
417 push_tracker(ctrl->trackers, tag); in rsxx_issue_dmas()
422 if (dma->cmd != HW_CMD_BLK_DISCARD) { in rsxx_issue_dmas()
423 if (dma->cmd == HW_CMD_BLK_WRITE) in rsxx_issue_dmas()
434 * Non-HWWD PCIe slot. This way the dma_map_page in rsxx_issue_dmas()
438 dma->dma_addr = dma_map_page(&ctrl->card->dev->dev, dma->page, in rsxx_issue_dmas()
439 dma->pg_off, dma->sub_page.cnt << 9, dir); in rsxx_issue_dmas()
440 if (dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) { in rsxx_issue_dmas()
441 push_tracker(ctrl->trackers, tag); in rsxx_issue_dmas()
447 set_tracker_dma(ctrl->trackers, tag, dma); in rsxx_issue_dmas()
448 hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd; in rsxx_issue_dmas()
449 hw_cmd_buf[ctrl->cmd.idx].tag = tag; in rsxx_issue_dmas()
450 hw_cmd_buf[ctrl->cmd.idx]._rsvd = 0; in rsxx_issue_dmas()
451 hw_cmd_buf[ctrl->cmd.idx].sub_page = in rsxx_issue_dmas()
452 ((dma->sub_page.cnt & 0x7) << 4) | in rsxx_issue_dmas()
453 (dma->sub_page.off & 0x7); in rsxx_issue_dmas()
455 hw_cmd_buf[ctrl->cmd.idx].device_addr = in rsxx_issue_dmas()
456 cpu_to_le32(dma->laddr); in rsxx_issue_dmas()
458 hw_cmd_buf[ctrl->cmd.idx].host_addr = in rsxx_issue_dmas()
459 cpu_to_le64(dma->dma_addr); in rsxx_issue_dmas()
461 dev_dbg(CARD_TO_DEV(ctrl->card), in rsxx_issue_dmas()
463 ctrl->id, dma->laddr, tag, ctrl->cmd.idx); in rsxx_issue_dmas()
465 ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK; in rsxx_issue_dmas()
468 if (dma->cmd == HW_CMD_BLK_WRITE) in rsxx_issue_dmas()
469 ctrl->stats.writes_issued++; in rsxx_issue_dmas()
470 else if (dma->cmd == HW_CMD_BLK_DISCARD) in rsxx_issue_dmas()
471 ctrl->stats.discards_issued++; in rsxx_issue_dmas()
473 ctrl->stats.reads_issued++; in rsxx_issue_dmas()
478 atomic_add(cmds_pending, &ctrl->stats.hw_q_depth); in rsxx_issue_dmas()
479 mod_timer(&ctrl->activity_timer, in rsxx_issue_dmas()
482 if (unlikely(ctrl->card->eeh_state)) { in rsxx_issue_dmas()
483 del_timer_sync(&ctrl->activity_timer); in rsxx_issue_dmas()
487 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); in rsxx_issue_dmas()
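Two small pieces of arithmetic in the issue path above are easy to miss: the sub_page byte packs the sector count and starting sector into a single byte, and the software command index wraps with a power-of-two mask. A standalone illustration follows; the ring size of 256 is an assumption, since RSXX_CS_IDX_MASK itself is not shown in this excerpt.

#include <stdint.h>
#include <stdio.h>

#define CS_IDX_MASK	(256 - 1)	/* assumed power-of-two command ring */

int main(void)
{
	unsigned int cnt = 4;	/* 512-byte sectors in this transfer    */
	unsigned int off = 2;	/* starting sector within the 4 KiB blk */
	unsigned int idx = 255;	/* current software command slot        */

	/* Same packing as hw_cmd_buf[...].sub_page in rsxx_issue_dmas():
	 * sector count in bits [6:4], sector offset in bits [2:0]. */
	uint8_t sub_page = ((cnt & 0x7) << 4) | (off & 0x7);

	/* Advance the command index with wrap-around. */
	idx = (idx + 1) & CS_IDX_MASK;

	printf("sub_page 0x%02x, next idx %u\n", sub_page, idx);	/* 0x42, 0 */
	return 0;
}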
500 hw_st_buf = ctrl->status.buf; in rsxx_dma_done()
502 if (unlikely(ctrl->card->halt) || in rsxx_dma_done()
503 unlikely(ctrl->card->dma_fault) || in rsxx_dma_done()
504 unlikely(ctrl->card->eeh_state)) in rsxx_dma_done()
507 count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count); in rsxx_dma_done()
509 while (count == ctrl->e_cnt) { in rsxx_dma_done()
511 * The read memory-barrier is necessary to keep aggressive in rsxx_dma_done()
513 * reordering the following status-buffer tag & status read in rsxx_dma_done()
519 status = hw_st_buf[ctrl->status.idx].status; in rsxx_dma_done()
520 tag = hw_st_buf[ctrl->status.idx].tag; in rsxx_dma_done()
522 dma = get_tracker_dma(ctrl->trackers, tag); in rsxx_dma_done()
524 spin_lock_irqsave(&ctrl->card->irq_lock, flags); in rsxx_dma_done()
525 rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL); in rsxx_dma_done()
526 spin_unlock_irqrestore(&ctrl->card->irq_lock, flags); in rsxx_dma_done()
528 dev_err(CARD_TO_DEV(ctrl->card), in rsxx_dma_done()
531 tag, ctrl->status.idx, ctrl->id); in rsxx_dma_done()
535 dev_dbg(CARD_TO_DEV(ctrl->card), in rsxx_dma_done()
538 ctrl->id, dma->laddr, tag, status, count, in rsxx_dma_done()
539 ctrl->status.idx); in rsxx_dma_done()
541 atomic_dec(&ctrl->stats.hw_q_depth); in rsxx_dma_done()
543 mod_timer(&ctrl->activity_timer, in rsxx_dma_done()
551 push_tracker(ctrl->trackers, tag); in rsxx_dma_done()
553 ctrl->status.idx = (ctrl->status.idx + 1) & in rsxx_dma_done()
555 ctrl->e_cnt++; in rsxx_dma_done()
557 count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count); in rsxx_dma_done()
560 dma_intr_coal_auto_tune(ctrl->card); in rsxx_dma_done()
562 if (atomic_read(&ctrl->stats.hw_q_depth) == 0) in rsxx_dma_done()
563 del_timer_sync(&ctrl->activity_timer); in rsxx_dma_done()
565 spin_lock_irqsave(&ctrl->card->irq_lock, flags); in rsxx_dma_done()
566 rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id)); in rsxx_dma_done()
567 spin_unlock_irqrestore(&ctrl->card->irq_lock, flags); in rsxx_dma_done()
569 spin_lock_bh(&ctrl->queue_lock); in rsxx_dma_done()
570 if (ctrl->stats.sw_q_depth) in rsxx_dma_done()
571 queue_work(ctrl->issue_wq, &ctrl->issue_dma_work); in rsxx_dma_done()
572 spin_unlock_bh(&ctrl->queue_lock); in rsxx_dma_done()
581 mutex_lock(&ctrl->work_lock); in rsxx_schedule_issue()
583 mutex_unlock(&ctrl->work_lock); in rsxx_schedule_issue()
592 mutex_lock(&ctrl->work_lock); in rsxx_schedule_done()
594 mutex_unlock(&ctrl->work_lock); in rsxx_schedule_done()
609 dma->cmd = HW_CMD_BLK_DISCARD; in rsxx_queue_discard()
610 dma->laddr = laddr; in rsxx_queue_discard()
611 dma->dma_addr = 0; in rsxx_queue_discard()
612 dma->sub_page.off = 0; in rsxx_queue_discard()
613 dma->sub_page.cnt = 0; in rsxx_queue_discard()
614 dma->page = NULL; in rsxx_queue_discard()
615 dma->pg_off = 0; in rsxx_queue_discard()
616 dma->cb = cb; in rsxx_queue_discard()
617 dma->cb_data = cb_data; in rsxx_queue_discard()
619 dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr); in rsxx_queue_discard()
621 list_add_tail(&dma->list, q); in rsxx_queue_discard()
643 dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ; in rsxx_queue_dma()
644 dma->laddr = laddr; in rsxx_queue_dma()
645 dma->sub_page.off = (dma_off >> 9); in rsxx_queue_dma()
646 dma->sub_page.cnt = (dma_len >> 9); in rsxx_queue_dma()
647 dma->page = page; in rsxx_queue_dma()
648 dma->pg_off = pg_off; in rsxx_queue_dma()
649 dma->cb = cb; in rsxx_queue_dma()
650 dma->cb_data = cb_data; in rsxx_queue_dma()
654 dir ? 'W' : 'R', dma->laddr, dma->sub_page.off, in rsxx_queue_dma()
655 dma->sub_page.cnt, dma->page, dma->pg_off); in rsxx_queue_dma()
658 list_add_tail(&dma->list, q); in rsxx_queue_dma()
683 addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */ in rsxx_dma_queue_bio()
686 for (i = 0; i < card->n_targets; i++) { in rsxx_dma_queue_bio()
692 bv_len = bio->bi_iter.bi_size; in rsxx_dma_queue_bio()
706 bv_len -= RSXX_HW_BLK_SIZE; in rsxx_dma_queue_bio()
718 RSXX_HW_BLK_SIZE - dma_off); in rsxx_dma_queue_bio()
732 bv_len -= dma_len; in rsxx_dma_queue_bio()
737 for (i = 0; i < card->n_targets; i++) { in rsxx_dma_queue_bio()
739 spin_lock_bh(&card->ctrl[i].queue_lock); in rsxx_dma_queue_bio()
740 card->ctrl[i].stats.sw_q_depth += dma_cnt[i]; in rsxx_dma_queue_bio()
741 list_splice_tail(&dma_list[i], &card->ctrl[i].queue); in rsxx_dma_queue_bio()
742 spin_unlock_bh(&card->ctrl[i].queue_lock); in rsxx_dma_queue_bio()
744 queue_work(card->ctrl[i].issue_wq, in rsxx_dma_queue_bio()
745 &card->ctrl[i].issue_dma_work); in rsxx_dma_queue_bio()
752 for (i = 0; i < card->n_targets; i++) in rsxx_dma_queue_bio()
753 rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i], in rsxx_dma_queue_bio()
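rsxx_dma_queue_bio() walks the bio's byte range in pieces that never cross a hardware-block boundary and hands each piece to the target that owns its stripe. A standalone sketch of that walk, reusing the hypothetical geometry from the earlier striping example (4 KiB blocks, 8 targets):

#include <stdint.h>
#include <stdio.h>

#define HW_BLK_SIZE	4096ULL		/* stand-in for RSXX_HW_BLK_SIZE */
#define TARGET_SHIFT	12
#define TARGET_MASK	7ULL

int main(void)
{
	uint64_t sector = 5;			/* bio->bi_iter.bi_sector */
	uint64_t len = 12 * 512;		/* bytes left in the bio  */
	uint64_t addr8 = sector << 9;		/* sectors are 512 bytes  */

	while (len) {
		/* Stay inside the current hardware block. */
		uint64_t dma_off = addr8 & (HW_BLK_SIZE - 1);
		uint64_t dma_len = HW_BLK_SIZE - dma_off;
		uint64_t tgt;

		if (dma_len > len)
			dma_len = len;

		/* Same target selection as rsxx_get_dma_tgt(). */
		tgt = (addr8 >> TARGET_SHIFT) & TARGET_MASK;
		printf("target %llu: addr8 0x%llx off %llu len %llu\n",
		       (unsigned long long)tgt, (unsigned long long)addr8,
		       (unsigned long long)dma_off, (unsigned long long)dma_len);

		addr8 += dma_len;
		len -= dma_len;
	}
	return 0;
}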
759 /*----------------- DMA Engine Initialization & Setup -------------------*/
762 ctrl->status.buf = dma_alloc_coherent(&dev->dev, STATUS_BUFFER_SIZE8, in rsxx_hw_buffers_init()
763 &ctrl->status.dma_addr, GFP_KERNEL); in rsxx_hw_buffers_init()
764 ctrl->cmd.buf = dma_alloc_coherent(&dev->dev, COMMAND_BUFFER_SIZE8, in rsxx_hw_buffers_init()
765 &ctrl->cmd.dma_addr, GFP_KERNEL); in rsxx_hw_buffers_init()
766 if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL) in rsxx_hw_buffers_init()
767 return -ENOMEM; in rsxx_hw_buffers_init()
769 memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8); in rsxx_hw_buffers_init()
770 iowrite32(lower_32_bits(ctrl->status.dma_addr), in rsxx_hw_buffers_init()
771 ctrl->regmap + SB_ADD_LO); in rsxx_hw_buffers_init()
772 iowrite32(upper_32_bits(ctrl->status.dma_addr), in rsxx_hw_buffers_init()
773 ctrl->regmap + SB_ADD_HI); in rsxx_hw_buffers_init()
775 memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8); in rsxx_hw_buffers_init()
776 iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO); in rsxx_hw_buffers_init()
777 iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI); in rsxx_hw_buffers_init()
779 ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT); in rsxx_hw_buffers_init()
780 if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) { in rsxx_hw_buffers_init()
781 dev_crit(&dev->dev, "Failed reading status cnt x%x\n", in rsxx_hw_buffers_init()
782 ctrl->status.idx); in rsxx_hw_buffers_init()
783 return -EINVAL; in rsxx_hw_buffers_init()
785 iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT); in rsxx_hw_buffers_init()
786 iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT); in rsxx_hw_buffers_init()
788 ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX); in rsxx_hw_buffers_init()
789 if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) { in rsxx_hw_buffers_init()
790 dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n", in rsxx_hw_buffers_init()
791 ctrl->cmd.idx); in rsxx_hw_buffers_init()
792 return -EINVAL; in rsxx_hw_buffers_init()
794 iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX); in rsxx_hw_buffers_init()
795 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); in rsxx_hw_buffers_init()
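The status and command rings above are programmed by splitting each 64-bit coherent DMA address into 32-bit halves for the *_ADD_LO / *_ADD_HI registers. A trivial illustration of that split; the helpers are userspace stand-ins for the kernel's lower_32_bits()/upper_32_bits(), and the address is hypothetical.

#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t dma_addr = 0x0000000123456000ULL;	/* hypothetical bus address */

	printf("ADD_LO=0x%08x ADD_HI=0x%08x\n",
	       lower_32(dma_addr), upper_32(dma_addr));	/* 0x23456000, 0x00000001 */
	return 0;
}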
806 memset(&ctrl->stats, 0, sizeof(ctrl->stats)); in rsxx_dma_ctrl_init()
808 ctrl->trackers = vmalloc(struct_size(ctrl->trackers, list, in rsxx_dma_ctrl_init()
810 if (!ctrl->trackers) in rsxx_dma_ctrl_init()
811 return -ENOMEM; in rsxx_dma_ctrl_init()
813 ctrl->trackers->head = 0; in rsxx_dma_ctrl_init()
815 ctrl->trackers->list[i].next_tag = i + 1; in rsxx_dma_ctrl_init()
816 ctrl->trackers->list[i].dma = NULL; in rsxx_dma_ctrl_init()
818 ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1; in rsxx_dma_ctrl_init()
819 spin_lock_init(&ctrl->trackers->lock); in rsxx_dma_ctrl_init()
821 spin_lock_init(&ctrl->queue_lock); in rsxx_dma_ctrl_init()
822 mutex_init(&ctrl->work_lock); in rsxx_dma_ctrl_init()
823 INIT_LIST_HEAD(&ctrl->queue); in rsxx_dma_ctrl_init()
825 timer_setup(&ctrl->activity_timer, dma_engine_stalled, 0); in rsxx_dma_ctrl_init()
827 ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0); in rsxx_dma_ctrl_init()
828 if (!ctrl->issue_wq) in rsxx_dma_ctrl_init()
829 return -ENOMEM; in rsxx_dma_ctrl_init()
831 ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0); in rsxx_dma_ctrl_init()
832 if (!ctrl->done_wq) in rsxx_dma_ctrl_init()
833 return -ENOMEM; in rsxx_dma_ctrl_init()
835 INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue); in rsxx_dma_ctrl_init()
836 INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done); in rsxx_dma_ctrl_init()
851 return -EINVAL; in rsxx_dma_stripe_setup()
854 card->_stripe.lower_mask = stripe_size8 - 1; in rsxx_dma_stripe_setup()
856 card->_stripe.upper_mask = ~(card->_stripe.lower_mask); in rsxx_dma_stripe_setup()
857 card->_stripe.upper_shift = ffs(card->n_targets) - 1; in rsxx_dma_stripe_setup()
859 card->_stripe.target_mask = card->n_targets - 1; in rsxx_dma_stripe_setup()
860 card->_stripe.target_shift = ffs(stripe_size8) - 1; in rsxx_dma_stripe_setup()
863 card->_stripe.lower_mask); in rsxx_dma_stripe_setup()
865 card->_stripe.upper_shift); in rsxx_dma_stripe_setup()
867 card->_stripe.upper_mask); in rsxx_dma_stripe_setup()
869 card->_stripe.target_mask); in rsxx_dma_stripe_setup()
871 card->_stripe.target_shift); in rsxx_dma_stripe_setup()
880 intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode, in rsxx_dma_configure()
881 card->config.data.intr_coal.count, in rsxx_dma_configure()
882 card->config.data.intr_coal.latency); in rsxx_dma_configure()
883 iowrite32(intr_coal, card->regmap + INTR_COAL); in rsxx_dma_configure()
885 return rsxx_dma_stripe_setup(card, card->config.data.stripe_size); in rsxx_dma_configure()
896 card->n_targets); in rsxx_dma_setup()
899 for (i = 0; i < card->n_targets; i++) in rsxx_dma_setup()
900 card->ctrl[i].regmap = card->regmap + (i * 4096); in rsxx_dma_setup()
902 card->dma_fault = 0; in rsxx_dma_setup()
908 for (i = 0; i < card->n_targets; i++) { in rsxx_dma_setup()
909 st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]); in rsxx_dma_setup()
913 card->ctrl[i].card = card; in rsxx_dma_setup()
914 card->ctrl[i].id = i; in rsxx_dma_setup()
917 card->scrub_hard = 1; in rsxx_dma_setup()
919 if (card->config_valid) in rsxx_dma_setup()
923 for (i = 0; i < card->n_targets; i++) { in rsxx_dma_setup()
924 spin_lock_irqsave(&card->irq_lock, flags); in rsxx_dma_setup()
926 spin_unlock_irqrestore(&card->irq_lock, flags); in rsxx_dma_setup()
932 for (i = 0; i < card->n_targets; i++) { in rsxx_dma_setup()
933 struct rsxx_dma_ctrl *ctrl = &card->ctrl[i]; in rsxx_dma_setup()
935 if (ctrl->issue_wq) { in rsxx_dma_setup()
936 destroy_workqueue(ctrl->issue_wq); in rsxx_dma_setup()
937 ctrl->issue_wq = NULL; in rsxx_dma_setup()
940 if (ctrl->done_wq) { in rsxx_dma_setup()
941 destroy_workqueue(ctrl->done_wq); in rsxx_dma_setup()
942 ctrl->done_wq = NULL; in rsxx_dma_setup()
945 vfree(ctrl->trackers); in rsxx_dma_setup()
947 if (ctrl->status.buf) in rsxx_dma_setup()
948 dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8, in rsxx_dma_setup()
949 ctrl->status.buf, in rsxx_dma_setup()
950 ctrl->status.dma_addr); in rsxx_dma_setup()
951 if (ctrl->cmd.buf) in rsxx_dma_setup()
952 dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8, in rsxx_dma_setup()
953 ctrl->cmd.buf, ctrl->cmd.dma_addr); in rsxx_dma_setup()
967 dma = get_tracker_dma(ctrl->trackers, i); in rsxx_dma_cancel()
969 atomic_dec(&ctrl->stats.hw_q_depth); in rsxx_dma_cancel()
971 push_tracker(ctrl->trackers, i); in rsxx_dma_cancel()
984 for (i = 0; i < card->n_targets; i++) { in rsxx_dma_destroy()
985 ctrl = &card->ctrl[i]; in rsxx_dma_destroy()
987 if (ctrl->issue_wq) { in rsxx_dma_destroy()
988 destroy_workqueue(ctrl->issue_wq); in rsxx_dma_destroy()
989 ctrl->issue_wq = NULL; in rsxx_dma_destroy()
992 if (ctrl->done_wq) { in rsxx_dma_destroy()
993 destroy_workqueue(ctrl->done_wq); in rsxx_dma_destroy()
994 ctrl->done_wq = NULL; in rsxx_dma_destroy()
997 if (timer_pending(&ctrl->activity_timer)) in rsxx_dma_destroy()
998 del_timer_sync(&ctrl->activity_timer); in rsxx_dma_destroy()
1001 spin_lock_bh(&ctrl->queue_lock); in rsxx_dma_destroy()
1002 rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA); in rsxx_dma_destroy()
1003 spin_unlock_bh(&ctrl->queue_lock); in rsxx_dma_destroy()
1007 vfree(ctrl->trackers); in rsxx_dma_destroy()
1009 dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8, in rsxx_dma_destroy()
1010 ctrl->status.buf, ctrl->status.dma_addr); in rsxx_dma_destroy()
1011 dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8, in rsxx_dma_destroy()
1012 ctrl->cmd.buf, ctrl->cmd.dma_addr); in rsxx_dma_destroy()
1024 issued_dmas = kcalloc(card->n_targets, sizeof(*issued_dmas), in rsxx_eeh_save_issued_dmas()
1027 return -ENOMEM; in rsxx_eeh_save_issued_dmas()
1029 for (i = 0; i < card->n_targets; i++) { in rsxx_eeh_save_issued_dmas()
1033 dma = get_tracker_dma(card->ctrl[i].trackers, j); in rsxx_eeh_save_issued_dmas()
1037 if (dma->cmd == HW_CMD_BLK_WRITE) in rsxx_eeh_save_issued_dmas()
1038 card->ctrl[i].stats.writes_issued--; in rsxx_eeh_save_issued_dmas()
1039 else if (dma->cmd == HW_CMD_BLK_DISCARD) in rsxx_eeh_save_issued_dmas()
1040 card->ctrl[i].stats.discards_issued--; in rsxx_eeh_save_issued_dmas()
1042 card->ctrl[i].stats.reads_issued--; in rsxx_eeh_save_issued_dmas()
1044 if (dma->cmd != HW_CMD_BLK_DISCARD) { in rsxx_eeh_save_issued_dmas()
1045 dma_unmap_page(&card->dev->dev, dma->dma_addr, in rsxx_eeh_save_issued_dmas()
1047 dma->cmd == HW_CMD_BLK_WRITE ? in rsxx_eeh_save_issued_dmas()
1052 list_add_tail(&dma->list, &issued_dmas[i]); in rsxx_eeh_save_issued_dmas()
1053 push_tracker(card->ctrl[i].trackers, j); in rsxx_eeh_save_issued_dmas()
1057 spin_lock_bh(&card->ctrl[i].queue_lock); in rsxx_eeh_save_issued_dmas()
1058 list_splice(&issued_dmas[i], &card->ctrl[i].queue); in rsxx_eeh_save_issued_dmas()
1060 atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth); in rsxx_eeh_save_issued_dmas()
1061 card->ctrl[i].stats.sw_q_depth += cnt; in rsxx_eeh_save_issued_dmas()
1062 card->ctrl[i].e_cnt = 0; in rsxx_eeh_save_issued_dmas()
1063 spin_unlock_bh(&card->ctrl[i].queue_lock); in rsxx_eeh_save_issued_dmas()
1075 return -ENOMEM; in rsxx_dma_init()