Lines Matching refs:skdev

284 static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)  in skd_reg_read32()  argument
286 u32 val = readl(skdev->mem_map[1] + offset); in skd_reg_read32()
288 if (unlikely(skdev->dbg_level >= 2)) in skd_reg_read32()
289 dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val); in skd_reg_read32()
293 static inline void skd_reg_write32(struct skd_device *skdev, u32 val, in skd_reg_write32() argument
296 writel(val, skdev->mem_map[1] + offset); in skd_reg_write32()
297 if (unlikely(skdev->dbg_level >= 2)) in skd_reg_write32()
298 dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val); in skd_reg_write32()
301 static inline void skd_reg_write64(struct skd_device *skdev, u64 val, in skd_reg_write64() argument
304 writeq(val, skdev->mem_map[1] + offset); in skd_reg_write64()
305 if (unlikely(skdev->dbg_level >= 2)) in skd_reg_write64()
306 dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset, in skd_reg_write64()
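
Reassembled from the fragments above, the 32-bit reader looks like this as a sketch (the braces and the `return val;` line carry no skdev reference, so they do not show up in the listing); skd_reg_write32()/skd_reg_write64() are the same shape around writel()/writeq(), and mem_map[1] is the BAR that holds the FIT register window:

static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
	u32 val = readl(skdev->mem_map[1] + offset);

	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
	return val;
}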
355 static void skd_destruct(struct skd_device *skdev);
357 static void skd_send_fitmsg(struct skd_device *skdev,
359 static void skd_send_special_fitmsg(struct skd_device *skdev,
361 static bool skd_preop_sg_list(struct skd_device *skdev,
363 static void skd_postop_sg_list(struct skd_device *skdev,
366 static void skd_restart_device(struct skd_device *skdev);
367 static int skd_quiesce_dev(struct skd_device *skdev);
368 static int skd_unquiesce_dev(struct skd_device *skdev);
369 static void skd_disable_interrupts(struct skd_device *skdev);
370 static void skd_isr_fwstate(struct skd_device *skdev);
371 static void skd_recover_requests(struct skd_device *skdev);
372 static void skd_soft_reset(struct skd_device *skdev);
376 static void skd_log_skdev(struct skd_device *skdev, const char *event);
377 static void skd_log_skreq(struct skd_device *skdev,
392 static int skd_in_flight(struct skd_device *skdev) in skd_in_flight() argument
396 blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count); in skd_in_flight()
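
skd_in_flight() counts in-flight requests by walking the busy tags. The counting callback never touches skdev, so only the iterator call appears above; a minimal sketch, assuming the three-argument busy-iter callback signature of this kernel generation:

static void skd_inc_in_flight(struct request *rq, void *data, bool reserved)
{
	int *count = data;

	(*count)++;
}

static int skd_in_flight(struct skd_device *skdev)
{
	int count = 0;

	blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);
	return count;
}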
445 struct skd_device *skdev = q->queuedata; in skd_fail_all() local
447 SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE); in skd_fail_all()
449 skd_log_skdev(skdev, "req_not_online"); in skd_fail_all()
450 switch (skdev->state) { in skd_fail_all()
483 struct skd_device *skdev = q->queuedata; in skd_mq_queue_rq() local
494 if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE)) in skd_mq_queue_rq()
504 dev_dbg(&skdev->pdev->dev, in skd_mq_queue_rq()
517 if (req->bio && !skd_preop_sg_list(skdev, skreq)) { in skd_mq_queue_rq()
518 dev_dbg(&skdev->pdev->dev, "error Out\n"); in skd_mq_queue_rq()
524 dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address, in skd_mq_queue_rq()
533 spin_lock_irqsave(&skdev->lock, flags); in skd_mq_queue_rq()
534 skmsg = skdev->skmsg; in skd_mq_queue_rq()
537 skmsg = &skdev->skmsg_table[tag]; in skd_mq_queue_rq()
538 skdev->skmsg = skmsg; in skd_mq_queue_rq()
576 dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id, in skd_mq_queue_rq()
577 skd_in_flight(skdev)); in skd_mq_queue_rq()
583 skd_send_fitmsg(skdev, skmsg); in skd_mq_queue_rq()
587 skd_send_fitmsg(skdev, skmsg); in skd_mq_queue_rq()
588 skdev->skmsg = NULL; in skd_mq_queue_rq()
590 spin_unlock_irqrestore(&skdev->lock, flags); in skd_mq_queue_rq()
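
The skmsg lines above are the core of skd_mq_queue_rq(): under skdev->lock the driver batches several commands into one FIT message, either appending to the message currently being built (skdev->skmsg) or starting a fresh one indexed by the request tag; a full message, or the last request of the dispatch batch, is pushed with skd_send_fitmsg() and the pointer cleared. A sketch of that core only, with the command formatting and the exact fullness/last-of-batch tests left as labeled assumptions (locals flags, tag and skmsg as in the listing):

	spin_lock_irqsave(&skdev->lock, flags);
	skmsg = skdev->skmsg;
	if (!skmsg) {
		/* No message under construction: start one for this tag. */
		skmsg = &skdev->skmsg_table[tag];
		skdev->skmsg = skmsg;
	}
	/* ... append this command's FIT entry to skmsg->msg_buf ... */
	if (fitmsg_full) {			/* assumed fullness test */
		skd_send_fitmsg(skdev, skmsg);
		skdev->skmsg = NULL;
	} else if (last_in_batch) {		/* assumed: mqd->last */
		skd_send_fitmsg(skdev, skmsg);
		skdev->skmsg = NULL;
	}
	spin_unlock_irqrestore(&skdev->lock, flags);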
599 struct skd_device *skdev = req->q->queuedata; in skd_timed_out() local
601 dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n", in skd_timed_out()
614 static bool skd_preop_sg_list(struct skd_device *skdev, in skd_preop_sg_list() argument
627 n_sg = blk_rq_map_sg(skdev->queue, req, sgl); in skd_preop_sg_list()
635 n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir); in skd_preop_sg_list()
639 SKD_ASSERT(n_sg <= skdev->sgs_per_request); in skd_preop_sg_list()
658 if (unlikely(skdev->dbg_level > 1)) { in skd_preop_sg_list()
659 dev_dbg(&skdev->pdev->dev, in skd_preop_sg_list()
665 dev_dbg(&skdev->pdev->dev, in skd_preop_sg_list()
675 static void skd_postop_sg_list(struct skd_device *skdev, in skd_postop_sg_list() argument
685 pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir); in skd_postop_sg_list()
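
skd_preop_sg_list()/skd_postop_sg_list() are the usual two-step DMA mapping pair: blk_rq_map_sg() flattens the request's bios into a scatterlist, pci_map_sg() makes it device-visible, the entries are copied into the FIT sg-descriptor list, and postop undoes the mapping. A sketch of the preop side, with the descriptor fill loop elided and the error handling assumed:

static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	struct request *req = blk_mq_rq_from_pdu(skreq);
	struct scatterlist *sgl = &skreq->sg[0];
	int n_sg;

	n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
	if (n_sg <= 0)
		return false;

	n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
	if (n_sg <= 0)
		return false;

	SKD_ASSERT(n_sg <= skdev->sgs_per_request);
	/* ... copy sgl[0..n_sg) into the skreq->sksg_list FIT descriptors ... */
	skreq->n_sg = n_sg;
	return true;
}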
694 static void skd_timer_tick_not_online(struct skd_device *skdev);
698 struct skd_device *skdev = container_of(work, typeof(*skdev), in skd_start_queue() local
706 blk_mq_start_hw_queues(skdev->queue); in skd_start_queue()
711 struct skd_device *skdev = from_timer(skdev, t, timer); in skd_timer_tick() local
715 if (skdev->state == SKD_DRVR_STATE_FAULT) in skd_timer_tick()
721 spin_lock_irqsave(&skdev->lock, reqflags); in skd_timer_tick()
723 state = SKD_READL(skdev, FIT_STATUS); in skd_timer_tick()
725 if (state != skdev->drive_state) in skd_timer_tick()
726 skd_isr_fwstate(skdev); in skd_timer_tick()
728 if (skdev->state != SKD_DRVR_STATE_ONLINE) in skd_timer_tick()
729 skd_timer_tick_not_online(skdev); in skd_timer_tick()
731 mod_timer(&skdev->timer, (jiffies + HZ)); in skd_timer_tick()
733 spin_unlock_irqrestore(&skdev->lock, reqflags); in skd_timer_tick()
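
skd_timer_tick() is nearly complete in the listing; only locals, braces, and an early return are missing. Reassembled as a sketch (the FIT_SR_DRIVE_STATE_MASK masking is assumed by analogy with the other FIT_STATUS reads in this file):

static void skd_timer_tick(struct timer_list *t)
{
	struct skd_device *skdev = from_timer(skdev, t, timer);
	unsigned long reqflags;
	u32 state;

	if (skdev->state == SKD_DRVR_STATE_FAULT)
		/* The driver has declared a fault; stop rescheduling. */
		return;

	spin_lock_irqsave(&skdev->lock, reqflags);

	state = SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
	if (state != skdev->drive_state)
		skd_isr_fwstate(skdev);

	if (skdev->state != SKD_DRVR_STATE_ONLINE)
		skd_timer_tick_not_online(skdev);

	mod_timer(&skdev->timer, (jiffies + HZ));

	spin_unlock_irqrestore(&skdev->lock, reqflags);
}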
736 static void skd_timer_tick_not_online(struct skd_device *skdev) in skd_timer_tick_not_online() argument
738 switch (skdev->state) { in skd_timer_tick_not_online()
743 dev_dbg(&skdev->pdev->dev, in skd_timer_tick_not_online()
745 skdev->drive_state, skdev->state); in skd_timer_tick_not_online()
749 if (skdev->timer_countdown > 0) { in skd_timer_tick_not_online()
750 skdev->timer_countdown--; in skd_timer_tick_not_online()
753 skd_recover_requests(skdev); in skd_timer_tick_not_online()
759 dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n", in skd_timer_tick_not_online()
760 skdev->state, skdev->timer_countdown); in skd_timer_tick_not_online()
761 if (skdev->timer_countdown > 0) { in skd_timer_tick_not_online()
762 skdev->timer_countdown--; in skd_timer_tick_not_online()
765 dev_dbg(&skdev->pdev->dev, in skd_timer_tick_not_online()
767 skdev->state, skdev->timer_countdown); in skd_timer_tick_not_online()
768 skd_restart_device(skdev); in skd_timer_tick_not_online()
773 if (skdev->timer_countdown > 0) { in skd_timer_tick_not_online()
774 skdev->timer_countdown--; in skd_timer_tick_not_online()
779 skdev->state = SKD_DRVR_STATE_FAULT; in skd_timer_tick_not_online()
781 dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n", in skd_timer_tick_not_online()
782 skdev->drive_state); in skd_timer_tick_not_online()
786 schedule_work(&skdev->start_queue); in skd_timer_tick_not_online()
787 skdev->gendisk_on = -1; in skd_timer_tick_not_online()
788 wake_up_interruptible(&skdev->waitq); in skd_timer_tick_not_online()
800 if (skdev->timer_countdown > 0) { in skd_timer_tick_not_online()
801 skdev->timer_countdown--; in skd_timer_tick_not_online()
806 skdev->state = SKD_DRVR_STATE_FAULT; in skd_timer_tick_not_online()
807 dev_err(&skdev->pdev->dev, in skd_timer_tick_not_online()
809 skdev->drive_state); in skd_timer_tick_not_online()
823 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) || in skd_timer_tick_not_online()
824 (skdev->drive_state == FIT_SR_DRIVE_FAULT) || in skd_timer_tick_not_online()
825 (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK)) in skd_timer_tick_not_online()
829 skd_recover_requests(skdev); in skd_timer_tick_not_online()
831 dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n", in skd_timer_tick_not_online()
832 skdev->drive_state); in skd_timer_tick_not_online()
833 pci_disable_device(skdev->pdev); in skd_timer_tick_not_online()
834 skd_disable_interrupts(skdev); in skd_timer_tick_not_online()
835 skd_recover_requests(skdev); in skd_timer_tick_not_online()
840 schedule_work(&skdev->start_queue); in skd_timer_tick_not_online()
841 skdev->gendisk_on = -1; in skd_timer_tick_not_online()
842 wake_up_interruptible(&skdev->waitq); in skd_timer_tick_not_online()
855 static int skd_start_timer(struct skd_device *skdev) in skd_start_timer() argument
859 timer_setup(&skdev->timer, skd_timer_tick, 0); in skd_start_timer()
861 rc = mod_timer(&skdev->timer, (jiffies + HZ)); in skd_start_timer()
863 dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc); in skd_start_timer()
867 static void skd_kill_timer(struct skd_device *skdev) in skd_kill_timer() argument
869 del_timer_sync(&skdev->timer); in skd_kill_timer()
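
The timer setup/teardown pair is likewise almost fully visible; reassembled as a sketch (the error-only check on the mod_timer() return value is assumed):

static int skd_start_timer(struct skd_device *skdev)
{
	int rc;

	timer_setup(&skdev->timer, skd_timer_tick, 0);

	rc = mod_timer(&skdev->timer, (jiffies + HZ));
	if (rc)
		dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
	return rc;
}

static void skd_kill_timer(struct skd_device *skdev)
{
	del_timer_sync(&skdev->timer);
}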
878 static int skd_format_internal_skspcl(struct skd_device *skdev) in skd_format_internal_skspcl() argument
880 struct skd_special_context *skspcl = &skdev->internal_skspcl; in skd_format_internal_skspcl()
906 static void skd_send_internal_skspcl(struct skd_device *skdev, in skd_send_internal_skspcl() argument
984 skd_send_special_fitmsg(skdev, skspcl); in skd_send_internal_skspcl()
987 static void skd_refresh_device_data(struct skd_device *skdev) in skd_refresh_device_data() argument
989 struct skd_special_context *skspcl = &skdev->internal_skspcl; in skd_refresh_device_data()
991 skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY); in skd_refresh_device_data()
994 static int skd_chk_read_buf(struct skd_device *skdev, in skd_chk_read_buf() argument
1008 static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key, in skd_log_check_status() argument
1014 dev_err(&skdev->pdev->dev, in skd_log_check_status()
1020 static void skd_complete_internal(struct skd_device *skdev, in skd_complete_internal() argument
1030 lockdep_assert_held(&skdev->lock); in skd_complete_internal()
1032 SKD_ASSERT(skspcl == &skdev->internal_skspcl); in skd_complete_internal()
1034 dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]); in skd_complete_internal()
1036 dma_sync_single_for_cpu(&skdev->pdev->dev, in skd_complete_internal()
1046 skd_log_check_status(skdev, status, skerr->key, skerr->code, in skd_complete_internal()
1052 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); in skd_complete_internal()
1055 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); in skd_complete_internal()
1057 if (skdev->state == SKD_DRVR_STATE_STOPPING) { in skd_complete_internal()
1058 dev_dbg(&skdev->pdev->dev, in skd_complete_internal()
1060 skdev->state); in skd_complete_internal()
1063 dev_dbg(&skdev->pdev->dev, in skd_complete_internal()
1065 skd_send_internal_skspcl(skdev, skspcl, in skd_complete_internal()
1072 skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER); in skd_complete_internal()
1074 if (skdev->state == SKD_DRVR_STATE_STOPPING) { in skd_complete_internal()
1075 dev_dbg(&skdev->pdev->dev, in skd_complete_internal()
1077 skdev->state); in skd_complete_internal()
1080 dev_dbg(&skdev->pdev->dev, in skd_complete_internal()
1082 skd_send_internal_skspcl(skdev, skspcl, in skd_complete_internal()
1089 if (skd_chk_read_buf(skdev, skspcl) == 0) in skd_complete_internal()
1090 skd_send_internal_skspcl(skdev, skspcl, in skd_complete_internal()
1093 dev_err(&skdev->pdev->dev, in skd_complete_internal()
1095 skdev->connect_retries); in skd_complete_internal()
1096 if (skdev->connect_retries < in skd_complete_internal()
1098 skdev->connect_retries++; in skd_complete_internal()
1099 skd_soft_reset(skdev); in skd_complete_internal()
1101 dev_err(&skdev->pdev->dev, in skd_complete_internal()
1108 if (skdev->state == SKD_DRVR_STATE_STOPPING) { in skd_complete_internal()
1109 dev_dbg(&skdev->pdev->dev, in skd_complete_internal()
1111 skdev->state); in skd_complete_internal()
1114 dev_dbg(&skdev->pdev->dev, in skd_complete_internal()
1116 skd_send_internal_skspcl(skdev, skspcl, in skd_complete_internal()
1122 skdev->read_cap_is_valid = 0; in skd_complete_internal()
1124 skdev->read_cap_last_lba = in skd_complete_internal()
1127 skdev->read_cap_blocksize = in skd_complete_internal()
1131 dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n", in skd_complete_internal()
1132 skdev->read_cap_last_lba, in skd_complete_internal()
1133 skdev->read_cap_blocksize); in skd_complete_internal()
1135 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); in skd_complete_internal()
1137 skdev->read_cap_is_valid = 1; in skd_complete_internal()
1139 skd_send_internal_skspcl(skdev, skspcl, INQUIRY); in skd_complete_internal()
1142 skdev->read_cap_last_lba = ~0; in skd_complete_internal()
1143 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); in skd_complete_internal()
1144 dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n"); in skd_complete_internal()
1145 skd_send_internal_skspcl(skdev, skspcl, INQUIRY); in skd_complete_internal()
1147 dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n"); in skd_complete_internal()
1148 skd_send_internal_skspcl(skdev, skspcl, in skd_complete_internal()
1154 skdev->inquiry_is_valid = 0; in skd_complete_internal()
1156 skdev->inquiry_is_valid = 1; in skd_complete_internal()
1159 skdev->inq_serial_num[i] = buf[i + 4]; in skd_complete_internal()
1160 skdev->inq_serial_num[12] = 0; in skd_complete_internal()
1163 if (skd_unquiesce_dev(skdev) < 0) in skd_complete_internal()
1164 dev_dbg(&skdev->pdev->dev, "**** failed, to ONLINE device\n"); in skd_complete_internal()
1166 skdev->connect_retries = 0; in skd_complete_internal()
1171 skdev->sync_done = 1; in skd_complete_internal()
1173 skdev->sync_done = -1; in skd_complete_internal()
1174 wake_up_interruptible(&skdev->waitq); in skd_complete_internal()
1188 static void skd_send_fitmsg(struct skd_device *skdev, in skd_send_fitmsg() argument
1193 dev_dbg(&skdev->pdev->dev, "dma address %pad, busy=%d\n", in skd_send_fitmsg()
1194 &skmsg->mb_dma_address, skd_in_flight(skdev)); in skd_send_fitmsg()
1195 dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf); in skd_send_fitmsg()
1200 if (unlikely(skdev->dbg_level > 1)) { in skd_send_fitmsg()
1204 dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i, in skd_send_fitmsg()
1225 dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address, in skd_send_fitmsg()
1231 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); in skd_send_fitmsg()
1234 static void skd_send_special_fitmsg(struct skd_device *skdev, in skd_send_special_fitmsg() argument
1241 if (unlikely(skdev->dbg_level > 1)) { in skd_send_special_fitmsg()
1246 dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i, in skd_send_special_fitmsg()
1252 dev_dbg(&skdev->pdev->dev, in skd_send_special_fitmsg()
1260 dev_dbg(&skdev->pdev->dev, in skd_send_special_fitmsg()
1274 dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address, in skd_send_special_fitmsg()
1276 dma_sync_single_for_device(&skdev->pdev->dev, in skd_send_special_fitmsg()
1280 dma_sync_single_for_device(&skdev->pdev->dev, in skd_send_special_fitmsg()
1288 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); in skd_send_special_fitmsg()
1297 static void skd_complete_other(struct skd_device *skdev,
1347 skd_check_status(struct skd_device *skdev, in skd_check_status() argument
1352 dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", in skd_check_status()
1355 dev_dbg(&skdev->pdev->dev, in skd_check_status()
1385 dev_err(&skdev->pdev->dev, in skd_check_status()
1396 dev_dbg(&skdev->pdev->dev, "status check: error\n"); in skd_check_status()
1400 dev_dbg(&skdev->pdev->dev, "status check good default\n"); in skd_check_status()
1404 static void skd_resolve_req_exception(struct skd_device *skdev, in skd_resolve_req_exception() argument
1410 switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) { in skd_resolve_req_exception()
1418 skd_log_skreq(skdev, skreq, "retry(busy)"); in skd_resolve_req_exception()
1419 blk_requeue_request(skdev->queue, req); in skd_resolve_req_exception()
1420 dev_info(&skdev->pdev->dev, "drive BUSY imminent\n"); in skd_resolve_req_exception()
1421 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT; in skd_resolve_req_exception()
1422 skdev->timer_countdown = SKD_TIMER_MINUTES(20); in skd_resolve_req_exception()
1423 skd_quiesce_dev(skdev); in skd_resolve_req_exception()
1428 skd_log_skreq(skdev, skreq, "retry"); in skd_resolve_req_exception()
1429 blk_requeue_request(skdev->queue, req); in skd_resolve_req_exception()
1442 static void skd_release_skreq(struct skd_device *skdev, in skd_release_skreq() argument
1451 static int skd_isr_completion_posted(struct skd_device *skdev, in skd_isr_completion_posted() argument
1468 lockdep_assert_held(&skdev->lock); in skd_isr_completion_posted()
1471 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY); in skd_isr_completion_posted()
1473 skcmp = &skdev->skcomp_table[skdev->skcomp_ix]; in skd_isr_completion_posted()
1479 skerr = &skdev->skerr_table[skdev->skcomp_ix]; in skd_isr_completion_posted()
1481 dev_dbg(&skdev->pdev->dev, in skd_isr_completion_posted()
1483 skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle, in skd_isr_completion_posted()
1484 cmp_cntxt, cmp_status, skd_in_flight(skdev), in skd_isr_completion_posted()
1485 cmp_bytes, skdev->proto_ver); in skd_isr_completion_posted()
1487 if (cmp_cycle != skdev->skcomp_cycle) { in skd_isr_completion_posted()
1488 dev_dbg(&skdev->pdev->dev, "end of completions\n"); in skd_isr_completion_posted()
1495 skdev->skcomp_ix++; in skd_isr_completion_posted()
1496 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) { in skd_isr_completion_posted()
1497 skdev->skcomp_ix = 0; in skd_isr_completion_posted()
1498 skdev->skcomp_cycle++; in skd_isr_completion_posted()
1510 if (tag >= skdev->num_req_context) { in skd_isr_completion_posted()
1514 WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], in skd_isr_completion_posted()
1516 skd_complete_other(skdev, skcmp, skerr); in skd_isr_completion_posted()
1520 rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag); in skd_isr_completion_posted()
1530 dev_err(&skdev->pdev->dev, in skd_isr_completion_posted()
1542 skd_log_check_status(skdev, cmp_status, skerr->key, in skd_isr_completion_posted()
1548 skd_postop_sg_list(skdev, skreq); in skd_isr_completion_posted()
1550 skd_release_skreq(skdev, skreq); in skd_isr_completion_posted()
1559 skd_resolve_req_exception(skdev, skreq, rq); in skd_isr_completion_posted()
1571 if (skdev->state == SKD_DRVR_STATE_PAUSING && in skd_isr_completion_posted()
1572 skd_in_flight(skdev) == 0) { in skd_isr_completion_posted()
1573 skdev->state = SKD_DRVR_STATE_PAUSED; in skd_isr_completion_posted()
1574 wake_up_interruptible(&skdev->waitq); in skd_isr_completion_posted()
1580 static void skd_complete_other(struct skd_device *skdev, in skd_complete_other() argument
1589 lockdep_assert_held(&skdev->lock); in skd_complete_other()
1595 dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table, in skd_complete_other()
1614 skspcl = &skdev->internal_skspcl; in skd_complete_other()
1617 skd_complete_internal(skdev, in skd_complete_other()
1642 static void skd_reset_skcomp(struct skd_device *skdev) in skd_reset_skcomp() argument
1644 memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE); in skd_reset_skcomp()
1646 skdev->skcomp_ix = 0; in skd_reset_skcomp()
1647 skdev->skcomp_cycle = 1; in skd_reset_skcomp()
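
skd_reset_skcomp() rewinds the cycle-bit completion ring that skd_isr_completion_posted() consumes: the host remembers which cycle value marks fresh entries (skcomp_cycle) and stops at the first entry whose cycle bit does not match, the "end of completions" case above. The reset itself is fully visible in the listing; reassembled:

static void skd_reset_skcomp(struct skd_device *skdev)
{
	memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);

	skdev->skcomp_ix = 0;
	skdev->skcomp_cycle = 1;
}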
1657 struct skd_device *skdev = in skd_completion_worker() local
1662 spin_lock_irqsave(&skdev->lock, flags); in skd_completion_worker()
1668 skd_isr_completion_posted(skdev, 0, &flush_enqueued); in skd_completion_worker()
1669 schedule_work(&skdev->start_queue); in skd_completion_worker()
1671 spin_unlock_irqrestore(&skdev->lock, flags); in skd_completion_worker()
1674 static void skd_isr_msg_from_dev(struct skd_device *skdev);
1679 struct skd_device *skdev = ptr; in skd_isr() local
1686 spin_lock(&skdev->lock); in skd_isr()
1689 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST); in skd_isr()
1694 dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat, in skd_isr()
1706 if (likely (skdev->state in skd_isr()
1714 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST); in skd_isr()
1716 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) && in skd_isr()
1717 (skdev->state != SKD_DRVR_STATE_STOPPING))) { in skd_isr()
1725 skd_isr_completion_posted(skdev, in skd_isr()
1730 skd_isr_fwstate(skdev); in skd_isr()
1731 if (skdev->state == SKD_DRVR_STATE_FAULT || in skd_isr()
1732 skdev->state == in skd_isr()
1734 spin_unlock(&skdev->lock); in skd_isr()
1740 skd_isr_msg_from_dev(skdev); in skd_isr()
1745 schedule_work(&skdev->start_queue); in skd_isr()
1748 schedule_work(&skdev->completion_worker); in skd_isr()
1750 schedule_work(&skdev->start_queue); in skd_isr()
1752 spin_unlock(&skdev->lock); in skd_isr()
1757 static void skd_drive_fault(struct skd_device *skdev) in skd_drive_fault() argument
1759 skdev->state = SKD_DRVR_STATE_FAULT; in skd_drive_fault()
1760 dev_err(&skdev->pdev->dev, "Drive FAULT\n"); in skd_drive_fault()
1763 static void skd_drive_disappeared(struct skd_device *skdev) in skd_drive_disappeared() argument
1765 skdev->state = SKD_DRVR_STATE_DISAPPEARED; in skd_drive_disappeared()
1766 dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n"); in skd_drive_disappeared()
1769 static void skd_isr_fwstate(struct skd_device *skdev) in skd_isr_fwstate() argument
1774 int prev_driver_state = skdev->state; in skd_isr_fwstate()
1776 sense = SKD_READL(skdev, FIT_STATUS); in skd_isr_fwstate()
1779 dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n", in skd_isr_fwstate()
1780 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, in skd_isr_fwstate()
1783 skdev->drive_state = state; in skd_isr_fwstate()
1785 switch (skdev->drive_state) { in skd_isr_fwstate()
1787 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) { in skd_isr_fwstate()
1788 skd_disable_interrupts(skdev); in skd_isr_fwstate()
1791 if (skdev->state == SKD_DRVR_STATE_RESTARTING) in skd_isr_fwstate()
1792 skd_recover_requests(skdev); in skd_isr_fwstate()
1793 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) { in skd_isr_fwstate()
1794 skdev->timer_countdown = SKD_STARTING_TIMO; in skd_isr_fwstate()
1795 skdev->state = SKD_DRVR_STATE_STARTING; in skd_isr_fwstate()
1796 skd_soft_reset(skdev); in skd_isr_fwstate()
1800 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); in skd_isr_fwstate()
1801 skdev->last_mtd = mtd; in skd_isr_fwstate()
1805 skdev->cur_max_queue_depth = skd_max_queue_depth; in skd_isr_fwstate()
1806 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth) in skd_isr_fwstate()
1807 skdev->cur_max_queue_depth = skdev->dev_max_queue_depth; in skd_isr_fwstate()
1809 skdev->queue_low_water_mark = in skd_isr_fwstate()
1810 skdev->cur_max_queue_depth * 2 / 3 + 1; in skd_isr_fwstate()
1811 if (skdev->queue_low_water_mark < 1) in skd_isr_fwstate()
1812 skdev->queue_low_water_mark = 1; in skd_isr_fwstate()
1813 dev_info(&skdev->pdev->dev, in skd_isr_fwstate()
1815 skdev->cur_max_queue_depth, in skd_isr_fwstate()
1816 skdev->dev_max_queue_depth, in skd_isr_fwstate()
1817 skdev->queue_low_water_mark); in skd_isr_fwstate()
1819 skd_refresh_device_data(skdev); in skd_isr_fwstate()
1823 skdev->state = SKD_DRVR_STATE_BUSY; in skd_isr_fwstate()
1824 skdev->timer_countdown = SKD_BUSY_TIMO; in skd_isr_fwstate()
1825 skd_quiesce_dev(skdev); in skd_isr_fwstate()
1831 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; in skd_isr_fwstate()
1832 skdev->timer_countdown = SKD_TIMER_SECONDS(3); in skd_isr_fwstate()
1833 schedule_work(&skdev->start_queue); in skd_isr_fwstate()
1836 skdev->state = SKD_DRVR_STATE_BUSY_ERASE; in skd_isr_fwstate()
1837 skdev->timer_countdown = SKD_BUSY_TIMO; in skd_isr_fwstate()
1840 skdev->state = SKD_DRVR_STATE_IDLE; in skd_isr_fwstate()
1843 switch (skdev->state) { in skd_isr_fwstate()
1849 skdev->state = SKD_DRVR_STATE_RESTARTING; in skd_isr_fwstate()
1854 dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n"); in skd_isr_fwstate()
1855 skdev->state = SKD_DRVR_STATE_WAIT_BOOT; in skd_isr_fwstate()
1856 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; in skd_isr_fwstate()
1865 skd_drive_fault(skdev); in skd_isr_fwstate()
1866 skd_recover_requests(skdev); in skd_isr_fwstate()
1867 schedule_work(&skdev->start_queue); in skd_isr_fwstate()
1872 dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state, in skd_isr_fwstate()
1874 skd_drive_disappeared(skdev); in skd_isr_fwstate()
1875 skd_recover_requests(skdev); in skd_isr_fwstate()
1876 schedule_work(&skdev->start_queue); in skd_isr_fwstate()
1884 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n", in skd_isr_fwstate()
1886 skd_skdev_state_to_str(skdev->state), skdev->state); in skd_isr_fwstate()
1891 struct skd_device *const skdev = data; in skd_recover_request() local
1897 skd_log_skreq(skdev, skreq, "recover"); in skd_recover_request()
1901 skd_postop_sg_list(skdev, skreq); in skd_recover_request()
1908 static void skd_recover_requests(struct skd_device *skdev) in skd_recover_requests() argument
1910 blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev); in skd_recover_requests()
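
Request recovery reuses the same busy-tag iteration as skd_in_flight(). A sketch of the per-request callback, with the failure status and the guard on n_sg assumed:

static void skd_recover_request(struct request *req, void *data, bool reserved)
{
	struct skd_device *const skdev = data;
	struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);

	skd_log_skreq(skdev, skreq, "recover");

	/* Release the DMA resources held by the request. */
	if (skreq->n_sg > 0)
		skd_postop_sg_list(skdev, skreq);

	skreq->status = BLK_STS_IOERR;		/* assumed failure status */
	blk_mq_complete_request(req);
}

static void skd_recover_requests(struct skd_device *skdev)
{
	blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);
}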
1913 static void skd_isr_msg_from_dev(struct skd_device *skdev) in skd_isr_msg_from_dev() argument
1919 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); in skd_isr_msg_from_dev()
1921 dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd, in skd_isr_msg_from_dev()
1922 skdev->last_mtd); in skd_isr_msg_from_dev()
1925 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) in skd_isr_msg_from_dev()
1930 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd); in skd_isr_msg_from_dev()
1932 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) { in skd_isr_msg_from_dev()
1933 dev_err(&skdev->pdev->dev, "protocol mismatch\n"); in skd_isr_msg_from_dev()
1934 dev_err(&skdev->pdev->dev, " got=%d support=%d\n", in skd_isr_msg_from_dev()
1935 skdev->proto_ver, FIT_PROTOCOL_VERSION_1); in skd_isr_msg_from_dev()
1936 dev_err(&skdev->pdev->dev, " please upgrade driver\n"); in skd_isr_msg_from_dev()
1937 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH; in skd_isr_msg_from_dev()
1938 skd_soft_reset(skdev); in skd_isr_msg_from_dev()
1942 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); in skd_isr_msg_from_dev()
1943 skdev->last_mtd = mtd; in skd_isr_msg_from_dev()
1947 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd); in skd_isr_msg_from_dev()
1950 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); in skd_isr_msg_from_dev()
1951 skdev->last_mtd = mtd; in skd_isr_msg_from_dev()
1955 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG); in skd_isr_msg_from_dev()
1957 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); in skd_isr_msg_from_dev()
1958 skdev->last_mtd = mtd; in skd_isr_msg_from_dev()
1962 skd_reset_skcomp(skdev); in skd_isr_msg_from_dev()
1963 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno); in skd_isr_msg_from_dev()
1964 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); in skd_isr_msg_from_dev()
1965 skdev->last_mtd = mtd; in skd_isr_msg_from_dev()
1970 skdev->connect_time_stamp = (u32)ktime_get_real_seconds(); in skd_isr_msg_from_dev()
1971 data = skdev->connect_time_stamp & 0xFFFF; in skd_isr_msg_from_dev()
1973 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); in skd_isr_msg_from_dev()
1974 skdev->last_mtd = mtd; in skd_isr_msg_from_dev()
1978 skdev->drive_jiffies = FIT_MXD_DATA(mfd); in skd_isr_msg_from_dev()
1979 data = (skdev->connect_time_stamp >> 16) & 0xFFFF; in skd_isr_msg_from_dev()
1981 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); in skd_isr_msg_from_dev()
1982 skdev->last_mtd = mtd; in skd_isr_msg_from_dev()
1986 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16); in skd_isr_msg_from_dev()
1988 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); in skd_isr_msg_from_dev()
1989 skdev->last_mtd = mtd; in skd_isr_msg_from_dev()
1991 dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n", in skd_isr_msg_from_dev()
1992 skdev->connect_time_stamp, skdev->drive_jiffies); in skd_isr_msg_from_dev()
1996 skdev->last_mtd = 0; in skd_isr_msg_from_dev()
2007 static void skd_disable_interrupts(struct skd_device *skdev) in skd_disable_interrupts() argument
2011 sense = SKD_READL(skdev, FIT_CONTROL); in skd_disable_interrupts()
2013 SKD_WRITEL(skdev, sense, FIT_CONTROL); in skd_disable_interrupts()
2014 dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense); in skd_disable_interrupts()
2019 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST); in skd_disable_interrupts()
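
skd_disable_interrupts() gates interrupts twice: it clears the master enable in FIT_CONTROL and then masks every source in FIT_INT_MASK_HOST. Reassembled as a sketch (the FIT_CR_ENABLE_INTERRUPTS bit name is an assumption):

static void skd_disable_interrupts(struct skd_device *skdev)
{
	u32 sense;

	sense = SKD_READL(skdev, FIT_CONTROL);
	sense &= ~FIT_CR_ENABLE_INTERRUPTS;	/* assumed bit name */
	SKD_WRITEL(skdev, sense, FIT_CONTROL);
	dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);

	/* Mask all interrupt sources at the device. */
	SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
}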
2022 static void skd_enable_interrupts(struct skd_device *skdev) in skd_enable_interrupts() argument
2032 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST); in skd_enable_interrupts()
2033 dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val); in skd_enable_interrupts()
2035 val = SKD_READL(skdev, FIT_CONTROL); in skd_enable_interrupts()
2037 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val); in skd_enable_interrupts()
2038 SKD_WRITEL(skdev, val, FIT_CONTROL); in skd_enable_interrupts()
2047 static void skd_soft_reset(struct skd_device *skdev) in skd_soft_reset() argument
2051 val = SKD_READL(skdev, FIT_CONTROL); in skd_soft_reset()
2053 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val); in skd_soft_reset()
2054 SKD_WRITEL(skdev, val, FIT_CONTROL); in skd_soft_reset()
2057 static void skd_start_device(struct skd_device *skdev) in skd_start_device() argument
2063 spin_lock_irqsave(&skdev->lock, flags); in skd_start_device()
2066 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); in skd_start_device()
2068 sense = SKD_READL(skdev, FIT_STATUS); in skd_start_device()
2070 dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense); in skd_start_device()
2073 skdev->drive_state = state; in skd_start_device()
2074 skdev->last_mtd = 0; in skd_start_device()
2076 skdev->state = SKD_DRVR_STATE_STARTING; in skd_start_device()
2077 skdev->timer_countdown = SKD_STARTING_TIMO; in skd_start_device()
2079 skd_enable_interrupts(skdev); in skd_start_device()
2081 switch (skdev->drive_state) { in skd_start_device()
2083 dev_err(&skdev->pdev->dev, "Drive offline...\n"); in skd_start_device()
2087 dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n"); in skd_start_device()
2088 skdev->state = SKD_DRVR_STATE_WAIT_BOOT; in skd_start_device()
2089 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; in skd_start_device()
2093 dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n"); in skd_start_device()
2094 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; in skd_start_device()
2095 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; in skd_start_device()
2099 dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n"); in skd_start_device()
2100 skdev->state = SKD_DRVR_STATE_BUSY_ERASE; in skd_start_device()
2101 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; in skd_start_device()
2106 skd_soft_reset(skdev); in skd_start_device()
2110 dev_err(&skdev->pdev->dev, "Drive Busy...\n"); in skd_start_device()
2111 skdev->state = SKD_DRVR_STATE_BUSY; in skd_start_device()
2112 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; in skd_start_device()
2116 dev_err(&skdev->pdev->dev, "drive soft reset in prog\n"); in skd_start_device()
2124 skd_drive_fault(skdev); in skd_start_device()
2126 dev_dbg(&skdev->pdev->dev, "starting queue\n"); in skd_start_device()
2127 schedule_work(&skdev->start_queue); in skd_start_device()
2128 skdev->gendisk_on = -1; in skd_start_device()
2129 wake_up_interruptible(&skdev->waitq); in skd_start_device()
2135 skd_drive_disappeared(skdev); in skd_start_device()
2137 dev_dbg(&skdev->pdev->dev, in skd_start_device()
2139 schedule_work(&skdev->start_queue); in skd_start_device()
2140 skdev->gendisk_on = -1; in skd_start_device()
2141 wake_up_interruptible(&skdev->waitq); in skd_start_device()
2145 dev_err(&skdev->pdev->dev, "Start: unknown state %x\n", in skd_start_device()
2146 skdev->drive_state); in skd_start_device()
2150 state = SKD_READL(skdev, FIT_CONTROL); in skd_start_device()
2151 dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state); in skd_start_device()
2153 state = SKD_READL(skdev, FIT_INT_STATUS_HOST); in skd_start_device()
2154 dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state); in skd_start_device()
2156 state = SKD_READL(skdev, FIT_INT_MASK_HOST); in skd_start_device()
2157 dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state); in skd_start_device()
2159 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); in skd_start_device()
2160 dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state); in skd_start_device()
2162 state = SKD_READL(skdev, FIT_HW_VERSION); in skd_start_device()
2163 dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state); in skd_start_device()
2165 spin_unlock_irqrestore(&skdev->lock, flags); in skd_start_device()
2168 static void skd_stop_device(struct skd_device *skdev) in skd_stop_device() argument
2171 struct skd_special_context *skspcl = &skdev->internal_skspcl; in skd_stop_device()
2175 spin_lock_irqsave(&skdev->lock, flags); in skd_stop_device()
2177 if (skdev->state != SKD_DRVR_STATE_ONLINE) { in skd_stop_device()
2178 dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__); in skd_stop_device()
2183 dev_err(&skdev->pdev->dev, "%s no special\n", __func__); in skd_stop_device()
2187 skdev->state = SKD_DRVR_STATE_SYNCING; in skd_stop_device()
2188 skdev->sync_done = 0; in skd_stop_device()
2190 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE); in skd_stop_device()
2192 spin_unlock_irqrestore(&skdev->lock, flags); in skd_stop_device()
2194 wait_event_interruptible_timeout(skdev->waitq, in skd_stop_device()
2195 (skdev->sync_done), (10 * HZ)); in skd_stop_device()
2197 spin_lock_irqsave(&skdev->lock, flags); in skd_stop_device()
2199 switch (skdev->sync_done) { in skd_stop_device()
2201 dev_err(&skdev->pdev->dev, "%s no sync\n", __func__); in skd_stop_device()
2204 dev_err(&skdev->pdev->dev, "%s sync done\n", __func__); in skd_stop_device()
2207 dev_err(&skdev->pdev->dev, "%s sync error\n", __func__); in skd_stop_device()
2211 skdev->state = SKD_DRVR_STATE_STOPPING; in skd_stop_device()
2212 spin_unlock_irqrestore(&skdev->lock, flags); in skd_stop_device()
2214 skd_kill_timer(skdev); in skd_stop_device()
2216 spin_lock_irqsave(&skdev->lock, flags); in skd_stop_device()
2217 skd_disable_interrupts(skdev); in skd_stop_device()
2221 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); in skd_stop_device()
2222 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL); in skd_stop_device()
2224 spin_unlock_irqrestore(&skdev->lock, flags); in skd_stop_device()
2229 SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK; in skd_stop_device()
2237 dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__, in skd_stop_device()
2242 static void skd_restart_device(struct skd_device *skdev) in skd_restart_device() argument
2247 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); in skd_restart_device()
2249 state = SKD_READL(skdev, FIT_STATUS); in skd_restart_device()
2251 dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state); in skd_restart_device()
2254 skdev->drive_state = state; in skd_restart_device()
2255 skdev->last_mtd = 0; in skd_restart_device()
2257 skdev->state = SKD_DRVR_STATE_RESTARTING; in skd_restart_device()
2258 skdev->timer_countdown = SKD_RESTARTING_TIMO; in skd_restart_device()
2260 skd_soft_reset(skdev); in skd_restart_device()
2264 static int skd_quiesce_dev(struct skd_device *skdev) in skd_quiesce_dev() argument
2268 switch (skdev->state) { in skd_quiesce_dev()
2271 dev_dbg(&skdev->pdev->dev, "stopping queue\n"); in skd_quiesce_dev()
2272 blk_mq_stop_hw_queues(skdev->queue); in skd_quiesce_dev()
2284 dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n", in skd_quiesce_dev()
2285 skdev->state); in skd_quiesce_dev()
2291 static int skd_unquiesce_dev(struct skd_device *skdev) in skd_unquiesce_dev() argument
2293 int prev_driver_state = skdev->state; in skd_unquiesce_dev()
2295 skd_log_skdev(skdev, "unquiesce"); in skd_unquiesce_dev()
2296 if (skdev->state == SKD_DRVR_STATE_ONLINE) { in skd_unquiesce_dev()
2297 dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n"); in skd_unquiesce_dev()
2300 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) { in skd_unquiesce_dev()
2309 skdev->state = SKD_DRVR_STATE_BUSY; in skd_unquiesce_dev()
2310 dev_dbg(&skdev->pdev->dev, "drive BUSY state\n"); in skd_unquiesce_dev()
2318 switch (skdev->state) { in skd_unquiesce_dev()
2328 skdev->state = SKD_DRVR_STATE_ONLINE; in skd_unquiesce_dev()
2329 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n", in skd_unquiesce_dev()
2331 prev_driver_state, skd_skdev_state_to_str(skdev->state), in skd_unquiesce_dev()
2332 skdev->state); in skd_unquiesce_dev()
2333 dev_dbg(&skdev->pdev->dev, in skd_unquiesce_dev()
2335 dev_dbg(&skdev->pdev->dev, "starting queue\n"); in skd_unquiesce_dev()
2336 dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n"); in skd_unquiesce_dev()
2337 schedule_work(&skdev->start_queue); in skd_unquiesce_dev()
2338 skdev->gendisk_on = 1; in skd_unquiesce_dev()
2339 wake_up_interruptible(&skdev->waitq); in skd_unquiesce_dev()
2344 dev_dbg(&skdev->pdev->dev, in skd_unquiesce_dev()
2346 skdev->state); in skd_unquiesce_dev()
2360 struct skd_device *skdev = skd_host_data; in skd_reserved_isr() local
2363 spin_lock_irqsave(&skdev->lock, flags); in skd_reserved_isr()
2364 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", in skd_reserved_isr()
2365 SKD_READL(skdev, FIT_INT_STATUS_HOST)); in skd_reserved_isr()
2366 dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq, in skd_reserved_isr()
2367 SKD_READL(skdev, FIT_INT_STATUS_HOST)); in skd_reserved_isr()
2368 SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST); in skd_reserved_isr()
2369 spin_unlock_irqrestore(&skdev->lock, flags); in skd_reserved_isr()
2375 struct skd_device *skdev = skd_host_data; in skd_statec_isr() local
2378 spin_lock_irqsave(&skdev->lock, flags); in skd_statec_isr()
2379 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", in skd_statec_isr()
2380 SKD_READL(skdev, FIT_INT_STATUS_HOST)); in skd_statec_isr()
2381 SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST); in skd_statec_isr()
2382 skd_isr_fwstate(skdev); in skd_statec_isr()
2383 spin_unlock_irqrestore(&skdev->lock, flags); in skd_statec_isr()
2389 struct skd_device *skdev = skd_host_data; in skd_comp_q() local
2394 spin_lock_irqsave(&skdev->lock, flags); in skd_comp_q()
2395 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", in skd_comp_q()
2396 SKD_READL(skdev, FIT_INT_STATUS_HOST)); in skd_comp_q()
2397 SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST); in skd_comp_q()
2398 deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit, in skd_comp_q()
2401 schedule_work(&skdev->start_queue); in skd_comp_q()
2404 schedule_work(&skdev->completion_worker); in skd_comp_q()
2406 schedule_work(&skdev->start_queue); in skd_comp_q()
2408 spin_unlock_irqrestore(&skdev->lock, flags); in skd_comp_q()
2415 struct skd_device *skdev = skd_host_data; in skd_msg_isr() local
2418 spin_lock_irqsave(&skdev->lock, flags); in skd_msg_isr()
2419 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", in skd_msg_isr()
2420 SKD_READL(skdev, FIT_INT_STATUS_HOST)); in skd_msg_isr()
2421 SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST); in skd_msg_isr()
2422 skd_isr_msg_from_dev(skdev); in skd_msg_isr()
2423 spin_unlock_irqrestore(&skdev->lock, flags); in skd_msg_isr()
2429 struct skd_device *skdev = skd_host_data; in skd_qfull_isr() local
2432 spin_lock_irqsave(&skdev->lock, flags); in skd_qfull_isr()
2433 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", in skd_qfull_isr()
2434 SKD_READL(skdev, FIT_INT_STATUS_HOST)); in skd_qfull_isr()
2435 SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST); in skd_qfull_isr()
2436 spin_unlock_irqrestore(&skdev->lock, flags); in skd_qfull_isr()
2475 static int skd_acquire_msix(struct skd_device *skdev) in skd_acquire_msix() argument
2478 struct pci_dev *pdev = skdev->pdev; in skd_acquire_msix()
2483 dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc); in skd_acquire_msix()
2487 skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT, in skd_acquire_msix()
2489 if (!skdev->msix_entries) { in skd_acquire_msix()
2491 dev_err(&skdev->pdev->dev, "msix table allocation error\n"); in skd_acquire_msix()
2497 struct skd_msix_entry *qentry = &skdev->msix_entries[i]; in skd_acquire_msix()
2500 "%s%d-msix %s", DRV_NAME, skdev->devno, in skd_acquire_msix()
2503 rc = devm_request_irq(&skdev->pdev->dev, in skd_acquire_msix()
2504 pci_irq_vector(skdev->pdev, i), in skd_acquire_msix()
2506 qentry->isr_name, skdev); in skd_acquire_msix()
2508 dev_err(&skdev->pdev->dev, in skd_acquire_msix()
2515 dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n", in skd_acquire_msix()
2521 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev); in skd_acquire_msix()
2523 kfree(skdev->msix_entries); in skd_acquire_msix()
2524 skdev->msix_entries = NULL; in skd_acquire_msix()
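
The MSI-X path requests a fixed vector count and wires one handler per vector, matching the five MSI-X ISRs above (reserved, state change, completion queue, message-from-device, queue full). The allocation step only touches pdev and so is absent from the listing; a sketch, assuming the modern pci_alloc_irq_vectors() API and the SKD_MAX_MSIX_COUNT constant from the kcalloc() call:

	rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT,
				   SKD_MAX_MSIX_COUNT, PCI_IRQ_MSIX);
	if (rc < 0) {
		dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
		return rc;	/* assumed unwind */
	}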
2528 static int skd_acquire_irq(struct skd_device *skdev) in skd_acquire_irq() argument
2530 struct pci_dev *pdev = skdev->pdev; in skd_acquire_irq()
2535 rc = skd_acquire_msix(skdev); in skd_acquire_irq()
2539 dev_err(&skdev->pdev->dev, in skd_acquire_irq()
2543 snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME, in skd_acquire_irq()
2544 skdev->devno); in skd_acquire_irq()
2550 dev_err(&skdev->pdev->dev, in skd_acquire_irq()
2557 skdev->isr_name, skdev); in skd_acquire_irq()
2560 dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n", in skd_acquire_irq()
2568 static void skd_release_irq(struct skd_device *skdev) in skd_release_irq() argument
2570 struct pci_dev *pdev = skdev->pdev; in skd_release_irq()
2572 if (skdev->msix_entries) { in skd_release_irq()
2577 skdev); in skd_release_irq()
2580 kfree(skdev->msix_entries); in skd_release_irq()
2581 skdev->msix_entries = NULL; in skd_release_irq()
2583 devm_free_irq(&pdev->dev, pdev->irq, skdev); in skd_release_irq()
2595 static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s, in skd_alloc_dma() argument
2599 struct device *dev = &skdev->pdev->dev; in skd_alloc_dma()
2614 static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s, in skd_free_dma() argument
2621 dma_unmap_single(&skdev->pdev->dev, dma_handle, in skd_free_dma()
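
skd_alloc_dma()/skd_free_dma() wrap a kmem_cache allocation plus a streaming DMA mapping, standing in for per-buffer coherent allocations. A sketch of the alloc side under that reading (zeroing and the exact error handling are assumed):

static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
			   dma_addr_t *dma_handle, gfp_t gfp,
			   enum dma_data_direction dir)
{
	struct device *dev = &skdev->pdev->dev;
	void *buf;

	buf = kmem_cache_zalloc(s, gfp);
	if (!buf)
		return NULL;
	*dma_handle = dma_map_single(dev, buf, kmem_cache_size(s), dir);
	if (dma_mapping_error(dev, *dma_handle)) {
		kmem_cache_free(s, buf);
		return NULL;
	}
	return buf;
}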
2626 static int skd_cons_skcomp(struct skd_device *skdev) in skd_cons_skcomp() argument
2631 dev_dbg(&skdev->pdev->dev, in skd_cons_skcomp()
2635 skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE, in skd_cons_skcomp()
2636 &skdev->cq_dma_address); in skd_cons_skcomp()
2643 skdev->skcomp_table = skcomp; in skd_cons_skcomp()
2644 skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp + in skd_cons_skcomp()
2652 static int skd_cons_skmsg(struct skd_device *skdev) in skd_cons_skmsg() argument
2657 dev_dbg(&skdev->pdev->dev, in skd_cons_skmsg()
2659 sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context, in skd_cons_skmsg()
2660 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context); in skd_cons_skmsg()
2662 skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context, in skd_cons_skmsg()
2665 if (skdev->skmsg_table == NULL) { in skd_cons_skmsg()
2670 for (i = 0; i < skdev->num_fitmsg_context; i++) { in skd_cons_skmsg()
2673 skmsg = &skdev->skmsg_table[i]; in skd_cons_skmsg()
2677 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev, in skd_cons_skmsg()
2697 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev, in skd_cons_sg_list() argument
2703 sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr, in skd_cons_sg_list()
2722 static void skd_free_sg_list(struct skd_device *skdev, in skd_free_sg_list() argument
2729 skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr, in skd_free_sg_list()
2736 struct skd_device *skdev = set->driver_data; in skd_init_request() local
2742 skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request, in skd_init_request()
2751 struct skd_device *skdev = set->driver_data; in skd_exit_request() local
2754 skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address); in skd_exit_request()
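
skd_init_request()/skd_exit_request() are blk-mq pdu hooks: the per-request context lives in the pdu, the scatterlist array sits directly behind it (matching the tag_set.cmd_size computation later in the listing), and the FIT sg list is allocated and freed here. A sketch, with the pdu-trailing sg placement inferred from cmd_size:

static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct skd_device *skdev = set->driver_data;
	struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);

	skreq->sg = (void *)(skreq + 1);	/* sg array follows the pdu */
	sg_init_table(skreq->sg, skd_sgs_per_request);
	skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
					    &skreq->sksg_dma_address);

	return skreq->sksg_list ? 0 : -ENOMEM;
}

static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq,
			     unsigned int hctx_idx)
{
	struct skd_device *skdev = set->driver_data;
	struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);

	skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address);
}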
2757 static int skd_cons_sksb(struct skd_device *skdev) in skd_cons_sksb() argument
2762 skspcl = &skdev->internal_skspcl; in skd_cons_sksb()
2767 skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache, in skd_cons_sksb()
2776 skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache, in skd_cons_sksb()
2784 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1, in skd_cons_sksb()
2791 if (!skd_format_internal_skspcl(skdev)) { in skd_cons_sksb()
2808 static int skd_cons_disk(struct skd_device *skdev) in skd_cons_disk() argument
2821 skdev->disk = disk; in skd_cons_disk()
2822 sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno); in skd_cons_disk()
2824 disk->major = skdev->major; in skd_cons_disk()
2825 disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE; in skd_cons_disk()
2827 disk->private_data = skdev; in skd_cons_disk()
2829 memset(&skdev->tag_set, 0, sizeof(skdev->tag_set)); in skd_cons_disk()
2830 skdev->tag_set.ops = &skd_mq_ops; in skd_cons_disk()
2831 skdev->tag_set.nr_hw_queues = 1; in skd_cons_disk()
2832 skdev->tag_set.queue_depth = skd_max_queue_depth; in skd_cons_disk()
2833 skdev->tag_set.cmd_size = sizeof(struct skd_request_context) + in skd_cons_disk()
2834 skdev->sgs_per_request * sizeof(struct scatterlist); in skd_cons_disk()
2835 skdev->tag_set.numa_node = NUMA_NO_NODE; in skd_cons_disk()
2836 skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | in skd_cons_disk()
2839 skdev->tag_set.driver_data = skdev; in skd_cons_disk()
2840 rc = blk_mq_alloc_tag_set(&skdev->tag_set); in skd_cons_disk()
2843 q = blk_mq_init_queue(&skdev->tag_set); in skd_cons_disk()
2845 blk_mq_free_tag_set(&skdev->tag_set); in skd_cons_disk()
2849 q->queuedata = skdev; in skd_cons_disk()
2851 skdev->queue = q; in skd_cons_disk()
2855 blk_queue_max_segments(q, skdev->sgs_per_request); in skd_cons_disk()
2866 spin_lock_irqsave(&skdev->lock, flags); in skd_cons_disk()
2867 dev_dbg(&skdev->pdev->dev, "stopping queue\n"); in skd_cons_disk()
2868 blk_mq_stop_hw_queues(skdev->queue); in skd_cons_disk()
2869 spin_unlock_irqrestore(&skdev->lock, flags); in skd_cons_disk()
2880 struct skd_device *skdev; in skd_construct() local
2885 skdev = kzalloc(sizeof(*skdev), GFP_KERNEL); in skd_construct()
2887 if (!skdev) { in skd_construct()
2892 skdev->state = SKD_DRVR_STATE_LOAD; in skd_construct()
2893 skdev->pdev = pdev; in skd_construct()
2894 skdev->devno = skd_next_devno++; in skd_construct()
2895 skdev->major = blk_major; in skd_construct()
2896 skdev->dev_max_queue_depth = 0; in skd_construct()
2898 skdev->num_req_context = skd_max_queue_depth; in skd_construct()
2899 skdev->num_fitmsg_context = skd_max_queue_depth; in skd_construct()
2900 skdev->cur_max_queue_depth = 1; in skd_construct()
2901 skdev->queue_low_water_mark = 1; in skd_construct()
2902 skdev->proto_ver = 99; in skd_construct()
2903 skdev->sgs_per_request = skd_sgs_per_request; in skd_construct()
2904 skdev->dbg_level = skd_dbg_level; in skd_construct()
2906 spin_lock_init(&skdev->lock); in skd_construct()
2908 INIT_WORK(&skdev->start_queue, skd_start_queue); in skd_construct()
2909 INIT_WORK(&skdev->completion_worker, skd_completion_worker); in skd_construct()
2912 skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0, in skd_construct()
2914 if (!skdev->msgbuf_cache) in skd_construct()
2916 WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size, in skd_construct()
2918 kmem_cache_size(skdev->msgbuf_cache), size); in skd_construct()
2920 skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0, in skd_construct()
2922 if (!skdev->sglist_cache) in skd_construct()
2924 WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size, in skd_construct()
2926 kmem_cache_size(skdev->sglist_cache), size); in skd_construct()
2928 skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0, in skd_construct()
2930 if (!skdev->databuf_cache) in skd_construct()
2932 WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size, in skd_construct()
2934 kmem_cache_size(skdev->databuf_cache), size); in skd_construct()
2936 dev_dbg(&skdev->pdev->dev, "skcomp\n"); in skd_construct()
2937 rc = skd_cons_skcomp(skdev); in skd_construct()
2941 dev_dbg(&skdev->pdev->dev, "skmsg\n"); in skd_construct()
2942 rc = skd_cons_skmsg(skdev); in skd_construct()
2946 dev_dbg(&skdev->pdev->dev, "sksb\n"); in skd_construct()
2947 rc = skd_cons_sksb(skdev); in skd_construct()
2951 dev_dbg(&skdev->pdev->dev, "disk\n"); in skd_construct()
2952 rc = skd_cons_disk(skdev); in skd_construct()
2956 dev_dbg(&skdev->pdev->dev, "VICTORY\n"); in skd_construct()
2957 return skdev; in skd_construct()
2960 dev_dbg(&skdev->pdev->dev, "construct failed\n"); in skd_construct()
2961 skd_destruct(skdev); in skd_construct()
2971 static void skd_free_skcomp(struct skd_device *skdev) in skd_free_skcomp() argument
2973 if (skdev->skcomp_table) in skd_free_skcomp()
2974 pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE, in skd_free_skcomp()
2975 skdev->skcomp_table, skdev->cq_dma_address); in skd_free_skcomp()
2977 skdev->skcomp_table = NULL; in skd_free_skcomp()
2978 skdev->cq_dma_address = 0; in skd_free_skcomp()
2981 static void skd_free_skmsg(struct skd_device *skdev) in skd_free_skmsg() argument
2985 if (skdev->skmsg_table == NULL) in skd_free_skmsg()
2988 for (i = 0; i < skdev->num_fitmsg_context; i++) { in skd_free_skmsg()
2991 skmsg = &skdev->skmsg_table[i]; in skd_free_skmsg()
2994 pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES, in skd_free_skmsg()
3002 kfree(skdev->skmsg_table); in skd_free_skmsg()
3003 skdev->skmsg_table = NULL; in skd_free_skmsg()
3006 static void skd_free_sksb(struct skd_device *skdev) in skd_free_sksb() argument
3008 struct skd_special_context *skspcl = &skdev->internal_skspcl; in skd_free_sksb()
3010 skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf, in skd_free_sksb()
3016 skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf, in skd_free_sksb()
3022 skd_free_sg_list(skdev, skspcl->req.sksg_list, in skd_free_sksb()
3029 static void skd_free_disk(struct skd_device *skdev) in skd_free_disk() argument
3031 struct gendisk *disk = skdev->disk; in skd_free_disk()
3036 if (skdev->queue) { in skd_free_disk()
3037 blk_cleanup_queue(skdev->queue); in skd_free_disk()
3038 skdev->queue = NULL; in skd_free_disk()
3043 if (skdev->tag_set.tags) in skd_free_disk()
3044 blk_mq_free_tag_set(&skdev->tag_set); in skd_free_disk()
3047 skdev->disk = NULL; in skd_free_disk()
3050 static void skd_destruct(struct skd_device *skdev) in skd_destruct() argument
3052 if (skdev == NULL) in skd_destruct()
3055 cancel_work_sync(&skdev->start_queue); in skd_destruct()
3057 dev_dbg(&skdev->pdev->dev, "disk\n"); in skd_destruct()
3058 skd_free_disk(skdev); in skd_destruct()
3060 dev_dbg(&skdev->pdev->dev, "sksb\n"); in skd_destruct()
3061 skd_free_sksb(skdev); in skd_destruct()
3063 dev_dbg(&skdev->pdev->dev, "skmsg\n"); in skd_destruct()
3064 skd_free_skmsg(skdev); in skd_destruct()
3066 dev_dbg(&skdev->pdev->dev, "skcomp\n"); in skd_destruct()
3067 skd_free_skcomp(skdev); in skd_destruct()
3069 kmem_cache_destroy(skdev->databuf_cache); in skd_destruct()
3070 kmem_cache_destroy(skdev->sglist_cache); in skd_destruct()
3071 kmem_cache_destroy(skdev->msgbuf_cache); in skd_destruct()
3073 dev_dbg(&skdev->pdev->dev, "skdev\n"); in skd_destruct()
3074 kfree(skdev); in skd_destruct()
3085 struct skd_device *skdev; in skd_bdev_getgeo() local
3088 skdev = bdev->bd_disk->private_data; in skd_bdev_getgeo()
3090 dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n", in skd_bdev_getgeo()
3093 if (skdev->read_cap_is_valid) { in skd_bdev_getgeo()
3094 capacity = get_capacity(skdev->disk); in skd_bdev_getgeo()
3104 static int skd_bdev_attach(struct device *parent, struct skd_device *skdev) in skd_bdev_attach() argument
3106 dev_dbg(&skdev->pdev->dev, "add_disk\n"); in skd_bdev_attach()
3107 device_add_disk(parent, skdev->disk); in skd_bdev_attach()
3130 static char *skd_pci_info(struct skd_device *skdev, char *str) in skd_pci_info() argument
3135 pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP); in skd_pci_info()
3143 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat); in skd_pci_info()
3164 struct skd_device *skdev; in skd_pci_probe() local
3197 skdev = skd_construct(pdev); in skd_pci_probe()
3198 if (skdev == NULL) { in skd_pci_probe()
3203 skd_pci_info(skdev, pci_str); in skd_pci_probe()
3211 skdev->pcie_error_reporting_is_enabled = 0; in skd_pci_probe()
3213 skdev->pcie_error_reporting_is_enabled = 1; in skd_pci_probe()
3215 pci_set_drvdata(pdev, skdev); in skd_pci_probe()
3218 skdev->mem_phys[i] = pci_resource_start(pdev, i); in skd_pci_probe()
3219 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); in skd_pci_probe()
3220 skdev->mem_map[i] = ioremap(skdev->mem_phys[i], in skd_pci_probe()
3221 skdev->mem_size[i]); in skd_pci_probe()
3222 if (!skdev->mem_map[i]) { in skd_pci_probe()
3229 skdev->mem_map[i], (uint64_t)skdev->mem_phys[i], in skd_pci_probe()
3230 skdev->mem_size[i]); in skd_pci_probe()
3233 rc = skd_acquire_irq(skdev); in skd_pci_probe()
3239 rc = skd_start_timer(skdev); in skd_pci_probe()
3243 init_waitqueue_head(&skdev->waitq); in skd_pci_probe()
3245 skd_start_device(skdev); in skd_pci_probe()
3247 rc = wait_event_interruptible_timeout(skdev->waitq, in skd_pci_probe()
3248 (skdev->gendisk_on), in skd_pci_probe()
3250 if (skdev->gendisk_on > 0) { in skd_pci_probe()
3252 skd_bdev_attach(&pdev->dev, skdev); in skd_pci_probe()
3268 skd_stop_device(skdev); in skd_pci_probe()
3269 skd_release_irq(skdev); in skd_pci_probe()
3273 if (skdev->mem_map[i]) in skd_pci_probe()
3274 iounmap(skdev->mem_map[i]); in skd_pci_probe()
3276 if (skdev->pcie_error_reporting_is_enabled) in skd_pci_probe()
3279 skd_destruct(skdev); in skd_pci_probe()
3293 struct skd_device *skdev; in skd_pci_remove() local
3295 skdev = pci_get_drvdata(pdev); in skd_pci_remove()
3296 if (!skdev) { in skd_pci_remove()
3300 skd_stop_device(skdev); in skd_pci_remove()
3301 skd_release_irq(skdev); in skd_pci_remove()
3304 if (skdev->mem_map[i]) in skd_pci_remove()
3305 iounmap(skdev->mem_map[i]); in skd_pci_remove()
3307 if (skdev->pcie_error_reporting_is_enabled) in skd_pci_remove()
3310 skd_destruct(skdev); in skd_pci_remove()
3322 struct skd_device *skdev; in skd_pci_suspend() local
3324 skdev = pci_get_drvdata(pdev); in skd_pci_suspend()
3325 if (!skdev) { in skd_pci_suspend()
3330 skd_stop_device(skdev); in skd_pci_suspend()
3332 skd_release_irq(skdev); in skd_pci_suspend()
3335 if (skdev->mem_map[i]) in skd_pci_suspend()
3336 iounmap(skdev->mem_map[i]); in skd_pci_suspend()
3338 if (skdev->pcie_error_reporting_is_enabled) in skd_pci_suspend()
3352 struct skd_device *skdev; in skd_pci_resume() local
3354 skdev = pci_get_drvdata(pdev); in skd_pci_resume()
3355 if (!skdev) { in skd_pci_resume()
3391 skdev->pcie_error_reporting_is_enabled = 0; in skd_pci_resume()
3393 skdev->pcie_error_reporting_is_enabled = 1; in skd_pci_resume()
3397 skdev->mem_phys[i] = pci_resource_start(pdev, i); in skd_pci_resume()
3398 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); in skd_pci_resume()
3399 skdev->mem_map[i] = ioremap(skdev->mem_phys[i], in skd_pci_resume()
3400 skdev->mem_size[i]); in skd_pci_resume()
3401 if (!skdev->mem_map[i]) { in skd_pci_resume()
3407 skdev->mem_map[i], (uint64_t)skdev->mem_phys[i], in skd_pci_resume()
3408 skdev->mem_size[i]); in skd_pci_resume()
3410 rc = skd_acquire_irq(skdev); in skd_pci_resume()
3416 rc = skd_start_timer(skdev); in skd_pci_resume()
3420 init_waitqueue_head(&skdev->waitq); in skd_pci_resume()
3422 skd_start_device(skdev); in skd_pci_resume()
3427 skd_stop_device(skdev); in skd_pci_resume()
3428 skd_release_irq(skdev); in skd_pci_resume()
3432 if (skdev->mem_map[i]) in skd_pci_resume()
3433 iounmap(skdev->mem_map[i]); in skd_pci_resume()
3435 if (skdev->pcie_error_reporting_is_enabled) in skd_pci_resume()
3448 struct skd_device *skdev; in skd_pci_shutdown() local
3452 skdev = pci_get_drvdata(pdev); in skd_pci_shutdown()
3453 if (!skdev) { in skd_pci_shutdown()
3459 skd_stop_device(skdev); in skd_pci_shutdown()
3573 static void skd_log_skdev(struct skd_device *skdev, const char *event) in skd_log_skdev() argument
3575 dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event); in skd_log_skdev()
3576 dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n", in skd_log_skdev()
3577 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, in skd_log_skdev()
3578 skd_skdev_state_to_str(skdev->state), skdev->state); in skd_log_skdev()
3579 dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n", in skd_log_skdev()
3580 skd_in_flight(skdev), skdev->cur_max_queue_depth, in skd_log_skdev()
3581 skdev->dev_max_queue_depth, skdev->queue_low_water_mark); in skd_log_skdev()
3582 dev_dbg(&skdev->pdev->dev, " cycle=%d cycle_ix=%d\n", in skd_log_skdev()
3583 skdev->skcomp_cycle, skdev->skcomp_ix); in skd_log_skdev()
3586 static void skd_log_skreq(struct skd_device *skdev, in skd_log_skreq() argument
3593 dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event); in skd_log_skreq()
3594 dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n", in skd_log_skreq()
3597 dev_dbg(&skdev->pdev->dev, " sg_dir=%d n_sg=%d\n", in skd_log_skreq()
3600 dev_dbg(&skdev->pdev->dev, in skd_log_skreq()