Lines matching references to 'vk' (struct bcm_vk *) in the Broadcom VK driver. Each entry reads: <source line number> <matching source line> in <enclosing function>(), where a trailing 'local' or 'argument' notes whether vk is a local variable or a parameter of that function.
140 struct bcm_vk *vk = dev_id; in bcm_vk_notf_irqhandler() local
142 if (!bcm_vk_drv_access_ok(vk)) { in bcm_vk_notf_irqhandler()
143 dev_err(&vk->pdev->dev, in bcm_vk_notf_irqhandler()
149 if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0) in bcm_vk_notf_irqhandler()
150 queue_work(vk->wq_thread, &vk->wq_work); in bcm_vk_notf_irqhandler()
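The bcm_vk_notf_irqhandler() fragments above show the classic top-half pattern: the hard IRQ handler only checks that the device is still accessible and defers all real work to a workqueue, with test_and_set_bit() ensuring that back-to-back interrupts queue the work at most once. A minimal sketch; the control flow and the error message between the listed lines are assumptions, only the calls shown above are from the source:

    static irqreturn_t bcm_vk_notf_irqhandler(int irq, void *dev_id)
    {
            struct bcm_vk *vk = dev_id;

            if (!bcm_vk_drv_access_ok(vk)) {
                    dev_err(&vk->pdev->dev,
                            "notf irq %d while driver access blocked\n", irq);
                    return IRQ_HANDLED;
            }

            /* if no notification is already pending, schedule the worker */
            if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0)
                    queue_work(vk->wq_thread, &vk->wq_work);

            return IRQ_HANDLED;
    }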
156 static int bcm_vk_intf_ver_chk(struct bcm_vk *vk) in bcm_vk_intf_ver_chk() argument
158 struct device *dev = &vk->pdev->dev; in bcm_vk_intf_ver_chk()
164 reg = vkread32(vk, BAR_0, BAR_INTF_VER); in bcm_vk_intf_ver_chk()
179 bcm_vk_set_host_alert(vk, ERR_LOG_HOST_INTF_V_FAIL); in bcm_vk_intf_ver_chk()
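bcm_vk_intf_ver_chk() reads the interface version the card advertises in BAR_INTF_VER and raises the ERR_LOG_HOST_INTF_V_FAIL host alert on a major-version mismatch. A hedged sketch: the major/minor packing, the SEMANTIC_MAJOR constant, and the error code are assumptions; only the vkread32() and bcm_vk_set_host_alert() calls are from the listing:

    static int bcm_vk_intf_ver_chk(struct bcm_vk *vk)
    {
            struct device *dev = &vk->pdev->dev;
            u32 reg, major, minor;
            int ret = 0;

            /* read the interface version register exposed by the card */
            reg = vkread32(vk, BAR_0, BAR_INTF_VER);
            major = (reg >> BAR_INTF_VER_SHIFT) & BAR_INTF_VER_MASK; /* assumed packing */
            minor = reg & BAR_INTF_VER_MASK;

            if (major != SEMANTIC_MAJOR) {
                    dev_err(dev, "intf version %u.%u rejected, driver expects %u.x\n",
                            major, minor, SEMANTIC_MAJOR);
                    bcm_vk_set_host_alert(vk, ERR_LOG_HOST_INTF_V_FAIL);
                    ret = -EPFNOSUPPORT;    /* assumed error code */
            }

            return ret;
    }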
189 static void bcm_vk_log_notf(struct bcm_vk *vk, in bcm_vk_log_notf() argument
199 struct device *dev = &vk->pdev->dev; in bcm_vk_log_notf()
211 reg = vkread32(vk, BAR_0, BAR_CARD_ERR_MEM); in bcm_vk_log_notf()
215 if ((uecc_mem_err != vk->alert_cnts.uecc) && in bcm_vk_log_notf()
219 DRV_MODULE_NAME, vk->devid, in bcm_vk_log_notf()
221 vk->alert_cnts.uecc = uecc_mem_err; in bcm_vk_log_notf()
223 reg = vkread32(vk, BAR_0, BAR_CARD_ERR_MEM); in bcm_vk_log_notf()
227 if ((ecc_mem_err != vk->alert_cnts.ecc) && in bcm_vk_log_notf()
230 DRV_MODULE_NAME, vk->devid, in bcm_vk_log_notf()
232 vk->alert_cnts.ecc = ecc_mem_err; in bcm_vk_log_notf()
236 DRV_MODULE_NAME, vk->devid, entry->str, in bcm_vk_log_notf()
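bcm_vk_log_notf() reads the ECC/UECC counters from BAR_CARD_ERR_MEM and logs only when a counter has changed since the last pass, caching the new value in vk->alert_cnts so repeated notifications do not flood the log. The shape of that edge detection, with a hypothetical extractor macro standing in for the real field decoding:

    struct device *dev = &vk->pdev->dev;
    u32 reg, uecc_mem_err;

    reg = vkread32(vk, BAR_0, BAR_CARD_ERR_MEM);
    uecc_mem_err = GET_UECC_COUNT(reg);     /* hypothetical extractor */
    if ((uecc_mem_err != vk->alert_cnts.uecc) && uecc_mem_err)
            dev_info(dev, "%s:%d uecc count %u\n",
                     DRV_MODULE_NAME, vk->devid, uecc_mem_err);
    vk->alert_cnts.uecc = uecc_mem_err;     /* remember for the next pass */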
242 static void bcm_vk_dump_peer_log(struct bcm_vk *vk) in bcm_vk_dump_peer_log() argument
245 struct bcm_vk_peer_log *log_info = &vk->peerlog_info; in bcm_vk_dump_peer_log()
248 struct device *dev = &vk->pdev->dev; in bcm_vk_dump_peer_log()
251 memcpy_fromio(&log, vk->bar[BAR_2] + vk->peerlog_off, sizeof(log)); in bcm_vk_dump_peer_log()
275 data_offset = vk->peerlog_off + sizeof(struct bcm_vk_peer_log); in bcm_vk_dump_peer_log()
278 loc_buf[cnt] = vkread8(vk, BAR_2, data_offset + log.rd_idx); in bcm_vk_dump_peer_log()
290 vkwrite32(vk, log.rd_idx, BAR_2, in bcm_vk_dump_peer_log()
291 vk->peerlog_off + offsetof(struct bcm_vk_peer_log, rd_idx)); in bcm_vk_dump_peer_log()
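bcm_vk_dump_peer_log() snapshots the log header from BAR_2 with memcpy_fromio(), walks the circular data area one byte at a time with vkread8() until rd_idx meets wr_idx, and finally writes the consumed rd_idx back so the card knows the host has drained the log. A condensed sketch of the walk; the header sanity checks are elided and the 256-byte line buffer stands in for the driver's line-size constant:

    struct bcm_vk_peer_log log;
    char loc_buf[256];      /* stands in for the driver's line-max constant */
    u32 data_offset;
    int cnt = 0;
    u8 c;

    /* snapshot the header so the card cannot move it under us */
    memcpy_fromio(&log, vk->bar[BAR_2] + vk->peerlog_off, sizeof(log));

    data_offset = vk->peerlog_off + sizeof(struct bcm_vk_peer_log);
    while (log.rd_idx != log.wr_idx) {
            c = vkread8(vk, BAR_2, data_offset + log.rd_idx);
            loc_buf[cnt++] = c;
            if (c == '\0' || cnt == sizeof(loc_buf) - 1) {
                    loc_buf[cnt] = '\0';
                    dev_err(dev, "%s", loc_buf);    /* emit one log line */
                    cnt = 0;
            }
            /* power-of-2 ring: advance and wrap with the mask */
            log.rd_idx = (log.rd_idx + 1) & log.mask;
    }

    /* publish how far the host has consumed */
    vkwrite32(vk, log.rd_idx, BAR_2,
              vk->peerlog_off + offsetof(struct bcm_vk_peer_log, rd_idx));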
294 void bcm_vk_handle_notf(struct bcm_vk *vk) in bcm_vk_handle_notf() argument
302 reg = vkread32(vk, BAR_0, BAR_CARD_ERR_LOG); in bcm_vk_handle_notf()
305 vk->peer_alert.notfs = reg; in bcm_vk_handle_notf()
306 bcm_vk_log_notf(vk, &vk->peer_alert, bcm_vk_peer_err, in bcm_vk_handle_notf()
308 vk->peer_alert.flags = vk->peer_alert.notfs; in bcm_vk_handle_notf()
311 bcm_vk_blk_drv_access(vk); in bcm_vk_handle_notf()
315 spin_lock_irqsave(&vk->host_alert_lock, flags); in bcm_vk_handle_notf()
317 vk->host_alert.notfs |= ERR_LOG_HOST_PCIE_DWN; in bcm_vk_handle_notf()
319 alert = vk->host_alert; in bcm_vk_handle_notf()
320 vk->host_alert.flags = vk->host_alert.notfs; in bcm_vk_handle_notf()
321 spin_unlock_irqrestore(&vk->host_alert_lock, flags); in bcm_vk_handle_notf()
324 bcm_vk_log_notf(vk, &alert, bcm_vk_host_err, in bcm_vk_handle_notf()
332 ((vk->host_alert.flags & ERR_LOG_HOST_HB_FAIL) || in bcm_vk_handle_notf()
333 (vk->peer_alert.flags & ERR_LOG_SYS_FAULT))) in bcm_vk_handle_notf()
334 bcm_vk_dump_peer_log(vk); in bcm_vk_handle_notf()
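bcm_vk_handle_notf() treats the two alert sources differently: peer alerts (BAR_CARD_ERR_LOG) are read straight from the BAR, while host alerts are snapshotted under host_alert_lock and logged outside the critical section, keeping the irqsave window short. The pattern, with intf_down standing in for whatever condition the driver actually tests:

    struct bcm_vk_alert alert;      /* type name assumed from the fields */
    unsigned long flags;

    spin_lock_irqsave(&vk->host_alert_lock, flags);
    if (intf_down)                                  /* assumed condition */
            vk->host_alert.notfs |= ERR_LOG_HOST_PCIE_DWN;
    alert = vk->host_alert;                         /* snapshot by value */
    vk->host_alert.flags = vk->host_alert.notfs;    /* mark as reported */
    spin_unlock_irqrestore(&vk->host_alert_lock, flags);

    /* logging may take time - do it outside the lock, on the snapshot */
    bcm_vk_log_notf(vk, &alert, bcm_vk_host_err, ARRAY_SIZE(bcm_vk_host_err));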
337 static inline int bcm_vk_wait(struct bcm_vk *vk, enum pci_barno bar, in bcm_vk_wait() argument
341 struct device *dev = &vk->pdev->dev; in bcm_vk_wait()
350 rd_val = vkread32(vk, bar, offset); in bcm_vk_wait()
355 boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS); in bcm_vk_wait()
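bcm_vk_wait() is a masked-register polling helper: it re-reads the target register until (value & mask) matches, but on every iteration it also peeks at BAR_BOOT_STATUS so a boot error aborts the wait early instead of burning the whole timeout. A sketch assuming the usual jiffies-based bound and a BOOT_ERR_MASK name for the error bits:

    static inline int bcm_vk_wait(struct bcm_vk *vk, enum pci_barno bar,
                                  u64 offset, u32 mask, u32 value,
                                  unsigned long timeout_ms)
    {
            struct device *dev = &vk->pdev->dev;
            unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
            u32 rd_val, boot_status;

            do {
                    rd_val = vkread32(vk, bar, offset);

                    /* bail out early if the card reports a boot error */
                    boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS);
                    if (boot_status & BOOT_ERR_MASK) {      /* assumed mask */
                            dev_err(dev, "boot error 0x%x while waiting\n",
                                    boot_status);
                            return -EFAULT;
                    }

                    if (time_after(jiffies, timeout))
                            return -ETIMEDOUT;

                    cpu_relax();
                    cond_resched();     /* polling may run for a long time */
            } while ((rd_val & mask) != value);

            return 0;
    }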
374 static void bcm_vk_get_card_info(struct bcm_vk *vk) in bcm_vk_get_card_info() argument
376 struct device *dev = &vk->pdev->dev; in bcm_vk_get_card_info()
380 struct bcm_vk_card_info *info = &vk->card_info; in bcm_vk_get_card_info()
383 offset = vkread32(vk, BAR_0, BAR_CARD_STATIC_INFO); in bcm_vk_get_card_info()
384 offset &= (pci_resource_len(vk->pdev, BAR_2 * 2) - 1); in bcm_vk_get_card_info()
389 *dst++ = vkread8(vk, BAR_2, offset++); in bcm_vk_get_card_info()
409 vk->peerlog_off = offset; in bcm_vk_get_card_info()
410 memcpy_fromio(&vk->peerlog_info, vk->bar[BAR_2] + vk->peerlog_off, in bcm_vk_get_card_info()
411 sizeof(vk->peerlog_info)); in bcm_vk_get_card_info()
418 if ((vk->peerlog_info.buf_size > BCM_VK_PEER_LOG_BUF_MAX) || in bcm_vk_get_card_info()
419 (vk->peerlog_info.mask != (vk->peerlog_info.buf_size - 1)) || in bcm_vk_get_card_info()
420 (vk->peerlog_info.rd_idx > vk->peerlog_info.mask) || in bcm_vk_get_card_info()
421 (vk->peerlog_info.wr_idx > vk->peerlog_info.mask)) { in bcm_vk_get_card_info()
423 vk->peerlog_info.buf_size, in bcm_vk_get_card_info()
424 vk->peerlog_info.mask, in bcm_vk_get_card_info()
425 vk->peerlog_info.rd_idx, in bcm_vk_get_card_info()
426 vk->peerlog_info.wr_idx); in bcm_vk_get_card_info()
427 memset(&vk->peerlog_info, 0, sizeof(vk->peerlog_info)); in bcm_vk_get_card_info()
430 vk->peerlog_info.buf_size, in bcm_vk_get_card_info()
431 vk->peerlog_info.mask, in bcm_vk_get_card_info()
432 vk->peerlog_info.rd_idx, in bcm_vk_get_card_info()
433 vk->peerlog_info.wr_idx); in bcm_vk_get_card_info()
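The validation block above encodes the ring-buffer invariants: the peer log must be power-of-2 sized (so mask == buf_size - 1) and both indices must fall within the mask; anything else means the card-side header is corrupt, so peerlog_info is zeroed, which in turn disables dumping (bcm_vk_dump_peer_log() has nothing valid to walk). The header layout those checks imply, as an assumed sketch:

    /* log header the card exposes at vk->peerlog_off in BAR_2 (assumed) */
    struct bcm_vk_peer_log {
            u32 rd_idx;     /* consumer index, advanced by the host  */
            u32 wr_idx;     /* producer index, advanced by the card  */
            u32 buf_size;   /* must be a power of 2                  */
            u32 mask;       /* must equal buf_size - 1               */
    };

For example, a 4 KiB log gives mask 0xfff, and any rd_idx/wr_idx above 0xfff fails the range check.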
437 static void bcm_vk_get_proc_mon_info(struct bcm_vk *vk) in bcm_vk_get_proc_mon_info() argument
439 struct device *dev = &vk->pdev->dev; in bcm_vk_get_proc_mon_info()
440 struct bcm_vk_proc_mon_info *mon = &vk->proc_mon_info; in bcm_vk_get_proc_mon_info()
445 buf_size = vkread32(vk, BAR_2, in bcm_vk_get_proc_mon_info()
446 vk->peerlog_off in bcm_vk_get_proc_mon_info()
448 offset = vk->peerlog_off + sizeof(struct bcm_vk_peer_log) in bcm_vk_get_proc_mon_info()
452 num = vkread32(vk, BAR_2, offset); in bcm_vk_get_proc_mon_info()
453 entry_size = vkread32(vk, BAR_2, offset + sizeof(num)); in bcm_vk_get_proc_mon_info()
464 vk->proc_mon_off = offset; in bcm_vk_get_proc_mon_info()
469 memcpy_fromio(dst, vk->bar[BAR_2] + offset, num * entry_size); in bcm_vk_get_proc_mon_info()
472 static int bcm_vk_sync_card_info(struct bcm_vk *vk) in bcm_vk_sync_card_info() argument
474 u32 rdy_marker = vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY); in bcm_vk_sync_card_info()
477 if (!bcm_vk_msgq_marker_valid(vk)) in bcm_vk_sync_card_info()
485 if (vk->tdma_addr) { in bcm_vk_sync_card_info()
486 vkwrite32(vk, (u64)vk->tdma_addr >> 32, BAR_1, in bcm_vk_sync_card_info()
488 vkwrite32(vk, (u32)vk->tdma_addr, BAR_1, in bcm_vk_sync_card_info()
490 vkwrite32(vk, nr_scratch_pages * PAGE_SIZE, BAR_1, in bcm_vk_sync_card_info()
495 bcm_vk_get_card_info(vk); in bcm_vk_sync_card_info()
498 bcm_vk_get_proc_mon_info(vk); in bcm_vk_sync_card_info()
503 void bcm_vk_blk_drv_access(struct bcm_vk *vk) in bcm_vk_blk_drv_access() argument
512 spin_lock(&vk->ctx_lock); in bcm_vk_blk_drv_access()
515 atomic_set(&vk->msgq_inited, 0); in bcm_vk_blk_drv_access()
520 list_for_each_entry(ctx, &vk->pid_ht[i].head, node) { in bcm_vk_blk_drv_access()
521 if (ctx->pid != vk->reset_pid) { in bcm_vk_blk_drv_access()
522 dev_dbg(&vk->pdev->dev, in bcm_vk_blk_drv_access()
529 bcm_vk_tty_terminate_tty_user(vk); in bcm_vk_blk_drv_access()
530 spin_unlock(&vk->ctx_lock); in bcm_vk_blk_drv_access()
533 static void bcm_vk_buf_notify(struct bcm_vk *vk, void *bufp, in bcm_vk_buf_notify() argument
537 vkwrite32(vk, (u64)host_buf_addr >> 32, BAR_1, in bcm_vk_buf_notify()
539 vkwrite32(vk, (u32)host_buf_addr, BAR_1, in bcm_vk_buf_notify()
541 vkwrite32(vk, buf_size, BAR_1, VK_BAR1_DMA_BUF_SZ); in bcm_vk_buf_notify()
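bcm_vk_buf_notify() publishes a host DMA buffer to the card through three 32-bit BAR_1 writes: high address word, low address word, then size. The helper is short enough to reconstruct almost entirely from the fragments; only the two address-offset names are assumptions (VK_BAR1_DMA_BUF_SZ appears verbatim above):

    static void bcm_vk_buf_notify(struct bcm_vk *vk, void *bufp,
                                  dma_addr_t host_buf_addr, u32 buf_size)
    {
            /* split the 64-bit DMA address into two 32-bit registers */
            vkwrite32(vk, (u64)host_buf_addr >> 32, BAR_1,
                      VK_BAR1_DMA_BUF_HIGH_ADDR);   /* assumed offset name */
            vkwrite32(vk, (u32)host_buf_addr, BAR_1,
                      VK_BAR1_DMA_BUF_LOW_ADDR);    /* assumed offset name */
            vkwrite32(vk, buf_size, BAR_1, VK_BAR1_DMA_BUF_SZ);
    }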
544 static int bcm_vk_load_image_by_type(struct bcm_vk *vk, u32 load_type, in bcm_vk_load_image_by_type() argument
547 struct device *dev = &vk->pdev->dev; in bcm_vk_load_image_by_type()
563 value = vkread32(vk, BAR_0, BAR_BOOTSRC_SELECT); in bcm_vk_load_image_by_type()
565 vkwrite32(vk, value, BAR_0, BAR_BOOTSRC_SELECT); in bcm_vk_load_image_by_type()
571 vkwrite32(vk, CODEPUSH_BOOTSTART, BAR_0, offset_codepush); in bcm_vk_load_image_by_type()
574 ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, SRAM_OPEN, in bcm_vk_load_image_by_type()
595 ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, DDR_OPEN, in bcm_vk_load_image_by_type()
613 bcm_vk_buf_notify(vk, bufp, boot_dma_addr, max_buf); in bcm_vk_load_image_by_type()
630 memcpy_toio(vk->bar[BAR_1] + BAR1_CODEPUSH_BASE_BOOT1, in bcm_vk_load_image_by_type()
635 vkwrite32(vk, codepush, BAR_0, offset_codepush); in bcm_vk_load_image_by_type()
641 ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, in bcm_vk_load_image_by_type()
646 boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS); in bcm_vk_load_image_by_type()
657 reg = vkread32(vk, BAR_0, BAR_BOOT1_STDALONE_PROGRESS); in bcm_vk_load_image_by_type()
681 ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS, in bcm_vk_load_image_by_type()
701 ret = bcm_vk_wait(vk, BAR_0, offset_codepush, in bcm_vk_load_image_by_type()
721 vkwrite32(vk, codepush, BAR_0, offset_codepush); in bcm_vk_load_image_by_type()
732 ret = bcm_vk_wait(vk, BAR_0, VK_BAR_FWSTS, in bcm_vk_load_image_by_type()
741 is_stdalone = vkread32(vk, BAR_0, BAR_BOOT_STATUS) & in bcm_vk_load_image_by_type()
744 ret = bcm_vk_intf_ver_chk(vk); in bcm_vk_load_image_by_type()
754 ret = bcm_vk_sync_msgq(vk, true); in bcm_vk_load_image_by_type()
762 ret = bcm_vk_sync_card_info(vk); in bcm_vk_load_image_by_type()
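bcm_vk_load_image_by_type() drives the whole download handshake: select the boot source, ring CODEPUSH_BOOTSTART, bcm_vk_wait() for SRAM_OPEN (boot1) or DDR_OPEN (boot2), feed the firmware image through the shared DMA buffer, then verify the interface (bcm_vk_intf_ver_chk()), sync the message queues (bcm_vk_sync_msgq()) and pull the card info (bcm_vk_sync_card_info()). The transfer stage presumably looks like the sketch below; the chunking, ack mask and timeout names are all assumptions built only on the helpers visible above:

    /* assumed chunk loop: fw from request_firmware(), bufp/boot_dma_addr
     * from dma_alloc_coherent(), max_buf = size of that bounce buffer */
    size_t offset = 0;
    int ret = 0;

    while (offset < fw->size) {
            size_t chunk = min_t(size_t, fw->size - offset, max_buf);

            memcpy(bufp, fw->data + offset, chunk);
            bcm_vk_buf_notify(vk, bufp, boot_dma_addr, chunk);

            /* wait for the card to consume the chunk before reusing bufp */
            ret = bcm_vk_wait(vk, BAR_0, offset_codepush,
                              CODEPUSH_ACK_MASK, CODEPUSH_ACK_DONE, /* assumed */
                              LOAD_IMAGE_TIMEOUT_MS);               /* assumed */
            if (ret < 0)
                    break;
            offset += chunk;
    }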
780 static u32 bcm_vk_next_boot_image(struct bcm_vk *vk) in bcm_vk_next_boot_image() argument
786 boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS); in bcm_vk_next_boot_image()
787 fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS); in bcm_vk_next_boot_image()
795 dev_info(&vk->pdev->dev, in bcm_vk_next_boot_image()
802 static enum soc_idx get_soc_idx(struct bcm_vk *vk) in get_soc_idx() argument
804 struct pci_dev *pdev = vk->pdev; in get_soc_idx()
812 rev = MAJOR_SOC_REV(vkread32(vk, BAR_0, BAR_CHIP_ID)); in get_soc_idx()
834 static const char *get_load_fw_name(struct bcm_vk *vk, in get_load_fw_name() argument
838 struct device *dev = &vk->pdev->dev; in get_load_fw_name()
857 int bcm_vk_auto_load_all_images(struct bcm_vk *vk) in bcm_vk_auto_load_all_images() argument
861 struct device *dev = &vk->pdev->dev; in bcm_vk_auto_load_all_images()
865 idx = get_soc_idx(vk); in bcm_vk_auto_load_all_images()
870 dev_dbg(dev, "Load All for device %d\n", vk->devid); in bcm_vk_auto_load_all_images()
874 if (bcm_vk_next_boot_image(vk) == curr_type) { in bcm_vk_auto_load_all_images()
875 curr_name = get_load_fw_name(vk, &image_tab[idx][i]); in bcm_vk_auto_load_all_images()
882 ret = bcm_vk_load_image_by_type(vk, curr_type, in bcm_vk_auto_load_all_images()
899 static int bcm_vk_trigger_autoload(struct bcm_vk *vk) in bcm_vk_trigger_autoload() argument
901 if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) in bcm_vk_trigger_autoload()
904 set_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload); in bcm_vk_trigger_autoload()
905 queue_work(vk->wq_thread, &vk->wq_work); in bcm_vk_trigger_autoload()
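bcm_vk_trigger_autoload() is the producer side of the download workqueue: BCM_VK_WQ_DWNLD_PEND doubles as a mutual-exclusion token (the ioctl path in bcm_vk_load_image() takes the same bit), and BCM_VK_WQ_DWNLD_AUTO tells the worker which path to run. Reconstructed almost verbatim from the fragments; only the return values are assumed:

    static int bcm_vk_trigger_autoload(struct bcm_vk *vk)
    {
            /* DWNLD_PEND already set means a download is in flight */
            if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0)
                    return -EPERM;          /* assumed error code */

            set_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload);
            queue_work(vk->wq_thread, &vk->wq_work);

            return 0;
    }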
915 struct bcm_vk *vk = container_of(work, struct bcm_vk, wq_work); in bcm_vk_wq_handler() local
916 struct device *dev = &vk->pdev->dev; in bcm_vk_wq_handler()
920 if (test_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload)) { in bcm_vk_wq_handler()
922 clear_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload); in bcm_vk_wq_handler()
923 bcm_vk_handle_notf(vk); in bcm_vk_wq_handler()
925 if (test_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload)) { in bcm_vk_wq_handler()
926 bcm_vk_auto_load_all_images(vk); in bcm_vk_wq_handler()
932 clear_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload); in bcm_vk_wq_handler()
933 clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload); in bcm_vk_wq_handler()
937 ret = bcm_to_h_msg_dequeue(vk); in bcm_vk_wq_handler()
942 bcm_vk_blk_drv_access(vk); in bcm_vk_wq_handler()
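bcm_vk_wq_handler() is the single consumer for all deferred work and dispatches on the offload bits. Two orderings are visible in the fragments: NOTF_PEND is cleared before bcm_vk_handle_notf() runs, so a notification raised during handling re-queues the work, and after an auto-load DWNLD_AUTO is cleared before DWNLD_PEND, so a new trigger can never observe PEND clear while AUTO is still set. A sketch of that dispatch order (message-dequeue error handling elided):

    static void bcm_vk_wq_handler(struct work_struct *work)
    {
            struct bcm_vk *vk = container_of(work, struct bcm_vk, wq_work);

            if (test_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload)) {
                    /* clear first: a new IRQ during handling re-queues us */
                    clear_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload);
                    bcm_vk_handle_notf(vk);
            }
            if (test_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload)) {
                    bcm_vk_auto_load_all_images(vk);
                    /* AUTO before PEND, mirroring the set order in the trigger */
                    clear_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload);
                    clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
            }

            /* drain card-to-host messages last, per the dequeue fragment above */
            bcm_to_h_msg_dequeue(vk);
    }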
945 static long bcm_vk_load_image(struct bcm_vk *vk, in bcm_vk_load_image() argument
948 struct device *dev = &vk->pdev->dev; in bcm_vk_load_image()
965 next_loadable = bcm_vk_next_boot_image(vk); in bcm_vk_load_image()
977 if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) { in bcm_vk_load_image()
985 idx = get_soc_idx(vk); in bcm_vk_load_image()
991 image_name = get_load_fw_name(vk, &image_tab[idx][image_idx]); in bcm_vk_load_image()
1002 ret = bcm_vk_load_image_by_type(vk, image.type, image_name); in bcm_vk_load_image()
1005 clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload); in bcm_vk_load_image()
1010 static int bcm_vk_reset_successful(struct bcm_vk *vk) in bcm_vk_reset_successful() argument
1012 struct device *dev = &vk->pdev->dev; in bcm_vk_reset_successful()
1027 fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS); in bcm_vk_reset_successful()
1053 static void bcm_to_v_reset_doorbell(struct bcm_vk *vk, u32 db_val) in bcm_to_v_reset_doorbell() argument
1055 vkwrite32(vk, db_val, BAR_0, VK_BAR0_RESET_DB_BASE); in bcm_to_v_reset_doorbell()
1058 static int bcm_vk_trigger_reset(struct bcm_vk *vk) in bcm_vk_trigger_reset() argument
1070 bcm_vk_drain_msg_on_reset(vk); in bcm_vk_trigger_reset()
1071 vkwrite32(vk, 0, BAR_1, VK_BAR1_MSGQ_DEF_RDY); in bcm_vk_trigger_reset()
1073 vkwrite32(vk, 0, BAR_1, VK_BAR1_BOOT1_VER_TAG); in bcm_vk_trigger_reset()
1076 vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_STORE_ADDR(i)); in bcm_vk_trigger_reset()
1077 vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_VALID_ADDR(i)); in bcm_vk_trigger_reset()
1080 vkwrite32(vk, 0, BAR_1, VK_BAR1_SOTP_REVID_ADDR(i)); in bcm_vk_trigger_reset()
1082 memset(&vk->card_info, 0, sizeof(vk->card_info)); in bcm_vk_trigger_reset()
1083 memset(&vk->peerlog_info, 0, sizeof(vk->peerlog_info)); in bcm_vk_trigger_reset()
1084 memset(&vk->proc_mon_info, 0, sizeof(vk->proc_mon_info)); in bcm_vk_trigger_reset()
1085 memset(&vk->alert_cnts, 0, sizeof(vk->alert_cnts)); in bcm_vk_trigger_reset()
1095 boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS); in bcm_vk_trigger_reset()
1097 dev_info(&vk->pdev->dev, in bcm_vk_trigger_reset()
1102 value = vkread32(vk, BAR_0, BAR_CODEPUSH_SBL); in bcm_vk_trigger_reset()
1105 vkwrite32(vk, value, BAR_0, BAR_CODEPUSH_SBL); in bcm_vk_trigger_reset()
1110 if (vk->peer_alert.flags & ERR_LOG_RAMDUMP) { in bcm_vk_trigger_reset()
1116 vkwrite32(vk, VK_BAR0_RESET_RAMPDUMP, BAR_0, VK_BAR_FWSTS); in bcm_vk_trigger_reset()
1119 dev_info(&vk->pdev->dev, "Hard reset on Standalone mode"); in bcm_vk_trigger_reset()
1120 bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_HARD); in bcm_vk_trigger_reset()
1125 vkwrite32(vk, VK_FWSTS_RESET_MBOX_DB, BAR_0, VK_BAR_FWSTS); in bcm_vk_trigger_reset()
1126 bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_SOFT); in bcm_vk_trigger_reset()
1130 vkwrite32(vk, 0, BAR_0, bar0_reg_clr_list[i]); in bcm_vk_trigger_reset()
1131 memset(&vk->host_alert, 0, sizeof(vk->host_alert)); in bcm_vk_trigger_reset()
1132 memset(&vk->peer_alert, 0, sizeof(vk->peer_alert)); in bcm_vk_trigger_reset()
1134 bitmap_clear(vk->bmap, 0, VK_MSG_ID_BITMAP_SIZE); in bcm_vk_trigger_reset()
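bcm_vk_trigger_reset() scrubs every piece of host-side handshake state (the msgq-ready marker, the boot1 version tag, the DAUTH and SOTP scratch registers, the cached card/peerlog/proc-mon info, the alert counters and the message-ID bitmap) before ringing one of the reset doorbells. The fragments show three endings: a card in ramdump mode keeps its reboot reason and takes a special reset, a standalone card takes the hard-reset doorbell, and the normal path clears the FWSTS mailbox bits before the soft-reset doorbell. Roughly, with the flag name and return values assumed:

    if (vk->peer_alert.flags & ERR_LOG_RAMDUMP) {
            /* card hit an error: keep the reboot reason, special reset */
            vkwrite32(vk, VK_BAR0_RESET_RAMPDUMP, BAR_0, VK_BAR_FWSTS);
            return VK_BAR0_RESET_RAMPDUMP;  /* assumed return value */
    }

    if (is_stdalone) {                      /* assumed flag name */
            dev_info(&vk->pdev->dev, "Hard reset on Standalone mode");
            bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_HARD);
            return 0;
    }

    /* normal soft reset: clear mailbox bits, then ring the doorbell */
    vkwrite32(vk, VK_FWSTS_RESET_MBOX_DB, BAR_0, VK_BAR_FWSTS);
    bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_SOFT);
    return 0;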
1139 static long bcm_vk_reset(struct bcm_vk *vk, struct vk_reset __user *arg) in bcm_vk_reset() argument
1141 struct device *dev = &vk->pdev->dev; in bcm_vk_reset()
1151 if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) { in bcm_vk_reset()
1156 ramdump_reset = vk->peer_alert.flags & ERR_LOG_RAMDUMP; in bcm_vk_reset()
1167 bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_GRACEFUL, 0, 0); in bcm_vk_reset()
1169 spin_lock(&vk->ctx_lock); in bcm_vk_reset()
1170 if (!vk->reset_pid) { in bcm_vk_reset()
1171 vk->reset_pid = task_pid_nr(current); in bcm_vk_reset()
1174 vk->reset_pid); in bcm_vk_reset()
1177 spin_unlock(&vk->ctx_lock); in bcm_vk_reset()
1181 bcm_vk_blk_drv_access(vk); in bcm_vk_reset()
1182 special_reset = bcm_vk_trigger_reset(vk); in bcm_vk_reset()
1196 ret = bcm_vk_reset_successful(vk); in bcm_vk_reset()
1200 clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload); in bcm_vk_reset()
1207 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev); in bcm_vk_mmap() local
1213 pg_size = ((pci_resource_len(vk->pdev, VK_MMAPABLE_BAR) - 1) in bcm_vk_mmap()
1218 vma->vm_pgoff += (pci_resource_start(vk->pdev, VK_MMAPABLE_BAR) in bcm_vk_mmap()
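bcm_vk_mmap() exposes one BAR (VK_MMAPABLE_BAR) to user space: it bounds-checks the requested pages against the BAR length, rebases vm_pgoff onto the BAR's physical start, and maps the range uncached. A sketch assuming the conventional io_remap_pfn_range() tail; the bounds check and page-protection choice are assumptions around the two fragments shown above:

    static int bcm_vk_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct bcm_vk_ctx *ctx = file->private_data;
            struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk,
                                             miscdev);
            unsigned long pg_size;

            /* number of pages the BAR spans, rounded up */
            pg_size = ((pci_resource_len(vk->pdev, VK_MMAPABLE_BAR) - 1)
                       >> PAGE_SHIFT) + 1;
            if (vma->vm_pgoff + vma_pages(vma) > pg_size)
                    return -EINVAL;

            /* rebase the file offset onto the BAR's physical address */
            vma->vm_pgoff += (pci_resource_start(vk->pdev, VK_MMAPABLE_BAR)
                              >> PAGE_SHIFT);
            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

            return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
    }

Since the driver uses 64-bit BARs (note the BAR_2 * 2 arithmetic in bcm_vk_get_card_info()), VK_MMAPABLE_BAR presumably names the raw PCI bar number rather than the logical BAR index.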
1231 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev); in bcm_vk_ioctl() local
1234 dev_dbg(&vk->pdev->dev, in bcm_vk_ioctl()
1238 mutex_lock(&vk->mutex); in bcm_vk_ioctl()
1242 ret = bcm_vk_load_image(vk, argp); in bcm_vk_ioctl()
1246 ret = bcm_vk_reset(vk, argp); in bcm_vk_ioctl()
1253 mutex_unlock(&vk->mutex); in bcm_vk_ioctl()
1272 struct bcm_vk *vk = container_of(nb, struct bcm_vk, panic_nb); in bcm_vk_on_panic() local
1274 bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_HARD); in bcm_vk_on_panic()
1286 struct bcm_vk *vk; in bcm_vk_probe() local
1292 vk = kzalloc(sizeof(*vk), GFP_KERNEL); in bcm_vk_probe()
1293 if (!vk) in bcm_vk_probe()
1296 kref_init(&vk->kref); in bcm_vk_probe()
1302 vk->ib_sgl_size = nr_ib_sgl_blk * VK_MSGQ_BLK_SIZE; in bcm_vk_probe()
1303 mutex_init(&vk->mutex); in bcm_vk_probe()
1310 vk->pdev = pci_dev_get(pdev); in bcm_vk_probe()
1328 vk->tdma_vaddr = dma_alloc_coherent in bcm_vk_probe()
1331 &vk->tdma_addr, GFP_KERNEL); in bcm_vk_probe()
1332 if (!vk->tdma_vaddr) { in bcm_vk_probe()
1339 pci_set_drvdata(pdev, vk); in bcm_vk_probe()
1359 vk->bar[i] = pci_ioremap_bar(pdev, i * 2); in bcm_vk_probe()
1360 if (!vk->bar[i]) { in bcm_vk_probe()
1367 for (vk->num_irqs = 0; in bcm_vk_probe()
1368 vk->num_irqs < VK_MSIX_MSGQ_MAX; in bcm_vk_probe()
1369 vk->num_irqs++) { in bcm_vk_probe()
1370 err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs), in bcm_vk_probe()
1372 IRQF_SHARED, DRV_MODULE_NAME, vk); in bcm_vk_probe()
1375 pdev->irq + vk->num_irqs, vk->num_irqs + 1); in bcm_vk_probe()
1380 err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs), in bcm_vk_probe()
1382 IRQF_SHARED, DRV_MODULE_NAME, vk); in bcm_vk_probe()
1385 pdev->irq + vk->num_irqs, vk->num_irqs + 1); in bcm_vk_probe()
1388 vk->num_irqs++; in bcm_vk_probe()
1391 (i < VK_MSIX_TTY_MAX) && (vk->num_irqs < irq); in bcm_vk_probe()
1392 i++, vk->num_irqs++) { in bcm_vk_probe()
1393 err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs), in bcm_vk_probe()
1395 IRQF_SHARED, DRV_MODULE_NAME, vk); in bcm_vk_probe()
1398 pdev->irq + vk->num_irqs, vk->num_irqs + 1); in bcm_vk_probe()
1401 bcm_vk_tty_set_irq_enabled(vk, i); in bcm_vk_probe()
1411 vk->devid = id; in bcm_vk_probe()
1413 misc_device = &vk->miscdev; in bcm_vk_probe()
1428 INIT_WORK(&vk->wq_work, bcm_vk_wq_handler); in bcm_vk_probe()
1431 vk->wq_thread = create_singlethread_workqueue(name); in bcm_vk_probe()
1432 if (!vk->wq_thread) { in bcm_vk_probe()
1438 err = bcm_vk_msg_init(vk); in bcm_vk_probe()
1445 bcm_vk_sync_card_info(vk); in bcm_vk_probe()
1448 vk->panic_nb.notifier_call = bcm_vk_on_panic; in bcm_vk_probe()
1450 &vk->panic_nb); in bcm_vk_probe()
1457 err = bcm_vk_tty_init(vk, name); in bcm_vk_probe()
1465 boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS); in bcm_vk_probe()
1468 err = bcm_vk_trigger_autoload(vk); in bcm_vk_probe()
1479 bcm_vk_hb_init(vk); in bcm_vk_probe()
1486 bcm_vk_tty_exit(vk); in bcm_vk_probe()
1490 &vk->panic_nb); in bcm_vk_probe()
1493 destroy_workqueue(vk->wq_thread); in bcm_vk_probe()
1506 for (i = 0; i < vk->num_irqs; i++) in bcm_vk_probe()
1507 devm_free_irq(dev, pci_irq_vector(pdev, i), vk); in bcm_vk_probe()
1514 if (vk->bar[i]) in bcm_vk_probe()
1515 pci_iounmap(pdev, vk->bar[i]); in bcm_vk_probe()
1520 if (vk->tdma_vaddr) in bcm_vk_probe()
1522 vk->tdma_vaddr, vk->tdma_addr); in bcm_vk_probe()
1529 kfree(vk); in bcm_vk_probe()
1536 struct bcm_vk *vk = container_of(kref, struct bcm_vk, kref); in bcm_vk_release_data() local
1537 struct pci_dev *pdev = vk->pdev; in bcm_vk_release_data()
1539 dev_dbg(&pdev->dev, "BCM-VK:%d release data 0x%p\n", vk->devid, vk); in bcm_vk_release_data()
1541 kfree(vk); in bcm_vk_release_data()
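Device lifetime is kref-managed: bcm_vk_probe() calls kref_init() and pci_dev_get(), bcm_vk_remove() drops its reference with kref_put(&vk->kref, bcm_vk_release_data), and the release callback frees the structure only once every holder of a reference is gone, so an open file descriptor can outlive the PCI device. Reconstructed from the fragments, with the pci_dev_put() assumed as the pair of the pci_dev_get() at probe:

    static void bcm_vk_release_data(struct kref *kref)
    {
            struct bcm_vk *vk = container_of(kref, struct bcm_vk, kref);
            struct pci_dev *pdev = vk->pdev;

            dev_dbg(&pdev->dev, "BCM-VK:%d release data 0x%p\n",
                    vk->devid, vk);
            pci_dev_put(pdev);  /* assumed: pairs with pci_dev_get() in probe */
            kfree(vk);
    }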
1547 struct bcm_vk *vk = pci_get_drvdata(pdev); in bcm_vk_remove() local
1548 struct miscdevice *misc_device = &vk->miscdev; in bcm_vk_remove()
1550 bcm_vk_hb_deinit(vk); in bcm_vk_remove()
1558 bcm_vk_trigger_reset(vk); in bcm_vk_remove()
1563 &vk->panic_nb); in bcm_vk_remove()
1565 bcm_vk_msg_remove(vk); in bcm_vk_remove()
1566 bcm_vk_tty_exit(vk); in bcm_vk_remove()
1568 if (vk->tdma_vaddr) in bcm_vk_remove()
1570 vk->tdma_vaddr, vk->tdma_addr); in bcm_vk_remove()
1576 ida_free(&bcm_vk_ida, vk->devid); in bcm_vk_remove()
1578 for (i = 0; i < vk->num_irqs; i++) in bcm_vk_remove()
1579 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), vk); in bcm_vk_remove()
1584 cancel_work_sync(&vk->wq_work); in bcm_vk_remove()
1585 destroy_workqueue(vk->wq_thread); in bcm_vk_remove()
1586 bcm_vk_tty_wq_exit(vk); in bcm_vk_remove()
1589 if (vk->bar[i]) in bcm_vk_remove()
1590 pci_iounmap(pdev, vk->bar[i]); in bcm_vk_remove()
1593 dev_dbg(&pdev->dev, "BCM-VK:%d released\n", vk->devid); in bcm_vk_remove()
1599 kref_put(&vk->kref, bcm_vk_release_data); in bcm_vk_remove()
1604 struct bcm_vk *vk = pci_get_drvdata(pdev); in bcm_vk_shutdown() local
1607 reg = vkread32(vk, BAR_0, BAR_BOOT_STATUS); in bcm_vk_shutdown()
1612 bcm_vk_trigger_reset(vk); in bcm_vk_shutdown()
1629 vkwrite32(vk, reg, BAR_0, BAR_BOOT_STATUS); in bcm_vk_shutdown()