Lines matching refs: vk (Broadcom bcm_vk driver, message-handling code)
109 bool bcm_vk_drv_access_ok(struct bcm_vk *vk) in bcm_vk_drv_access_ok() argument
111 return (!!atomic_read(&vk->msgq_inited)); in bcm_vk_drv_access_ok()
114 void bcm_vk_set_host_alert(struct bcm_vk *vk, u32 bit_mask) in bcm_vk_set_host_alert() argument
116 struct bcm_vk_alert *alert = &vk->host_alert; in bcm_vk_set_host_alert()
120 spin_lock_irqsave(&vk->host_alert_lock, flags); in bcm_vk_set_host_alert()
122 spin_unlock_irqrestore(&vk->host_alert_lock, flags); in bcm_vk_set_host_alert()
124 if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0) in bcm_vk_set_host_alert()
125 queue_work(vk->wq_thread, &vk->wq_work); in bcm_vk_set_host_alert()
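The bcm_vk_set_host_alert() lines above show a common kernel pattern: record an alert bit under a spinlock, then defer the real handling to a workqueue, with test_and_set_bit() ensuring at most one work item is outstanding. A minimal standalone sketch of that pattern; every demo_* name is invented for illustration:

    #include <linux/bitops.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    #define DEMO_WQ_NOTF_PEND 0    /* bit: notifier work already queued */

    struct demo_alert_dev {
        spinlock_t alert_lock;
        u32 alert_flags;                 /* accumulated alert bits */
        unsigned long wq_offload[1];     /* pending-work bitmap */
        struct workqueue_struct *wq;
        struct work_struct work;
    };

    /* Sketch only, not the driver's code: record an alert bit, then
     * defer processing; test_and_set_bit() keeps one work item queued. */
    static void demo_set_alert(struct demo_alert_dev *d, u32 bit_mask)
    {
        unsigned long flags;

        spin_lock_irqsave(&d->alert_lock, flags);
        d->alert_flags |= bit_mask;
        spin_unlock_irqrestore(&d->alert_lock, flags);

        if (test_and_set_bit(DEMO_WQ_NOTF_PEND, d->wq_offload) == 0)
            queue_work(d->wq, &d->work);
    }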
145 struct bcm_vk *vk = container_of(hb, struct bcm_vk, hb_ctrl); in bcm_vk_hb_poll() local
147 if (bcm_vk_drv_access_ok(vk) && hb_mon_is_on()) { in bcm_vk_hb_poll()
149 uptime_s = vkread32(vk, BAR_0, BAR_OS_UPTIME); in bcm_vk_hb_poll()
156 dev_dbg(&vk->pdev->dev, "Last uptime %d current %d, lost %d\n", in bcm_vk_hb_poll()
172 dev_err(&vk->pdev->dev, "Heartbeat Misses %d times, %d s!\n", in bcm_vk_hb_poll()
176 bcm_vk_blk_drv_access(vk); in bcm_vk_hb_poll()
177 bcm_vk_set_host_alert(vk, ERR_LOG_HOST_HB_FAIL); in bcm_vk_hb_poll()
183 void bcm_vk_hb_init(struct bcm_vk *vk) in bcm_vk_hb_init() argument
185 struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl; in bcm_vk_hb_init()
191 void bcm_vk_hb_deinit(struct bcm_vk *vk) in bcm_vk_hb_deinit() argument
193 struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl; in bcm_vk_hb_deinit()
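bcm_vk_hb_poll() above reads the card's uptime register (BAR_OS_UPTIME) and counts consecutive polls where it failed to advance, eventually blocking driver access and raising ERR_LOG_HOST_HB_FAIL; bcm_vk_hb_init()/bcm_vk_hb_deinit() arm and cancel the monitor. A hedged sketch of that heartbeat shape using a delayed work item; demo_read_uptime() and demo_declare_dead() are hypothetical stand-ins, and the period and threshold are assumed values:

    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    #define DEMO_HB_PERIOD   (5 * HZ)  /* assumed poll period */
    #define DEMO_HB_LOST_MAX 5         /* assumed miss threshold */

    u32 demo_read_uptime(void);    /* hypothetical MMIO uptime read */
    void demo_declare_dead(void);  /* hypothetical: block access, raise alert */

    struct demo_hb {
        struct delayed_work poll_work;
        u32 last_uptime;
        u32 lost_cnt;
    };

    static void demo_hb_poll(struct work_struct *work)
    {
        struct demo_hb *hb = container_of(to_delayed_work(work),
                                          struct demo_hb, poll_work);
        u32 uptime = demo_read_uptime();

        /* an unchanged uptime counter means the card missed a beat */
        hb->lost_cnt = (uptime == hb->last_uptime) ? hb->lost_cnt + 1 : 0;
        hb->last_uptime = uptime;

        if (hb->lost_cnt >= DEMO_HB_LOST_MAX) {
            demo_declare_dead();
            return;                /* stop re-arming the poll */
        }
        schedule_delayed_work(&hb->poll_work, DEMO_HB_PERIOD);
    }

    static void demo_hb_init(struct demo_hb *hb)
    {
        INIT_DELAYED_WORK(&hb->poll_work, demo_hb_poll);
        schedule_delayed_work(&hb->poll_work, DEMO_HB_PERIOD);
    }

    static void demo_hb_deinit(struct demo_hb *hb)
    {
        cancel_delayed_work_sync(&hb->poll_work);
    }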
198 static void bcm_vk_msgid_bitmap_clear(struct bcm_vk *vk, in bcm_vk_msgid_bitmap_clear() argument
202 spin_lock(&vk->msg_id_lock); in bcm_vk_msgid_bitmap_clear()
203 bitmap_clear(vk->bmap, start, nbits); in bcm_vk_msgid_bitmap_clear()
204 spin_unlock(&vk->msg_id_lock); in bcm_vk_msgid_bitmap_clear()
210 static struct bcm_vk_ctx *bcm_vk_get_ctx(struct bcm_vk *vk, const pid_t pid) in bcm_vk_get_ctx() argument
216 spin_lock(&vk->ctx_lock); in bcm_vk_get_ctx()
219 if (vk->reset_pid) { in bcm_vk_get_ctx()
220 dev_err(&vk->pdev->dev, in bcm_vk_get_ctx()
222 vk->reset_pid); in bcm_vk_get_ctx()
227 for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) { in bcm_vk_get_ctx()
228 if (!vk->ctx[i].in_use) { in bcm_vk_get_ctx()
229 vk->ctx[i].in_use = true; in bcm_vk_get_ctx()
230 ctx = &vk->ctx[i]; in bcm_vk_get_ctx()
236 dev_err(&vk->pdev->dev, "All context in use\n"); in bcm_vk_get_ctx()
244 list_add_tail(&ctx->node, &vk->pid_ht[hash_idx].head); in bcm_vk_get_ctx()
247 kref_get(&vk->kref); in bcm_vk_get_ctx()
256 spin_unlock(&vk->ctx_lock); in bcm_vk_get_ctx()
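bcm_vk_get_ctx() scans a fixed context table for a free slot under ctx_lock, refuses new sessions while reset_pid is set, hashes the opener's pid into a bucket list, and takes one device reference per open. A sketch of that allocation scheme under those assumptions; the table size and hash width are invented:

    #include <linux/hash.h>
    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define DEMO_NUM_CTX     32    /* assumed table size */
    #define DEMO_PID_HT_BITS 5

    struct demo_ctx {
        bool in_use;
        pid_t pid;
        struct list_head node;     /* linkage in the pid hash bucket */
    };

    struct demo_ctx_dev {
        spinlock_t ctx_lock;
        pid_t reset_pid;           /* non-zero while a reset is in flight */
        struct kref kref;
        struct demo_ctx ctx[DEMO_NUM_CTX];
        struct list_head pid_ht[1 << DEMO_PID_HT_BITS]; /* pre-initialized */
    };

    static struct demo_ctx *demo_get_ctx(struct demo_ctx_dev *d, pid_t pid)
    {
        struct demo_ctx *ctx = NULL;
        u32 hash_idx = hash_32(pid, DEMO_PID_HT_BITS);
        int i;

        spin_lock(&d->ctx_lock);

        /* refuse new sessions while a reset is pending */
        if (d->reset_pid)
            goto done;

        /* linear scan for a free slot in the fixed context table */
        for (i = 0; i < DEMO_NUM_CTX; i++) {
            if (!d->ctx[i].in_use) {
                d->ctx[i].in_use = true;
                ctx = &d->ctx[i];
                break;
            }
        }
        if (!ctx)
            goto done;

        ctx->pid = pid;
        list_add_tail(&ctx->node, &d->pid_ht[hash_idx]);
        kref_get(&d->kref);        /* one device ref per open context */
    done:
        spin_unlock(&d->ctx_lock);
        return ctx;
    }

bcm_vk_free_ctx() later in the listing is the mirror image: mark the slot unused and unlink it from the pid bucket under the same lock.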
261 static u16 bcm_vk_get_msg_id(struct bcm_vk *vk) in bcm_vk_get_msg_id() argument
266 spin_lock(&vk->msg_id_lock); in bcm_vk_get_msg_id()
274 vk->msg_id++; in bcm_vk_get_msg_id()
275 if (vk->msg_id == VK_MSG_ID_BITMAP_SIZE) in bcm_vk_get_msg_id()
276 vk->msg_id = 1; in bcm_vk_get_msg_id()
278 if (test_bit(vk->msg_id, vk->bmap)) { in bcm_vk_get_msg_id()
282 rc = vk->msg_id; in bcm_vk_get_msg_id()
283 bitmap_set(vk->bmap, vk->msg_id, 1); in bcm_vk_get_msg_id()
286 spin_unlock(&vk->msg_id_lock); in bcm_vk_get_msg_id()
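bcm_vk_get_msg_id() and bcm_vk_msgid_bitmap_clear() earlier in the listing together form a small ID allocator: a wrapping counter probes a bitmap under msg_id_lock, skipping IDs still owned by in-flight messages, and the bit is cleared when the response retires the ID. A standalone sketch; the pool size is assumed, and ID 0 is treated as reserved since the counter wraps back to 1:

    #include <linux/bitmap.h>
    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define DEMO_MSG_ID_BITMAP_SIZE 512   /* assumed pool size */

    static DEFINE_SPINLOCK(demo_msg_id_lock);
    static DECLARE_BITMAP(demo_msg_id_bmap, DEMO_MSG_ID_BITMAP_SIZE);
    static u16 demo_msg_id;    /* last ID handed out; 0 stays reserved */

    /* Walk the ID space at most once, skipping IDs still marked busy. */
    static int demo_get_msg_id(void)
    {
        int rc = -EOVERFLOW;
        int count = DEMO_MSG_ID_BITMAP_SIZE;

        spin_lock(&demo_msg_id_lock);
        while (count--) {
            demo_msg_id++;
            if (demo_msg_id == DEMO_MSG_ID_BITMAP_SIZE)
                demo_msg_id = 1;          /* wrap, keeping 0 reserved */
            if (test_bit(demo_msg_id, demo_msg_id_bmap))
                continue;                 /* still owned by a pending msg */
            rc = demo_msg_id;
            bitmap_set(demo_msg_id_bmap, demo_msg_id, 1);
            break;
        }
        spin_unlock(&demo_msg_id_lock);
        return rc;
    }

    /* Mirror of the clear path: return an ID once its response arrives. */
    static void demo_put_msg_id(u16 id)
    {
        spin_lock(&demo_msg_id_lock);
        bitmap_clear(demo_msg_id_bmap, id, 1);
        spin_unlock(&demo_msg_id_lock);
    }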
291 static int bcm_vk_free_ctx(struct bcm_vk *vk, struct bcm_vk_ctx *ctx) in bcm_vk_free_ctx() argument
300 dev_err(&vk->pdev->dev, "NULL context detected\n"); in bcm_vk_free_ctx()
306 spin_lock(&vk->ctx_lock); in bcm_vk_free_ctx()
308 if (!vk->ctx[idx].in_use) { in bcm_vk_free_ctx()
309 dev_err(&vk->pdev->dev, "context[%d] not in use!\n", idx); in bcm_vk_free_ctx()
311 vk->ctx[idx].in_use = false; in bcm_vk_free_ctx()
312 vk->ctx[idx].miscdev = NULL; in bcm_vk_free_ctx()
317 list_for_each_entry(entry, &vk->pid_ht[hash_idx].head, node) { in bcm_vk_free_ctx()
323 spin_unlock(&vk->ctx_lock); in bcm_vk_free_ctx()
346 struct bcm_vk *vk; in bcm_vk_drain_all_pend() local
350 vk = container_of(ctx->miscdev, struct bcm_vk, miscdev); in bcm_vk_drain_all_pend()
377 bit_set = test_bit(msg_id, vk->bmap); in bcm_vk_drain_all_pend()
390 bcm_vk_msgid_bitmap_clear(vk, msg_id, 1); in bcm_vk_drain_all_pend()
399 void bcm_vk_drain_msg_on_reset(struct bcm_vk *vk) in bcm_vk_drain_msg_on_reset() argument
401 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL); in bcm_vk_drain_msg_on_reset()
402 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL); in bcm_vk_drain_msg_on_reset()
408 int bcm_vk_sync_msgq(struct bcm_vk *vk, bool force_sync) in bcm_vk_sync_msgq() argument
411 struct device *dev = &vk->pdev->dev; in bcm_vk_sync_msgq()
414 struct bcm_vk_msg_chan *chan_list[] = {&vk->to_v_msg_chan, in bcm_vk_sync_msgq()
415 &vk->to_h_msg_chan}; in bcm_vk_sync_msgq()
426 if (!bcm_vk_msgq_marker_valid(vk)) { in bcm_vk_sync_msgq()
431 msgq_off = vkread32(vk, BAR_1, VK_BAR1_MSGQ_CTRL_OFF); in bcm_vk_sync_msgq()
434 num_q = vkread32(vk, BAR_1, VK_BAR1_MSGQ_NR) / 2; in bcm_vk_sync_msgq()
442 vk->to_v_msg_chan.q_nr = num_q; in bcm_vk_sync_msgq()
443 vk->to_h_msg_chan.q_nr = num_q; in bcm_vk_sync_msgq()
446 msgq = vk->bar[BAR_1] + msgq_off; in bcm_vk_sync_msgq()
452 if (bcm_vk_drv_access_ok(vk) && !force_sync) { in bcm_vk_sync_msgq()
494 qinfo->q_start = vk->bar[BAR_1] + msgq_start; in bcm_vk_sync_msgq()
504 atomic_set(&vk->msgq_inited, 1); in bcm_vk_sync_msgq()
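In bcm_vk_sync_msgq() the queue layout is discovered from the device itself: the control-structure offset and queue count are read from BAR 1, the count is halved because the queues are split evenly between the to-card (to_v) and to-host (to_h) channels (note both q_nr assignments above get the same num_q), and each queue's start is mapped relative to the BAR before msgq_inited is set. The vkread32()/vkwrite32() helpers in the listing behave like thin MMIO accessors over the mapped PCIe BARs; a plausible shape, with all names assumed:

    #include <linux/io.h>
    #include <linux/types.h>

    enum { DEMO_BAR_0, DEMO_BAR_1, DEMO_BAR_2 };

    struct demo_pci {
        void __iomem *bar[3];    /* mapped PCIe BARs */
    };

    static u32 demo_read32(struct demo_pci *d, int bar, u32 off)
    {
        return ioread32(d->bar[bar] + off);
    }

    static void demo_write32(struct demo_pci *d, u32 val, int bar, u32 off)
    {
        iowrite32(val, d->bar[bar] + off);
    }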
536 static u32 bcm_vk_append_ib_sgl(struct bcm_vk *vk, in bcm_vk_append_ib_sgl() argument
543 struct device *dev = &vk->pdev->dev; in bcm_vk_append_ib_sgl()
544 struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan; in bcm_vk_append_ib_sgl()
566 (ib_sgl_size + data[i].size) <= vk->ib_sgl_size) { in bcm_vk_append_ib_sgl()
575 item_cnt, ib_sgl_size, vk->ib_sgl_size); in bcm_vk_append_ib_sgl()
584 void bcm_to_v_q_doorbell(struct bcm_vk *vk, u32 q_num, u32 db_val) in bcm_to_v_q_doorbell() argument
586 struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan; in bcm_to_v_q_doorbell()
589 vkwrite32(vk, db_val, BAR_0, qinfo->q_db_offset); in bcm_to_v_q_doorbell()
592 static int bcm_to_v_msg_enqueue(struct bcm_vk *vk, struct bcm_vk_wkent *entry) in bcm_to_v_msg_enqueue() argument
595 struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan; in bcm_to_v_msg_enqueue()
596 struct device *dev = &vk->pdev->dev; in bcm_to_v_msg_enqueue()
647 bcm_vk_blk_drv_access(vk); in bcm_to_v_msg_enqueue()
648 bcm_vk_set_host_alert(vk, ERR_LOG_HOST_PCIE_DWN); in bcm_to_v_msg_enqueue()
679 bcm_to_v_q_doorbell(vk, q_num, wr_idx + 1); in bcm_to_v_msg_enqueue()
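bcm_to_v_msg_enqueue() copies message blocks into the shared ring and then rings the per-queue doorbell (bcm_to_v_q_doorbell() writes to a BAR 0 register) so the card picks the message up. A sketch of that enqueue-then-doorbell step; the ring geometry and the doorbell payload here are stand-ins, not the device's actual protocol:

    #include <linux/io.h>
    #include <linux/types.h>

    struct demo_msgq {
        void __iomem *q_start;   /* ring base inside the mapped BAR */
        u32 q_size;              /* ring size in message blocks */
        u32 wr_idx;              /* host-owned write index */
    };

    /* Copy one message block into the shared ring, then write the new
     * index to the doorbell register so the card fetches it. The exact
     * doorbell value is device-specific; the index is a stand-in. */
    static void demo_enqueue_blk(struct demo_msgq *q, void __iomem *db_reg,
                                 const void *blk, size_t blk_sz)
    {
        memcpy_toio(q->q_start + (size_t)q->wr_idx * blk_sz, blk, blk_sz);
        q->wr_idx = (q->wr_idx + 1) % q->q_size;
        iowrite32(q->wr_idx, db_reg);
    }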
685 int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type, in bcm_vk_send_shutdown_msg() argument
690 struct device *dev = &vk->pdev->dev; in bcm_vk_send_shutdown_msg()
697 if (!bcm_vk_msgq_marker_valid(vk)) { in bcm_vk_send_shutdown_msg()
699 vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY)); in bcm_vk_send_shutdown_msg()
716 rc = bcm_to_v_msg_enqueue(vk, entry); in bcm_vk_send_shutdown_msg()
727 static int bcm_vk_handle_last_sess(struct bcm_vk *vk, const pid_t pid, in bcm_vk_handle_last_sess() argument
731 struct device *dev = &vk->pdev->dev; in bcm_vk_handle_last_sess()
737 if (!bcm_vk_drv_access_ok(vk)) { in bcm_vk_handle_last_sess()
738 if (vk->reset_pid == pid) in bcm_vk_handle_last_sess()
739 vk->reset_pid = 0; in bcm_vk_handle_last_sess()
746 if (vk->reset_pid != pid) in bcm_vk_handle_last_sess()
747 rc = bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_PID, pid, q_num); in bcm_vk_handle_last_sess()
750 vk->reset_pid = 0; in bcm_vk_handle_last_sess()
755 static struct bcm_vk_wkent *bcm_vk_dequeue_pending(struct bcm_vk *vk, in bcm_vk_dequeue_pending() argument
767 bcm_vk_msgid_bitmap_clear(vk, msg_id, 1); in bcm_vk_dequeue_pending()
775 s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk) in bcm_to_h_msg_dequeue() argument
777 struct device *dev = &vk->pdev->dev; in bcm_to_h_msg_dequeue()
778 struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan; in bcm_to_h_msg_dequeue()
830 bcm_vk_blk_drv_access(vk); in bcm_to_h_msg_dequeue()
831 bcm_vk_set_host_alert(vk, in bcm_to_h_msg_dequeue()
886 entry = bcm_vk_dequeue_pending(vk, in bcm_to_h_msg_dequeue()
887 &vk->to_v_msg_chan, in bcm_to_h_msg_dequeue()
898 bcm_vk_append_pendq(&vk->to_h_msg_chan, in bcm_to_h_msg_dequeue()
906 test_bit(msg_id, vk->bmap)); in bcm_to_h_msg_dequeue()
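On the to-host side, bcm_to_h_msg_dequeue() matches each response against the pending queue by msg_id via bcm_vk_dequeue_pending(), which also returns the ID to the bitmap. A sketch of that lookup-and-retire step; demo_wkent is an invented stand-in for bcm_vk_wkent, and demo_put_msg_id() is the allocator sketch above:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_wkent {
        struct list_head node;
        u16 msg_id;
    };

    /* Find and unlink the request whose ID matches the response, then
     * release the ID back to the allocator. */
    static struct demo_wkent *demo_dequeue_pending(spinlock_t *lock,
                                                   struct list_head *pendq,
                                                   u16 msg_id)
    {
        struct demo_wkent *entry, *found = NULL;

        spin_lock(lock);
        list_for_each_entry(entry, pendq, node) {
            if (entry->msg_id == msg_id) {
                list_del(&entry->node);
                found = entry;
                break;
            }
        }
        spin_unlock(lock);

        if (found)
            demo_put_msg_id(found->msg_id);
        return found;
    }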
934 static int bcm_vk_data_init(struct bcm_vk *vk) in bcm_vk_data_init() argument
938 spin_lock_init(&vk->ctx_lock); in bcm_vk_data_init()
939 for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) { in bcm_vk_data_init()
940 vk->ctx[i].in_use = false; in bcm_vk_data_init()
941 vk->ctx[i].idx = i; /* self identity */ in bcm_vk_data_init()
942 vk->ctx[i].miscdev = NULL; in bcm_vk_data_init()
944 spin_lock_init(&vk->msg_id_lock); in bcm_vk_data_init()
945 spin_lock_init(&vk->host_alert_lock); in bcm_vk_data_init()
946 vk->msg_id = 0; in bcm_vk_data_init()
950 INIT_LIST_HEAD(&vk->pid_ht[i].head); in bcm_vk_data_init()
957 struct bcm_vk *vk = dev_id; in bcm_vk_msgq_irqhandler() local
959 if (!bcm_vk_drv_access_ok(vk)) { in bcm_vk_msgq_irqhandler()
960 dev_err(&vk->pdev->dev, in bcm_vk_msgq_irqhandler()
965 queue_work(vk->wq_thread, &vk->wq_work); in bcm_vk_msgq_irqhandler()
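bcm_vk_msgq_irqhandler() does almost nothing in interrupt context: it checks that the message queues are initialized and then queues the worker that does the real dequeue. A sketch of that top-half/bottom-half split; the silent early return here replaces the driver's dev_err() logging:

    #include <linux/atomic.h>
    #include <linux/interrupt.h>
    #include <linux/workqueue.h>

    struct demo_irq_dev {
        atomic_t msgq_inited;    /* set once the queues are synced */
        struct workqueue_struct *wq;
        struct work_struct work;
    };

    static irqreturn_t demo_msgq_irqhandler(int irq, void *dev_id)
    {
        struct demo_irq_dev *d = dev_id;

        /* drop interrupts that arrive before the queues are usable */
        if (!atomic_read(&d->msgq_inited))
            return IRQ_HANDLED;

        /* the actual dequeue runs later, in process context */
        queue_work(d->wq, &d->work);
        return IRQ_HANDLED;
    }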
975 struct bcm_vk *vk = container_of(miscdev, struct bcm_vk, miscdev); in bcm_vk_open() local
976 struct device *dev = &vk->pdev->dev; in bcm_vk_open()
980 ctx = bcm_vk_get_ctx(vk, task_tgid_nr(current)); in bcm_vk_open()
1008 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, in bcm_vk_read() local
1010 struct device *dev = &vk->pdev->dev; in bcm_vk_read()
1011 struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan; in bcm_vk_read()
1016 if (!bcm_vk_drv_access_ok(vk)) in bcm_vk_read()
1078 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, in bcm_vk_write() local
1081 struct device *dev = &vk->pdev->dev; in bcm_vk_write()
1088 if (!bcm_vk_drv_access_ok(vk)) in bcm_vk_write()
1102 entry = kzalloc(sizeof(*entry) + count + vk->ib_sgl_size, in bcm_vk_write()
1120 msgq = vk->to_v_msg_chan.msgq[q_num]; in bcm_vk_write()
1122 if (entry->to_v_blks + (vk->ib_sgl_size >> VK_MSGQ_BLK_SZ_SHIFT) in bcm_vk_write()
1132 rc = bcm_vk_get_msg_id(vk); in bcm_vk_write()
1156 if (vk->reset_pid) { in bcm_vk_write()
1189 sgl_extra_blks = bcm_vk_append_ib_sgl(vk, entry, data, in bcm_vk_write()
1224 bcm_vk_append_pendq(&vk->to_v_msg_chan, q_num, entry); in bcm_vk_write()
1226 rc = bcm_to_v_msg_enqueue(vk, entry); in bcm_vk_write()
1232 (vk, in bcm_vk_write()
1233 &vk->to_v_msg_chan, in bcm_vk_write()
1242 bcm_vk_msgid_bitmap_clear(vk, get_msg_id(&entry->to_v_msg[0]), 1); in bcm_vk_write()
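The kzalloc() in bcm_vk_write() sizes a single allocation for the work entry, the user's message (count bytes), and worst-case room for an in-band SGL appended after the message blocks; on a failed enqueue the entry is pulled back off the pending queue and its message ID cleared. A sketch of that flexible-array allocation, with field names invented:

    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_entry {
        u16 msg_id;
        u32 to_v_blks;
        u8 payload[];    /* message blocks, then optional in-band SGL */
    };

    /* One allocation covers the entry header, the caller's message of
     * 'count' bytes, and worst-case room for an appended in-band SGL. */
    static struct demo_entry *demo_alloc_entry(size_t count, size_t ib_sgl_size)
    {
        return kzalloc(sizeof(struct demo_entry) + count + ib_sgl_size,
                       GFP_KERNEL);
    }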
1254 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev); in bcm_vk_poll() local
1255 struct device *dev = &vk->pdev->dev; in bcm_vk_poll()
1275 struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev); in bcm_vk_release() local
1276 struct device *dev = &vk->pdev->dev; in bcm_vk_release()
1304 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, ctx); in bcm_vk_release()
1305 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, ctx); in bcm_vk_release()
1307 ret = bcm_vk_free_ctx(vk, ctx); in bcm_vk_release()
1309 ret = bcm_vk_handle_last_sess(vk, pid, ctx->q_num); in bcm_vk_release()
1313 kref_put(&vk->kref, bcm_vk_release_data); in bcm_vk_release()
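bcm_vk_release() pairs the kref_get() taken in bcm_vk_get_ctx(): each close drops one reference, and the release callback runs only when the last holder lets go, whether that is device teardown or the final user closing its file. A minimal sketch of that lifecycle; demo_* names are invented:

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct demo_ref_dev {
        struct kref kref;
        /* ...device state... */
    };

    /* Runs only when the final reference drops. */
    static void demo_release_data(struct kref *kref)
    {
        struct demo_ref_dev *d = container_of(kref, struct demo_ref_dev,
                                              kref);

        kfree(d);
    }

    /* Each open takes a reference; each release gives one back. */
    static void demo_close_one(struct demo_ref_dev *d)
    {
        kref_put(&d->kref, demo_release_data);
    }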
1318 int bcm_vk_msg_init(struct bcm_vk *vk) in bcm_vk_msg_init() argument
1320 struct device *dev = &vk->pdev->dev; in bcm_vk_msg_init()
1323 if (bcm_vk_data_init(vk)) { in bcm_vk_msg_init()
1328 if (bcm_vk_msg_chan_init(&vk->to_v_msg_chan) || in bcm_vk_msg_init()
1329 bcm_vk_msg_chan_init(&vk->to_h_msg_chan)) { in bcm_vk_msg_init()
1335 ret = bcm_vk_sync_msgq(vk, false); in bcm_vk_msg_init()
1344 void bcm_vk_msg_remove(struct bcm_vk *vk) in bcm_vk_msg_remove() argument
1346 bcm_vk_blk_drv_access(vk); in bcm_vk_msg_remove()
1349 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL); in bcm_vk_msg_remove()
1350 bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL); in bcm_vk_msg_remove()