Lines matching refs:rvu (each reference to the struct rvu handle in the CGX glue code; the leading number is the source line, the trailing note names the enclosing function and how the match is used)

25 *otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
30 &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
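The matches at source lines 25 and 30 come from the mbox allocator template: a macro that token-pastes each message name into a dedicated per-message wrapper around one generic allocator. A minimal standalone model of that pattern; generic_alloc_msg() and the struct names below are simplified stand-ins, not the kernel's otx2_mbox API:

#include <stdio.h>
#include <stdlib.h>

struct msg_req { int hdr; };
struct msg_rsp { int hdr; };

/* Simplified stand-in for the shared allocator the wrappers call. */
static void *generic_alloc_msg(const char *name, size_t req_sz, size_t rsp_sz)
{
    printf("alloc %s: req %zu bytes, rsp %zu bytes\n", name, req_sz, rsp_sz);
    return calloc(1, req_sz);
}

/* One instantiation per mailbox message: ## pastes the name into the
 * function identifier, # stringizes it for the allocator. */
#define MBOX_ALLOC(_fn_name, _req_type, _rsp_type)                  \
static struct _req_type *alloc_msg_ ## _fn_name(void)               \
{                                                                   \
    return generic_alloc_msg(#_fn_name,                             \
                             sizeof(struct _req_type),              \
                             sizeof(struct _rsp_type));             \
}

MBOX_ALLOC(cgx_link_event, msg_req, msg_rsp)

int main(void)
{
    struct msg_req *req = alloc_msg_cgx_link_event();
    free(req);
    return 0;
}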
43 static inline u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id) in cgxlmac_to_pfmap() argument
45 return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id]; in cgxlmac_to_pfmap()
53 void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu) in rvu_cgx_pdata() argument
55 if (cgx_id >= rvu->cgx_cnt_max) in rvu_cgx_pdata()
58 return rvu->cgx_idmap[cgx_id]; in rvu_cgx_pdata()
61 static int rvu_map_cgx_lmac_pf(struct rvu *rvu) in rvu_map_cgx_lmac_pf() argument
63 struct npc_pkind *pkind = &rvu->hw->pkind; in rvu_map_cgx_lmac_pf()
64 int cgx_cnt_max = rvu->cgx_cnt_max; in rvu_map_cgx_lmac_pf()
80 rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL); in rvu_map_cgx_lmac_pf()
81 if (!rvu->pf2cgxlmac_map) in rvu_map_cgx_lmac_pf()
85 memset(rvu->pf2cgxlmac_map, 0xFF, size); in rvu_map_cgx_lmac_pf()
88 rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev, in rvu_map_cgx_lmac_pf()
91 if (!rvu->cgxlmac2pf_map) in rvu_map_cgx_lmac_pf()
94 rvu->cgx_mapped_pfs = 0; in rvu_map_cgx_lmac_pf()
96 if (!rvu_cgx_pdata(cgx, rvu)) in rvu_map_cgx_lmac_pf()
98 lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); in rvu_map_cgx_lmac_pf()
100 rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac); in rvu_map_cgx_lmac_pf()
101 rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf; in rvu_map_cgx_lmac_pf()
104 rvu->cgx_mapped_pfs++; in rvu_map_cgx_lmac_pf()
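rvu_map_cgx_lmac_pf() (source lines 61-104 above) builds two tables: pf2cgxlmac_map, one byte per PF packing a CGX id and LMAC id (0xFF meaning unmapped), and cgxlmac2pf_map, a per-CGX::LMAC bitmask of the PFs attached to it. A self-contained model of that double mapping; the nibble packing in id_to_bmap() and the table sizes are illustrative assumptions, not the hardware's:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MAX_PFS          16  /* illustrative sizes */
#define MAX_CGX          4
#define MAX_LMAC_PER_CGX 4
#define CGX_OFFSET(cgx)  ((cgx) * MAX_LMAC_PER_CGX)

/* Assumed packing: high nibble = CGX id, low nibble = LMAC id. */
static uint8_t id_to_bmap(uint8_t cgx, uint8_t lmac)
{
    return (uint8_t)(((cgx & 0xF) << 4) | (lmac & 0xF));
}

int main(void)
{
    uint8_t  pf2cgxlmac_map[MAX_PFS];
    uint16_t cgxlmac2pf_map[MAX_CGX * MAX_LMAC_PER_CGX] = { 0 };
    int pf = 0;

    memset(pf2cgxlmac_map, 0xFF, sizeof(pf2cgxlmac_map)); /* 0xFF = unmapped */

    for (int cgx = 0; cgx < MAX_CGX; cgx++) {
        for (int lmac = 0; lmac < MAX_LMAC_PER_CGX; lmac++, pf++) {
            pf2cgxlmac_map[pf] = id_to_bmap(cgx, lmac);      /* forward */
            cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf; /* reverse */
        }
    }

    /* Reverse lookup: which PF owns CGX1/LMAC2? */
    printf("CGX1/LMAC2 -> PF bitmap 0x%x\n",
           cgxlmac2pf_map[CGX_OFFSET(1) + 2]);
    return 0;
}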
110 static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu) in rvu_cgx_send_link_info() argument
121 spin_lock_irqsave(&rvu->cgx_evq_lock, flags); in rvu_cgx_send_link_info()
122 err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id, in rvu_cgx_send_link_info()
128 list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head); in rvu_cgx_send_link_info()
130 spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags); in rvu_cgx_send_link_info()
133 queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work); in rvu_cgx_send_link_info()
142 struct rvu *rvu = data; in cgx_lmac_postevent() local
149 spin_lock(&rvu->cgx_evq_lock); in cgx_lmac_postevent()
150 list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head); in cgx_lmac_postevent()
151 spin_unlock(&rvu->cgx_evq_lock); in cgx_lmac_postevent()
154 queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work); in cgx_lmac_postevent()
159 static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) in cgx_notify_pfs() argument
167 pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id); in cgx_notify_pfs()
174 if (!test_bit(pfid, &rvu->pf_notify_bmap)) { in cgx_notify_pfs()
175 dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n", in cgx_notify_pfs()
182 msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid); in cgx_notify_pfs()
186 otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid); in cgx_notify_pfs()
187 err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid); in cgx_notify_pfs()
189 dev_warn(rvu->dev, "notification to pf %d failed\n", in cgx_notify_pfs()
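cgx_notify_pfs() (159-189) walks the pfmap returned by cgxlmac_to_pfmap() bit by bit and sends an up-direction mailbox message to each mapped PF, unless that PF's bit is clear in pf_notify_bmap, in which case the event is only logged. A sketch of that bit-walk; compiler builtins and plain masks stand in for the kernel's find_first_bit/test_bit helpers:

#include <stdio.h>
#include <stdint.h>

/* Notify every PF whose bit is set in pfmap, skipping PFs that have
 * link events disabled (notify_bmap models rvu->pf_notify_bmap). */
static void notify_pfs(uint16_t pfmap, uint16_t notify_bmap)
{
    while (pfmap) {
        int pfid = __builtin_ctz(pfmap);  /* lowest set bit */

        pfmap &= pfmap - 1;               /* clear it */
        if (!(notify_bmap & (1u << pfid))) {
            printf("pf %d: events disabled, log only\n", pfid);
            continue;
        }
        printf("pf %d: send cgx_link_event mbox msg\n", pfid);
    }
}

int main(void)
{
    notify_pfs(0x000a /* PFs 1 and 3 */, 0x0008 /* only PF 3 enabled */);
    return 0;
}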
196 struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work); in cgx_evhandler_task() local
203 spin_lock_irqsave(&rvu->cgx_evq_lock, flags); in cgx_evhandler_task()
204 qentry = list_first_entry_or_null(&rvu->cgx_evq_head, in cgx_evhandler_task()
209 spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags); in cgx_evhandler_task()
216 cgx_notify_pfs(event, rvu); in cgx_evhandler_task()
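Source lines 110-216 describe one deferred-work pipeline: the producers (cgx_lmac_postevent() from the CGX notification callback, and rvu_cgx_send_link_info() for replays) append entries to cgx_evq_head under cgx_evq_lock and kick cgx_evh_wq; cgx_evhandler_task() later pops one entry at a time, dropping the lock before processing, since notifying PFs can sleep waiting on mailbox responses. A userspace model of that queue discipline, with a pthread mutex and thread standing in for the spinlock and workqueue:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct evq_entry {
    int link_up;               /* models struct cgx_link_event */
    struct evq_entry *next;
};

static struct evq_entry *evq_head;
static pthread_mutex_t evq_lock = PTHREAD_MUTEX_INITIALIZER;

/* Producer: models cgx_lmac_postevent(): queue and return quickly. */
static void post_event(int link_up)
{
    struct evq_entry *e = malloc(sizeof(*e));

    e->link_up = link_up;
    e->next = NULL;
    pthread_mutex_lock(&evq_lock);
    struct evq_entry **p = &evq_head;  /* append at tail */
    while (*p)
        p = &(*p)->next;
    *p = e;
    pthread_mutex_unlock(&evq_lock);
}

/* Worker: models cgx_evhandler_task(): pop one entry under the lock,
 * process it with the lock dropped, repeat until the list drains. */
static void *evhandler(void *arg)
{
    (void)arg;
    for (;;) {
        pthread_mutex_lock(&evq_lock);
        struct evq_entry *e = evq_head;
        if (e)
            evq_head = e->next;
        pthread_mutex_unlock(&evq_lock);
        if (!e)
            break;
        printf("notify pfs: link %s\n", e->link_up ? "up" : "down");
        free(e);
    }
    return NULL;
}

int main(void)
{
    pthread_t w;

    post_event(1);
    post_event(0);
    pthread_create(&w, NULL, evhandler, NULL);  /* queue_work() */
    pthread_join(w, NULL);
    return 0;
}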
221 static int cgx_lmac_event_handler_init(struct rvu *rvu) in cgx_lmac_event_handler_init() argument
227 spin_lock_init(&rvu->cgx_evq_lock); in cgx_lmac_event_handler_init()
228 INIT_LIST_HEAD(&rvu->cgx_evq_head); in cgx_lmac_event_handler_init()
229 INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task); in cgx_lmac_event_handler_init()
230 rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0); in cgx_lmac_event_handler_init()
231 if (!rvu->cgx_evh_wq) { in cgx_lmac_event_handler_init()
232 dev_err(rvu->dev, "alloc workqueue failed"); in cgx_lmac_event_handler_init()
237 cb.data = rvu; in cgx_lmac_event_handler_init()
239 for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { in cgx_lmac_event_handler_init()
240 cgxd = rvu_cgx_pdata(cgx, rvu); in cgx_lmac_event_handler_init()
246 dev_err(rvu->dev, in cgx_lmac_event_handler_init()
255 static void rvu_cgx_wq_destroy(struct rvu *rvu) in rvu_cgx_wq_destroy() argument
257 if (rvu->cgx_evh_wq) { in rvu_cgx_wq_destroy()
258 flush_workqueue(rvu->cgx_evh_wq); in rvu_cgx_wq_destroy()
259 destroy_workqueue(rvu->cgx_evh_wq); in rvu_cgx_wq_destroy()
260 rvu->cgx_evh_wq = NULL; in rvu_cgx_wq_destroy()
264 int rvu_cgx_init(struct rvu *rvu) in rvu_cgx_init() argument
272 rvu->cgx_cnt_max = cgx_get_cgxcnt_max(); in rvu_cgx_init()
273 if (!rvu->cgx_cnt_max) { in rvu_cgx_init()
274 dev_info(rvu->dev, "No CGX devices found!\n"); in rvu_cgx_init()
278 rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max * in rvu_cgx_init()
280 if (!rvu->cgx_idmap) in rvu_cgx_init()
284 for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) in rvu_cgx_init()
285 rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx); in rvu_cgx_init()
288 err = rvu_map_cgx_lmac_pf(rvu); in rvu_cgx_init()
293 err = cgx_lmac_event_handler_init(rvu); in rvu_cgx_init()
303 for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { in rvu_cgx_init()
304 cgxd = rvu_cgx_pdata(cgx, rvu); in rvu_cgx_init()
309 dev_err(rvu->dev, in rvu_cgx_init()
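rvu_cgx_init() (264-309) first sizes and fills cgx_idmap from the probed CGX devices, then builds the LMAC-to-PF maps, starts the event handler, and finally walks the devices to bring links up. Its loops tolerate holes because rvu_cgx_pdata() returns NULL both for empty slots and for out-of-range ids, which also makes the <= loop bound above harmless. A small model of that bounds-checked lookup pattern, with illustrative sizes:

#include <stdio.h>

#define CGX_CNT_MAX 4

static void *cgx_idmap[CGX_CNT_MAX];  /* probed devices; holes stay NULL */

/* Models rvu_cgx_pdata(): out-of-range ids and unprobed slots both
 * come back NULL, so callers can loop without extra checks. */
static void *cgx_pdata(unsigned int cgx_id)
{
    if (cgx_id >= CGX_CNT_MAX)
        return NULL;
    return cgx_idmap[cgx_id];
}

int main(void)
{
    static int dev0, dev2;

    cgx_idmap[0] = &dev0;
    cgx_idmap[2] = &dev2;  /* CGX1 and CGX3 absent */

    for (unsigned int cgx = 0; cgx <= CGX_CNT_MAX; cgx++) {
        if (!cgx_pdata(cgx))
            continue;  /* skip holes (and the <= overshoot) */
        printf("init lmacs on cgx%u\n", cgx);
    }
    return 0;
}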
317 int rvu_cgx_exit(struct rvu *rvu) in rvu_cgx_exit() argument
322 for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { in rvu_cgx_exit()
323 cgxd = rvu_cgx_pdata(cgx, rvu); in rvu_cgx_exit()
333 rvu_cgx_wq_destroy(rvu); in rvu_cgx_exit()
337 int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) in rvu_cgx_config_rxtx() argument
345 if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf)) in rvu_cgx_config_rxtx()
348 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_cgx_config_rxtx()
350 cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start); in rvu_cgx_config_rxtx()
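rvu_cgx_config_rxtx() shows the preamble shared by most handlers below: reject requests whose pcifunc carries a nonzero FUNC field (i.e. VFs), verify the PF is CGX-mapped, then unpack pf2cgxlmac_map[pf] back into a (cgx_id, lmac_id) pair. A sketch of the pcifunc check; the FUNC-in-the-low-10-bits layout is the usual RVU convention but is stated here as an assumption:

#include <stdio.h>
#include <stdint.h>

/* Assumed pcifunc layout: FUNC in bits [9:0], PF above it. */
#define RVU_PFVF_FUNC_MASK 0x3FF
#define RVU_PFVF_PF_SHIFT  10

static int get_pf(uint16_t pcifunc)
{
    return pcifunc >> RVU_PFVF_PF_SHIFT;  /* simplified: no PF mask */
}

int main(void)
{
    uint16_t pcifunc = (uint16_t)(3 << RVU_PFVF_PF_SHIFT);  /* PF3, not a VF */

    if (pcifunc & RVU_PFVF_FUNC_MASK) {
        printf("VF request: rejected\n");
        return 1;
    }
    printf("PF %d may configure its CGX::LMAC\n", get_pf(pcifunc));
    return 0;
}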
355 int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_start_rxtx() argument
358 rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true); in rvu_mbox_handler_cgx_start_rxtx()
362 int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_stop_rxtx() argument
365 rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false); in rvu_mbox_handler_cgx_stop_rxtx()
369 int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_stats() argument
379 !is_pf_cgxmapped(rvu, pf)) in rvu_mbox_handler_cgx_stats()
382 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); in rvu_mbox_handler_cgx_stats()
383 cgxd = rvu_cgx_pdata(cgx_idx, rvu); in rvu_mbox_handler_cgx_stats()
406 int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, in rvu_mbox_handler_cgx_mac_addr_set() argument
413 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_mac_addr_set()
420 int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, in rvu_mbox_handler_cgx_mac_addr_get() argument
429 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_mac_addr_get()
439 int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_promisc_enable() argument
450 !is_pf_cgxmapped(rvu, pf)) in rvu_mbox_handler_cgx_promisc_enable()
453 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_promisc_enable()
459 int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_promisc_disable() argument
470 !is_pf_cgxmapped(rvu, pf)) in rvu_mbox_handler_cgx_promisc_disable()
473 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_promisc_disable()
479 static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en) in rvu_cgx_config_linkevents() argument
487 if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf)) in rvu_cgx_config_linkevents()
490 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_cgx_config_linkevents()
493 set_bit(pf, &rvu->pf_notify_bmap); in rvu_cgx_config_linkevents()
495 rvu_cgx_send_link_info(cgx_id, lmac_id, rvu); in rvu_cgx_config_linkevents()
497 clear_bit(pf, &rvu->pf_notify_bmap); in rvu_cgx_config_linkevents()
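rvu_cgx_config_linkevents() (479-497) is the enable/disable side of the pf_notify_bmap gate tested in cgx_notify_pfs(): enabling sets the PF's bit and immediately replays the current link state via rvu_cgx_send_link_info(), so the PF need not wait for the next transition; disabling just clears the bit. The same gate in miniature, with plain bit ops standing in for set_bit/clear_bit:

#include <stdio.h>

static unsigned long pf_notify_bmap;  /* one bit per PF */

static void config_linkevents(int pf, int en)
{
    if (en) {
        pf_notify_bmap |= 1ul << pf;
        /* replay current state so the PF sees it immediately */
        printf("pf %d: send current link info\n", pf);
    } else {
        pf_notify_bmap &= ~(1ul << pf);
    }
}

int main(void)
{
    config_linkevents(2, 1);
    printf("bmap now 0x%lx\n", pf_notify_bmap);
    config_linkevents(2, 0);
    printf("bmap now 0x%lx\n", pf_notify_bmap);
    return 0;
}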
503 int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_start_linkevents() argument
506 rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true); in rvu_mbox_handler_cgx_start_linkevents()
510 int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_stop_linkevents() argument
513 rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false); in rvu_mbox_handler_cgx_stop_linkevents()
517 int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_get_linkinfo() argument
525 if (!is_pf_cgxmapped(rvu, pf)) in rvu_mbox_handler_cgx_get_linkinfo()
528 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_get_linkinfo()
530 err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id, in rvu_mbox_handler_cgx_get_linkinfo()
535 static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en) in rvu_cgx_config_intlbk() argument
543 if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, pf)) in rvu_cgx_config_intlbk()
546 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_cgx_config_intlbk()
548 return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu), in rvu_cgx_config_intlbk()
552 int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_intlbk_enable() argument
555 rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true); in rvu_mbox_handler_cgx_intlbk_enable()
559 int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_intlbk_disable() argument
562 rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false); in rvu_mbox_handler_cgx_intlbk_disable()