Lines matching references to dd in the PIO send-buffer code of the qib InfiniBand driver (drivers/infiniband/hw/qib/qib_tx.c in the Linux kernel). The fragments below are grouped by enclosing function and keep their source line numbers; each group header notes whether dd is a function argument or a local variable there.

In qib_disarm_piobufs() (dd is an argument):
   61  void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
   68  spin_lock_irqsave(&dd->pioavail_lock, flags);
   70  __clear_bit(i, dd->pio_need_disarm);
   71  dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
   73  spin_unlock_irqrestore(&dd->pioavail_lock, flags);
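
These lines show the immediate-disarm path: with pioavail_lock held and interrupts disabled, every buffer in the range has its deferred-disarm bit cleared and is disarmed right away through the chip-specific f_sendctrl hook. A minimal kernel-style sketch of that pattern, using a simplified stand-in struct (dev_model and its sendctrl callback are illustrative placeholders, not the driver's real types):

    #include <linux/spinlock.h>
    #include <linux/bitops.h>

    struct dev_model {
        spinlock_t pioavail_lock;
        unsigned long *pio_need_disarm;  /* one bit per PIO buffer */
        void (*sendctrl)(struct dev_model *dd, unsigned buf); /* chip hook */
    };

    /* Disarm buffers [first, first + cnt): clear any pending deferred
     * disarm and tell the chip to disarm each buffer now. */
    static void disarm_piobufs(struct dev_model *dd, unsigned first,
                               unsigned cnt)
    {
        unsigned long flags;
        unsigned i;

        spin_lock_irqsave(&dd->pioavail_lock, flags);
        for (i = first; i < first + cnt; i++) {
            __clear_bit(i, dd->pio_need_disarm);
            dd->sendctrl(dd, i);
        }
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);
    }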

In qib_disarm_piobufs_ifneeded() (dd is a local):
   82  struct qib_devdata *dd = rcd->dd;
  103  spin_lock_irq(&dd->pioavail_lock);
  105  if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
  107  dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
  110  spin_unlock_irq(&dd->pioavail_lock);
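
Here dd is fetched from the receive context (rcd->dd), and only buffers whose pio_need_disarm bit was set get disarmed. The non-atomic __test_and_clear_bit() is safe because pioavail_lock serializes every accessor of the bitmap, and the plain spin_lock_irq() (rather than the irqsave variant) suggests a process-context caller. A sketch under the same simplified struct as above:

    #include <linux/spinlock.h>
    #include <linux/bitops.h>

    struct dev_model {
        spinlock_t pioavail_lock;
        unsigned long *pio_need_disarm;
        void (*sendctrl)(struct dev_model *dd, unsigned buf);
    };

    /* Disarm only buffers previously flagged for deferred disarm.
     * __test_and_clear_bit() is the non-atomic variant; it is safe
     * here because every user of pio_need_disarm holds the lock. */
    static void disarm_if_needed(struct dev_model *dd, unsigned first,
                                 unsigned last)
    {
        unsigned i;

        spin_lock_irq(&dd->pioavail_lock);      /* process context */
        for (i = first; i < last; i++)
            if (__test_and_clear_bit(i, dd->pio_need_disarm))
                dd->sendctrl(dd, i);
        spin_unlock_irq(&dd->pioavail_lock);
    }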

In is_sdma_buf() (dd is an argument):
  114  static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
  119  for (pidx = 0; pidx < dd->num_pports; pidx++) {
  120  ppd = dd->pport + pidx;
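
The helper walks dd's ports and returns the port whose send-DMA buffer range contains index i, or NULL. The listing only shows the port loop, so the range fields in this sketch (first_sdma_buf and last_sdma_buf) are assumed names for illustration:

    #include <linux/stddef.h>

    struct port_model {
        unsigned first_sdma_buf;    /* assumed field name */
        unsigned last_sdma_buf;     /* range is [first, last) */
    };

    struct dev_model {
        unsigned num_pports;
        struct port_model *pport;   /* array of num_pports entries */
    };

    /* Return the port whose send-DMA buffer range contains index i,
     * or NULL if i is an ordinary PIO buffer. */
    static struct port_model *is_sdma_buf(struct dev_model *dd, unsigned i)
    {
        unsigned pidx;

        for (pidx = 0; pidx < dd->num_pports; pidx++) {
            struct port_model *ppd = dd->pport + pidx;

            if (i >= ppd->first_sdma_buf && i < ppd->last_sdma_buf)
                return ppd;
        }
        return NULL;
    }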

In find_ctxt() (dd is an argument):
  132  static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
  138  spin_lock(&dd->uctxt_lock);
  139  for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
  140  rcd = dd->rcd[ctxt];
  159  spin_unlock(&dd->uctxt_lock);
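
Under uctxt_lock, the user contexts (first_user_ctxt up to cfgctxts) are scanned for the one whose PIO buffer range contains bufn. The pio_base/piocnt fields in this sketch are assumed names, and the real helper also notifies the owning context, which the sketch omits:

    #include <linux/spinlock.h>

    struct ctxt_model {
        unsigned pio_base;  /* assumed: first buffer owned */
        unsigned piocnt;    /* assumed: number of buffers owned */
    };

    struct dev_model {
        spinlock_t uctxt_lock;
        unsigned first_user_ctxt, cfgctxts;
        struct ctxt_model **rcd;    /* indexed by context number */
    };

    /* Return 1 if some user context owns buffer bufn, else 0. */
    static int find_ctxt(struct dev_model *dd, unsigned bufn)
    {
        unsigned ctxt;
        int ret = 0;

        spin_lock(&dd->uctxt_lock);
        for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
            struct ctxt_model *rcd = dd->rcd[ctxt];

            if (rcd && bufn >= rcd->pio_base &&
                bufn < rcd->pio_base + rcd->piocnt) {
                ret = 1;
                break;
            }
        }
        spin_unlock(&dd->uctxt_lock);
        return ret;
    }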

In qib_disarm_piobufs_set() (dd is an argument):
  171  void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
  178  for (i = 0; i < dd->num_pports; i++)
  188  ppd = is_sdma_buf(dd, i);
  197  spin_lock_irqsave(&dd->pioavail_lock, flags);
  198  if (test_bit(i, dd->pio_writing) ||
  199  (!test_bit(i << 1, dd->pioavailkernel) &&
  200  find_ctxt(dd, i))) {
  201  __set_bit(i, dd->pio_need_disarm);
  203  dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
  205  spin_unlock_irqrestore(&dd->pioavail_lock, flags);
  209  for (i = 0; i < dd->num_pports; i++)
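
This is where the defer-or-disarm decision lives: a buffer that is mid-write (pio_writing set) or user-owned (its kernel-availability bit at the doubled position i << 1 is clear and find_ctxt() claims it) only gets its pio_need_disarm bit set; otherwise it is disarmed on the spot. The doubled index reflects the two-bits-per-buffer availability layout (see qib_chg_pioavailkernel below). A sketch of the decision, with user_owns and disarm as placeholder hooks:

    #include <linux/spinlock.h>
    #include <linux/bitops.h>

    struct dev_model {
        spinlock_t pioavail_lock;
        unsigned long *pio_writing;     /* one bit per buffer */
        unsigned long *pio_need_disarm; /* one bit per buffer */
        unsigned long *pioavailkernel;  /* two bits per buffer */
        int (*user_owns)(struct dev_model *dd, unsigned i); /* placeholder */
        void (*disarm)(struct dev_model *dd, unsigned i);   /* placeholder */
    };

    /* Disarm buffer i now if that is safe; otherwise leave a note so
     * the disarm happens once the writer finishes (see sendbuf_done). */
    static void disarm_one(struct dev_model *dd, unsigned i)
    {
        unsigned long flags;

        spin_lock_irqsave(&dd->pioavail_lock, flags);
        if (test_bit(i, dd->pio_writing) ||
            (!test_bit(i << 1, dd->pioavailkernel) &&
             dd->user_owns(dd, i)))
            __set_bit(i, dd->pio_need_disarm);  /* defer */
        else
            dd->disarm(dd, i);                  /* safe now */
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);
    }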

In update_send_bufs() (dd is an argument):
  220  static void update_send_bufs(struct qib_devdata *dd)
  224  const unsigned piobregs = dd->pioavregs;
  244  if (!dd->pioavailregs_dma)
  246  spin_lock_irqsave(&dd->pioavail_lock, flags);
  250  piov = le64_to_cpu(dd->pioavailregs_dma[i]);
  251  pchg = dd->pioavailkernel[i] &
  252  ~(dd->pioavailshadow[i] ^ piov);
  254  if (pchg && (pchbusy & dd->pioavailshadow[i])) {
  255  pnew = dd->pioavailshadow[i] & ~pchbusy;
  257  dd->pioavailshadow[i] = pnew;
  260  spin_unlock_irqrestore(&dd->pioavail_lock, flags);
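
The chip DMAs its buffer-availability registers into pioavailregs_dma (little-endian on the wire, hence le64_to_cpu()), and this routine folds those words into the software shadow under pioavail_lock, updating only the kernel-owned positions that the pchg/pchbusy masking selects. The sketch below reduces that to the core pattern of merging hardware bits into a shadow through a mask while holding the lock; the driver's actual mask computation is subtler than the single kernel_mask assumed here:

    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct dev_model {
        spinlock_t pioavail_lock;
        unsigned nregs;
        __le64 *avail_dma;   /* written by the chip via DMA */
        u64 *avail_shadow;   /* software's view of the same bits */
        u64 *kernel_mask;    /* bit positions the kernel owns */
    };

    /* Fold the chip's DMA'd availability words into the shadow copy,
     * touching only kernel-owned positions; everything else keeps
     * its shadow value. */
    static void update_send_bufs(struct dev_model *dd)
    {
        unsigned long flags;
        unsigned i;

        if (!dd->avail_dma)
            return;     /* DMA buffer not set up yet */
        spin_lock_irqsave(&dd->pioavail_lock, flags);
        for (i = 0; i < dd->nregs; i++) {
            u64 piov = le64_to_cpu(dd->avail_dma[i]);
            u64 mask = dd->kernel_mask[i];

            dd->avail_shadow[i] =
                (dd->avail_shadow[i] & ~mask) | (piov & mask);
        }
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);
    }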

In no_send_bufs() (dd is an argument):
  266  static noinline void no_send_bufs(struct qib_devdata *dd)
  268  dd->upd_pio_shadow = 1;

In qib_getsendbuf_range() (dd is an argument):
  281  u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
  287  unsigned long *shadow = dd->pioavailshadow;
  290  if (!(dd->flags & QIB_PRESENT))
  294  if (dd->upd_pio_shadow) {
  301  update_send_bufs(dd);
  310  spin_lock_irqsave(&dd->pioavail_lock, flags);
  311  if (dd->last_pio >= first && dd->last_pio <= last)
  312  i = dd->last_pio + 1;
  315  nbufs = last - dd->min_kernel_pio + 1;
  318  i = !first ? dd->min_kernel_pio : first;
  324  __set_bit(i, dd->pio_writing);
  326  dd->last_pio = i;
  329  spin_unlock_irqrestore(&dd->pioavail_lock, flags);
  338  no_send_bufs(dd);
  341  if (i < dd->piobcnt2k)
  342  buf = (u32 __iomem *)(dd->pio2kbase +
  343  i * dd->palign);
  344  else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
  345  buf = (u32 __iomem *)(dd->pio4kbase +
  346  (i - dd->piobcnt2k) * dd->align4k);
  348  buf = (u32 __iomem *)(dd->piovl15base +
  349  (i - (dd->piobcnt2k + dd->piobcnt4k)) *
  350  dd->align4k);
  353  dd->upd_pio_shadow = 0;
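
Allocation works like a rotor: if upd_pio_shadow is set the shadow is refreshed first, the search resumes just past last_pio when that falls inside [first, last], the winning buffer's pio_writing bit is set under pioavail_lock, and a failed search goes through no_send_bufs() so the next call refreshes the shadow. The tail then maps the flat index onto one of three chip regions: piobcnt2k small buffers spaced palign bytes apart, piobcnt4k large buffers spaced align4k apart, and an optional VL15 region after those. A sketch of just that address math (the fields mirror the fragments above; plain char pointers stand in for the __iomem mappings):

    #include <linux/types.h>

    struct dev_model {
        unsigned piobcnt2k, piobcnt4k;  /* buffers per region */
        unsigned palign, align4k;       /* per-buffer strides */
        char *pio2kbase, *pio4kbase, *piovl15base; /* mapped chip memory */
    };

    /* Translate flat buffer index i into its mapped address. */
    static u32 *buf_addr(struct dev_model *dd, unsigned i)
    {
        if (i < dd->piobcnt2k)
            return (u32 *)(dd->pio2kbase + i * dd->palign);
        if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
            return (u32 *)(dd->pio4kbase +
                           (i - dd->piobcnt2k) * dd->align4k);
        return (u32 *)(dd->piovl15base +
                       (i - (dd->piobcnt2k + dd->piobcnt4k)) *
                       dd->align4k);
    }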

In qib_sendbuf_done() (dd is an argument):
  363  void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
  367  spin_lock_irqsave(&dd->pioavail_lock, flags);
  368  __clear_bit(n, dd->pio_writing);
  369  if (__test_and_clear_bit(n, dd->pio_need_disarm))
  370  dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
  371  spin_unlock_irqrestore(&dd->pioavail_lock, flags);
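
This is the other half of the deferral handshake from qib_disarm_piobufs_set(): when the writer releases buffer n, it drops the pio_writing mark and performs any disarm that was postponed while the write was in flight. A sketch under the same placeholder struct as the earlier examples:

    #include <linux/spinlock.h>
    #include <linux/bitops.h>

    struct dev_model {
        spinlock_t pioavail_lock;
        unsigned long *pio_writing;
        unsigned long *pio_need_disarm;
        void (*disarm)(struct dev_model *dd, unsigned buf); /* placeholder */
    };

    /* The writer is done with buffer n: drop the writing mark and
     * carry out any disarm deferred while the write was in flight. */
    static void sendbuf_done(struct dev_model *dd, unsigned n)
    {
        unsigned long flags;

        spin_lock_irqsave(&dd->pioavail_lock, flags);
        __clear_bit(n, dd->pio_writing);
        if (__test_and_clear_bit(n, dd->pio_need_disarm))
            dd->disarm(dd, n);
        spin_unlock_irqrestore(&dd->pioavail_lock, flags);
    }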

In qib_chg_pioavailkernel() (dd is an argument):
  381  void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
  392  spin_lock_irqsave(&dd->pioavail_lock, flags);
  414  dd->pioavailshadow);
  416  le64_to_cpu(dd->pioavailregs_dma[i]);
  420  start, dd->pioavailshadow);
  423  + start, dd->pioavailshadow);
  424  __set_bit(start, dd->pioavailkernel);
  425  if ((start >> 1) < dd->min_kernel_pio)
  426  dd->min_kernel_pio = start >> 1;
  429  dd->pioavailshadow);
  430  __clear_bit(start, dd->pioavailkernel);
  431  if ((start >> 1) > dd->min_kernel_pio)
  432  dd->min_kernel_pio = start >> 1;
  437  if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1)
  438  dd->last_pio = dd->min_kernel_pio - 1;
  439  spin_unlock_irqrestore(&dd->pioavail_lock, flags);
  441  dd->f_txchk_change(dd, ostart, len, avail, rcd);
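
The >> 1 here (and the i << 1 in qib_disarm_piobufs_set()) makes sense once you know the availability bitmaps keep two bits per buffer, so buffer b lives at bit positions 2b and 2b+1: start is already a doubled index, and start >> 1 recovers the buffer number when moving the kernel/user ownership boundary (min_kernel_pio, with last_pio pulled up behind it at lines 437-438). A sketch of the ownership flip for a single buffer, omitting the lock and shadow-repair details of the real routine; the boundary updates follow the fragments above:

    #include <linux/bitops.h>

    struct dev_model {
        unsigned long *pioavailkernel;  /* two bits per buffer */
        unsigned min_kernel_pio;        /* lowest buffer kernel uses */
    };

    /* Hand buffer buf to the kernel (to_kernel != 0) or to user
     * space.  The bitmap keeps two bits per buffer, so the buffer's
     * slot sits at the doubled index buf << 1, and (index >> 1)
     * recovers buf. */
    static void chg_avail_kernel(struct dev_model *dd, unsigned buf,
                                 int to_kernel)
    {
        unsigned start = buf << 1;  /* doubled bit index */

        if (to_kernel) {
            __set_bit(start, dd->pioavailkernel);
            if ((start >> 1) < dd->min_kernel_pio)
                dd->min_kernel_pio = start >> 1;
        } else {
            __clear_bit(start, dd->pioavailkernel);
            if ((start >> 1) > dd->min_kernel_pio)
                dd->min_kernel_pio = start >> 1;
        }
    }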

In qib_cancel_sends() (dd is a local):
  455  struct qib_devdata *dd = ppd->dd;
  470  for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
  471  spin_lock_irqsave(&dd->uctxt_lock, flags);
  472  rcd = dd->rcd[ctxt];
  488  spin_unlock_irqrestore(&dd->uctxt_lock, flags);
  489  spin_lock_irqsave(&dd->pioavail_lock, flags);
  491  __set_bit(i, dd->pio_need_disarm);
  492  spin_unlock_irqrestore(&dd->pioavail_lock, flags);
  494  spin_unlock_irqrestore(&dd->uctxt_lock, flags);
  497  if (!(dd->flags & QIB_HAS_SEND_DMA))
  498  dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
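
Note the lock discipline: uctxt_lock is dropped before pioavail_lock is taken (the unlock at 488 precedes the lock at 489; 494 appears to be the unlock on the path where the context does not match), so the two locks never nest. Afterwards, chips without send DMA get a blanket QIB_SENDCTRL_DISARM_ALL through f_sendctrl. A sketch of the per-context flagging with that same ordering, reusing the assumed pio_base/piocnt fields from the find_ctxt sketch:

    #include <linux/spinlock.h>
    #include <linux/bitops.h>

    struct ctxt_model {
        unsigned pio_base, piocnt;  /* assumed field names */
    };

    struct dev_model {
        spinlock_t uctxt_lock, pioavail_lock;
        unsigned first_user_ctxt, cfgctxts;
        struct ctxt_model **rcd;
        unsigned long *pio_need_disarm;
    };

    /* Flag every user-owned buffer for deferred disarm.  uctxt_lock
     * is released before pioavail_lock is taken, so they never nest. */
    static void cancel_user_sends(struct dev_model *dd)
    {
        unsigned long flags;
        unsigned ctxt, i, first, last;

        for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
            struct ctxt_model *rcd;

            spin_lock_irqsave(&dd->uctxt_lock, flags);
            rcd = dd->rcd[ctxt];
            if (!rcd) {
                spin_unlock_irqrestore(&dd->uctxt_lock, flags);
                continue;
            }
            first = rcd->pio_base;
            last = first + rcd->piocnt;
            spin_unlock_irqrestore(&dd->uctxt_lock, flags);

            spin_lock_irqsave(&dd->pioavail_lock, flags);
            for (i = first; i < last; i++)
                __set_bit(i, dd->pio_need_disarm);
            spin_unlock_irqrestore(&dd->pioavail_lock, flags);
        }
    }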

In qib_force_pio_avail_update() (dd is an argument):
  509  void qib_force_pio_avail_update(struct qib_devdata *dd)
  511  dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);

In qib_hol_event() (dd referenced via ppd->dd):
  556  if (!(ppd->dd->flags & QIB_INITTED))