Lines Matching +full:first +full:-generation
(search results: each entry shows the source line number, the matched line, and the enclosing function)
14 * - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
53 * qib_disarm_piobufs - cancel a range of PIO buffers
55 * @first: the first PIO buffer to cancel
61 void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt) in qib_disarm_piobufs() argument
67 last = first + cnt; in qib_disarm_piobufs()
68 spin_lock_irqsave(&dd->pioavail_lock, flags); in qib_disarm_piobufs()
69 for (i = first; i < last; i++) { in qib_disarm_piobufs()
70 __clear_bit(i, dd->pio_need_disarm); in qib_disarm_piobufs()
71 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i)); in qib_disarm_piobufs()
73 spin_unlock_irqrestore(&dd->pioavail_lock, flags); in qib_disarm_piobufs()
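The matched lines 61-73 above cover nearly the whole body of qib_disarm_piobufs(), apparently from the Linux qib InfiniBand driver's PIO send-buffer code. A minimal sketch of how they fit together; only the local variable declarations are assumptions, everything else is taken from the matched lines:

void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
{
	unsigned long flags;
	unsigned i, last;

	last = first + cnt;
	/* Clear any deferred-disarm bits and disarm each buffer now,
	 * all under the PIO-availability lock. */
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (i = first; i < last; i++) {
		__clear_bit(i, dd->pio_need_disarm);
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}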
82 struct qib_devdata *dd = rcd->dd; in qib_disarm_piobufs_ifneeded()
87 last = rcd->pio_base + rcd->piocnt; in qib_disarm_piobufs_ifneeded()
93 if (rcd->user_event_mask) { in qib_disarm_piobufs_ifneeded()
96 * separately, first, then remaining subctxt, if any in qib_disarm_piobufs_ifneeded()
98 clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]); in qib_disarm_piobufs_ifneeded()
99 for (i = 1; i < rcd->subctxt_cnt; i++) in qib_disarm_piobufs_ifneeded()
101 &rcd->user_event_mask[i]); in qib_disarm_piobufs_ifneeded()
103 spin_lock_irq(&dd->pioavail_lock); in qib_disarm_piobufs_ifneeded()
104 for (i = rcd->pio_base; i < last; i++) { in qib_disarm_piobufs_ifneeded()
105 if (__test_and_clear_bit(i, dd->pio_need_disarm)) { in qib_disarm_piobufs_ifneeded()
107 dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i)); in qib_disarm_piobufs_ifneeded()
110 spin_unlock_irq(&dd->pioavail_lock); in qib_disarm_piobufs_ifneeded()
119 for (pidx = 0; pidx < dd->num_pports; pidx++) { in is_sdma_buf()
120 ppd = dd->pport + pidx; in is_sdma_buf()
121 if (i >= ppd->sdma_state.first_sendbuf && in is_sdma_buf()
122 i < ppd->sdma_state.last_sendbuf) in is_sdma_buf()
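The loop at lines 119-122 is the body of the is_sdma_buf() helper, which checks whether buffer i falls in any port's send-DMA buffer range. A sketch under that reading; the signature, local declarations, and return statements are not among the matched lines and are assumed:

static int is_sdma_buf(struct qib_devdata *dd, unsigned i)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	for (pidx = 0; pidx < dd->num_pports; pidx++) {
		ppd = dd->pport + pidx;
		/* buffer i is owned by this port's SDMA engine */
		if (i >= ppd->sdma_state.first_sendbuf &&
		    i < ppd->sdma_state.last_sendbuf)
			return 1;
	}
	return 0;
}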
138 spin_lock(&dd->uctxt_lock); in find_ctxt()
139 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { in find_ctxt()
140 rcd = dd->rcd[ctxt]; in find_ctxt()
141 if (!rcd || bufn < rcd->pio_base || in find_ctxt()
142 bufn >= rcd->pio_base + rcd->piocnt) in find_ctxt()
144 if (rcd->user_event_mask) { in find_ctxt()
148 * separately, first, then remaining subctxt, if any in find_ctxt()
151 &rcd->user_event_mask[0]); in find_ctxt()
152 for (i = 1; i < rcd->subctxt_cnt; i++) in find_ctxt()
154 &rcd->user_event_mask[i]); in find_ctxt()
159 spin_unlock(&dd->uctxt_lock); in find_ctxt()
178 for (i = 0; i < dd->num_pports; i++) in qib_disarm_piobufs_set()
190 pppd[ppd->port] = ppd; in qib_disarm_piobufs_set()
197 spin_lock_irqsave(&dd->pioavail_lock, flags); in qib_disarm_piobufs_set()
198 if (test_bit(i, dd->pio_writing) || in qib_disarm_piobufs_set()
199 (!test_bit(i << 1, dd->pioavailkernel) && in qib_disarm_piobufs_set()
201 __set_bit(i, dd->pio_need_disarm); in qib_disarm_piobufs_set()
203 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i)); in qib_disarm_piobufs_set()
205 spin_unlock_irqrestore(&dd->pioavail_lock, flags); in qib_disarm_piobufs_set()
209 for (i = 0; i < dd->num_pports; i++) in qib_disarm_piobufs_set()
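The locked block around lines 197-205 decides, per buffer, whether qib_disarm_piobufs_set() may disarm immediately or must defer by setting pio_need_disarm. A sketch of that decision; the continuation of the condition (a call to the find_ctxt() helper listed at lines 138-159) and the else branch are assumptions filled in from context:

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	if (test_bit(i, dd->pio_writing) ||
	    (!test_bit(i << 1, dd->pioavailkernel) &&
	     find_ctxt(dd, i))) {
		/* kernel is writing it, or a user context owns it:
		 * remember to disarm it later */
		__set_bit(i, dd->pio_need_disarm);
	} else {
		/* safe to disarm right away */
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);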
215 * update_send_bufs - update shadow copy of the PIO availability map
224 const unsigned piobregs = dd->pioavregs; in update_send_bufs()
227 * If the generation (check) bits have changed, then we update the in update_send_bufs()
244 if (!dd->pioavailregs_dma) in update_send_bufs()
246 spin_lock_irqsave(&dd->pioavail_lock, flags); in update_send_bufs()
250 piov = le64_to_cpu(dd->pioavailregs_dma[i]); in update_send_bufs()
251 pchg = dd->pioavailkernel[i] & in update_send_bufs()
252 ~(dd->pioavailshadow[i] ^ piov); in update_send_bufs()
254 if (pchg && (pchbusy & dd->pioavailshadow[i])) { in update_send_bufs()
255 pnew = dd->pioavailshadow[i] & ~pchbusy; in update_send_bufs()
257 dd->pioavailshadow[i] = pnew; in update_send_bufs()
260 spin_unlock_irqrestore(&dd->pioavail_lock, flags); in update_send_bufs()
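Lines 244-260 are the core of update_send_bufs(): fold the DMA'd availability registers into the software shadow, limited to kernel-owned buffers whose generation (check) bits have caught up. A sketch of the loop; the loop header, the busy-bit shift constant, and the merge of the new busy bits are assumptions filled in from context:

	if (!dd->pioavailregs_dma)
		return;
	spin_lock_irqsave(&dd->pioavail_lock, flags);
	for (i = 0; i < piobregs; i++) {
		u64 pchbusy, pchg, piov, pnew;

		/* hardware's (DMA'd) copy of availability register i */
		piov = le64_to_cpu(dd->pioavailregs_dma[i]);
		/* kernel-owned positions where the shadow and the
		 * hardware copy now agree */
		pchg = dd->pioavailkernel[i] &
			~(dd->pioavailshadow[i] ^ piov);
		/* corresponding busy-bit positions (shift is assumed) */
		pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
		if (pchg && (pchbusy & dd->pioavailshadow[i])) {
			/* take those busy bits from the hardware copy */
			pnew = dd->pioavailshadow[i] & ~pchbusy;
			pnew |= piov & pchbusy;
			dd->pioavailshadow[i] = pnew;
		}
	}
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);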
268 dd->upd_pio_shadow = 1; in no_send_bufs()
282 u32 first, u32 last) in qib_getsendbuf_range() argument
287 unsigned long *shadow = dd->pioavailshadow; in qib_getsendbuf_range()
290 if (!(dd->flags & QIB_PRESENT)) in qib_getsendbuf_range()
293 nbufs = last - first + 1; /* number in range to check */ in qib_getsendbuf_range()
294 if (dd->upd_pio_shadow) { in qib_getsendbuf_range()
304 i = first; in qib_getsendbuf_range()
310 spin_lock_irqsave(&dd->pioavail_lock, flags); in qib_getsendbuf_range()
311 if (dd->last_pio >= first && dd->last_pio <= last) in qib_getsendbuf_range()
312 i = dd->last_pio + 1; in qib_getsendbuf_range()
313 if (!first) in qib_getsendbuf_range()
315 nbufs = last - dd->min_kernel_pio + 1; in qib_getsendbuf_range()
318 i = !first ? dd->min_kernel_pio : first; in qib_getsendbuf_range()
321 /* flip generation bit */ in qib_getsendbuf_range()
324 __set_bit(i, dd->pio_writing); in qib_getsendbuf_range()
325 if (!first && first != last) /* first == last on VL15, avoid */ in qib_getsendbuf_range()
326 dd->last_pio = i; in qib_getsendbuf_range()
329 spin_unlock_irqrestore(&dd->pioavail_lock, flags); in qib_getsendbuf_range()
334 * First time through; shadow exhausted, but may be in qib_getsendbuf_range()
341 if (i < dd->piobcnt2k) in qib_getsendbuf_range()
342 buf = (u32 __iomem *)(dd->pio2kbase + in qib_getsendbuf_range()
343 i * dd->palign); in qib_getsendbuf_range()
344 else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base) in qib_getsendbuf_range()
345 buf = (u32 __iomem *)(dd->pio4kbase + in qib_getsendbuf_range()
346 (i - dd->piobcnt2k) * dd->align4k); in qib_getsendbuf_range()
348 buf = (u32 __iomem *)(dd->piovl15base + in qib_getsendbuf_range()
349 (i - (dd->piobcnt2k + dd->piobcnt4k)) * in qib_getsendbuf_range()
350 dd->align4k); in qib_getsendbuf_range()
353 dd->upd_pio_shadow = 0; in qib_getsendbuf_range()
367 spin_lock_irqsave(&dd->pioavail_lock, flags); in qib_sendbuf_done()
368 __clear_bit(n, dd->pio_writing); in qib_sendbuf_done()
369 if (__test_and_clear_bit(n, dd->pio_need_disarm)) in qib_sendbuf_done()
370 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n)); in qib_sendbuf_done()
371 spin_unlock_irqrestore(&dd->pioavail_lock, flags); in qib_sendbuf_done()
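Lines 367-371 are essentially the whole body of qib_sendbuf_done(), which releases a buffer once the caller has finished writing it; only the signature and the flags local are assumed here:

void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->pioavail_lock, flags);
	/* no longer being written; perform any disarm that was
	 * deferred while the write was in progress */
	__clear_bit(n, dd->pio_writing);
	if (__test_and_clear_bit(n, dd->pio_need_disarm))
		dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
	spin_unlock_irqrestore(&dd->pioavail_lock, flags);
}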
375 * qib_chg_pioavailkernel - change which send buffers are available for kernel
389 /* There are two bits per send buffer (busy and generation) */ in qib_chg_pioavailkernel()
393 spin_lock_irqsave(&dd->pioavail_lock, flags); in qib_chg_pioavailkernel()
403 * kernel. We do have to make sure the generation in qib_chg_pioavailkernel()
407 * dma array because it is always little-endian, so in qib_chg_pioavailkernel()
408 * we have to flip to host-order first. in qib_chg_pioavailkernel()
415 dd->pioavailshadow); in qib_chg_pioavailkernel()
417 le64_to_cpu(dd->pioavailregs_dma[i]); in qib_chg_pioavailkernel()
421 start, dd->pioavailshadow); in qib_chg_pioavailkernel()
424 + start, dd->pioavailshadow); in qib_chg_pioavailkernel()
425 __set_bit(start, dd->pioavailkernel); in qib_chg_pioavailkernel()
426 if ((start >> 1) < dd->min_kernel_pio) in qib_chg_pioavailkernel()
427 dd->min_kernel_pio = start >> 1; in qib_chg_pioavailkernel()
430 dd->pioavailshadow); in qib_chg_pioavailkernel()
431 __clear_bit(start, dd->pioavailkernel); in qib_chg_pioavailkernel()
432 if ((start >> 1) > dd->min_kernel_pio) in qib_chg_pioavailkernel()
433 dd->min_kernel_pio = start >> 1; in qib_chg_pioavailkernel()
438 if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1) in qib_chg_pioavailkernel()
439 dd->last_pio = dd->min_kernel_pio - 1; in qib_chg_pioavailkernel()
440 spin_unlock_irqrestore(&dd->pioavail_lock, flags); in qib_chg_pioavailkernel()
442 dd->f_txchk_change(dd, ostart, len, avail, rcd); in qib_chg_pioavailkernel()
456 struct qib_devdata *dd = ppd->dd; in qib_cancel_sends()
471 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { in qib_cancel_sends()
472 spin_lock_irqsave(&dd->uctxt_lock, flags); in qib_cancel_sends()
473 rcd = dd->rcd[ctxt]; in qib_cancel_sends()
474 if (rcd && rcd->ppd == ppd) { in qib_cancel_sends()
475 last = rcd->pio_base + rcd->piocnt; in qib_cancel_sends()
476 if (rcd->user_event_mask) { in qib_cancel_sends()
479 * separately, first, then remaining subctxt, in qib_cancel_sends()
483 &rcd->user_event_mask[0]); in qib_cancel_sends()
484 for (i = 1; i < rcd->subctxt_cnt; i++) in qib_cancel_sends()
486 &rcd->user_event_mask[i]); in qib_cancel_sends()
488 i = rcd->pio_base; in qib_cancel_sends()
489 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in qib_cancel_sends()
490 spin_lock_irqsave(&dd->pioavail_lock, flags); in qib_cancel_sends()
492 __set_bit(i, dd->pio_need_disarm); in qib_cancel_sends()
493 spin_unlock_irqrestore(&dd->pioavail_lock, flags); in qib_cancel_sends()
495 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in qib_cancel_sends()
498 if (!(dd->flags & QIB_HAS_SEND_DMA)) in qib_cancel_sends()
499 dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL | in qib_cancel_sends()
504 * Force an update of in-memory copy of the pioavail registers, when
508 * This is a per-device operation, so just the first port.
512 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); in qib_force_pio_avail_update()
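The comment at lines 504-508 describes qib_force_pio_avail_update(), and line 512 is its one-line body. A sketch with an assumed signature:

void qib_force_pio_avail_update(struct qib_devdata *dd)
{
	/* per-device operation, so issue it on the first port only */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
}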
521 if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) in qib_hol_down()
532 if (ppd->hol_state != QIB_HOL_INIT) { in qib_hol_init()
533 ppd->hol_state = QIB_HOL_INIT; in qib_hol_init()
534 mod_timer(&ppd->hol_timer, in qib_hol_init()
546 ppd->hol_state = QIB_HOL_UP; in qib_hol_up()
557 if (!(ppd->dd->flags & QIB_INITTED)) in qib_hol_event()
560 if (ppd->hol_state != QIB_HOL_UP) { in qib_hol_event()
566 mod_timer(&ppd->hol_timer, in qib_hol_event()