Lines Matching +full:sci +full:- +full:reset
1 // SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
3 * Copyright(c) 2015-2018 Intel Corporation.
18 * Set the CM reset bit and wait for it to clear. Use the provided
38 int flush = 0; /* re-read sendctrl to make sure it is flushed */ in pio_send_control()
41 spin_lock_irqsave(&dd->sendctrl_lock, flags); in pio_send_control()
50 for (i = 0; i < ARRAY_SIZE(dd->vld); i++) in pio_send_control()
51 if (!dd->vld[i].mtu) in pio_send_control()
86 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in pio_send_control()
93 #define SCS_POOL_0 -1
94 #define SCS_POOL_1 -2
97 #define SCC_PER_VL -1
98 #define SCC_PER_CPU -2
99 #define SCC_PER_KRCVQ -3
131 { 10000, -1 }, /* pool 0 */
132 { 0, -1 }, /* pool 1 */
138 * 100th of 1% of memory to use, -1 if blocks
148 * start at -1 and increase negatively. Map them as:
149 * -1 => 0
150 * -2 => 1
153 * Return -1 on non-wildcard input, otherwise convert to a pool number.
158 return -1; /* non-wildcard */ in wildcard_to_pool()
159 return -wc - 1; in wildcard_to_pool()
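
The wildcard mapping above (-1 => pool 0, -2 => pool 1, fixed sizes pass through) is easy to check in isolation. A minimal userspace sketch of that conversion; the test harness around it is mine, not the driver's:

/* Standalone model of the wildcard-to-pool conversion shown above. */
#include <stdio.h>

#define SCS_POOL_0 -1
#define SCS_POOL_1 -2

/* Return -1 on non-wildcard input, otherwise the pool number. */
static int wildcard_to_pool(int wc)
{
        if (wc >= 0)
                return -1;      /* non-wildcard (fixed size) */
        return -wc - 1;         /* -1 => 0, -2 => 1, ... */
}

int main(void)
{
        printf("SCS_POOL_0 -> %d\n", wildcard_to_pool(SCS_POOL_0)); /* 0 */
        printf("SCS_POOL_1 -> %d\n", wildcard_to_pool(SCS_POOL_1)); /* 1 */
        printf("fixed 64   -> %d\n", wildcard_to_pool(64));         /* -1 */
        return 0;
}
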
184 int total_blocks = (chip_pio_mem_size(dd) / PIO_BLOCK_SIZE) - 1; in init_sc_pools_and_sizes()
197 * setting it to a fixed size. The allocation allows 3-deep buffering in init_sc_pools_and_sizes()
212 * - copy the centipercents/absolute sizes from the pool config in init_sc_pools_and_sizes()
213 * - sanity check these values in init_sc_pools_and_sizes()
214 * - add up centipercents, then later check for full value in init_sc_pools_and_sizes()
215 * - add up absolute blocks, then later check for over-commit in init_sc_pools_and_sizes()
236 return -EINVAL; in init_sc_pools_and_sizes()
248 return -EINVAL; in init_sc_pools_and_sizes()
257 return -EINVAL; in init_sc_pools_and_sizes()
266 return -EINVAL; in init_sc_pools_and_sizes()
271 * - copy from the context size config in init_sc_pools_and_sizes()
272 * - replace context type wildcard counts with real values in init_sc_pools_and_sizes()
273 * - add up non-memory pool block sizes in init_sc_pools_and_sizes()
274 * - add up memory pool user counts in init_sc_pools_and_sizes()
289 count = dd->n_krcv_queues; in init_sc_pools_and_sizes()
293 count = dd->num_rcv_contexts - dd->n_krcv_queues; in init_sc_pools_and_sizes()
299 return -EINVAL; in init_sc_pools_and_sizes()
302 count = chip_send_contexts(dd) - total_contexts; in init_sc_pools_and_sizes()
308 * number or -1 if a fixed (non-negative) value. The fixed in init_sc_pools_and_sizes()
313 if (pool == -1) { /* non-wildcard */ in init_sc_pools_and_sizes()
322 return -EINVAL; in init_sc_pools_and_sizes()
325 dd->sc_sizes[i].count = count; in init_sc_pools_and_sizes()
326 dd->sc_sizes[i].size = size; in init_sc_pools_and_sizes()
333 return -EINVAL; in init_sc_pools_and_sizes()
337 pool_blocks = total_blocks - fixed_blocks; in init_sc_pools_and_sizes()
343 return -EINVAL; in init_sc_pools_and_sizes()
346 pool_blocks -= ab_total; in init_sc_pools_and_sizes()
352 if (pi->centipercent >= 0) in init_sc_pools_and_sizes()
353 pi->blocks = (pool_blocks * pi->centipercent) / 10000; in init_sc_pools_and_sizes()
355 if (pi->blocks == 0 && pi->count != 0) { in init_sc_pools_and_sizes()
359 i, pi->count); in init_sc_pools_and_sizes()
360 return -EINVAL; in init_sc_pools_and_sizes()
362 if (pi->count == 0) { in init_sc_pools_and_sizes()
364 if (pi->blocks != 0) in init_sc_pools_and_sizes()
368 i, pi->blocks); in init_sc_pools_and_sizes()
369 pi->size = 0; in init_sc_pools_and_sizes()
371 pi->size = pi->blocks / pi->count; in init_sc_pools_and_sizes()
378 if (dd->sc_sizes[i].size < 0) { in init_sc_pools_and_sizes()
379 unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size); in init_sc_pools_and_sizes()
382 dd->sc_sizes[i].size = mem_pool_info[pool].size; in init_sc_pools_and_sizes()
386 if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS) in init_sc_pools_and_sizes()
387 dd->sc_sizes[i].size = PIO_MAX_BLOCKS; in init_sc_pools_and_sizes()
390 used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count; in init_sc_pools_and_sizes()
392 extra = total_blocks - used_blocks; in init_sc_pools_and_sizes()
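
The pool arithmetic above reduces to: subtract the fixed and absolute-size blocks from the chip total, give each pool its centipercent share of the remainder, then divide a pool's blocks by its context count. A worked sketch with invented numbers (none of these values come from the hardware):

/*
 * Worked model of the pool sizing in init_sc_pools_and_sizes(), using
 * made-up numbers: one pool at 10000 centipercent (100.00%), no
 * absolute-size pools.
 */
#include <stdio.h>

int main(void)
{
        int total_blocks = 2048 - 1;    /* e.g. PIO memory / block size, minus 1 */
        int fixed_blocks = 120;         /* blocks claimed by fixed-size contexts */
        int ab_total     = 0;           /* absolute blocks requested by pools */
        int centipercent = 10000;       /* pool 0: 100.00% of what remains */
        int count        = 16;          /* contexts drawing from pool 0 */

        int pool_blocks = total_blocks - fixed_blocks - ab_total;
        int blocks = (pool_blocks * centipercent) / 10000;
        int size   = count ? blocks / count : 0;  /* per-context size, in blocks */

        printf("pool blocks %d, per-context size %d\n", blocks, size);
        return 0;
}
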
408 dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8), in init_send_contexts()
410 dd->send_contexts = kcalloc(dd->num_send_contexts, in init_send_contexts()
413 if (!dd->send_contexts || !dd->hw_to_sw) { in init_send_contexts()
414 kfree(dd->hw_to_sw); in init_send_contexts()
415 kfree(dd->send_contexts); in init_send_contexts()
417 return -ENOMEM; in init_send_contexts()
422 dd->hw_to_sw[i] = INVALID_SCI; in init_send_contexts()
431 struct sc_config_sizes *scs = &dd->sc_sizes[i]; in init_send_contexts()
433 for (j = 0; j < scs->count; j++) { in init_send_contexts()
434 struct send_context_info *sci = in init_send_contexts() local
435 &dd->send_contexts[context]; in init_send_contexts()
436 sci->type = i; in init_send_contexts()
437 sci->base = base; in init_send_contexts()
438 sci->credits = scs->size; in init_send_contexts()
441 base += scs->size; in init_send_contexts()
451 * Must be called with dd->sc_lock held.
456 struct send_context_info *sci; in sc_hw_alloc() local
460 for (index = 0, sci = &dd->send_contexts[0]; in sc_hw_alloc()
461 index < dd->num_send_contexts; index++, sci++) { in sc_hw_alloc()
462 if (sci->type == type && sci->allocated == 0) { in sc_hw_alloc()
463 sci->allocated = 1; in sc_hw_alloc()
464 /* use a 1:1 mapping, but make them non-equal */ in sc_hw_alloc()
465 context = chip_send_contexts(dd) - index - 1; in sc_hw_alloc()
466 dd->hw_to_sw[context] = index; in sc_hw_alloc()
473 return -ENOSPC; in sc_hw_alloc()
479 * Must be called with dd->sc_lock held.
483 struct send_context_info *sci; in sc_hw_free() local
485 sci = &dd->send_contexts[sw_index]; in sc_hw_free()
486 if (!sci->allocated) { in sc_hw_free()
490 sci->allocated = 0; in sc_hw_free()
491 dd->hw_to_sw[hw_context] = INVALID_SCI; in sc_hw_free()
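
sc_hw_alloc()/sc_hw_free() above keep a 1:1 software-to-hardware mapping that counts hardware contexts down from the top, recording only the reverse direction in hw_to_sw[]. A small model; NUM_CONTEXTS and the INVALID_SCI value are placeholders, not the chip's:

/* Model of the reversed 1:1 hw<->sw send context mapping. */
#include <stdio.h>
#include <stdint.h>

#define NUM_CONTEXTS 8
#define INVALID_SCI  0xff

static uint8_t hw_to_sw[NUM_CONTEXTS];

int main(void)
{
        for (int i = 0; i < NUM_CONTEXTS; i++)
                hw_to_sw[i] = INVALID_SCI;

        /* allocate sw index 2: the hw context counts down from the top */
        int sw_index = 2;
        int hw_context = NUM_CONTEXTS - sw_index - 1;
        hw_to_sw[hw_context] = sw_index;

        printf("sw %d <-> hw %d\n", sw_index, hw_context);  /* sw 2 <-> hw 5 */

        /* free: drop the reverse mapping again */
        hw_to_sw[hw_context] = INVALID_SCI;
        return 0;
}
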
521 u32 gc = group_context(sc->hw_context, sc->group); in cr_group_addresses()
522 u32 index = sc->hw_context & 0x7; in cr_group_addresses()
524 sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index]; in cr_group_addresses()
526 &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc]; in cr_group_addresses()
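
The credit-return addressing above picks a group entry plus a slot within an 8-entry return block. A sketch, assuming group_context() rounds a context number down to its group boundary and group_size() is (1 << group); neither helper is defined in this listing, so treat both as assumptions:

#include <stdio.h>

static unsigned group_context(unsigned context, unsigned group)
{
        return (context >> group) << group;     /* assumed: clear the low 'group' bits */
}

static unsigned group_size(unsigned group)
{
        return 1u << group;                     /* assumed */
}

int main(void)
{
        unsigned hw_context = 13, group = 3;    /* made-up values */
        unsigned gc = group_context(hw_context, group);
        unsigned index = hw_context & 0x7;      /* slot within the 8-entry return block */

        printf("context %u: group base %u, slot %u, group size %u\n",
               hw_context, gc, index, group_size(group));
        return 0;
}
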
561 if (sc->credits <= release_credits) in sc_mtu_to_threshold()
564 threshold = sc->credits - release_credits; in sc_mtu_to_threshold()
578 return (sc->credits * percent) / 100; in sc_percent_to_threshold()
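
The two threshold helpers above compute either a fixed percentage of the context's credits or "credits minus what one MTU-sized packet needs". A standalone model; PIO_BLOCK_SIZE, the clamp value of 1, and the sample numbers are assumptions rather than values visible in the listing:

#include <stdio.h>

#define PIO_BLOCK_SIZE 64       /* placeholder */

static unsigned percent_to_threshold(unsigned credits, unsigned percent)
{
        return (credits * percent) / 100;
}

static unsigned mtu_to_threshold(unsigned credits, unsigned mtu_bytes)
{
        /* credits (blocks) consumed by one packet of this size, rounded up */
        unsigned release_credits = (mtu_bytes + PIO_BLOCK_SIZE - 1) / PIO_BLOCK_SIZE;

        if (credits <= release_credits)
                return 1;       /* clamp for small contexts (assumed value) */
        return credits - release_credits;
}

int main(void)
{
        printf("50%% of 104 credits      -> %u\n", percent_to_threshold(104, 50)); /* 52 */
        printf("104 credits, 4096B MTU  -> %u\n", mtu_to_threshold(104, 4096));    /* 40 */
        return 0;
}
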
590 spin_lock_irqsave(&sc->credit_ctrl_lock, flags); in sc_set_cr_threshold()
592 old_threshold = (sc->credit_ctrl >> in sc_set_cr_threshold()
597 sc->credit_ctrl = in sc_set_cr_threshold()
598 (sc->credit_ctrl in sc_set_cr_threshold()
603 write_kctxt_csr(sc->dd, sc->hw_context, in sc_set_cr_threshold()
604 SC(CREDIT_CTRL), sc->credit_ctrl); in sc_set_cr_threshold()
610 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); in sc_set_cr_threshold()
623 struct hfi1_devdata *dd = sc->dd; in set_pio_integrity()
624 u32 hw_context = sc->hw_context; in set_pio_integrity()
625 int type = sc->type; in set_pio_integrity()
638 ret += *per_cpu_ptr(sc->buffers_allocated, cpu); in get_buffers_allocated()
647 (*per_cpu_ptr(sc->buffers_allocated, cpu)) = 0; in reset_buffers_allocated()
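
get_buffers_allocated() sums a per-CPU counter and reset_buffers_allocated() clears it; the same bookkeeping can be modeled in userspace with a plain array indexed by CPU (NR_CPUS here is only an array bound for the model):

#include <stdio.h>

#define NR_CPUS 4

static unsigned int buffers_allocated[NR_CPUS];

static unsigned long long get_buffers_allocated(void)
{
        unsigned long long ret = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                ret += buffers_allocated[cpu];
        return ret;
}

static void reset_buffers_allocated(void)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                buffers_allocated[cpu] = 0;
}

int main(void)
{
        buffers_allocated[0] = 3;
        buffers_allocated[2] = 5;
        printf("in flight: %llu\n", get_buffers_allocated());   /* 8 */
        reset_buffers_allocated();
        printf("after reset: %llu\n", get_buffers_allocated()); /* 0 */
        return 0;
}
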
657 struct send_context_info *sci; in sc_alloc() local
669 if (dd->flags & HFI1_FROZEN) in sc_alloc()
676 sc->buffers_allocated = alloc_percpu(u32); in sc_alloc()
677 if (!sc->buffers_allocated) { in sc_alloc()
685 spin_lock_irqsave(&dd->sc_lock, flags); in sc_alloc()
688 spin_unlock_irqrestore(&dd->sc_lock, flags); in sc_alloc()
689 free_percpu(sc->buffers_allocated); in sc_alloc()
694 sci = &dd->send_contexts[sw_index]; in sc_alloc()
695 sci->sc = sc; in sc_alloc()
697 sc->dd = dd; in sc_alloc()
698 sc->node = numa; in sc_alloc()
699 sc->type = type; in sc_alloc()
700 spin_lock_init(&sc->alloc_lock); in sc_alloc()
701 spin_lock_init(&sc->release_lock); in sc_alloc()
702 spin_lock_init(&sc->credit_ctrl_lock); in sc_alloc()
703 seqlock_init(&sc->waitlock); in sc_alloc()
704 INIT_LIST_HEAD(&sc->piowait); in sc_alloc()
705 INIT_WORK(&sc->halt_work, sc_halted); in sc_alloc()
706 init_waitqueue_head(&sc->halt_wait); in sc_alloc()
709 sc->group = 0; in sc_alloc()
711 sc->sw_index = sw_index; in sc_alloc()
712 sc->hw_context = hw_context; in sc_alloc()
714 sc->credits = sci->credits; in sc_alloc()
715 sc->size = sc->credits * PIO_BLOCK_SIZE; in sc_alloc()
720 sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK) in sc_alloc()
724 reg = ((sci->credits & SC(CTRL_CTXT_DEPTH_MASK)) in sc_alloc()
726 | ((sci->base & SC(CTRL_CTXT_BASE_MASK)) in sc_alloc()
733 write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1); in sc_alloc()
764 * sanitized on driver start-up. in sc_alloc()
767 * work for both the 3-deep buffering allocation and the in sc_alloc()
787 /* set up write-through credit_ctrl */ in sc_alloc()
788 sc->credit_ctrl = reg; in sc_alloc()
797 spin_unlock_irqrestore(&dd->sc_lock, flags); in sc_alloc()
812 sc->sr_size = sci->credits + 1; in sc_alloc()
813 sc->sr = kcalloc_node(sc->sr_size, in sc_alloc()
816 if (!sc->sr) { in sc_alloc()
827 sc->group, in sc_alloc()
828 sc->credits, in sc_alloc()
829 sc->credit_ctrl, in sc_alloc()
835 /* free a per-NUMA send context structure */
846 sc->flags |= SCF_IN_FREE; /* ensure no restarts */ in sc_free()
847 dd = sc->dd; in sc_free()
848 if (!list_empty(&sc->piowait)) in sc_free()
850 sw_index = sc->sw_index; in sc_free()
851 hw_context = sc->hw_context; in sc_free()
853 flush_work(&sc->halt_work); in sc_free()
855 spin_lock_irqsave(&dd->sc_lock, flags); in sc_free()
856 dd->send_contexts[sw_index].sc = NULL; in sc_free()
867 /* release the index and context for re-use */ in sc_free()
869 spin_unlock_irqrestore(&dd->sc_lock, flags); in sc_free()
871 kfree(sc->sr); in sc_free()
872 free_percpu(sc->buffers_allocated); in sc_free()
887 spin_lock_irq(&sc->alloc_lock); in sc_disable()
888 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL)); in sc_disable()
890 sc->flags &= ~SCF_ENABLED; in sc_disable()
892 write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg); in sc_disable()
897 * could be one in progress when the context is disabled). in sc_disable()
902 spin_lock(&sc->release_lock); in sc_disable()
903 if (sc->sr) { /* this context has a shadow ring */ in sc_disable()
904 while (sc->sr_tail != sc->sr_head) { in sc_disable()
905 pbuf = &sc->sr[sc->sr_tail].pbuf; in sc_disable()
906 if (pbuf->cb) in sc_disable()
907 (*pbuf->cb)(pbuf->arg, PRC_SC_DISABLE); in sc_disable()
908 sc->sr_tail++; in sc_disable()
909 if (sc->sr_tail >= sc->sr_size) in sc_disable()
910 sc->sr_tail = 0; in sc_disable()
913 spin_unlock(&sc->release_lock); in sc_disable()
915 write_seqlock(&sc->waitlock); in sc_disable()
916 if (!list_empty(&sc->piowait)) in sc_disable()
917 list_move(&sc->piowait, &wake_list); in sc_disable()
918 write_sequnlock(&sc->waitlock); in sc_disable()
926 priv = qp->priv; in sc_disable()
927 list_del_init(&priv->s_iowait.list); in sc_disable()
928 priv->s_iowait.lock = NULL; in sc_disable()
932 spin_unlock_irq(&sc->alloc_lock); in sc_disable()
957 * sc_wait_for_packet_egress - wait for packet egress
972 struct hfi1_devdata *dd = sc->dd; in sc_wait_for_packet_egress()
979 reg = read_csr(dd, sc->hw_context * 8 + in sc_wait_for_packet_egress()
982 if (sc->flags & SCF_HALTED || in sc_wait_for_packet_egress()
983 is_sc_halted(dd, sc->hw_context) || egress_halted(reg)) in sc_wait_for_packet_egress()
988 /* counter is reset if occupancy count changes */ in sc_wait_for_packet_egress()
992 /* timed out - bounce the link */ in sc_wait_for_packet_egress()
995 __func__, sc->sw_index, in sc_wait_for_packet_egress()
996 sc->hw_context, (u32)reg); in sc_wait_for_packet_egress()
997 queue_work(dd->pport->link_wq, in sc_wait_for_packet_egress()
998 &dd->pport->link_bounce_work); in sc_wait_for_packet_egress()
1014 for (i = 0; i < dd->num_send_contexts; i++) { in sc_wait()
1015 struct send_context *sc = dd->send_contexts[i].sc; in sc_wait()
1026 * If the first step (waiting for the halt to be asserted) fails, return early.
1034 struct hfi1_devdata *dd = sc->dd; in sc_restart()
1040 if (!(sc->flags & SCF_HALTED) || (sc->flags & SCF_IN_FREE)) in sc_restart()
1041 return -EINVAL; in sc_restart()
1043 dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index, in sc_restart()
1044 sc->hw_context); in sc_restart()
1054 reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS)); in sc_restart()
1059 __func__, sc->sw_index, sc->hw_context); in sc_restart()
1060 return -ETIME; in sc_restart()
1076 if (sc->type != SC_USER) { in sc_restart()
1086 __func__, sc->sw_index, in sc_restart()
1087 sc->hw_context, count); in sc_restart()
1108 * This enable will clear the halted flag and per-send context in sc_restart()
1124 for (i = 0; i < dd->num_send_contexts; i++) { in pio_freeze()
1125 sc = dd->send_contexts[i].sc; in pio_freeze()
1129 * calls into the driver to reset its context. in pio_freeze()
1131 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) in pio_freeze()
1142 * been cleared. Now perform the last step and re-enable each kernel context.
1151 for (i = 0; i < dd->num_send_contexts; i++) { in pio_kernel_unfreeze()
1152 sc = dd->send_contexts[i].sc; in pio_kernel_unfreeze()
1153 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) in pio_kernel_unfreeze()
1155 if (sc->flags & SCF_LINK_DOWN) in pio_kernel_unfreeze()
1163 * pio_kernel_linkup() - Re-enable send contexts after linkup event
1167 * event is different from a freeze because if the send context is re-enabled
1179 for (i = 0; i < dd->num_send_contexts; i++) { in pio_kernel_linkup()
1180 sc = dd->send_contexts[i].sc; in pio_kernel_linkup()
1181 if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER) in pio_kernel_linkup()
1191 * -ETIMEDOUT - if we wait too long
1192 * -EIO - if there was an error
1200 max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5; in pio_init_wait_progress()
1206 return -ETIMEDOUT; in pio_init_wait_progress()
1211 return reg & SEND_PIO_INIT_CTXT_PIO_INIT_ERR_SMASK ? -EIO : 0; in pio_init_wait_progress()
1215 * Reset all of the send contexts to their power-on state. Used
1216 * only during manual init - no lock against sc_enable needed.
1225 if (ret == -EIO) { in pio_reset_all()
1231 /* reset init all */ in pio_reset_all()
1239 ret == -ETIMEDOUT ? "is stuck" : "had an error"); in pio_reset_all()
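
pio_init_wait_progress() is a bounded poll: re-read a status register until the init-in-progress bit clears, with a much larger iteration budget on FPGA emulation, then report -ETIMEDOUT or -EIO. A skeleton of that pattern with a stubbed register read; the bit positions, iteration limits, and stub behavior are all invented:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define INIT_IN_PROGRESS (1ULL << 0)    /* placeholder bits */
#define INIT_ERR         (1ULL << 1)

static uint64_t read_init_status(void)
{
        static int polls;
        return ++polls < 3 ? INIT_IN_PROGRESS : 0;  /* stub: clears on the 3rd read */
}

static int init_wait_progress(int is_emulation)
{
        int max = is_emulation ? 120 : 5;   /* emulation is much slower */
        uint64_t reg;

        for (int count = 0; ; count++) {
                reg = read_init_status();
                if (!(reg & INIT_IN_PROGRESS))
                        break;
                if (count >= max)
                        return -ETIMEDOUT;
                /* a real caller would sleep/delay here between polls */
        }
        return (reg & INIT_ERR) ? -EIO : 0;
}

int main(void)
{
        printf("init result: %d\n", init_wait_progress(0));  /* 0 */
        return 0;
}
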
1252 return -EINVAL; in sc_enable()
1253 dd = sc->dd; in sc_enable()
1262 spin_lock_irqsave(&sc->alloc_lock, flags); in sc_enable()
1263 sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL)); in sc_enable()
1267 /* IMPORTANT: only clear free and fill if transitioning 0 -> 1 */ in sc_enable()
1269 *sc->hw_free = 0; in sc_enable()
1270 sc->free = 0; in sc_enable()
1271 sc->alloc_free = 0; in sc_enable()
1272 sc->fill = 0; in sc_enable()
1273 sc->fill_wrap = 0; in sc_enable()
1274 sc->sr_head = 0; in sc_enable()
1275 sc->sr_tail = 0; in sc_enable()
1276 sc->flags = 0; in sc_enable()
1281 * Clear all per-context errors. Some of these will be set when in sc_enable()
1282 * we are re-enabling after a context halt. Now that the context in sc_enable()
1286 reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS)); in sc_enable()
1288 write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg); in sc_enable()
1294 spin_lock(&dd->sc_init_lock); in sc_enable()
1302 pio = ((sc->hw_context & SEND_PIO_INIT_CTXT_PIO_CTXT_NUM_MASK) << in sc_enable()
1312 spin_unlock(&dd->sc_init_lock); in sc_enable()
1316 sc->sw_index, sc->hw_context, ret); in sc_enable()
1324 write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl); in sc_enable()
1329 read_kctxt_csr(dd, sc->hw_context, SC(CTRL)); in sc_enable()
1330 sc->flags |= SCF_ENABLED; in sc_enable()
1333 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_enable()
1344 /* a 0->1 transition schedules a credit return */ in sc_return_credits()
1345 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), in sc_return_credits()
1349 * scheduled. We care more about the 0 -> 1 transition. in sc_return_credits()
1351 read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE)); in sc_return_credits()
1353 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0); in sc_return_credits()
1356 /* allow all in-flight packets to drain on the context */
1371 dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n", in sc_drop()
1372 __func__, sc->sw_index, sc->hw_context); in sc_drop()
1377 * - mark the context as halted or frozen
1378 * - stop buffer allocations
1388 spin_lock_irqsave(&sc->alloc_lock, flags); in sc_stop()
1390 sc->flags |= flag; in sc_stop()
1391 sc->flags &= ~SCF_ENABLED; in sc_stop()
1392 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_stop()
1393 wake_up(&sc->halt_wait); in sc_stop()
1403 * @len: length of whole packet - including PBC - in dwords
1407 * Return a pointer to a PIO buffer, NULL if not enough room, -ECOMM
1421 spin_lock_irqsave(&sc->alloc_lock, flags); in sc_buffer_alloc()
1422 if (!(sc->flags & SCF_ENABLED)) { in sc_buffer_alloc()
1423 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_buffer_alloc()
1424 return ERR_PTR(-ECOMM); in sc_buffer_alloc()
1428 avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free); in sc_buffer_alloc()
1432 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_buffer_alloc()
1436 sc->alloc_free = READ_ONCE(sc->free); in sc_buffer_alloc()
1438 (unsigned long)sc->credits - in sc_buffer_alloc()
1439 (sc->fill - sc->alloc_free); in sc_buffer_alloc()
1443 sc->alloc_free = READ_ONCE(sc->free); in sc_buffer_alloc()
1452 this_cpu_inc(*sc->buffers_allocated); in sc_buffer_alloc()
1455 head = sc->sr_head; in sc_buffer_alloc()
1458 sc->fill += blocks; in sc_buffer_alloc()
1459 fill_wrap = sc->fill_wrap; in sc_buffer_alloc()
1460 sc->fill_wrap += blocks; in sc_buffer_alloc()
1461 if (sc->fill_wrap >= sc->credits) in sc_buffer_alloc()
1462 sc->fill_wrap = sc->fill_wrap - sc->credits; in sc_buffer_alloc()
1471 pbuf = &sc->sr[head].pbuf; in sc_buffer_alloc()
1472 pbuf->sent_at = sc->fill; in sc_buffer_alloc()
1473 pbuf->cb = cb; in sc_buffer_alloc()
1474 pbuf->arg = arg; in sc_buffer_alloc()
1475 pbuf->sc = sc; /* could be filled in at sc->sr init time */ in sc_buffer_alloc()
1480 if (next >= sc->sr_size) in sc_buffer_alloc()
1483 * update the head - must be last! - the releaser can look at fields in sc_buffer_alloc()
1487 sc->sr_head = next; in sc_buffer_alloc()
1488 spin_unlock_irqrestore(&sc->alloc_lock, flags); in sc_buffer_alloc()
1491 pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE; in sc_buffer_alloc()
1492 pbuf->end = sc->base_addr + sc->size; in sc_buffer_alloc()
1493 pbuf->qw_written = 0; in sc_buffer_alloc()
1494 pbuf->carry_bytes = 0; in sc_buffer_alloc()
1495 pbuf->carry.val64 = 0; in sc_buffer_alloc()
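
The allocation path above works on monotonically increasing counters: available credits are credits - (fill - free), and fill_wrap tracks the physical block offset, wrapping at the context size. The sketch below models only that counter math; the real function also caches 'free' as alloc_free, fills in a shadow-ring entry, and returns a pio_buf, none of which is reproduced here:

#include <stdio.h>

struct ctx {
        unsigned long credits;      /* context size, in blocks */
        unsigned long fill;         /* blocks ever allocated */
        unsigned long free;         /* blocks ever released */
        unsigned long fill_wrap;    /* current offset within the context */
};

/* Returns the starting block offset, or -1 if there is not enough room. */
static long alloc_blocks(struct ctx *c, unsigned long blocks)
{
        unsigned long avail = c->credits - (c->fill - c->free);
        unsigned long start;

        if (blocks > avail)
                return -1;

        start = c->fill_wrap;
        c->fill += blocks;
        c->fill_wrap += blocks;
        if (c->fill_wrap >= c->credits)
                c->fill_wrap -= c->credits;
        return (long)start;
}

int main(void)
{
        struct ctx c = { .credits = 8 };

        printf("alloc 3 -> block %ld\n", alloc_blocks(&c, 3));  /* 0 */
        printf("alloc 4 -> block %ld\n", alloc_blocks(&c, 4));  /* 3 */
        printf("alloc 4 -> block %ld\n", alloc_blocks(&c, 4));  /* -1: only 1 credit left */
        c.free += 5;                                            /* "hardware" returned credits */
        printf("alloc 4 -> block %ld\n", alloc_blocks(&c, 4));  /* 7, then wraps to offset 3 */
        return 0;
}
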
1516 spin_lock_irqsave(&sc->credit_ctrl_lock, flags); in sc_add_credit_return_intr()
1517 if (sc->credit_intr_count == 0) { in sc_add_credit_return_intr()
1518 sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK); in sc_add_credit_return_intr()
1519 write_kctxt_csr(sc->dd, sc->hw_context, in sc_add_credit_return_intr()
1520 SC(CREDIT_CTRL), sc->credit_ctrl); in sc_add_credit_return_intr()
1522 sc->credit_intr_count++; in sc_add_credit_return_intr()
1523 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); in sc_add_credit_return_intr()
1534 WARN_ON(sc->credit_intr_count == 0); in sc_del_credit_return_intr()
1537 spin_lock_irqsave(&sc->credit_ctrl_lock, flags); in sc_del_credit_return_intr()
1538 sc->credit_intr_count--; in sc_del_credit_return_intr()
1539 if (sc->credit_intr_count == 0) { in sc_del_credit_return_intr()
1540 sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK); in sc_del_credit_return_intr()
1541 write_kctxt_csr(sc->dd, sc->hw_context, in sc_del_credit_return_intr()
1542 SC(CREDIT_CTRL), sc->credit_ctrl); in sc_del_credit_return_intr()
1544 spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags); in sc_del_credit_return_intr()
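
The credit-return interrupt is reference counted: the CREDIT_CTRL bit is written only on the 0 -> 1 and 1 -> 0 transitions of credit_intr_count. A model with a plain variable standing in for the CSR (the bit position is a placeholder, and locking is omitted):

#include <stdio.h>

static int credit_intr_count;
static unsigned long long credit_ctrl;
#define CREDIT_INTR_BIT (1ULL << 0)     /* placeholder */

static void add_credit_return_intr(void)
{
        if (credit_intr_count == 0)
                credit_ctrl |= CREDIT_INTR_BIT;     /* CSR write in the real driver */
        credit_intr_count++;
}

static void del_credit_return_intr(void)
{
        credit_intr_count--;
        if (credit_intr_count == 0)
                credit_ctrl &= ~CREDIT_INTR_BIT;
}

int main(void)
{
        add_credit_return_intr();
        add_credit_return_intr();
        del_credit_return_intr();
        printf("count %d, intr %s\n", credit_intr_count,
               (credit_ctrl & CREDIT_INTR_BIT) ? "on" : "off");  /* count 1, intr on */
        del_credit_return_intr();
        printf("count %d, intr %s\n", credit_intr_count,
               (credit_ctrl & CREDIT_INTR_BIT) ? "on" : "off");  /* count 0, intr off */
        return 0;
}
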
1557 trace_hfi1_wantpiointr(sc, needint, sc->credit_ctrl); in hfi1_sc_wantpiobuf_intr()
1563 * sc_piobufavail - callback when a PIO buffer is available
1572 struct hfi1_devdata *dd = sc->dd; in sc_piobufavail()
1580 if (dd->send_contexts[sc->sw_index].type != SC_KERNEL && in sc_piobufavail()
1581 dd->send_contexts[sc->sw_index].type != SC_VL15) in sc_piobufavail()
1583 list = &sc->piowait; in sc_piobufavail()
1590 write_seqlock_irqsave(&sc->waitlock, flags); in sc_piobufavail()
1599 priv = qp->priv; in sc_piobufavail()
1600 list_del_init(&priv->s_iowait.list); in sc_piobufavail()
1601 priv->s_iowait.lock = NULL; in sc_piobufavail()
1603 priv = qps[top_idx]->priv; in sc_piobufavail()
1605 &priv->s_iowait, in sc_piobufavail()
1621 write_sequnlock_irqrestore(&sc->waitlock, flags); in sc_piobufavail()
1623 /* Wake up the top-priority one first */ in sc_piobufavail()
1671 spin_lock_irqsave(&sc->release_lock, flags); in sc_release_update()
1673 hw_free = le64_to_cpu(*sc->hw_free); /* volatile read */ in sc_release_update()
1674 old_free = sc->free; in sc_release_update()
1676 - (old_free & CR_COUNTER_MASK)) in sc_release_update()
1682 code = -1; /* code not yet set */ in sc_release_update()
1683 head = READ_ONCE(sc->sr_head); /* snapshot the head */ in sc_release_update()
1684 tail = sc->sr_tail; in sc_release_update()
1686 pbuf = &sc->sr[tail].pbuf; in sc_release_update()
1688 if (sent_before(free, pbuf->sent_at)) { in sc_release_update()
1692 if (pbuf->cb) { in sc_release_update()
1695 (*pbuf->cb)(pbuf->arg, code); in sc_release_update()
1699 if (tail >= sc->sr_size) in sc_release_update()
1702 sc->sr_tail = tail; in sc_release_update()
1705 sc->free = free; in sc_release_update()
1706 spin_unlock_irqrestore(&sc->release_lock, flags); in sc_release_update()
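
The release side folds a narrow, wrapping hardware count into the full-width 'free' counter and then completes shadow-ring entries whose sent_at falls at or before the new value. A sketch of that counter handling; the 12-bit counter width, the helper names, and the sample values are mine, not the hardware's:

#include <stdio.h>
#include <stdint.h>

#define CR_COUNTER_MASK 0xfffu          /* placeholder width */

/* fold a wrapping hardware count into a monotonic 64-bit counter */
static uint64_t extend_free(uint64_t old_free, unsigned hw_count)
{
        uint64_t extra = (hw_count - (old_free & CR_COUNTER_MASK)) & CR_COUNTER_MASK;

        return old_free + extra;
}

/* wrap-safe "has this buffer been released yet?" check */
static int buffer_done(uint64_t free, uint64_t sent_at)
{
        return (int64_t)(free - sent_at) >= 0;
}

int main(void)
{
        uint64_t free = 0xffe;                      /* just below a counter wrap */

        free = extend_free(free, 0x003);            /* hardware counter wrapped */
        printf("free is now 0x%llx\n", (unsigned long long)free);   /* 0x1003 */
        printf("sent_at 0x1000 done? %d\n", buffer_done(free, 0x1000));  /* 1 */
        printf("sent_at 0x1008 done? %d\n", buffer_done(free, 0x1008));  /* 0 */
        return 0;
}
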
1725 spin_lock(&dd->sc_lock); in sc_group_release_update()
1726 sw_index = dd->hw_to_sw[hw_context]; in sc_group_release_update()
1727 if (unlikely(sw_index >= dd->num_send_contexts)) { in sc_group_release_update()
1732 sc = dd->send_contexts[sw_index].sc; in sc_group_release_update()
1736 gc = group_context(hw_context, sc->group); in sc_group_release_update()
1737 gc_end = gc + group_size(sc->group); in sc_group_release_update()
1739 sw_index = dd->hw_to_sw[gc]; in sc_group_release_update()
1740 if (unlikely(sw_index >= dd->num_send_contexts)) { in sc_group_release_update()
1746 sc_release_update(dd->send_contexts[sw_index].sc); in sc_group_release_update()
1749 spin_unlock(&dd->sc_lock); in sc_group_release_update()
1753 * pio_select_send_context_vl() - select send context
1769 * NOTE This should only happen if SC->VL changed after the initial in pio_select_send_context_vl()
1779 m = rcu_dereference(dd->pio_map); in pio_select_send_context_vl()
1782 return dd->vld[0].sc; in pio_select_send_context_vl()
1784 e = m->map[vl & m->mask]; in pio_select_send_context_vl()
1785 rval = e->ksc[selector & e->mask]; in pio_select_send_context_vl()
1789 rval = !rval ? dd->vld[0].sc : rval; in pio_select_send_context_vl()
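
The per-VL lookup above is two masked array indexes: the VL selects a map entry, then the selector picks one of that VL's kernel send contexts. A sketch without the RCU protection or the vld[0].sc fallback, using invented table contents and plain integer ids in place of send_context pointers:

#include <stdio.h>

struct vl_entry {
        unsigned mask;          /* contexts for this VL, rounded to pow2, minus 1 */
        int ksc[4];             /* "send contexts", modeled as plain ids */
};

struct pio_map {
        unsigned mask;          /* VL count rounded to pow2, minus 1 */
        struct vl_entry map[4];
};

static int select_send_context(const struct pio_map *m, unsigned selector, unsigned vl)
{
        const struct vl_entry *e = &m->map[vl & m->mask];

        return e->ksc[selector & e->mask];
}

int main(void)
{
        struct pio_map m = {
                .mask = 3,
                .map = {
                        { .mask = 1, .ksc = { 10, 11 } },
                        { .mask = 1, .ksc = { 12, 13 } },
                        { .mask = 0, .ksc = { 14 } },
                        { .mask = 0, .ksc = { 15 } },
                },
        };

        printf("vl 1, selector 7 -> sc %d\n", select_send_context(&m, 7, 1));  /* 13 */
        printf("vl 2, selector 5 -> sc %d\n", select_send_context(&m, 5, 2));  /* 14 */
        return 0;
}
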
1794 * pio_select_send_context_sc() - select send context
1816 for (i = 0; m && i < m->actual_vls; i++) in pio_map_free()
1817 kfree(m->map[i]); in pio_map_free()
1838 thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext], in set_threshold()
1840 sc_mtu_to_threshold(dd->kernel_send_context[scontext], in set_threshold()
1841 dd->vld[i].mtu, in set_threshold()
1842 dd->rcd[0]->rcvhdrqentsize)); in set_threshold()
1843 sc_set_cr_threshold(dd->kernel_send_context[scontext], thres); in set_threshold()
1847 * pio_map_init - called when #vls change
1855 * vl_scontexts is used to specify a non-uniform vl/send context
1865 * If either num_vls or num_send_contexts is not a power of 2, the
1884 for (i = 0; i < dd->num_send_contexts; i++) in pio_map_init()
1885 if (dd->send_contexts[i].type == SC_KERNEL) in pio_map_init()
1893 for (i = num_vls - 1; i >= 0; i--, extra--) in pio_map_init()
1903 newmap->actual_vls = num_vls; in pio_map_init()
1904 newmap->vls = roundup_pow_of_two(num_vls); in pio_map_init()
1905 newmap->mask = (1 << ilog2(newmap->vls)) - 1; in pio_map_init()
1906 for (i = 0; i < newmap->vls; i++) { in pio_map_init()
1910 if (i < newmap->actual_vls) { in pio_map_init()
1914 newmap->map[i] = kzalloc(sizeof(*newmap->map[i]) + in pio_map_init()
1918 if (!newmap->map[i]) in pio_map_init()
1920 newmap->map[i]->mask = (1 << ilog2(sz)) - 1; in pio_map_init()
1926 if (dd->kernel_send_context[scontext]) { in pio_map_init()
1927 newmap->map[i]->ksc[j] = in pio_map_init()
1928 dd->kernel_send_context[scontext]; in pio_map_init()
1937 /* just re-use entry without allocating */ in pio_map_init()
1938 newmap->map[i] = newmap->map[i % num_vls]; in pio_map_init()
1943 spin_lock_irq(&dd->pio_map_lock); in pio_map_init()
1944 oldmap = rcu_dereference_protected(dd->pio_map, in pio_map_init()
1945 lockdep_is_held(&dd->pio_map_lock)); in pio_map_init()
1948 rcu_assign_pointer(dd->pio_map, newmap); in pio_map_init()
1950 spin_unlock_irq(&dd->pio_map_lock); in pio_map_init()
1953 call_rcu(&oldmap->list, pio_map_rcu_callback); in pio_map_init()
1958 return -ENOMEM; in pio_map_init()
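
When no explicit per-VL context count is passed in, pio_map_init() splits the kernel send contexts evenly and hands the remainder out one per VL starting from the highest VL (the countdown loop above). A model with made-up counts:

#include <stdio.h>

int main(void)
{
        int num_vls = 4, scontexts = 10;        /* invented counts */
        int vl_scontexts[4];
        int scs_per_vl = scontexts / num_vls;   /* truncating divide */
        int extra = scontexts % num_vls;        /* leftovers to distribute */

        /* add the extras from the last VL down */
        for (int i = num_vls - 1; i >= 0; i--, extra--)
                vl_scontexts[i] = scs_per_vl + (extra > 0 ? 1 : 0);

        for (int i = 0; i < num_vls; i++)
                printf("VL%d: %d contexts\n", i, vl_scontexts[i]);  /* 2 2 3 3 */
        return 0;
}
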
1964 if (rcu_access_pointer(dd->pio_map)) { in free_pio_map()
1965 spin_lock_irq(&dd->pio_map_lock); in free_pio_map()
1966 pio_map_free(rcu_access_pointer(dd->pio_map)); in free_pio_map()
1967 RCU_INIT_POINTER(dd->pio_map, NULL); in free_pio_map()
1968 spin_unlock_irq(&dd->pio_map_lock); in free_pio_map()
1971 kfree(dd->kernel_send_context); in free_pio_map()
1972 dd->kernel_send_context = NULL; in free_pio_map()
1978 u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */ in init_pervl_scs()
1979 u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */ in init_pervl_scs()
1981 struct hfi1_pportdata *ppd = dd->pport; in init_pervl_scs()
1983 dd->vld[15].sc = sc_alloc(dd, SC_VL15, in init_pervl_scs()
1984 dd->rcd[0]->rcvhdrqentsize, dd->node); in init_pervl_scs()
1985 if (!dd->vld[15].sc) in init_pervl_scs()
1986 return -ENOMEM; in init_pervl_scs()
1988 hfi1_init_ctxt(dd->vld[15].sc); in init_pervl_scs()
1989 dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048); in init_pervl_scs()
1991 dd->kernel_send_context = kcalloc_node(dd->num_send_contexts, in init_pervl_scs()
1993 GFP_KERNEL, dd->node); in init_pervl_scs()
1994 if (!dd->kernel_send_context) in init_pervl_scs()
1997 dd->kernel_send_context[0] = dd->vld[15].sc; in init_pervl_scs()
2007 dd->vld[i].sc = sc_alloc(dd, SC_KERNEL, in init_pervl_scs()
2008 dd->rcd[0]->rcvhdrqentsize, dd->node); in init_pervl_scs()
2009 if (!dd->vld[i].sc) in init_pervl_scs()
2011 dd->kernel_send_context[i + 1] = dd->vld[i].sc; in init_pervl_scs()
2012 hfi1_init_ctxt(dd->vld[i].sc); in init_pervl_scs()
2014 dd->vld[i].mtu = hfi1_max_mtu; in init_pervl_scs()
2017 dd->kernel_send_context[i + 1] = in init_pervl_scs()
2018 sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node); in init_pervl_scs()
2019 if (!dd->kernel_send_context[i + 1]) in init_pervl_scs()
2021 hfi1_init_ctxt(dd->kernel_send_context[i + 1]); in init_pervl_scs()
2024 sc_enable(dd->vld[15].sc); in init_pervl_scs()
2025 ctxt = dd->vld[15].sc->hw_context; in init_pervl_scs()
2030 dd->vld[15].sc->sw_index, ctxt); in init_pervl_scs()
2033 sc_enable(dd->vld[i].sc); in init_pervl_scs()
2034 ctxt = dd->vld[i].sc->hw_context; in init_pervl_scs()
2039 sc_enable(dd->kernel_send_context[i + 1]); in init_pervl_scs()
2040 ctxt = dd->kernel_send_context[i + 1]->hw_context; in init_pervl_scs()
2045 if (pio_map_init(dd, ppd->port - 1, num_vls, NULL)) in init_pervl_scs()
2051 sc_free(dd->vld[i].sc); in init_pervl_scs()
2052 dd->vld[i].sc = NULL; in init_pervl_scs()
2056 sc_free(dd->kernel_send_context[i + 1]); in init_pervl_scs()
2058 kfree(dd->kernel_send_context); in init_pervl_scs()
2059 dd->kernel_send_context = NULL; in init_pervl_scs()
2062 sc_free(dd->vld[15].sc); in init_pervl_scs()
2063 return -ENOMEM; in init_pervl_scs()
2071 dd->cr_base = kcalloc( in init_credit_return()
2075 if (!dd->cr_base) { in init_credit_return()
2076 ret = -ENOMEM; in init_credit_return()
2082 set_dev_node(&dd->pcidev->dev, i); in init_credit_return()
2083 dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev, in init_credit_return()
2085 &dd->cr_base[i].dma, in init_credit_return()
2087 if (!dd->cr_base[i].va) { in init_credit_return()
2088 set_dev_node(&dd->pcidev->dev, dd->node); in init_credit_return()
2092 ret = -ENOMEM; in init_credit_return()
2096 set_dev_node(&dd->pcidev->dev, dd->node); in init_credit_return()
2107 if (!dd->cr_base) in free_credit_return()
2110 if (dd->cr_base[i].va) { in free_credit_return()
2111 dma_free_coherent(&dd->pcidev->dev, in free_credit_return()
2114 dd->cr_base[i].va, in free_credit_return()
2115 dd->cr_base[i].dma); in free_credit_return()
2118 kfree(dd->cr_base); in free_credit_return()
2119 dd->cr_base = NULL; in free_credit_return()
2123 struct send_context_info *sci) in seqfile_dump_sci() argument
2125 struct send_context *sc = sci->sc; in seqfile_dump_sci()
2128 seq_printf(s, "SCI %u: type %u base %u credits %u\n", in seqfile_dump_sci()
2129 i, sci->type, sci->base, sci->credits); in seqfile_dump_sci()
2131 sc->flags, sc->sw_index, sc->hw_context, sc->group); in seqfile_dump_sci()
2133 sc->sr_size, sc->credits, sc->sr_head, sc->sr_tail); in seqfile_dump_sci()
2135 sc->fill, sc->free, sc->fill_wrap, sc->alloc_free); in seqfile_dump_sci()
2137 sc->credit_intr_count, sc->credit_ctrl); in seqfile_dump_sci()
2138 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS)); in seqfile_dump_sci()
2140 (le64_to_cpu(*sc->hw_free) & CR_COUNTER_SMASK) >> in seqfile_dump_sci()