Lines matching references to the identifier `dd` (struct hfi1_devdata *) in the hfi1 driver's PIO send-context code, drivers/infiniband/hw/hfi1/pio.c. Each entry gives the source line number, the matching line, and the enclosing function; lines where `dd` is declared as a function argument or local variable are marked "argument" or "local".
63 void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl) in __cm_reset() argument
65 write_csr(dd, SEND_CTRL, sendctrl | SEND_CTRL_CM_RESET_SMASK); in __cm_reset()
68 sendctrl = read_csr(dd, SEND_CTRL); in __cm_reset()
75 void pio_send_control(struct hfi1_devdata *dd, int op) in pio_send_control() argument
83 spin_lock_irqsave(&dd->sendctrl_lock, flags); in pio_send_control()
85 reg = read_csr(dd, SEND_CTRL); in pio_send_control()
92 for (i = 0; i < ARRAY_SIZE(dd->vld); i++) in pio_send_control()
93 if (!dd->vld[i].mtu) in pio_send_control()
110 __cm_reset(dd, reg); in pio_send_control()
118 dd_dev_err(dd, "%s: invalid control %d\n", __func__, op); in pio_send_control()
123 write_csr(dd, SEND_CTRL, reg); in pio_send_control()
125 (void)read_csr(dd, SEND_CTRL); /* flush write */ in pio_send_control()
128 spin_unlock_irqrestore(&dd->sendctrl_lock, flags); in pio_send_control()
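The pio_send_control() references above show the driver's usual CSR update shape: take sendctrl_lock with interrupts disabled, read SEND_CTRL, modify it for the requested op, write it back, and read it once more to flush the posted write. Below is a minimal userspace model of that read-modify-write-flush pattern; the csr array, the pthread mutex, and the op names are stand-ins, not the driver's own definitions.

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define SEND_CTRL            0          /* mock CSR index */
#define SEND_CTRL_ENABLE_BIT (1ULL << 0) /* illustrative bit, not the real mask */

static uint64_t csr[4];                                  /* stand-in for device registers */
static pthread_mutex_t sendctrl_lock = PTHREAD_MUTEX_INITIALIZER;

static uint64_t read_csr(int off)              { return csr[off]; }
static void     write_csr(int off, uint64_t v) { csr[off] = v; }

enum { PSC_GLOBAL_ENABLE, PSC_GLOBAL_DISABLE };

/* read-modify-write SEND_CTRL under the lock, then flush with a read-back */
static void pio_send_control_model(int op)
{
	uint64_t reg;

	pthread_mutex_lock(&sendctrl_lock);    /* spin_lock_irqsave() in the driver */
	reg = read_csr(SEND_CTRL);
	if (op == PSC_GLOBAL_ENABLE)
		reg |= SEND_CTRL_ENABLE_BIT;
	else
		reg &= ~SEND_CTRL_ENABLE_BIT;
	write_csr(SEND_CTRL, reg);
	(void)read_csr(SEND_CTRL);             /* flush the posted write */
	pthread_mutex_unlock(&sendctrl_lock);
}

int main(void)
{
	pio_send_control_model(PSC_GLOBAL_ENABLE);
	printf("SEND_CTRL = 0x%llx\n", (unsigned long long)csr[SEND_CTRL]);
	return 0;
}
```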
223 int init_sc_pools_and_sizes(struct hfi1_devdata *dd) in init_sc_pools_and_sizes() argument
226 int total_blocks = (chip_pio_mem_size(dd) / PIO_BLOCK_SIZE) - 1; in init_sc_pools_and_sizes()
275 dd, in init_sc_pools_and_sizes()
288 dd, in init_sc_pools_and_sizes()
296 dd, in init_sc_pools_and_sizes()
305 dd, in init_sc_pools_and_sizes()
331 count = dd->n_krcv_queues; in init_sc_pools_and_sizes()
335 count = dd->num_rcv_contexts - dd->n_krcv_queues; in init_sc_pools_and_sizes()
338 dd, in init_sc_pools_and_sizes()
343 if (total_contexts + count > chip_send_contexts(dd)) in init_sc_pools_and_sizes()
344 count = chip_send_contexts(dd) - total_contexts; in init_sc_pools_and_sizes()
361 dd, in init_sc_pools_and_sizes()
367 dd->sc_sizes[i].count = count; in init_sc_pools_and_sizes()
368 dd->sc_sizes[i].size = size; in init_sc_pools_and_sizes()
372 dd, in init_sc_pools_and_sizes()
382 dd, in init_sc_pools_and_sizes()
399 dd, in init_sc_pools_and_sizes()
408 dd, in init_sc_pools_and_sizes()
420 if (dd->sc_sizes[i].size < 0) { in init_sc_pools_and_sizes()
421 unsigned pool = wildcard_to_pool(dd->sc_sizes[i].size); in init_sc_pools_and_sizes()
424 dd->sc_sizes[i].size = mem_pool_info[pool].size; in init_sc_pools_and_sizes()
428 if (dd->sc_sizes[i].size > PIO_MAX_BLOCKS) in init_sc_pools_and_sizes()
429 dd->sc_sizes[i].size = PIO_MAX_BLOCKS; in init_sc_pools_and_sizes()
432 used_blocks += dd->sc_sizes[i].size * dd->sc_sizes[i].count; in init_sc_pools_and_sizes()
436 dd_dev_info(dd, "unused send context blocks: %d\n", extra); in init_sc_pools_and_sizes()
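init_sc_pools_and_sizes() above budgets the chip's PIO memory: total_blocks is the memory divided into PIO_BLOCK_SIZE blocks (less one reserved block), each context class's size is clamped to PIO_MAX_BLOCKS, and used_blocks accumulates size * count so the leftover can be reported. A simplified accounting sketch with made-up sizes; the real pool and wildcard resolution steps are omitted.

```c
#include <stdio.h>

#define PIO_BLOCK_SIZE 64      /* illustrative block size in bytes */
#define PIO_MAX_BLOCKS 1024    /* illustrative per-context cap */

struct sc_config_sizes { int count; int size; };

int main(void)
{
	int pio_mem_size = 1 << 20;                         /* pretend chip PIO memory */
	int total_blocks = pio_mem_size / PIO_BLOCK_SIZE - 1;
	struct sc_config_sizes sizes[3] = {
		{ .count = 16, .size = 64 },
		{ .count = 8,  .size = 2048 },              /* deliberately oversized */
		{ .count = 1,  .size = 128 },
	};
	int used_blocks = 0;

	for (int i = 0; i < 3; i++) {
		if (sizes[i].size > PIO_MAX_BLOCKS)         /* clamp, as the driver does */
			sizes[i].size = PIO_MAX_BLOCKS;
		used_blocks += sizes[i].size * sizes[i].count;
	}
	printf("unused send context blocks: %d\n", total_blocks - used_blocks);
	return 0;
}
```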
441 int init_send_contexts(struct hfi1_devdata *dd) in init_send_contexts() argument
446 ret = init_credit_return(dd); in init_send_contexts()
450 dd->hw_to_sw = kmalloc_array(TXE_NUM_CONTEXTS, sizeof(u8), in init_send_contexts()
452 dd->send_contexts = kcalloc(dd->num_send_contexts, in init_send_contexts()
455 if (!dd->send_contexts || !dd->hw_to_sw) { in init_send_contexts()
456 kfree(dd->hw_to_sw); in init_send_contexts()
457 kfree(dd->send_contexts); in init_send_contexts()
458 free_credit_return(dd); in init_send_contexts()
464 dd->hw_to_sw[i] = INVALID_SCI; in init_send_contexts()
473 struct sc_config_sizes *scs = &dd->sc_sizes[i]; in init_send_contexts()
477 &dd->send_contexts[context]; in init_send_contexts()
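init_send_contexts() above allocates two arrays, the hw-to-sw index map and the per-context bookkeeping table, and on failure of either frees both (plus the credit-return buffers) before returning. A small host-memory model of that all-or-nothing cleanup; plain malloc/calloc stand in for kmalloc_array/kcalloc, and the hypothetical INVALID_IDX plays the role of INVALID_SCI.

```c
#include <stdlib.h>
#include <stdint.h>

#define NUM_HW_CONTEXTS 160
#define INVALID_IDX     0xff            /* stand-in for INVALID_SCI */

struct send_context_info { void *sc; int type; };

struct dev {
	uint8_t *hw_to_sw;
	struct send_context_info *send_contexts;
	unsigned int num_send_contexts;
};

static int init_send_contexts_model(struct dev *dd)
{
	dd->hw_to_sw = malloc(NUM_HW_CONTEXTS * sizeof(*dd->hw_to_sw));
	dd->send_contexts = calloc(dd->num_send_contexts,
				   sizeof(*dd->send_contexts));
	if (!dd->send_contexts || !dd->hw_to_sw) {
		/* free both; free(NULL) is a no-op, matching kfree() */
		free(dd->hw_to_sw);
		free(dd->send_contexts);
		return -1;                      /* -ENOMEM in the driver */
	}
	for (int i = 0; i < NUM_HW_CONTEXTS; i++)
		dd->hw_to_sw[i] = INVALID_IDX;  /* nothing mapped yet */
	return 0;
}

int main(void)
{
	struct dev dd = { .num_send_contexts = 40 };
	return init_send_contexts_model(&dd) ? 1 : 0;
}
```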
495 static int sc_hw_alloc(struct hfi1_devdata *dd, int type, u32 *sw_index, in sc_hw_alloc() argument
502 for (index = 0, sci = &dd->send_contexts[0]; in sc_hw_alloc()
503 index < dd->num_send_contexts; index++, sci++) { in sc_hw_alloc()
507 context = chip_send_contexts(dd) - index - 1; in sc_hw_alloc()
508 dd->hw_to_sw[context] = index; in sc_hw_alloc()
514 dd_dev_err(dd, "Unable to locate a free type %d send context\n", type); in sc_hw_alloc()
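sc_hw_alloc() above scans the software context table for a free entry and pairs it with a hardware context counted down from the top (chip_send_contexts(dd) - index - 1), recording the reverse mapping in hw_to_sw for later interrupt-time lookups. A sketch of that top-down allocation policy with illustrative sizes:

```c
#include <stdio.h>
#include <stdint.h>

#define NUM_CONTEXTS  8
#define SCI_ALLOCATED 1

struct sci { int flags; };

static struct sci contexts[NUM_CONTEXTS];
static uint8_t hw_to_sw[NUM_CONTEXTS];

/* find a free sw slot; the hw context is handed out from the top down */
static int hw_alloc(uint32_t *sw_index, uint32_t *hw_context)
{
	for (uint32_t index = 0; index < NUM_CONTEXTS; index++) {
		if (contexts[index].flags & SCI_ALLOCATED)
			continue;
		contexts[index].flags |= SCI_ALLOCATED;
		*sw_index = index;
		*hw_context = NUM_CONTEXTS - index - 1;
		hw_to_sw[*hw_context] = index;      /* reverse map for interrupts */
		return 0;
	}
	return -1;                                  /* -ENOSPC in the driver */
}

int main(void)
{
	uint32_t sw, hw;

	while (hw_alloc(&sw, &hw) == 0)
		printf("sw %u -> hw %u\n", sw, hw);
	return 0;
}
```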
523 static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context) in sc_hw_free() argument
527 sci = &dd->send_contexts[sw_index]; in sc_hw_free()
529 dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n", in sc_hw_free()
533 dd->hw_to_sw[hw_context] = INVALID_SCI; in sc_hw_free()
566 sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index]; in cr_group_addresses()
568 &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc]; in cr_group_addresses()
645 write_kctxt_csr(sc->dd, sc->hw_context, in sc_set_cr_threshold()
665 struct hfi1_devdata *dd = sc->dd; in set_pio_integrity() local
669 write_kctxt_csr(dd, hw_context, in set_pio_integrity()
671 hfi1_pkt_default_send_ctxt_mask(dd, type)); in set_pio_integrity()
696 struct send_context *sc_alloc(struct hfi1_devdata *dd, int type, in sc_alloc() argument
711 if (dd->flags & HFI1_FROZEN) in sc_alloc()
721 dd_dev_err(dd, in sc_alloc()
727 spin_lock_irqsave(&dd->sc_lock, flags); in sc_alloc()
728 ret = sc_hw_alloc(dd, type, &sw_index, &hw_context); in sc_alloc()
730 spin_unlock_irqrestore(&dd->sc_lock, flags); in sc_alloc()
736 sci = &dd->send_contexts[sw_index]; in sc_alloc()
739 sc->dd = dd; in sc_alloc()
762 sc->base_addr = dd->piobase + ((hw_context & PIO_ADDR_CONTEXT_MASK) in sc_alloc()
770 write_kctxt_csr(dd, hw_context, SC(CTRL), reg); in sc_alloc()
775 write_kctxt_csr(dd, hw_context, SC(ERR_MASK), (u64)-1); in sc_alloc()
778 write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), in sc_alloc()
793 write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), in sc_alloc()
799 write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg); in sc_alloc()
831 write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), reg); in sc_alloc()
836 write_kctxt_csr(dd, hw_context, SC(CHECK_VL), reg); in sc_alloc()
839 spin_unlock_irqrestore(&dd->sc_lock, flags); in sc_alloc()
880 struct hfi1_devdata *dd; in sc_free() local
889 dd = sc->dd; in sc_free()
891 dd_dev_err(dd, "piowait list not empty!\n"); in sc_free()
897 spin_lock_irqsave(&dd->sc_lock, flags); in sc_free()
898 dd->send_contexts[sw_index].sc = NULL; in sc_free()
901 write_kctxt_csr(dd, hw_context, SC(CTRL), 0); in sc_free()
902 write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), 0); in sc_free()
903 write_kctxt_csr(dd, hw_context, SC(ERR_MASK), 0); in sc_free()
904 write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY), 0); in sc_free()
905 write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE), 0); in sc_free()
906 write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), 0); in sc_free()
907 write_kctxt_csr(dd, hw_context, SC(CREDIT_CTRL), 0); in sc_free()
910 sc_hw_free(dd, sw_index, hw_context); in sc_free()
911 spin_unlock_irqrestore(&dd->sc_lock, flags); in sc_free()
929 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL)); in sc_disable()
933 write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg); in sc_disable()
989 static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context) in is_sc_halted() argument
991 return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) & in is_sc_halted()
1011 struct hfi1_devdata *dd = sc->dd; in sc_wait_for_packet_egress() local
1018 reg = read_csr(dd, sc->hw_context * 8 + in sc_wait_for_packet_egress()
1022 is_sc_halted(dd, sc->hw_context) || egress_halted(reg)) in sc_wait_for_packet_egress()
1032 dd_dev_err(dd, in sc_wait_for_packet_egress()
1036 queue_work(dd->pport->link_wq, in sc_wait_for_packet_egress()
1037 &dd->pport->link_bounce_work); in sc_wait_for_packet_egress()
1046 pause_for_credit_return(dd); in sc_wait_for_packet_egress()
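sc_wait_for_packet_egress() above polls the per-context egress status until the packets drain, the context halts, or a retry budget runs out, at which point it logs an error and queues link-bounce work. A generic bounded-poll skeleton of the same shape; the drain check and the recovery hook are placeholders.

```c
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* placeholder for reading the egress/halt status of one context */
static bool context_drained(int ctxt)
{
	static int calls;

	(void)ctxt;
	return ++calls > 3;             /* pretend it drains after a few polls */
}

static void recover(void)               /* stand-in for queueing link_bounce_work */
{
	fprintf(stderr, "egress timeout: bouncing link\n");
}

static void wait_for_egress(int ctxt)
{
	int loop = 0;

	while (!context_drained(ctxt)) {
		if (++loop > 500) {     /* bounded retries, then escalate */
			recover();
			return;
		}
		usleep(1000);           /* pause between polls */
	}
}

int main(void)
{
	wait_for_egress(0);
	return 0;
}
```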
1049 void sc_wait(struct hfi1_devdata *dd) in sc_wait() argument
1053 for (i = 0; i < dd->num_send_contexts; i++) { in sc_wait()
1054 struct send_context *sc = dd->send_contexts[i].sc; in sc_wait()
1073 struct hfi1_devdata *dd = sc->dd; in sc_restart() local
1082 dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index, in sc_restart()
1093 reg = read_kctxt_csr(dd, sc->hw_context, SC(STATUS)); in sc_restart()
1097 dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n", in sc_restart()
1123 dd_dev_err(dd, in sc_restart()
1158 void pio_freeze(struct hfi1_devdata *dd) in pio_freeze() argument
1163 for (i = 0; i < dd->num_send_contexts; i++) { in pio_freeze()
1164 sc = dd->send_contexts[i].sc; in pio_freeze()
1185 void pio_kernel_unfreeze(struct hfi1_devdata *dd) in pio_kernel_unfreeze() argument
1190 for (i = 0; i < dd->num_send_contexts; i++) { in pio_kernel_unfreeze()
1191 sc = dd->send_contexts[i].sc; in pio_kernel_unfreeze()
1213 void pio_kernel_linkup(struct hfi1_devdata *dd) in pio_kernel_linkup() argument
1218 for (i = 0; i < dd->num_send_contexts; i++) { in pio_kernel_linkup()
1219 sc = dd->send_contexts[i].sc; in pio_kernel_linkup()
1233 static int pio_init_wait_progress(struct hfi1_devdata *dd) in pio_init_wait_progress() argument
1239 max = (dd->icode == ICODE_FPGA_EMULATION) ? 120 : 5; in pio_init_wait_progress()
1241 reg = read_csr(dd, SEND_PIO_INIT_CTXT); in pio_init_wait_progress()
1257 void pio_reset_all(struct hfi1_devdata *dd) in pio_reset_all() argument
1262 ret = pio_init_wait_progress(dd); in pio_reset_all()
1266 write_csr(dd, SEND_PIO_ERR_CLEAR, in pio_reset_all()
1271 write_csr(dd, SEND_PIO_INIT_CTXT, in pio_reset_all()
1274 ret = pio_init_wait_progress(dd); in pio_reset_all()
1276 dd_dev_err(dd, in pio_reset_all()
1286 struct hfi1_devdata *dd; in sc_enable() local
1292 dd = sc->dd; in sc_enable()
1302 sc_ctrl = read_kctxt_csr(dd, sc->hw_context, SC(CTRL)); in sc_enable()
1325 reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS)); in sc_enable()
1327 write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg); in sc_enable()
1333 spin_lock(&dd->sc_init_lock); in sc_enable()
1344 write_csr(dd, SEND_PIO_INIT_CTXT, pio); in sc_enable()
1350 ret = pio_init_wait_progress(dd); in sc_enable()
1351 spin_unlock(&dd->sc_init_lock); in sc_enable()
1353 dd_dev_err(dd, in sc_enable()
1363 write_kctxt_csr(dd, sc->hw_context, SC(CTRL), sc_ctrl); in sc_enable()
1368 read_kctxt_csr(dd, sc->hw_context, SC(CTRL)); in sc_enable()
1384 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), in sc_return_credits()
1390 read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE)); in sc_return_credits()
1392 write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE), 0); in sc_return_credits()
1410 dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n", in sc_drop()
1558 write_kctxt_csr(sc->dd, sc->hw_context, in sc_add_credit_return_intr()
1580 write_kctxt_csr(sc->dd, sc->hw_context, in sc_del_credit_return_intr()
1611 struct hfi1_devdata *dd = sc->dd; in sc_piobufavail() local
1619 if (dd->send_contexts[sc->sw_index].type != SC_KERNEL && in sc_piobufavail()
1620 dd->send_contexts[sc->sw_index].type != SC_VL15) in sc_piobufavail()
1758 void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context) in sc_group_release_update() argument
1764 spin_lock(&dd->sc_lock); in sc_group_release_update()
1765 sw_index = dd->hw_to_sw[hw_context]; in sc_group_release_update()
1766 if (unlikely(sw_index >= dd->num_send_contexts)) { in sc_group_release_update()
1767 dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n", in sc_group_release_update()
1771 sc = dd->send_contexts[sw_index].sc; in sc_group_release_update()
1778 sw_index = dd->hw_to_sw[gc]; in sc_group_release_update()
1779 if (unlikely(sw_index >= dd->num_send_contexts)) { in sc_group_release_update()
1780 dd_dev_err(dd, in sc_group_release_update()
1785 sc_release_update(dd->send_contexts[sw_index].sc); in sc_group_release_update()
1788 spin_unlock(&dd->sc_lock); in sc_group_release_update()
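sc_group_release_update() above translates the interrupting hardware context back to a software index via hw_to_sw under sc_lock and bails out if the index is out of range, then repeats the check for every other context in the credit-return group. A tiny sketch of that defensive translation, with illustrative table sizes:

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NUM_HW  8
#define NUM_SW  4
#define INVALID 0xff

static uint8_t hw_to_sw[NUM_HW];

static void group_release_update(uint32_t hw_context)
{
	uint32_t sw_index;

	if (hw_context >= NUM_HW)
		return;
	sw_index = hw_to_sw[hw_context];
	if (sw_index >= NUM_SW) {               /* stale or never-mapped context */
		fprintf(stderr, "invalid hw (%u) to sw (%u) mapping\n",
			hw_context, sw_index);
		return;
	}
	/* ... release credits for send_contexts[sw_index] ... */
}

int main(void)
{
	memset(hw_to_sw, INVALID, sizeof(hw_to_sw));
	hw_to_sw[5] = 2;
	group_release_update(5);        /* valid mapping */
	group_release_update(6);        /* reports the invalid mapping */
	return 0;
}
```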
1800 struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd, in pio_select_send_context_vl() argument
1818 m = rcu_dereference(dd->pio_map); in pio_select_send_context_vl()
1821 return dd->vld[0].sc; in pio_select_send_context_vl()
1828 rval = !rval ? dd->vld[0].sc : rval; in pio_select_send_context_vl()
1840 struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd, in pio_select_send_context_sc() argument
1843 u8 vl = sc_to_vlt(dd, sc5); in pio_select_send_context_sc()
1845 return pio_select_send_context_vl(dd, selector, vl); in pio_select_send_context_sc()
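pio_select_send_context_vl() above reads an RCU-protected pio_map and falls back to dd->vld[0].sc when the map is missing or the selected slot is empty; pio_select_send_context_sc() just converts the SC to a VL first. The reader side can be modeled in plain C11 with an atomic pointer where the kernel uses rcu_dereference; object reclamation is omitted here, which is precisely the problem RCU solves in the real code.

```c
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct send_context { int id; };

struct pio_vl_map {
	int nvls;
	struct send_context *vl_sc[8];          /* one entry per VL, illustrative */
};

static struct send_context default_sc = { .id = 0 };   /* like dd->vld[0].sc */
static _Atomic(struct pio_vl_map *) pio_map;            /* RCU-protected in the driver */

static struct send_context *select_send_context_vl(int vl)
{
	struct pio_vl_map *m = atomic_load_explicit(&pio_map, memory_order_acquire);
	struct send_context *sc;

	if (!m || vl >= m->nvls)
		return &default_sc;             /* no map yet: fall back */
	sc = m->vl_sc[vl];
	return sc ? sc : &default_sc;           /* empty slot: fall back too */
}

int main(void)
{
	printf("vl 2 -> sc %d\n", select_send_context_vl(2)->id);
	return 0;
}
```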
1873 static void set_threshold(struct hfi1_devdata *dd, int scontext, int i) in set_threshold() argument
1877 thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext], in set_threshold()
1879 sc_mtu_to_threshold(dd->kernel_send_context[scontext], in set_threshold()
1880 dd->vld[i].mtu, in set_threshold()
1881 dd->rcd[0]->rcvhdrqentsize)); in set_threshold()
1882 sc_set_cr_threshold(dd->kernel_send_context[scontext], thres); in set_threshold()
1913 int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts) in pio_map_init() argument
1923 for (i = 0; i < dd->num_send_contexts; i++) in pio_map_init()
1924 if (dd->send_contexts[i].type == SC_KERNEL) in pio_map_init()
1965 if (dd->kernel_send_context[scontext]) { in pio_map_init()
1967 dd->kernel_send_context[scontext]; in pio_map_init()
1968 set_threshold(dd, scontext, i); in pio_map_init()
1982 spin_lock_irq(&dd->pio_map_lock); in pio_map_init()
1983 oldmap = rcu_dereference_protected(dd->pio_map, in pio_map_init()
1984 lockdep_is_held(&dd->pio_map_lock)); in pio_map_init()
1987 rcu_assign_pointer(dd->pio_map, newmap); in pio_map_init()
1989 spin_unlock_irq(&dd->pio_map_lock); in pio_map_init()
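pio_map_init() above builds a complete replacement map, swaps it in under pio_map_lock with rcu_assign_pointer, and only then disposes of the old one, so readers never observe a half-built table. Here is the publish side of the same atomic-pointer model; note that the kernel must also wait out an RCU grace period before freeing oldmap, which this sketch glosses over with a plain free().

```c
#include <stdatomic.h>
#include <stdlib.h>
#include <pthread.h>

struct pio_vl_map { int nvls; /* ... per-VL send context arrays ... */ };

static _Atomic(struct pio_vl_map *) pio_map;
static pthread_mutex_t pio_map_lock = PTHREAD_MUTEX_INITIALIZER;

static int publish_new_map(int num_vls)
{
	struct pio_vl_map *newmap, *oldmap;

	newmap = calloc(1, sizeof(*newmap));    /* build fully before publishing */
	if (!newmap)
		return -1;
	newmap->nvls = num_vls;

	pthread_mutex_lock(&pio_map_lock);      /* writers serialize on the lock */
	oldmap = atomic_load_explicit(&pio_map, memory_order_relaxed);
	atomic_store_explicit(&pio_map, newmap, memory_order_release);
	pthread_mutex_unlock(&pio_map_lock);

	free(oldmap);   /* the driver defers this until after an RCU grace period */
	return 0;
}

int main(void)
{
	return publish_new_map(4) ? 1 : 0;
}
```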
2000 void free_pio_map(struct hfi1_devdata *dd) in free_pio_map() argument
2003 if (rcu_access_pointer(dd->pio_map)) { in free_pio_map()
2004 spin_lock_irq(&dd->pio_map_lock); in free_pio_map()
2005 pio_map_free(rcu_access_pointer(dd->pio_map)); in free_pio_map()
2006 RCU_INIT_POINTER(dd->pio_map, NULL); in free_pio_map()
2007 spin_unlock_irq(&dd->pio_map_lock); in free_pio_map()
2010 kfree(dd->kernel_send_context); in free_pio_map()
2011 dd->kernel_send_context = NULL; in free_pio_map()
2014 int init_pervl_scs(struct hfi1_devdata *dd) in init_pervl_scs() argument
2020 struct hfi1_pportdata *ppd = dd->pport; in init_pervl_scs()
2022 dd->vld[15].sc = sc_alloc(dd, SC_VL15, in init_pervl_scs()
2023 dd->rcd[0]->rcvhdrqentsize, dd->node); in init_pervl_scs()
2024 if (!dd->vld[15].sc) in init_pervl_scs()
2027 hfi1_init_ctxt(dd->vld[15].sc); in init_pervl_scs()
2028 dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048); in init_pervl_scs()
2030 dd->kernel_send_context = kcalloc_node(dd->num_send_contexts, in init_pervl_scs()
2032 GFP_KERNEL, dd->node); in init_pervl_scs()
2033 if (!dd->kernel_send_context) in init_pervl_scs()
2036 dd->kernel_send_context[0] = dd->vld[15].sc; in init_pervl_scs()
2046 dd->vld[i].sc = sc_alloc(dd, SC_KERNEL, in init_pervl_scs()
2047 dd->rcd[0]->rcvhdrqentsize, dd->node); in init_pervl_scs()
2048 if (!dd->vld[i].sc) in init_pervl_scs()
2050 dd->kernel_send_context[i + 1] = dd->vld[i].sc; in init_pervl_scs()
2051 hfi1_init_ctxt(dd->vld[i].sc); in init_pervl_scs()
2053 dd->vld[i].mtu = hfi1_max_mtu; in init_pervl_scs()
2056 dd->kernel_send_context[i + 1] = in init_pervl_scs()
2057 sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node); in init_pervl_scs()
2058 if (!dd->kernel_send_context[i + 1]) in init_pervl_scs()
2060 hfi1_init_ctxt(dd->kernel_send_context[i + 1]); in init_pervl_scs()
2063 sc_enable(dd->vld[15].sc); in init_pervl_scs()
2064 ctxt = dd->vld[15].sc->hw_context; in init_pervl_scs()
2066 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); in init_pervl_scs()
2067 dd_dev_info(dd, in init_pervl_scs()
2069 dd->vld[15].sc->sw_index, ctxt); in init_pervl_scs()
2072 sc_enable(dd->vld[i].sc); in init_pervl_scs()
2073 ctxt = dd->vld[i].sc->hw_context; in init_pervl_scs()
2075 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); in init_pervl_scs()
2078 sc_enable(dd->kernel_send_context[i + 1]); in init_pervl_scs()
2079 ctxt = dd->kernel_send_context[i + 1]->hw_context; in init_pervl_scs()
2081 write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask); in init_pervl_scs()
2084 if (pio_map_init(dd, ppd->port - 1, num_vls, NULL)) in init_pervl_scs()
2090 sc_free(dd->vld[i].sc); in init_pervl_scs()
2091 dd->vld[i].sc = NULL; in init_pervl_scs()
2095 sc_free(dd->kernel_send_context[i + 1]); in init_pervl_scs()
2097 kfree(dd->kernel_send_context); in init_pervl_scs()
2098 dd->kernel_send_context = NULL; in init_pervl_scs()
2101 sc_free(dd->vld[15].sc); in init_pervl_scs()
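init_pervl_scs() above sets up kernel send contexts in stages: VL15 first, then one per data VL, then the remaining kernel contexts, enabling each and programming CHECK_VL, and on any failure it unwinds whatever was already allocated before returning -ENOMEM. A skeleton of that staged-init-with-unwind shape; the allocation and enable steps are stubs.

```c
#include <stdlib.h>

struct send_context { int enabled; };

static struct send_context *alloc_ctxt(void)    /* stand-in for sc_alloc() */
{
	return calloc(1, sizeof(struct send_context));
}

static int init_pervl_model(int num_vls, struct send_context **vl_sc,
			    struct send_context **vl15_sc)
{
	int i;

	*vl15_sc = alloc_ctxt();                /* VL15 context first */
	if (!*vl15_sc)
		return -1;
	for (i = 0; i < num_vls; i++) {
		vl_sc[i] = alloc_ctxt();        /* one kernel context per data VL */
		if (!vl_sc[i])
			goto nomem;
	}
	return 0;

nomem:
	while (--i >= 0) {                      /* unwind contexts allocated so far */
		free(vl_sc[i]);
		vl_sc[i] = NULL;
	}
	free(*vl15_sc);
	*vl15_sc = NULL;
	return -1;                              /* -ENOMEM in the driver */
}

int main(void)
{
	struct send_context *vl_sc[8], *vl15;

	return init_pervl_model(8, vl_sc, &vl15) ? 1 : 0;
}
```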
2105 int init_credit_return(struct hfi1_devdata *dd) in init_credit_return() argument
2110 dd->cr_base = kcalloc( in init_credit_return()
2114 if (!dd->cr_base) { in init_credit_return()
2121 set_dev_node(&dd->pcidev->dev, i); in init_credit_return()
2122 dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev, in init_credit_return()
2124 &dd->cr_base[i].dma, in init_credit_return()
2126 if (!dd->cr_base[i].va) { in init_credit_return()
2127 set_dev_node(&dd->pcidev->dev, dd->node); in init_credit_return()
2128 dd_dev_err(dd, in init_credit_return()
2135 set_dev_node(&dd->pcidev->dev, dd->node); in init_credit_return()
2142 void free_credit_return(struct hfi1_devdata *dd) in free_credit_return() argument
2146 if (!dd->cr_base) in free_credit_return()
2149 if (dd->cr_base[i].va) { in free_credit_return()
2150 dma_free_coherent(&dd->pcidev->dev, in free_credit_return()
2153 dd->cr_base[i].va, in free_credit_return()
2154 dd->cr_base[i].dma); in free_credit_return()
2157 kfree(dd->cr_base); in free_credit_return()
2158 dd->cr_base = NULL; in free_credit_return()
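init_credit_return() above allocates one DMA-coherent credit-return buffer per NUMA node, temporarily re-pointing the PCI device's node with set_dev_node() so dma_alloc_coherent() allocates locally, and free_credit_return() walks the same array to release whatever exists. A host-memory model of the per-node allocate/free pairing; calloc stands in for dma_alloc_coherent and the node hop is reduced to a comment.

```c
#include <stdlib.h>

struct credit_return_base { void *va; unsigned long dma; };

struct dev {
	int num_nodes;
	struct credit_return_base *cr_base;
};

static int init_credit_return_model(struct dev *dd)
{
	dd->cr_base = calloc(dd->num_nodes, sizeof(*dd->cr_base));
	if (!dd->cr_base)
		return -1;
	for (int i = 0; i < dd->num_nodes; i++) {
		/* driver: set_dev_node(dev, i); dma_alloc_coherent(...); set_dev_node(dev, dd->node); */
		dd->cr_base[i].va = calloc(1, 4096);
		if (!dd->cr_base[i].va)
			return -1;      /* caller unwinds via the free routine below */
	}
	return 0;
}

static void free_credit_return_model(struct dev *dd)
{
	if (!dd->cr_base)
		return;
	for (int i = 0; i < dd->num_nodes; i++)
		free(dd->cr_base[i].va);        /* driver: dma_free_coherent() per node */
	free(dd->cr_base);
	dd->cr_base = NULL;
}

int main(void)
{
	struct dev dd = { .num_nodes = 2 };
	int ret = init_credit_return_model(&dd);

	free_credit_return_model(&dd);          /* safe whether or not init succeeded */
	return ret ? 1 : 0;
}
```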
2177 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS)); in seqfile_dump_sci()