/Linux-v5.4/drivers/infiniband/hw/hfi1/

pio.c
   496  u32 *hw_context) in sc_hw_alloc() argument
   510  *hw_context = context; in sc_hw_alloc()
   523  static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context) in sc_hw_free() argument
   530  __func__, sw_index, hw_context); in sc_hw_free()
   533  dd->hw_to_sw[hw_context] = INVALID_SCI; in sc_hw_free()
   563  u32 gc = group_context(sc->hw_context, sc->group); in cr_group_addresses()
   564  u32 index = sc->hw_context & 0x7; in cr_group_addresses()
   645  write_kctxt_csr(sc->dd, sc->hw_context, in sc_set_cr_threshold()
   666  u32 hw_context = sc->hw_context; in set_pio_integrity() local
   669  write_kctxt_csr(dd, hw_context, in set_pio_integrity()
        [all …]

trace_tx.h
    69  __field(u32, hw_context)
    74  __entry->hw_context = sc->hw_context;
    80  __entry->hw_context,
    90  __field(u32, hw_context)
    96  __entry->hw_context = sc->hw_context;
   103  __entry->hw_context,

pio.h
   111  u8 hw_context; /* hardware context number */ member
   298  void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context);

file_ops.c
   374  (uctxt->sc->hw_context * BIT(16))) + in hfi1_file_mmap()
   979  uctxt->sc->hw_context); in allocate_ctxt()
  1154  cinfo.send_ctxt = uctxt->sc->hw_context; in get_ctxt_info()

chip.c
  5958  unsigned int hw_context) in is_sendctxt_err_int() argument
  5968  sw_index = dd->hw_to_sw[hw_context]; in is_sendctxt_err_int()
  5972  sw_index, hw_context); in is_sendctxt_err_int()
  5980  sw_index, hw_context); in is_sendctxt_err_int()
  5988  status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS); in is_sendctxt_err_int()
  5990  dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context, in is_sendctxt_err_int()
 13006  reg = read_kctxt_csr(dd, sc->hw_context, in hfi1_init_ctxt()
 13012  write_kctxt_csr(dd, sc->hw_context, in hfi1_init_ctxt()
 14586  hw_ctxt = rcd->sc->hw_context; in hfi1_set_ctxt_jkey()
 14620  hw_ctxt = rcd->sc->hw_context; in hfi1_clear_ctxt_jkey()
        [all …]
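The pio.c and chip.c hits above show hfi1's two-way send-context mapping: each software context records its hardware context number in sc->hw_context, while dd->hw_to_sw[] maps hardware numbers back to software indices, with INVALID_SCI marking a free slot (pio.c:533) and the reverse lookup guarding the error-interrupt path (chip.c:5968). A minimal userspace sketch of that pattern follows; NUM_CTXTS, the value of INVALID_SCI, and the linear search are assumptions, not the hfi1 implementation.

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_CTXTS   8       /* assumed pool size */
    #define INVALID_SCI 0xffu   /* assumed free-slot marker */

    static uint32_t hw_to_sw[NUM_CTXTS];

    /* mark every hardware context as unowned */
    static void sc_hw_map_init(void)
    {
        for (uint32_t ctx = 0; ctx < NUM_CTXTS; ctx++)
            hw_to_sw[ctx] = INVALID_SCI;
    }

    /* claim a free hardware context and record the owning sw index */
    static int sc_hw_alloc(uint32_t sw_index, uint32_t *hw_context)
    {
        for (uint32_t ctx = 0; ctx < NUM_CTXTS; ctx++) {
            if (hw_to_sw[ctx] == INVALID_SCI) {
                hw_to_sw[ctx] = sw_index;
                *hw_context = ctx;  /* mirrors "*hw_context = context" at pio.c:510 */
                return 0;
            }
        }
        return -1;  /* no free hardware context */
    }

    /* release: the reverse map returns to the invalid marker */
    static void sc_hw_free(uint32_t sw_index, uint32_t hw_context)
    {
        if (hw_to_sw[hw_context] != sw_index)
            fprintf(stderr, "%s: sw_index %u hw_context %u mismatch\n",
                    __func__, sw_index, hw_context);
        hw_to_sw[hw_context] = INVALID_SCI;  /* pio.c:533 */
    }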
/Linux-v5.4/drivers/gpu/drm/gma500/

mmu.c
   128  void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context) in psb_mmu_set_pd_context() argument
   132  uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 : in psb_mmu_set_pd_context()
   133  PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4; in psb_mmu_set_pd_context()
   139  pd->hw_context = hw_context; in psb_mmu_set_pd_context()
   214  pd->hw_context = -1; in psb_mmu_alloc_pd()
   246  if (pd->hw_context != -1) { in psb_mmu_free_pagedir()
   247  PSB_WSGX32(0, PSB_CR_BIF_DIR_LIST_BASE0 + pd->hw_context * 4); in psb_mmu_free_pagedir()
   297  if (pd->driver->has_clflush && pd->hw_context != -1) { in psb_mmu_alloc_pt()
   347  if (pd->hw_context != -1) { in psb_mmu_pt_alloc_map_lock()
   384  if (pd->hw_context != -1) { in psb_mmu_pt_unmap_unlock()
        [all …]

mmu.h
    45  int hw_context; member
    76  extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
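In gma500, pd->hw_context is a plain int with -1 meaning "page directory not bound to any hardware context" (mmu.c:214); binding writes the directory base into a per-context BIF register, context 0 via PSB_CR_BIF_DIR_LIST_BASE0 and later contexts offset from PSB_CR_BIF_DIR_LIST_BASE1 (mmu.c:132-133). A sketch of that offset selection, with the register values assumed for illustration, not taken from the GMA500 register map:

    #include <stdint.h>

    /* assumed register offsets, for illustration only */
    #define PSB_CR_BIF_DIR_LIST_BASE0 0x0c38
    #define PSB_CR_BIF_DIR_LIST_BASE1 0x0c3c

    /* mirrors the ternary in psb_mmu_set_pd_context(): context 0 has its
     * own base register; contexts >= 1 index off BASE1 in 4-byte steps */
    static uint32_t pd_dir_list_base(int hw_context)
    {
        return (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
               PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
    }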
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/en_accel/

ipsec.c
   343  sa_entry->hw_context = in mlx5e_xfrm_add_state()
   348  if (IS_ERR(sa_entry->hw_context)) { in mlx5e_xfrm_add_state()
   349  err = PTR_ERR(sa_entry->hw_context); in mlx5e_xfrm_add_state()
   387  if (sa_entry->hw_context) { in mlx5e_xfrm_free_state()
   389  mlx5_accel_esp_free_hw_context(sa_entry->hw_context); in mlx5e_xfrm_free_state()

ipsec.h
   102  void *hw_context; member
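Here hw_context is an opaque void * handle returned by the mlx5 accel layer: failure is signalled through an encoded error pointer (IS_ERR()/PTR_ERR(), ipsec.c:348-349), and teardown frees the handle only when it is non-NULL (ipsec.c:387). A userspace sketch of that error-pointer idiom, with the kernel helpers and the creation call re-implemented as stand-ins:

    #include <errno.h>

    #define MAX_ERRNO 4095

    /* userspace stand-ins for the kernel's error-pointer helpers */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    struct sa_entry { void *hw_context; };

    /* stand-in for the accel-layer create call; always fails here */
    static void *create_hw_context(void) { return ERR_PTR(-ENOMEM); }

    static int xfrm_add_state_sketch(struct sa_entry *sa_entry)
    {
        sa_entry->hw_context = create_hw_context();
        if (IS_ERR(sa_entry->hw_context)) {
            int err = PTR_ERR(sa_entry->hw_context);

            sa_entry->hw_context = NULL;  /* keep the free path's NULL check safe */
            return err;
        }
        return 0;
    }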
/Linux-v5.4/drivers/gpu/drm/i915/gt/

intel_lrc.c
   507  owner = rq->hw_context->engine; in __unwind_incomplete_requests()
   568  struct intel_context * const ce = rq->hw_context; in __execlists_schedule_in()
   582  struct intel_context * const ce = rq->hw_context; in execlists_schedule_in()
   613  struct intel_context * const ce = rq->hw_context; in __execlists_schedule_out()
   637  struct intel_context * const ce = rq->hw_context; in execlists_schedule_out()
   654  struct intel_context *ce = rq->hw_context; in execlists_update_context()
   729  if (ce == rq->hw_context) in assert_pending_valid()
   732  ce = rq->hw_context; in assert_pending_valid()
   817  if (!can_merge_ctx(prev->hw_context, next->hw_context)) in can_merge_rq()
  1111  last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE; in execlists_dequeue()
        [all …]

intel_breadcrumbs.c
   282  struct intel_context *ce = rq->hw_context; in i915_request_enable_breadcrumb()
   338  struct intel_context *ce = rq->hw_context; in i915_request_cancel_breadcrumb()

intel_context.c
   299  GEM_BUG_ON(rq->hw_context == ce); in intel_context_prepare_remote_request()

intel_ringbuffer.c
  1653  *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags; in mi_set_context()
  1745  struct i915_address_space *vm = vm_alias(rq->hw_context); in switch_context()
  1782  if (rq->hw_context->state) { in switch_context()
  1843  GEM_BUG_ON(!intel_context_is_pinned(request->hw_context)); in ring_request_alloc()

intel_engine_cs.c
  1461  if (!intel_context_inflight_count(rq->hw_context)) in intel_enable_engine_stats()
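Throughout drivers/gpu/drm/i915/gt, rq->hw_context points at the intel_context a request executes in, and the execlists scheduler uses pointer identity on it to decide whether consecutive requests may share a submission (can_merge_rq(), intel_lrc.c:817). A simplified sketch of that test; the real can_merge_ctx() applies further restrictions not shown here, and the types are stand-ins:

    #include <stdbool.h>

    struct intel_context { int unused; };
    struct i915_request  { struct intel_context *hw_context; };

    /* pointer identity on the context image; the real check is stricter */
    static bool can_merge_ctx(const struct intel_context *prev,
                              const struct intel_context *next)
    {
        return prev == next;
    }

    static bool can_merge_rq(const struct i915_request *prev,
                             const struct i915_request *next)
    {
        return can_merge_ctx(prev->hw_context, next->hw_context);
    }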
/Linux-v5.4/drivers/crypto/

talitos.c
   865  u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)]; member
   879  u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)]; member
  1807  req_ctx->hw_context, in common_nonsnoop_hash()
  1852  req_ctx->hw_context, in common_nonsnoop_hash()
  1879  req_ctx->hw_context, in common_nonsnoop_hash()
  1890  req_ctx->hw_context, in common_nonsnoop_hash()
  1945  dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size, in ahash_init()
  1960  req_ctx->hw_context[0] = SHA224_H0; in ahash_init_sha224_swinit()
  1961  req_ctx->hw_context[1] = SHA224_H1; in ahash_init_sha224_swinit()
  1962  req_ctx->hw_context[2] = SHA224_H2; in ahash_init_sha224_swinit()
        [all …]
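In talitos, hw_context is an in-memory copy of the MDEU hash state that gets DMA-mapped for the engine (talitos.c:1945); the software-init variant seeds it with the algorithm's standard initial hash values before the first descriptor is built. A sketch of the SHA-224 seeding, using the initial values from FIPS 180-4; the struct and array size are stand-ins:

    #include <stdint.h>

    /* SHA-224 initial hash values per FIPS 180-4 */
    #define SHA224_H0 0xc1059ed8
    #define SHA224_H1 0x367cd507
    #define SHA224_H2 0x3070dd17
    #define SHA224_H3 0xf70e5939
    #define SHA224_H4 0xffc00b31
    #define SHA224_H5 0x68581511
    #define SHA224_H6 0x64f98fa7
    #define SHA224_H7 0xbefa4fa4

    /* stand-in for TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32) */
    struct req_ctx { uint32_t hw_context[16]; };

    /* mirrors ahash_init_sha224_swinit(): seed the engine state in RAM */
    static void init_sha224_state(struct req_ctx *req_ctx)
    {
        req_ctx->hw_context[0] = SHA224_H0;
        req_ctx->hw_context[1] = SHA224_H1;
        req_ctx->hw_context[2] = SHA224_H2;
        req_ctx->hw_context[3] = SHA224_H3;
        req_ctx->hw_context[4] = SHA224_H4;
        req_ctx->hw_context[5] = SHA224_H5;
        req_ctx->hw_context[6] = SHA224_H6;
        req_ctx->hw_context[7] = SHA224_H7;
    }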
/Linux-v5.4/drivers/gpu/drm/i915/gt/uc/

intel_guc_submission.c
   467  u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc); in guc_add_request()
   568  if (last && rq->hw_context != last->hw_context) { in __guc_dequeue()
   673  intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled); in guc_reset()
/Linux-v5.4/drivers/gpu/drm/i915/

i915_scheduler.c
   219  if (inflight->hw_context == rq->hw_context) in kick_submission()

i915_request.h
   114  struct intel_context *hw_context; member

i915_request.c
   307  intel_context_exit(rq->hw_context); in i915_request_retire()
   308  intel_context_unpin(rq->hw_context); in i915_request_retire()
   679  rq->hw_context = ce; in __i915_request_create()

i915_gem.c
  1334  GEM_BUG_ON(intel_context_is_pinned(rq->hw_context)); in __intel_engines_record_defaults()
  1336  state = rq->hw_context->state; in __intel_engines_record_defaults()
  1388  ce = rq->hw_context; in __intel_engines_record_defaults()

i915_gpu_error.c
  1407  request->hw_context->state, in gem_record_rings()
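The i915_request.c hits bracket the lifecycle of the member declared at i915_request.h:114: a request latches its context at creation (rq->hw_context = ce, line 679) and releases it at retirement (intel_context_exit()/intel_context_unpin(), lines 307-308). A simplified counter-based sketch of that pairing; real i915 refcounting involves considerably more state:

    struct intel_context {
        int pin_count;      /* held via intel_context_pin()/unpin() */
        int active_count;   /* held via intel_context_enter()/exit() */
    };

    struct i915_request {
        struct intel_context *hw_context;
    };

    /* creation: the request takes both holds on a pinned context */
    static void request_create_sketch(struct i915_request *rq,
                                      struct intel_context *ce)
    {
        ce->pin_count++;
        ce->active_count++;
        rq->hw_context = ce;    /* i915_request.c:679 */
    }

    /* retirement: both holds are dropped once the request completes */
    static void request_retire_sketch(struct i915_request *rq)
    {
        rq->hw_context->active_count--;  /* intel_context_exit() */
        rq->hw_context->pin_count--;     /* intel_context_unpin() */
    }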
/Linux-v5.4/drivers/crypto/stm32/

stm32-hash.c
   147  u32 *hw_context; member
   967  rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER, in stm32_hash_export()
   971  preg = rctx->hw_context; in stm32_hash_export()
   998  preg = rctx->hw_context; in stm32_hash_import()
  1014  kfree(rctx->hw_context); in stm32_hash_import()
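stm32-hash uses hw_context to suspend and resume a hash: export allocates room for 3 + HASH_CSR_REGISTER_NUMBER words and snapshots the device registers into it, and import replays them and frees the buffer. A userspace sketch with MMIO modelled as an array; the register count is an assumption, since the driver's value is not visible in these hits:

    #include <stdint.h>
    #include <stdlib.h>

    #define HASH_CSR_REGISTER_NUMBER 54  /* assumed for illustration */
    #define STATE_WORDS (3 + HASH_CSR_REGISTER_NUMBER)

    static uint32_t csr[STATE_WORDS];    /* stand-in for the HASH register bank */

    /* export: snapshot device state into a fresh hw_context buffer */
    static uint32_t *hash_export(void)
    {
        uint32_t *hw_context = malloc(STATE_WORDS * sizeof(*hw_context));

        if (!hw_context)
            return NULL;
        for (int i = 0; i < STATE_WORDS; i++)
            hw_context[i] = csr[i];      /* register reads in the driver */
        return hw_context;
    }

    /* import: replay the saved words, then release the buffer */
    static void hash_import(uint32_t *hw_context)
    {
        for (int i = 0; i < STATE_WORDS; i++)
            csr[i] = hw_context[i];      /* register writes in the driver */
        free(hw_context);                /* the driver kfree()s after restore */
    }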
/Linux-v5.4/drivers/gpu/drm/i915/gvt/

scheduler.c
    61  workload->req->hw_context->state->obj; in update_shadow_pdps()
   132  workload->req->hw_context->state->obj; in populate_shadow_context()
   309  if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context)) in copy_workload_to_ring_buffer()
   532  (struct execlist_ring_context *)rq->hw_context->lrc_reg_state; in update_wa_ctx_2_shadow_ctx()
   799  struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj; in update_guest_context()
/Linux-v5.4/drivers/net/ethernet/broadcom/bnx2x/

bnx2x.h
  1193  struct hw_context { struct
  1648  struct hw_context context[ILT_MAX_L2_LINES];
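Unlike the other hits, bnx2x defines a type named hw_context: a per-ILT-line descriptor kept in an array of ILT_MAX_L2_LINES entries (bnx2x.h:1648). A sketch of what such a descriptor plausibly carries; the field names and the array bound are illustrative stand-ins, not the bnx2x definition:

    #include <stddef.h>
    #include <stdint.h>

    #define ILT_MAX_L2_LINES 32  /* assumed bound for illustration */

    /* one DMA-able context region, as seen from both CPU and device */
    struct hw_context {
        void *vcxt;            /* CPU address of the context block */
        uint64_t cxt_mapping;  /* DMA address handed to the chip */
        size_t size;           /* bytes covered by this ILT line */
    };

    /* the driver keeps one descriptor per ILT line */
    struct ilt_sketch {
        struct hw_context context[ILT_MAX_L2_LINES];
    };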
/Linux-v5.4/drivers/gpu/drm/i915/gem/

i915_gem_context.c
   986  struct i915_address_space *vm = rq->hw_context->vm; in emit_ppgtt_update()