/Linux-v5.4/crypto/
crypto_engine.c
      3:  * Handle async block request by crypto hardware engine.
     12: #include <crypto/engine.h>
     20:  * @engine: the hardware engine
     24: static void crypto_finalize_request(struct crypto_engine *engine,
     32: 	spin_lock_irqsave(&engine->queue_lock, flags);
     33: 	if (engine->cur_req == req)
     35: 	spin_unlock_irqrestore(&engine->queue_lock, flags);
     39: 	if (engine->cur_req_prepared &&
     41: 		ret = enginectx->op.unprepare_request(engine, req);
     43: 			dev_err(engine->dev, "failed to unprepare request\n");
     [all …]
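For context, the finalize path above is one half of the crypto engine API; the other half is the transfer call that feeds the engine's queue. A minimal hedged sketch of a driver wired to this API — the my_* names are hypothetical, while the crypto_engine_* calls are the ones declared in <crypto/engine.h>:

    #include <crypto/engine.h>
    #include <crypto/skcipher.h>

    static struct crypto_engine *engine;	/* one per device, set up at probe */

    static int my_probe_sketch(struct device *dev)
    {
    	engine = crypto_engine_alloc_init(dev, true);	/* second arg selects a
    							 * realtime kthread */
    	if (!engine)
    		return -ENOMEM;
    	return crypto_engine_start(engine);
    }

    /* .encrypt hook: queue the request; the engine calls the driver back to run it */
    static int my_encrypt(struct skcipher_request *req)
    {
    	return crypto_transfer_skcipher_request_to_engine(engine, req);
    }

    /* completion path (e.g. IRQ bottom half): this ends in the
     * crypto_finalize_request() logic excerpted above */
    static void my_complete(struct skcipher_request *req, int err)
    {
    	crypto_finalize_skcipher_request(engine, req, err);
    }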
/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/engine/disp/ |
Kbuild
      2: nvkm-y += nvkm/engine/disp/base.o
      3: nvkm-y += nvkm/engine/disp/nv04.o
      4: nvkm-y += nvkm/engine/disp/nv50.o
      5: nvkm-y += nvkm/engine/disp/g84.o
      6: nvkm-y += nvkm/engine/disp/g94.o
      7: nvkm-y += nvkm/engine/disp/gt200.o
      8: nvkm-y += nvkm/engine/disp/mcp77.o
      9: nvkm-y += nvkm/engine/disp/gt215.o
     10: nvkm-y += nvkm/engine/disp/mcp89.o
     11: nvkm-y += nvkm/engine/disp/gf119.o
     [all …]
/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/engine/gr/ |
Kbuild
      2: nvkm-y += nvkm/engine/gr/base.o
      3: nvkm-y += nvkm/engine/gr/nv04.o
      4: nvkm-y += nvkm/engine/gr/nv10.o
      5: nvkm-y += nvkm/engine/gr/nv15.o
      6: nvkm-y += nvkm/engine/gr/nv17.o
      7: nvkm-y += nvkm/engine/gr/nv20.o
      8: nvkm-y += nvkm/engine/gr/nv25.o
      9: nvkm-y += nvkm/engine/gr/nv2a.o
     10: nvkm-y += nvkm/engine/gr/nv30.o
     11: nvkm-y += nvkm/engine/gr/nv34.o
     [all …]
/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/core/ |
engine.c
     24: #include <core/engine.h>
     31: nvkm_engine_chsw_load(struct nvkm_engine *engine)
     33: 	if (engine->func->chsw_load)
     34: 		return engine->func->chsw_load(engine);
     41: 	struct nvkm_engine *engine = *pengine;
     42: 	if (engine) {
     43: 		mutex_lock(&engine->subdev.mutex);
     44: 		if (--engine->usecount == 0)
     45: 			nvkm_subdev_fini(&engine->subdev, false);
     46: 		mutex_unlock(&engine->subdev.mutex);
     [all …]
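The unref path above (lines 41-46) is the usual last-user teardown idiom: the mutex makes the decrement and the conditional fini atomic against concurrent ref/unref. A generic hedged sketch of the same pattern, with hypothetical names:

    #include <linux/mutex.h>

    struct counted {
    	struct mutex lock;
    	int usecount;
    };

    static void counted_teardown(struct counted *c)
    {
    	/* quiesce hardware; plays the role of nvkm_subdev_fini() above */
    }

    static void counted_unref(struct counted *c)
    {
    	mutex_lock(&c->lock);
    	if (--c->usecount == 0)
    		counted_teardown(c);	/* last user gone */
    	mutex_unlock(&c->lock);
    }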
/Linux-v5.4/drivers/gpu/drm/i915/gt/ |
intel_engine_cs.c
    142:  * intel_engine_context_size() - return the size of the context for an engine
    144:  * @class: engine class
    146:  * Each engine class may require a different amount of space for a context
    149:  * Return: size (in bytes) of an engine class specific context image
    239: static void __sprint_engine_name(struct intel_engine_cs *engine)
    242: 	 * Before we know what the uABI name for this engine will be,
    243: 	 * we still would like to keep track of this engine in the debug logs.
    246: 	GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
    247: 			     intel_engine_class_repr(engine->class),
    248: 			     engine->instance) >= sizeof(engine->name));
    [all …]
mock_engine.c
     46: static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
     66: static struct i915_request *first_request(struct mock_engine *engine)
     68: 	return list_first_entry_or_null(&engine->hw_queue,
     79: 	intel_engine_queue_breadcrumbs(request->engine);
     84: 	struct mock_engine *engine = from_timer(engine, t, hw_delay);
     88: 	spin_lock_irqsave(&engine->hw_lock, flags);
     91: 	request = first_request(engine);
     99: 	while ((request = first_request(engine))) {
    101: 		mod_timer(&engine->hw_delay,
    109: 	spin_unlock_irqrestore(&engine->hw_lock, flags);
    [all …]
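The from_timer() call at line 84 is the standard timer_setup() pairing: the callback receives only the timer_list pointer and recovers its container. A minimal hedged sketch (struct and field names hypothetical):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct fake_hw {
    	struct timer_list hw_delay;	/* models completion latency */
    };

    static void fake_hw_complete(struct timer_list *t)
    {
    	/* container_of() in disguise: t is embedded in struct fake_hw */
    	struct fake_hw *hw = from_timer(hw, t, hw_delay);

    	(void)hw;	/* would dequeue and complete the oldest request here,
    			 * as hw_delay_complete() does above */
    }

    static void fake_hw_start(struct fake_hw *hw)
    {
    	timer_setup(&hw->hw_delay, fake_hw_complete, 0);
    	mod_timer(&hw->hw_delay, jiffies + msecs_to_jiffies(1));
    }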
intel_engine_user.c
     38: void intel_engine_add_user(struct intel_engine_cs *engine)
     40: 	llist_add((struct llist_node *)&engine->uabi_node,
     41: 		  (struct llist_head *)&engine->i915->uabi_engines);
     82: 		struct intel_engine_cs *engine =
     83: 			container_of((struct rb_node *)pos, typeof(*engine),
     85: 		list_add((struct list_head *)&engine->uabi_node, engines);
     93: 		u8 engine;
    102: 	struct intel_engine_cs *engine;
    107: 	for_each_uabi_engine(engine, i915) { /* all engines must agree! */
    110: 		if (engine->schedule)
    [all …]
intel_engine_pm.c
     17: 	struct intel_engine_cs *engine =
     18: 		container_of(wf, typeof(*engine), wakeref);
     21: 	GEM_TRACE("%s\n", engine->name);
     23: 	intel_gt_pm_get(engine->gt);
     27: 	if (engine->default_state)
     28: 		map = i915_gem_object_pin_map(engine->default_state,
     31: 		engine->pinned_default_state = map;
     33: 	if (engine->unpark)
     34: 		engine->unpark(engine);
     36: 	intel_engine_init_hangcheck(engine);
     [all …]
intel_ringbuffer.c
     80: 	*cs++ = intel_gt_scratch_offset(rq->engine->gt,
    154: 		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
    164: 		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
    219: 		intel_gt_scratch_offset(rq->engine->gt,
    254: 		intel_gt_scratch_offset(rq->engine->gt,
    313: 	*cs++ = intel_gt_scratch_offset(rq->engine->gt,
    359: 		intel_gt_scratch_offset(rq->engine->gt,
    442: 	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
    462: 	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
    489: static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
    [all …]
intel_hangcheck.c
     53: static bool subunits_stuck(struct intel_engine_cs *engine)
     55: 	struct drm_i915_private *dev_priv = engine->i915;
     57: 	struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
     62: 	intel_engine_get_instdone(engine, &instdone);
     85: head_stuck(struct intel_engine_cs *engine, u64 acthd)
     87: 	if (acthd != engine->hangcheck.acthd) {
     90: 		memset(&engine->hangcheck.instdone, 0,
     91: 		       sizeof(engine->hangcheck.instdone));
     96: 	if (!subunits_stuck(engine))
    103: engine_stuck(struct intel_engine_cs *engine, u64 acthd)
    [all …]
intel_reset.c
     42: 	struct intel_engine_cs *engine = rq->engine;
     48: 	lockdep_assert_held(&engine->active.lock);
     49: 	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
    120: 		 rq->engine->name,
    285: 	struct intel_engine_cs *engine;
    301: 	for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
    302: 		GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
    303: 		hw_mask |= hw_engine_mask[engine->id];
    310: static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
    312: 	struct intel_uncore *uncore = engine->uncore;
    [all …]
intel_engine.h
     36:  *	ENGINE_READ(engine, REG_FOO);
     41:  *	ENGINE_READ_IDX(engine, REG_BAR, i)
    157: intel_read_status_page(const struct intel_engine_cs *engine, int reg)
    160: 	return READ_ONCE(engine->status_page.addr[reg]);
    164: intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
    173: 	clflush(&engine->status_page.addr[reg]);
    174: 	engine->status_page.addr[reg] = value;
    175: 	clflush(&engine->status_page.addr[reg]);
    178: 	WRITE_ONCE(engine->status_page.addr[reg], value);
    210: intel_engine_create_ring(struct intel_engine_cs *engine, int size);
    [all …]
intel_lrc.c
     49:  * shouldn't we just need a set of those per engine command streamer? This is
     51:  * rings, the engine cs shifts to a new "ring buffer" with every context
     66:  * Now that ringbuffers belong per-context (and not per-engine, like before)
     67:  * and that contexts are uniquely tied to a given engine (and not reusable,
     70:  * - One ringbuffer per-engine inside each context.
     71:  * - One backing object per-engine inside each context.
     75:  * more complex, because we don't know at creation time which engine is going
     80:  * gets populated for a given engine once we receive an execbuffer. If later
     82:  * engine, we allocate/populate a new ringbuffer and context backing object and
     99:  * for the appropriate engine: this structure contains a copy of the context's
     [all …]
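The layout the comment describes — "one ringbuffer per-engine inside each context" — can be pictured with a purely illustrative sketch; these are not the real i915 structures:

    /* Illustrative only: models the comment above, not the i915 code. */
    #define N_ENGINES 8

    struct example_ring;		/* per-engine command ring buffer */
    struct example_bo;		/* per-engine context backing object */

    struct example_gem_context {
    	/* both arrays are populated lazily, the first time an
    	 * execbuffer targets the corresponding engine */
    	struct example_ring *ring[N_ENGINES];
    	struct example_bo *state[N_ENGINES];
    };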
selftest_workarounds.c
     33: 	} engine[I915_NUM_ENGINES];
     39: 	struct intel_engine_cs *engine;
     48: 	for_each_engine(engine, i915, id) {
     49: 		struct i915_wa_list *wal = &lists->engine[id].wa_list;
     51: 		wa_init_start(wal, "REF", engine->name);
     52: 		engine_init_workarounds(engine, wal);
     55: 		__intel_engine_init_ctx_wa(engine,
     56: 					   &lists->engine[id].ctx_wa_list,
     64: 	struct intel_engine_cs *engine;
     67: 	for_each_engine(engine, i915, id)
     [all …]
selftest_hangcheck.c
    131: hang_create_request(struct hang *h, struct intel_engine_cs *engine)
    134: 	struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm;
    175: 	rq = igt_request_alloc(h->ctx, engine);
    244: 	intel_gt_chipset_flush(engine->gt);
    246: 	if (rq->engine->emit_init_breadcrumb) {
    247: 		err = rq->engine->emit_init_breadcrumb(rq);
    256: 	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
    305: 	struct intel_engine_cs *engine;
    317: 	for_each_engine(engine, gt->i915, id) {
    321: 		if (!intel_engine_can_store_dword(engine))
    [all …]
selftest_context.c
     68: static int __live_context_size(struct intel_engine_cs *engine,
     76: 	ce = intel_context_create(fixme, engine);
     85: 				 i915_coherent_map_type(engine->i915));
    104: 	if (HAS_EXECLISTS(engine->i915))
    107: 	vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
    122: 	rq = i915_request_create(engine->kernel_context);
    132: 		pr_err("%s context overwrote trailing red-zone!", engine->name);
    146: 	struct intel_engine_cs *engine;
    164: 	for_each_engine(engine, gt->i915, id) {
    170: 		if (!engine->context_size)
    [all …]
/Linux-v5.4/drivers/gpu/drm/sun4i/ |
sunxi_engine.h
     25: 	 * This callback allows to prepare our engine for an atomic
     32: 	void (*atomic_begin)(struct sunxi_engine *engine,
     49: 	int (*atomic_check)(struct sunxi_engine *engine,
     61: 	void (*commit)(struct sunxi_engine *engine);
     67: 	 * the layers supported by that engine.
     77: 				   struct sunxi_engine *engine);
     83: 	 * engine. This is useful only for the composite output.
     87: 	void (*apply_color_correction)(struct sunxi_engine *engine);
     93: 	 * engine. This is useful only for the composite output.
     97: 	void (*disable_color_correction)(struct sunxi_engine *engine);
     [all …]
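These callbacks form the engine vtable consumed by the sun4i display code. A hypothetical backend might fill it in as below; the foo_* stubs are invented, and since the second parameters of atomic_begin/atomic_check are truncated in the excerpt, the drm_crtc_state arguments shown here are an assumption:

    #include "sunxi_engine.h"

    static void foo_atomic_begin(struct sunxi_engine *engine,
    			     struct drm_crtc_state *old_state)
    {
    	/* latch registers ahead of the commit */
    }

    static int foo_atomic_check(struct sunxi_engine *engine,
    			    struct drm_crtc_state *state)
    {
    	return 0;	/* validate the requested state */
    }

    static void foo_commit(struct sunxi_engine *engine)
    {
    	/* apply the latched configuration to the hardware */
    }

    static const struct sunxi_engine_ops foo_engine_ops = {
    	.atomic_begin = foo_atomic_begin,
    	.atomic_check = foo_atomic_check,
    	.commit       = foo_commit,
    	/* .layers_init, .apply_color_correction, etc. as needed */
    };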
/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ |
Kbuild
      2: nvkm-y += nvkm/engine/fifo/base.o
      3: nvkm-y += nvkm/engine/fifo/nv04.o
      4: nvkm-y += nvkm/engine/fifo/nv10.o
      5: nvkm-y += nvkm/engine/fifo/nv17.o
      6: nvkm-y += nvkm/engine/fifo/nv40.o
      7: nvkm-y += nvkm/engine/fifo/nv50.o
      8: nvkm-y += nvkm/engine/fifo/g84.o
      9: nvkm-y += nvkm/engine/fifo/gf100.o
     10: nvkm-y += nvkm/engine/fifo/gk104.o
     11: nvkm-y += nvkm/engine/fifo/gk110.o
     [all …]
/Linux-v5.4/drivers/crypto/ |
picoxcell_crypto.c
     81: 	struct spacc_engine *engine;
     95: 	struct spacc_engine *engine;
    138: 	struct spacc_engine *engine;
    146: 	struct spacc_engine *engine;
    186: static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
    188: 	u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
    204: 	return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
    205: 			(indx * ctx->engine->cipher_pg_sz) :
    206: 		ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
    240: 	unsigned indx = ctx->engine->next_ctx++;
    [all …]
/Linux-v5.4/drivers/video/fbdev/via/ |
accel.c
     13: static int viafb_set_bpp(void __iomem *engine, u8 bpp)
     19: 	gemode = readl(engine + VIA_REG_GEMODE) & 0xfffffcfc;
     34: 	writel(gemode, engine + VIA_REG_GEMODE);
     39: static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
     79: 	ret = viafb_set_bpp(engine, dst_bpp);
     91: 	writel(tmp, engine + 0x08);
    100: 	writel(tmp, engine + 0x0C);
    108: 	writel(tmp, engine + 0x10);
    111: 		writel(fg_color, engine + 0x18);
    114: 			writel(bg_color, engine + 0x1C);
    [all …]
/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/engine/ |
Kbuild
      2: nvkm-y += nvkm/engine/falcon.o
      3: nvkm-y += nvkm/engine/xtensa.o
      5: include $(src)/nvkm/engine/bsp/Kbuild
      6: include $(src)/nvkm/engine/ce/Kbuild
      7: include $(src)/nvkm/engine/cipher/Kbuild
      8: include $(src)/nvkm/engine/device/Kbuild
      9: include $(src)/nvkm/engine/disp/Kbuild
     10: include $(src)/nvkm/engine/dma/Kbuild
     11: include $(src)/nvkm/engine/fifo/Kbuild
     12: include $(src)/nvkm/engine/gr/Kbuild
     [all …]
/Linux-v5.4/drivers/crypto/marvell/ |
cesa.c
      3:  * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
      5:  * driver supports the TDMA engine on platforms on which it is available.
     38: mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
     43: 	*backlog = crypto_get_backlog(&engine->queue);
     44: 	req = crypto_dequeue_request(&engine->queue);
     52: static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
     58: 	spin_lock_bh(&engine->lock);
     59: 	if (!engine->req) {
     60: 		req = mv_cesa_dequeue_req_locked(engine, &backlog);
     61: 		engine->req = req;
     [all …]
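mv_cesa_dequeue_req_locked() follows the generic crypto_queue backlog protocol: under the lock, peek at the backlog entry and pop the next request; outside the critical section, tell the backlogged submitter its request has been accepted. A hedged sketch with a hypothetical lock and queue (the crypto_* helpers are from <crypto/algapi.h>):

    #include <crypto/algapi.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(queue_lock);	/* hypothetical */
    static struct crypto_queue queue;	/* init with crypto_init_queue() */

    static struct crypto_async_request *pop_one(void)
    {
    	struct crypto_async_request *req, *backlog;

    	spin_lock_bh(&queue_lock);
    	backlog = crypto_get_backlog(&queue);
    	req = crypto_dequeue_request(&queue);
    	spin_unlock_bh(&queue_lock);

    	/* a backlogged submitter is waiting on -EBUSY; wake it */
    	if (backlog)
    		backlog->complete(backlog, -EINPROGRESS);
    	return req;
    }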
/Linux-v5.4/include/crypto/ |
engine.h
      3:  * Crypto engine API
     22:  * struct crypto_engine - crypto hardware engine
     23:  * @name: the engine name
     24:  * @idling: the engine is entering idle state
     26:  * @running: the engine is on working
     28:  * @list: link with the global crypto engine list
     30:  * @queue: the crypto queue of the engine
     40:  * @priv_data: the engine private data
     57: 	int (*prepare_crypt_hardware)(struct crypto_engine *engine);
     58: 	int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
     [all …]
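On the driver side, the engine locates its per-request hooks through a struct crypto_engine_ctx embedded at the start of the transform context. A minimal hedged sketch (everything past the first member is hypothetical):

    #include <crypto/engine.h>

    struct my_tfm_ctx {
    	struct crypto_engine_ctx enginectx;	/* kept first: the engine
    						 * casts the tfm ctx to it */
    	/* driver-private keys, DMA state, ... */
    };

    static int my_do_one_request(struct crypto_engine *engine, void *areq)
    {
    	/* program the hardware; finalize later from the completion path */
    	return 0;
    }

    static void my_setup_ops(struct my_tfm_ctx *ctx)
    {
    	ctx->enginectx.op.prepare_request = NULL;	/* optional hook */
    	ctx->enginectx.op.unprepare_request = NULL;	/* optional hook */
    	ctx->enginectx.op.do_one_request = my_do_one_request;
    }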
/Linux-v5.4/drivers/dma/ |
Kconfig
      3: # DMA engine configuration
      7: 	bool "DMA Engine support"
     18: 	bool "DMA Engine debugging"
     22: 	  say N here. This enables DMA engine core and driver debugging.
     25: 	bool "DMA Engine verbose debugging"
     30: 	  the DMA engine core and drivers.
     61: 	tristate "Altera / Intel mSGDMA Engine"
     73: 	  provide DMA engine support. This includes the original ARM
    113: 	tristate "Broadcom SBA RAID engine support"
    122: 	  Enable support for Broadcom SBA RAID Engine. The SBA RAID
    [all …]
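A consumer of any of the drivers behind these options goes through the same dmaengine sequence: request a channel, prepare a descriptor, submit, issue. A hedged sketch — the device, channel name, and buffer are placeholders, while the calls are the standard <linux/dmaengine.h> API:

    #include <linux/dmaengine.h>

    static int rx_one_buffer(struct device *dev, dma_addr_t buf, size_t len)
    {
    	struct dma_async_tx_descriptor *desc;
    	struct dma_chan *chan;
    	dma_cookie_t cookie;

    	chan = dma_request_chan(dev, "rx");	/* "rx" is a placeholder name */
    	if (IS_ERR(chan))
    		return PTR_ERR(chan);

    	desc = dmaengine_prep_slave_single(chan, buf, len,
    					   DMA_DEV_TO_MEM,
    					   DMA_PREP_INTERRUPT);
    	if (!desc) {
    		dma_release_channel(chan);
    		return -EINVAL;
    	}

    	cookie = dmaengine_submit(desc);	/* queue the descriptor */
    	dma_async_issue_pending(chan);		/* kick the engine */

    	/* wait for completion (callback or dma_sync_wait()), then: */
    	dma_release_channel(chan);
    	return dma_submit_error(cookie) ? -EIO : 0;
    }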
/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/engine/device/ |
priv.h
     32: #include <engine/bsp.h>
     33: #include <engine/ce.h>
     34: #include <engine/cipher.h>
     35: #include <engine/disp.h>
     36: #include <engine/dma.h>
     37: #include <engine/fifo.h>
     38: #include <engine/gr.h>
     39: #include <engine/mpeg.h>
     40: #include <engine/mspdec.h>
     41: #include <engine/msppp.h>
     [all …]