Searched full:context (Results 1 – 25 of 4940) sorted by relevance

/Linux-v6.1/tools/tracing/rtla/src/
osnoise.c
24 char *osnoise_get_cpus(struct osnoise_context *context) in osnoise_get_cpus() argument
26 if (context->curr_cpus) in osnoise_get_cpus()
27 return context->curr_cpus; in osnoise_get_cpus()
29 if (context->orig_cpus) in osnoise_get_cpus()
30 return context->orig_cpus; in osnoise_get_cpus()
32 context->orig_cpus = tracefs_instance_file_read(NULL, "osnoise/cpus", NULL); in osnoise_get_cpus()
38 return context->orig_cpus; in osnoise_get_cpus()
48 int osnoise_set_cpus(struct osnoise_context *context, char *cpus) in osnoise_set_cpus() argument
50 char *orig_cpus = osnoise_get_cpus(context); in osnoise_set_cpus()
57 context->curr_cpus = strdup(cpus); in osnoise_set_cpus()
[all …]
/Linux-v6.1/drivers/misc/vmw_vmci/
vmci_context.c
30 * These, along with context lookup, are protected by the
35 spinlock_t lock; /* Spinlock for context list operations */
44 static void ctx_signal_notify(struct vmci_ctx *context) in ctx_signal_notify() argument
46 *context->notify = true; in ctx_signal_notify()
49 static void ctx_clear_notify(struct vmci_ctx *context) in ctx_clear_notify() argument
51 *context->notify = false; in ctx_clear_notify()
58 static void ctx_clear_notify_call(struct vmci_ctx *context) in ctx_clear_notify_call() argument
60 if (context->pending_datagrams == 0 && in ctx_clear_notify_call()
61 vmci_handle_arr_get_size(context->pending_doorbell_array) == 0) in ctx_clear_notify_call()
62 ctx_clear_notify(context); in ctx_clear_notify_call()
[all …]
vmci_route.c
18 * devices. Will set the source context if it is invalid.
41 /* Must have a valid destination context. */ in vmci_route()
42 if (VMCI_INVALID_ID == dst->context) in vmci_route()
46 if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) { in vmci_route()
63 /* And we cannot send if the source is the host context. */ in vmci_route()
64 if (VMCI_HOST_CONTEXT_ID == src->context) in vmci_route()
69 * respect it (both context and resource are invalid). in vmci_route()
70 * However, if they passed only an invalid context, in vmci_route()
72 * should set the real context here before passing it in vmci_route()
75 if (VMCI_INVALID_ID == src->context && in vmci_route()
[all …]
/Linux-v6.1/drivers/infiniband/hw/hns/
hns_roce_restrack.c
49 struct hns_roce_v2_cq_context context; in hns_roce_fill_res_cq_entry_raw() local
57 ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context); in hns_roce_fill_res_cq_entry_raw()
61 data[offset++] = hr_reg_read(&context, CQC_CQ_ST); in hns_roce_fill_res_cq_entry_raw()
62 data[offset++] = hr_reg_read(&context, CQC_SHIFT); in hns_roce_fill_res_cq_entry_raw()
63 data[offset++] = hr_reg_read(&context, CQC_CQE_SIZE); in hns_roce_fill_res_cq_entry_raw()
64 data[offset++] = hr_reg_read(&context, CQC_CQE_CNT); in hns_roce_fill_res_cq_entry_raw()
65 data[offset++] = hr_reg_read(&context, CQC_CQ_PRODUCER_IDX); in hns_roce_fill_res_cq_entry_raw()
66 data[offset++] = hr_reg_read(&context, CQC_CQ_CONSUMER_IDX); in hns_roce_fill_res_cq_entry_raw()
67 data[offset++] = hr_reg_read(&context, CQC_DB_RECORD_EN); in hns_roce_fill_res_cq_entry_raw()
68 data[offset++] = hr_reg_read(&context, CQC_ARM_ST); in hns_roce_fill_res_cq_entry_raw()
[all …]
/Linux-v6.1/security/selinux/ss/
mls.h
27 #include "context.h"
31 int mls_compute_context_len(struct policydb *p, struct context *context);
32 void mls_sid_to_context(struct policydb *p, struct context *context,
34 int mls_context_isvalid(struct policydb *p, struct context *c);
41 struct context *context,
45 int mls_from_string(struct policydb *p, char *str, struct context *context,
48 int mls_range_set(struct context *context, struct mls_range *range);
52 struct context *oldc,
53 struct context *newc);
56 struct context *scontext,
[all …]
mls.c
34 * security context string representation of `context'.
36 int mls_compute_context_len(struct policydb *p, struct context *context) in mls_compute_context_len() argument
48 int index_sens = context->range.level[l].sens; in mls_compute_context_len()
54 e = &context->range.level[l].cat; in mls_compute_context_len()
73 if (mls_level_eq(&context->range.level[0], in mls_compute_context_len()
74 &context->range.level[1])) in mls_compute_context_len()
85 * Write the security context string representation of
86 * the MLS fields of `context' into the string `*scontext'.
90 struct context *context, in mls_sid_to_context() argument
108 context->range.level[l].sens - 1)); in mls_sid_to_context()
[all …]
/Linux-v6.1/drivers/gpu/drm/etnaviv/
etnaviv_mmu.c
16 static void etnaviv_context_unmap(struct etnaviv_iommu_context *context, in etnaviv_context_unmap() argument
29 unmapped_page = context->global->ops->unmap(context, iova, in etnaviv_context_unmap()
39 static int etnaviv_context_map(struct etnaviv_iommu_context *context, in etnaviv_context_map() argument
55 ret = context->global->ops->map(context, iova, paddr, pgsize, in etnaviv_context_map()
67 etnaviv_context_unmap(context, orig_iova, orig_size - size); in etnaviv_context_map()
72 static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, in etnaviv_iommu_map() argument
79 if (!context || !sgt) in etnaviv_iommu_map()
88 ret = etnaviv_context_map(context, da, pa, bytes, prot); in etnaviv_iommu_map()
95 context->flush_seq++; in etnaviv_iommu_map()
100 etnaviv_context_unmap(context, iova, da - iova); in etnaviv_iommu_map()
[all …]
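
The etnaviv_iommu_map() excerpt above shows a common map-with-rollback pattern: each chunk is mapped in turn, and on failure everything mapped so far is unmapped (etnaviv_context_unmap(context, iova, da - iova)) before the error is returned. Below is a minimal, self-contained userspace sketch of that pattern; map_page() and unmap_page() are hypothetical stand-ins for the driver's page-table ops, not etnaviv functions.

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Hypothetical per-page ops, standing in for context->global->ops->map/unmap. */
static int map_page(unsigned int iova, unsigned int paddr)
{
	if (paddr == 3 * PAGE_SIZE)        /* simulate a failure to exercise the rollback */
		return -1;
	printf("map   iova=0x%08x paddr=0x%08x\n", iova, paddr);
	return 0;
}

static void unmap_page(unsigned int iova)
{
	printf("unmap iova=0x%08x\n", iova);
}

/* Map 'size' bytes at 'iova'; on failure, unwind only what was already mapped. */
static int map_range(unsigned int iova, unsigned int paddr, unsigned int size)
{
	unsigned int da = iova;            /* device-address cursor, like 'da' above */
	unsigned int mapped;

	for (mapped = 0; mapped < size; mapped += PAGE_SIZE, da += PAGE_SIZE) {
		if (map_page(da, paddr + mapped) < 0) {
			/* Rollback, mirroring etnaviv_context_unmap(context, iova, da - iova). */
			for (unsigned int u = iova; u < da; u += PAGE_SIZE)
				unmap_page(u);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return map_range(0x10000000u, 0, 6 * PAGE_SIZE) ? 1 : 0;
}
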
etnaviv_iommu.c
28 to_v1_context(struct etnaviv_iommu_context *context) in to_v1_context() argument
30 return container_of(context, struct etnaviv_iommuv1_context, base); in to_v1_context()
33 static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context) in etnaviv_iommuv1_free() argument
35 struct etnaviv_iommuv1_context *v1_context = to_v1_context(context); in etnaviv_iommuv1_free()
37 drm_mm_takedown(&context->mm); in etnaviv_iommuv1_free()
39 dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu, in etnaviv_iommuv1_free()
42 context->global->v1.shared_context = NULL; in etnaviv_iommuv1_free()
47 static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context, in etnaviv_iommuv1_map() argument
51 struct etnaviv_iommuv1_context *v1_context = to_v1_context(context); in etnaviv_iommuv1_map()
62 static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context, in etnaviv_iommuv1_unmap() argument
[all …]
etnaviv_iommu_v2.c
42 to_v2_context(struct etnaviv_iommu_context *context) in to_v2_context() argument
44 return container_of(context, struct etnaviv_iommuv2_context, base); in to_v2_context()
47 static void etnaviv_iommuv2_free(struct etnaviv_iommu_context *context) in etnaviv_iommuv2_free() argument
49 struct etnaviv_iommuv2_context *v2_context = to_v2_context(context); in etnaviv_iommuv2_free()
52 drm_mm_takedown(&context->mm); in etnaviv_iommuv2_free()
56 dma_free_wc(context->global->dev, SZ_4K, in etnaviv_iommuv2_free()
61 dma_free_wc(context->global->dev, SZ_4K, v2_context->mtlb_cpu, in etnaviv_iommuv2_free()
64 clear_bit(v2_context->id, context->global->v2.pta_alloc); in etnaviv_iommuv2_free()
92 static int etnaviv_iommuv2_map(struct etnaviv_iommu_context *context, in etnaviv_iommuv2_map() argument
96 struct etnaviv_iommuv2_context *v2_context = to_v2_context(context); in etnaviv_iommuv2_map()
[all …]
/Linux-v6.1/kernel/
auditsc.c
28 * Subject and object context labeling support added by <danjones@us.ibm.com>
217 * it's going to remain 1-element for almost any setup) until we free context itself.
453 /* Determine if any context name data matches a rule's watch data */
811 * @ctx: audit context
907 static inline void audit_proctitle_free(struct audit_context *context) in audit_proctitle_free() argument
909 kfree(context->proctitle.value); in audit_proctitle_free()
910 context->proctitle.value = NULL; in audit_proctitle_free()
911 context->proctitle.len = 0; in audit_proctitle_free()
914 static inline void audit_free_module(struct audit_context *context) in audit_free_module() argument
916 if (context->type == AUDIT_KERN_MODULE) { in audit_free_module()
[all …]
/Linux-v6.1/drivers/gpu/drm/tegra/
uapi.c
31 static void tegra_drm_channel_context_close(struct tegra_drm_context *context) in tegra_drm_channel_context_close() argument
36 if (context->memory_context) in tegra_drm_channel_context_close()
37 host1x_memory_context_put(context->memory_context); in tegra_drm_channel_context_close()
39 xa_for_each(&context->mappings, id, mapping) in tegra_drm_channel_context_close()
42 xa_destroy(&context->mappings); in tegra_drm_channel_context_close()
44 host1x_channel_put(context->channel); in tegra_drm_channel_context_close()
46 kfree(context); in tegra_drm_channel_context_close()
51 struct tegra_drm_context *context; in tegra_drm_uapi_close_file() local
55 xa_for_each(&file->contexts, id, context) in tegra_drm_uapi_close_file()
56 tegra_drm_channel_context_close(context); in tegra_drm_uapi_close_file()
[all …]
submit.c
26 #define SUBMIT_ERR(context, fmt, ...) \ argument
27 dev_err_ratelimited(context->client->base.dev, \
146 tegra_drm_mapping_get(struct tegra_drm_context *context, u32 id) in tegra_drm_mapping_get() argument
150 xa_lock(&context->mappings); in tegra_drm_mapping_get()
152 mapping = xa_load(&context->mappings, id); in tegra_drm_mapping_get()
156 xa_unlock(&context->mappings); in tegra_drm_mapping_get()
185 struct tegra_drm_context *context, in submit_copy_gather_data() argument
192 SUBMIT_ERR(context, "gather_data_words cannot be zero"); in submit_copy_gather_data()
197 SUBMIT_ERR(context, "gather_data_words is too large"); in submit_copy_gather_data()
203 SUBMIT_ERR(context, "failed to allocate memory for bo info"); in submit_copy_gather_data()
[all …]
/Linux-v6.1/fs/xfs/
xfs_attr_list.c
54 struct xfs_attr_list_context *context) in xfs_attr_shortform_list() argument
56 struct xfs_attrlist_cursor_kern *cursor = &context->cursor; in xfs_attr_shortform_list()
57 struct xfs_inode *dp = context->dp; in xfs_attr_shortform_list()
69 trace_xfs_attr_list_sf(context); in xfs_attr_shortform_list()
80 if (context->bufsize == 0 || in xfs_attr_shortform_list()
82 (dp->i_af.if_bytes + sf->hdr.count * 16) < context->bufsize)) { in xfs_attr_shortform_list()
84 if (XFS_IS_CORRUPT(context->dp->i_mount, in xfs_attr_shortform_list()
88 context->put_listent(context, in xfs_attr_shortform_list()
97 if (context->seen_enough) in xfs_attr_shortform_list()
101 trace_xfs_attr_list_sf_all(context); in xfs_attr_shortform_list()
[all …]
xfs_xattr.c
191 struct xfs_attr_list_context *context, in __xfs_xattr_put_listent() argument
200 if (context->count < 0 || context->seen_enough) in __xfs_xattr_put_listent()
203 if (!context->buffer) in __xfs_xattr_put_listent()
206 arraytop = context->count + prefix_len + namelen + 1; in __xfs_xattr_put_listent()
207 if (arraytop > context->firstu) { in __xfs_xattr_put_listent()
208 context->count = -1; /* insufficient space */ in __xfs_xattr_put_listent()
209 context->seen_enough = 1; in __xfs_xattr_put_listent()
212 offset = context->buffer + context->count; in __xfs_xattr_put_listent()
220 context->count += prefix_len + namelen + 1; in __xfs_xattr_put_listent()
226 struct xfs_attr_list_context *context, in xfs_xattr_put_listent() argument
[all …]
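
__xfs_xattr_put_listent() above shows the listxattr buffer protocol: each attribute name is appended to a flat buffer of NUL-terminated strings, a NULL buffer means a size-only query, and when the next name would overflow the buffer the callback sets count to -1 and seen_enough to 1 so the walk stops. A simplified, self-contained sketch of that protocol follows; struct list_ctx and put_listent() are illustrative stand-ins, not the XFS types.

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct xfs_attr_list_context. */
struct list_ctx {
	char *buffer;      /* user buffer, or NULL for a size-only query */
	int   bufsize;     /* total buffer size (plays the role of firstu) */
	int   count;       /* bytes used so far; -1 signals insufficient space */
	int   seen_enough; /* tells the walker to stop */
};

static void put_listent(struct list_ctx *ctx, const char *prefix, const char *name)
{
	int prefix_len = strlen(prefix);
	int namelen = strlen(name);
	int arraytop = ctx->count + prefix_len + namelen + 1;

	if (ctx->count < 0 || ctx->seen_enough)
		return;

	if (!ctx->buffer) {              /* size-only pass: just accumulate */
		ctx->count = arraytop;
		return;
	}

	if (arraytop > ctx->bufsize) {   /* would overflow: flag and stop */
		ctx->count = -1;
		ctx->seen_enough = 1;
		return;
	}

	memcpy(ctx->buffer + ctx->count, prefix, prefix_len);
	memcpy(ctx->buffer + ctx->count + prefix_len, name, namelen);
	ctx->buffer[ctx->count + prefix_len + namelen] = '\0';
	ctx->count += prefix_len + namelen + 1;
}

int main(void)
{
	char buf[64];
	struct list_ctx ctx = { .buffer = buf, .bufsize = sizeof(buf) };

	put_listent(&ctx, "user.", "comment");
	put_listent(&ctx, "user.", "mime_type");
	printf("used %d bytes\n", ctx.count);
	return 0;
}
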
/Linux-v6.1/drivers/net/ethernet/qlogic/qed/
qed_nvmetcp_fw_funcs.c
129 init_nvmetcp_task_params(struct e5_nvmetcp_task_context *context, in init_nvmetcp_task_params() argument
133 context->ystorm_st_context.state.cccid = task_params->host_cccid; in init_nvmetcp_task_params()
134 SET_FIELD(context->ustorm_st_context.error_flags, USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP, 1); in init_nvmetcp_task_params()
135 context->ustorm_st_context.nvme_tcp_opaque_lo = cpu_to_le32(task_params->opq.lo); in init_nvmetcp_task_params()
136 context->ustorm_st_context.nvme_tcp_opaque_hi = cpu_to_le32(task_params->opq.hi); in init_nvmetcp_task_params()
145 struct e5_nvmetcp_task_context *context = task_params->context; in init_default_nvmetcp_task() local
146 const u8 val_byte = context->mstorm_ag_context.cdu_validation; in init_default_nvmetcp_task()
149 memset(context, 0, sizeof(*context)); in init_default_nvmetcp_task()
150 init_nvmetcp_task_params(context, task_params, in init_default_nvmetcp_task()
159 context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] = in init_default_nvmetcp_task()
[all …]
/Linux-v6.1/drivers/platform/x86/intel/int1092/
intel_sar.c
33 * @context: pointer to driver context structure
36 * context->reg_value will never exceed MAX_REGULATORY
38 static void update_sar_data(struct wwan_sar_context *context) in update_sar_data() argument
41 &context->config_data[context->reg_value]; in update_sar_data()
44 context->sar_data.device_mode < config->total_dev_mode) { in update_sar_data()
48 if (context->sar_data.device_mode == in update_sar_data()
53 context->sar_data.antennatable_index = dev_mode->antennatable_index; in update_sar_data()
54 context->sar_data.bandtable_index = dev_mode->bandtable_index; in update_sar_data()
55 context->sar_data.sartable_index = dev_mode->sartable_index; in update_sar_data()
64 * @context: pointer to driver context structure
[all …]
/Linux-v6.1/drivers/gpu/drm/amd/display/dc/dml/dcn32/
dcn32_fpu.c
253 struct dc_state *context, in dcn32_find_dummy_latency_index_for_fw_based_mclk_switch() argument
259 const struct vba_vars_st *vba = &context->bw_ctx.dml.vba; in dcn32_find_dummy_latency_index_for_fw_based_mclk_switch()
265 context->bw_ctx.dml.soc.dram_clock_change_latency_us = in dcn32_find_dummy_latency_index_for_fw_based_mclk_switch()
267 dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); in dcn32_find_dummy_latency_index_for_fw_based_mclk_switch()
269 if (vlevel < context->bw_ctx.dml.vba.soc.num_states && in dcn32_find_dummy_latency_index_for_fw_based_mclk_switch()
294 * @context: [in] new dc state
298 * This function must be called AFTER the phantom pipes are added to context
303 struct dc_state *context, in dcn32_helper_populate_phantom_dlg_params() argument
312 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; in dcn32_helper_populate_phantom_dlg_params()
319 get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); in dcn32_helper_populate_phantom_dlg_params()
[all …]
/Linux-v6.1/drivers/gpu/drm/amd/display/dc/dml/dcn30/
dcn30_fpu.c
365 void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context) in dcn30_fpu_update_soc_for_wm_a() argument
371 context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries… in dcn30_fpu_update_soc_for_wm_a()
372 context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[W… in dcn30_fpu_update_soc_for_wm_a()
373 context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_in… in dcn30_fpu_update_soc_for_wm_a()
378 struct dc *dc, struct dc_state *context, in dcn30_fpu_calculate_wm_and_dlg() argument
383 int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; in dcn30_fpu_calculate_wm_and_dlg()
385 double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb]; in dcn30_fpu_calculate_wm_and_dlg()
386 …bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clo… in dcn30_fpu_calculate_wm_and_dlg()
390 if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk) in dcn30_fpu_calculate_wm_and_dlg()
391 dcfclk = context->bw_ctx.dml.soc.min_dcfclk; in dcn30_fpu_calculate_wm_and_dlg()
[all …]
/Linux-v6.1/arch/s390/include/asm/
mmu_context.h
24 spin_lock_init(&mm->context.lock); in init_new_context()
25 INIT_LIST_HEAD(&mm->context.pgtable_list); in init_new_context()
26 INIT_LIST_HEAD(&mm->context.gmap_list); in init_new_context()
27 cpumask_clear(&mm->context.cpu_attach_mask); in init_new_context()
28 atomic_set(&mm->context.flush_count, 0); in init_new_context()
29 atomic_set(&mm->context.protected_count, 0); in init_new_context()
30 mm->context.gmap_asce = 0; in init_new_context()
31 mm->context.flush_mm = 0; in init_new_context()
33 mm->context.alloc_pgste = page_table_allocate_pgste || in init_new_context()
35 (current->mm && current->mm->context.alloc_pgste); in init_new_context()
[all …]
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx4/
en_resources.c
42 int user_prio, struct mlx4_qp_context *context) in mlx4_en_fill_qp_context() argument
47 memset(context, 0, sizeof(*context)); in mlx4_en_fill_qp_context()
48 context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET); in mlx4_en_fill_qp_context()
49 context->pd = cpu_to_be32(mdev->priv_pdn); in mlx4_en_fill_qp_context()
50 context->mtu_msgmax = 0xff; in mlx4_en_fill_qp_context()
52 context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); in mlx4_en_fill_qp_context()
54 context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); in mlx4_en_fill_qp_context()
56 context->params2 |= cpu_to_be32(MLX4_QP_BIT_FPP); in mlx4_en_fill_qp_context()
59 context->sq_size_stride = ilog2(TXBB_SIZE) - 4; in mlx4_en_fill_qp_context()
61 context->usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev, in mlx4_en_fill_qp_context()
[all …]
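
The rq_size_stride/sq_size_stride assignments above pack two values into one byte: log2 of the ring size from bit 3 upward and log2(stride) - 4 in the low three bits. A quick check of the arithmetic with illustrative numbers (a 1024-entry ring with a 64-byte stride; ilog2() below is a minimal userspace stand-in for the kernel helper):

#include <stdio.h>

/* Minimal ilog2 for powers of two, standing in for the kernel helper. */
static unsigned int ilog2(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int size = 1024;   /* example ring size in entries */
	unsigned int stride = 64;   /* example WQE stride in bytes  */
	unsigned char rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);

	/* ilog2(1024) = 10 -> 10 << 3 = 0x50; ilog2(64) - 4 = 2; result 0x52 */
	printf("rq_size_stride = 0x%02x\n", rq_size_stride);
	return 0;
}
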
/Linux-v6.1/include/misc/
cxl.h
31 * Context lifetime overview:
33 * An AFU context may be inited and then started and stoppped multiple times
42 * Once released, a context can't be started again.
44 * One context is inited by the cxl driver for every pci_dev. This is to be
45 * used as a default kernel context. cxl_get_context() will get this
46 * context. This context will be released by PCI hot unplug, so doesn't need to
52 * Once a context has been inited, IRQs may be configured. Firstly these IRQs
65 * On pci_enabled_device(), the cxl driver will init a single cxl context for
66 * use by the driver. It doesn't start this context (as that will likely
69 * This gets the default context associated with this pci_dev. This context
[all …]
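
The cxl.h comment above describes the AFU context lifecycle: a context is inited, may be started and stopped multiple times, and once released cannot be started again; cxl_get_context() returns the default per-pci_dev context. The sketch below shows how a kernel caller might drive that sequence for a private context. It is only an outline: the function names beyond cxl_get_context() are taken from include/misc/cxl.h as recalled and their signatures should be checked against the header, and the WED value of 0 is purely illustrative.

#include <linux/err.h>
#include <linux/pci.h>
#include <misc/cxl.h>

/* Sketch: bring up a private AFU context for a pci_dev, then tear it down. */
static int example_use_afu(struct pci_dev *pdev)
{
	struct cxl_context *ctx;
	int rc;

	ctx = cxl_dev_context_init(pdev);      /* init a fresh context */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* Start it; 0 is an illustrative WED, NULL means a kernel context. */
	rc = cxl_start_context(ctx, 0, NULL);
	if (rc)
		goto out_release;

	/* ... issue work to the AFU here ... */

	cxl_stop_context(ctx);                 /* may be started again later */
out_release:
	cxl_release_context(ctx);              /* once released, cannot be restarted */
	return rc;
}
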
/Linux-v6.1/arch/ia64/include/asm/
mmu_context.h
11 * Routines to manage the allocation of task context numbers. Task context
13 * due to context switches. Context numbers are implemented using ia-64
38 unsigned int next; /* next context number to use */
40 unsigned int max_ctx; /* max. context value supported by all CPUs */
53 * When the context counter wraps around all TLBs need to be flushed because
54 * an old context number might have been reused. This is signalled by the
78 nv_mm_context_t context = mm->context; in get_mmu_context() local
80 if (likely(context)) in get_mmu_context()
85 context = mm->context; in get_mmu_context()
86 if (context == 0) { in get_mmu_context()
[all …]
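
The header comment above explains the scheme: each mm is assigned a context number from a global counter, and when the counter wraps, all TLBs must be flushed because an old number may be reused. The following self-contained model illustrates that policy only; it is not the ia64 code (the 8-bit number width is made up, and the locking the real SMP path needs is omitted).

#include <stdio.h>

#define CTX_BITS       8                     /* illustrative width of the hardware context field */
#define CTX_MASK       ((1u << CTX_BITS) - 1)
#define CTX_VERSION(c) ((c) >> CTX_BITS)     /* generation counter */
#define CTX_NUMBER(c)  ((c) & CTX_MASK)      /* value programmed into the MMU */

static unsigned int next_ctx = 1u << CTX_BITS; /* version 1, number 0 */

static void flush_all_tlbs(void)
{
	printf("-- new generation: flush all TLBs, old numbers may be reused --\n");
}

/* Allocate a context; keep mm_ctx if its generation is still current. */
static unsigned int get_mmu_context(unsigned int mm_ctx)
{
	if (CTX_VERSION(mm_ctx) == CTX_VERSION(next_ctx))
		return mm_ctx;                     /* still valid in this generation */

	if (CTX_NUMBER(next_ctx) == 0)             /* number space wrapped into a new generation */
		flush_all_tlbs();

	return next_ctx++;
}

int main(void)
{
	unsigned int ctx = 0;

	for (int i = 0; i < 3; i++) {
		ctx = get_mmu_context(ctx);
		printf("version=%u number=%u\n", CTX_VERSION(ctx), CTX_NUMBER(ctx));
	}
	return 0;
}
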
/Linux-v6.1/drivers/gpu/drm/amd/display/dc/dml/dcn20/
dcn20_fpu.c
920 struct dc_state *context, in dcn20_fpu_set_wb_arb_params() argument
929 …wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) *… in dcn20_fpu_set_wb_arb_params()
930 …wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipe… in dcn20_fpu_set_wb_arb_params()
932 …wb_arb_params->time_per_pixel = 16.0 * 1000 / (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk / … in dcn20_fpu_set_wb_arb_params()
935 static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) in is_dtbclk_required() argument
939 if (!context->res_ctx.pipe_ctx[i].stream) in is_dtbclk_required()
941 if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) in is_dtbclk_required()
947 static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struct dc_state *context) in decide_zstate_support() argument
956 if (context->res_ctx.pipe_ctx[i].plane_state) in decide_zstate_support()
971 else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) { in decide_zstate_support()
[all …]
/Linux-v6.1/arch/nios2/mm/
mmu_context.c
2 * MMU context handling.
19 /* The pids position and mask in context */
24 /* The versions position and mask in context */
29 /* Return the version part of a context */
32 /* Return the pid part of a context */
35 /* Value of the first context (version 1, pid 0) */
41 * Initialize MMU context management stuff.
51 * Set new context (pid), keep way
53 static void set_context(mm_context_t context) in set_context() argument
55 set_mmu_pid(CTX_PID(context)); in set_context()
[all …]
/Linux-v6.1/tools/perf/scripts/perl/Perf-Trace-Util/
Context.c
4 * contents of Context.xs. Do not edit this file, edit Context.xs instead.
12 #line 1 "Context.xs"
14 * Context.xs. XS interfaces for perf script.
28 #line 42 "Context.c"
39 Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_pc", "context"); in XS()
42 struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); in XS() local
46 RETVAL = common_pc(context); in XS()
62 Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_flags", "context"); in XS()
65 struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); in XS() local
69 RETVAL = common_flags(context); in XS()
[all …]
