
Searched for refs:context (results 1 – 25 of 2089), sorted by relevance.

/Linux-v5.15/drivers/misc/vmw_vmci/
vmci_context.c
44 static void ctx_signal_notify(struct vmci_ctx *context) in ctx_signal_notify() argument
46 *context->notify = true; in ctx_signal_notify()
49 static void ctx_clear_notify(struct vmci_ctx *context) in ctx_clear_notify() argument
51 *context->notify = false; in ctx_clear_notify()
58 static void ctx_clear_notify_call(struct vmci_ctx *context) in ctx_clear_notify_call() argument
60 if (context->pending_datagrams == 0 && in ctx_clear_notify_call()
61 vmci_handle_arr_get_size(context->pending_doorbell_array) == 0) in ctx_clear_notify_call()
62 ctx_clear_notify(context); in ctx_clear_notify_call()
69 void vmci_ctx_check_signal_notify(struct vmci_ctx *context) in vmci_ctx_check_signal_notify() argument
71 spin_lock(&context->lock); in vmci_ctx_check_signal_notify()
[all …]
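
The vmci_context.c hits above show a recurring pattern: a shared notify flag is raised whenever work is queued for a context and lowered only when no datagrams or doorbells remain pending, with both checks done under the context lock. The standalone C sketch below illustrates that pattern under hypothetical names (demo_ctx, demo_signal_notify, and so on); it is not the VMCI code itself.

```c
/* Illustrative sketch only -- hypothetical names, not the VMCI API. */
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

struct demo_ctx {
	pthread_mutex_t lock;
	bool *notify;               /* flag shared with the "guest" side */
	unsigned pending_datagrams;
	unsigned pending_doorbells;
};

/* Raise the shared flag: some work is queued for the context. */
static void demo_signal_notify(struct demo_ctx *ctx)
{
	*ctx->notify = true;
}

/* Lower the flag only when nothing at all is pending, mirroring
 * ctx_clear_notify_call() in the snippet above. */
static void demo_clear_notify_if_idle(struct demo_ctx *ctx)
{
	if (ctx->pending_datagrams == 0 && ctx->pending_doorbells == 0)
		*ctx->notify = false;
}

int main(void)
{
	bool flag = false;
	struct demo_ctx ctx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.notify = &flag,
		.pending_datagrams = 1,
	};

	pthread_mutex_lock(&ctx.lock);
	demo_signal_notify(&ctx);
	demo_clear_notify_if_idle(&ctx);   /* stays set: one datagram pending */
	ctx.pending_datagrams = 0;
	demo_clear_notify_if_idle(&ctx);   /* now cleared */
	pthread_mutex_unlock(&ctx.lock);

	printf("notify = %d\n", flag);
	return 0;
}
```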
vmci_route.c
42 if (VMCI_INVALID_ID == dst->context) in vmci_route()
46 if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) { in vmci_route()
64 if (VMCI_HOST_CONTEXT_ID == src->context) in vmci_route()
75 if (VMCI_INVALID_ID == src->context && in vmci_route()
77 src->context = vmci_get_context_id(); in vmci_route()
85 if (VMCI_HOST_CONTEXT_ID == dst->context) { in vmci_route()
94 if (src->context == VMCI_HYPERVISOR_CONTEXT_ID) { in vmci_route()
113 if (VMCI_INVALID_ID == src->context) in vmci_route()
114 src->context = vmci_get_context_id(); in vmci_route()
130 if (VMCI_INVALID_ID == src->context) { in vmci_route()
[all …]
/Linux-v5.15/security/selinux/ss/
mls.h
31 int mls_compute_context_len(struct policydb *p, struct context *context);
32 void mls_sid_to_context(struct policydb *p, struct context *context,
34 int mls_context_isvalid(struct policydb *p, struct context *c);
41 struct context *context,
45 int mls_from_string(struct policydb *p, char *str, struct context *context,
48 int mls_range_set(struct context *context, struct mls_range *range);
52 struct context *oldc,
53 struct context *newc);
56 struct context *scontext,
57 struct context *tcontext,
[all …]
mls.c
36 int mls_compute_context_len(struct policydb *p, struct context *context) in mls_compute_context_len() argument
48 int index_sens = context->range.level[l].sens; in mls_compute_context_len()
54 e = &context->range.level[l].cat; in mls_compute_context_len()
73 if (mls_level_eq(&context->range.level[0], in mls_compute_context_len()
74 &context->range.level[1])) in mls_compute_context_len()
90 struct context *context, in mls_sid_to_context() argument
108 context->range.level[l].sens - 1)); in mls_sid_to_context()
114 e = &context->range.level[l].cat; in mls_sid_to_context()
150 if (mls_level_eq(&context->range.level[0], in mls_sid_to_context()
151 &context->range.level[1])) in mls_sid_to_context()
[all …]
/Linux-v5.15/kernel/
auditsc.c
879 static inline void audit_proctitle_free(struct audit_context *context) in audit_proctitle_free() argument
881 kfree(context->proctitle.value); in audit_proctitle_free()
882 context->proctitle.value = NULL; in audit_proctitle_free()
883 context->proctitle.len = 0; in audit_proctitle_free()
886 static inline void audit_free_module(struct audit_context *context) in audit_free_module() argument
888 if (context->type == AUDIT_KERN_MODULE) { in audit_free_module()
889 kfree(context->module.name); in audit_free_module()
890 context->module.name = NULL; in audit_free_module()
893 static inline void audit_free_names(struct audit_context *context) in audit_free_names() argument
897 list_for_each_entry_safe(n, next, &context->names_list, list) { in audit_free_names()
[all …]
/Linux-v5.15/drivers/gpu/drm/etnaviv/
etnaviv_mmu.c
16 static void etnaviv_context_unmap(struct etnaviv_iommu_context *context, in etnaviv_context_unmap() argument
29 unmapped_page = context->global->ops->unmap(context, iova, in etnaviv_context_unmap()
39 static int etnaviv_context_map(struct etnaviv_iommu_context *context, in etnaviv_context_map() argument
55 ret = context->global->ops->map(context, iova, paddr, pgsize, in etnaviv_context_map()
67 etnaviv_context_unmap(context, orig_iova, orig_size - size); in etnaviv_context_map()
72 static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, in etnaviv_iommu_map() argument
79 if (!context || !sgt) in etnaviv_iommu_map()
88 ret = etnaviv_context_map(context, da, pa, bytes, prot); in etnaviv_iommu_map()
98 etnaviv_context_unmap(context, iova, da - iova); in etnaviv_iommu_map()
102 static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova, in etnaviv_iommu_unmap() argument
[all …]
etnaviv_iommu.c
28 to_v1_context(struct etnaviv_iommu_context *context) in to_v1_context() argument
30 return container_of(context, struct etnaviv_iommuv1_context, base); in to_v1_context()
33 static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context) in etnaviv_iommuv1_free() argument
35 struct etnaviv_iommuv1_context *v1_context = to_v1_context(context); in etnaviv_iommuv1_free()
37 drm_mm_takedown(&context->mm); in etnaviv_iommuv1_free()
39 dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu, in etnaviv_iommuv1_free()
42 context->global->v1.shared_context = NULL; in etnaviv_iommuv1_free()
47 static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context, in etnaviv_iommuv1_map() argument
51 struct etnaviv_iommuv1_context *v1_context = to_v1_context(context); in etnaviv_iommuv1_map()
62 static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context, in etnaviv_iommuv1_unmap() argument
[all …]
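
to_v1_context() in the snippet above recovers the version-specific context from a pointer to the embedded base structure via container_of(). Below is a minimal userspace sketch of that downcast built on offsetof(); every type and function name here (base_ctx, v1_ctx, to_v1) is hypothetical.

```c
/* Illustrative sketch of the container_of() downcast -- hypothetical types. */
#include <stddef.h>
#include <stdio.h>

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_ctx { int id; };

struct v1_ctx {
	int pgtable;
	struct base_ctx base;   /* embedded base, like etnaviv_iommuv1_context */
};

static struct v1_ctx *to_v1(struct base_ctx *ctx)
{
	return demo_container_of(ctx, struct v1_ctx, base);
}

int main(void)
{
	struct v1_ctx v1 = { .pgtable = 42, .base = { .id = 7 } };
	struct base_ctx *generic = &v1.base;   /* only the base pointer is passed around */

	printf("pgtable = %d\n", to_v1(generic)->pgtable);   /* prints 42 */
	return 0;
}
```

Embedding the base struct and recovering the wrapper with container_of() avoids carrying a separate back-pointer field, which is the usual reason drivers structure their per-version contexts this way.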
etnaviv_iommu_v2.c
42 to_v2_context(struct etnaviv_iommu_context *context) in to_v2_context() argument
44 return container_of(context, struct etnaviv_iommuv2_context, base); in to_v2_context()
47 static void etnaviv_iommuv2_free(struct etnaviv_iommu_context *context) in etnaviv_iommuv2_free() argument
49 struct etnaviv_iommuv2_context *v2_context = to_v2_context(context); in etnaviv_iommuv2_free()
52 drm_mm_takedown(&context->mm); in etnaviv_iommuv2_free()
56 dma_free_wc(context->global->dev, SZ_4K, in etnaviv_iommuv2_free()
61 dma_free_wc(context->global->dev, SZ_4K, v2_context->mtlb_cpu, in etnaviv_iommuv2_free()
64 clear_bit(v2_context->id, context->global->v2.pta_alloc); in etnaviv_iommuv2_free()
92 static int etnaviv_iommuv2_map(struct etnaviv_iommu_context *context, in etnaviv_iommuv2_map() argument
96 struct etnaviv_iommuv2_context *v2_context = to_v2_context(context); in etnaviv_iommuv2_map()
[all …]
/Linux-v5.15/fs/xfs/
xfs_attr_list.c
53 struct xfs_attr_list_context *context) in xfs_attr_shortform_list() argument
55 struct xfs_attrlist_cursor_kern *cursor = &context->cursor; in xfs_attr_shortform_list()
56 struct xfs_inode *dp = context->dp; in xfs_attr_shortform_list()
69 trace_xfs_attr_list_sf(context); in xfs_attr_shortform_list()
80 if (context->bufsize == 0 || in xfs_attr_shortform_list()
82 (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) { in xfs_attr_shortform_list()
84 if (XFS_IS_CORRUPT(context->dp->i_mount, in xfs_attr_shortform_list()
88 context->put_listent(context, in xfs_attr_shortform_list()
97 if (context->seen_enough) in xfs_attr_shortform_list()
101 trace_xfs_attr_list_sf_all(context); in xfs_attr_shortform_list()
[all …]
xfs_xattr.c
99 struct xfs_attr_list_context *context, in __xfs_xattr_put_listent() argument
108 if (context->count < 0 || context->seen_enough) in __xfs_xattr_put_listent()
111 if (!context->buffer) in __xfs_xattr_put_listent()
114 arraytop = context->count + prefix_len + namelen + 1; in __xfs_xattr_put_listent()
115 if (arraytop > context->firstu) { in __xfs_xattr_put_listent()
116 context->count = -1; /* insufficient space */ in __xfs_xattr_put_listent()
117 context->seen_enough = 1; in __xfs_xattr_put_listent()
120 offset = context->buffer + context->count; in __xfs_xattr_put_listent()
128 context->count += prefix_len + namelen + 1; in __xfs_xattr_put_listent()
134 struct xfs_attr_list_context *context, in xfs_xattr_put_listent() argument
[all …]
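
__xfs_xattr_put_listent() in the snippet above appends prefix + name + NUL to the caller's buffer, supports a size-only pass when no buffer is supplied, and flags overflow by setting count to -1 and seen_enough. The standalone C sketch below mirrors that bookkeeping under assumed, hypothetical names (list_ctx, demo_put_listent); bufsize stands in for the kernel's firstu cursor.

```c
/* Illustrative sketch of a put_listent-style callback -- hypothetical names. */
#include <stdio.h>
#include <string.h>

struct list_ctx {
	char *buffer;      /* destination; NULL means "just measure"     */
	int   bufsize;     /* total capacity (stands in for firstu here) */
	int   count;       /* bytes used so far; -1 flags "out of space" */
	int   seen_enough; /* stop iterating once set                    */
};

static void demo_put_listent(struct list_ctx *ctx,
			     const char *prefix, const char *name)
{
	int prefix_len, name_len, needed_top;

	if (ctx->count < 0 || ctx->seen_enough)
		return;                       /* a previous entry already overflowed */

	prefix_len = (int)strlen(prefix);
	name_len   = (int)strlen(name);
	needed_top = ctx->count + prefix_len + name_len + 1;

	if (!ctx->buffer) {                   /* size-only pass: just accumulate */
		ctx->count = needed_top;
		return;
	}

	if (needed_top > ctx->bufsize) {      /* insufficient space */
		ctx->count = -1;
		ctx->seen_enough = 1;
		return;
	}

	memcpy(ctx->buffer + ctx->count, prefix, prefix_len);
	memcpy(ctx->buffer + ctx->count + prefix_len, name, name_len);
	ctx->buffer[ctx->count + prefix_len + name_len] = '\0';
	ctx->count += prefix_len + name_len + 1;
}

int main(void)
{
	char buf[64];
	struct list_ctx ctx = { .buffer = buf, .bufsize = sizeof(buf) };

	demo_put_listent(&ctx, "user.", "comment");
	demo_put_listent(&ctx, "user.", "mime_type");
	printf("used %d bytes, first entry: %s\n", ctx.count, buf);
	return 0;
}
```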
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx4/
en_resources.c
42 int user_prio, struct mlx4_qp_context *context) in mlx4_en_fill_qp_context() argument
47 memset(context, 0, sizeof(*context)); in mlx4_en_fill_qp_context()
48 context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET); in mlx4_en_fill_qp_context()
49 context->pd = cpu_to_be32(mdev->priv_pdn); in mlx4_en_fill_qp_context()
50 context->mtu_msgmax = 0xff; in mlx4_en_fill_qp_context()
52 context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); in mlx4_en_fill_qp_context()
54 context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); in mlx4_en_fill_qp_context()
56 context->params2 |= cpu_to_be32(MLX4_QP_BIT_FPP); in mlx4_en_fill_qp_context()
59 context->sq_size_stride = ilog2(TXBB_SIZE) - 4; in mlx4_en_fill_qp_context()
61 context->usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev, in mlx4_en_fill_qp_context()
[all …]
/Linux-v5.15/drivers/platform/x86/intel/int1092/
intel_sar.c
38 static void update_sar_data(struct wwan_sar_context *context) in update_sar_data() argument
41 &context->config_data[context->reg_value]; in update_sar_data()
44 context->sar_data.device_mode < config->total_dev_mode) { in update_sar_data()
48 if (context->sar_data.device_mode == in update_sar_data()
53 context->sar_data.antennatable_index = dev_mode->antennatable_index; in update_sar_data()
54 context->sar_data.bandtable_index = dev_mode->bandtable_index; in update_sar_data()
55 context->sar_data.sartable_index = dev_mode->sartable_index; in update_sar_data()
77 static acpi_status parse_package(struct wwan_sar_context *context, union acpi_object *item) in parse_package() argument
89 data = &context->config_data[reg]; in parse_package()
130 struct wwan_sar_context *context = dev_get_drvdata(&device->dev); in sar_get_device_mode() local
[all …]
/Linux-v5.15/drivers/gpu/drm/tegra/
submit.c
26 #define SUBMIT_ERR(context, fmt, ...) \ argument
27 dev_err_ratelimited(context->client->base.dev, \
117 tegra_drm_mapping_get(struct tegra_drm_context *context, u32 id) in tegra_drm_mapping_get() argument
121 xa_lock(&context->mappings); in tegra_drm_mapping_get()
123 mapping = xa_load(&context->mappings, id); in tegra_drm_mapping_get()
127 xa_unlock(&context->mappings); in tegra_drm_mapping_get()
156 struct tegra_drm_context *context, in submit_copy_gather_data() argument
163 SUBMIT_ERR(context, "gather_data_words cannot be zero"); in submit_copy_gather_data()
168 SUBMIT_ERR(context, "gather_data_words is too large"); in submit_copy_gather_data()
174 SUBMIT_ERR(context, "failed to allocate memory for bo info"); in submit_copy_gather_data()
[all …]
uapi.c
35 static void tegra_drm_channel_context_close(struct tegra_drm_context *context) in tegra_drm_channel_context_close() argument
40 xa_for_each(&context->mappings, id, mapping) in tegra_drm_channel_context_close()
43 xa_destroy(&context->mappings); in tegra_drm_channel_context_close()
45 host1x_channel_put(context->channel); in tegra_drm_channel_context_close()
47 kfree(context); in tegra_drm_channel_context_close()
52 struct tegra_drm_context *context; in tegra_drm_uapi_close_file() local
56 xa_for_each(&file->contexts, id, context) in tegra_drm_uapi_close_file()
57 tegra_drm_channel_context_close(context); in tegra_drm_uapi_close_file()
83 struct tegra_drm_context *context; in tegra_drm_ioctl_channel_open() local
89 context = kzalloc(sizeof(*context), GFP_KERNEL); in tegra_drm_ioctl_channel_open()
[all …]
/Linux-v5.15/drivers/net/ethernet/qlogic/qed/
qed_nvmetcp_fw_funcs.c
129 init_nvmetcp_task_params(struct e5_nvmetcp_task_context *context, in init_nvmetcp_task_params() argument
133 context->ystorm_st_context.state.cccid = task_params->host_cccid; in init_nvmetcp_task_params()
134 SET_FIELD(context->ustorm_st_context.error_flags, USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP, 1); in init_nvmetcp_task_params()
135 context->ustorm_st_context.nvme_tcp_opaque_lo = cpu_to_le32(task_params->opq.lo); in init_nvmetcp_task_params()
136 context->ustorm_st_context.nvme_tcp_opaque_hi = cpu_to_le32(task_params->opq.hi); in init_nvmetcp_task_params()
145 struct e5_nvmetcp_task_context *context = task_params->context; in init_default_nvmetcp_task() local
146 const u8 val_byte = context->mstorm_ag_context.cdu_validation; in init_default_nvmetcp_task()
149 memset(context, 0, sizeof(*context)); in init_default_nvmetcp_task()
150 init_nvmetcp_task_params(context, task_params, in init_default_nvmetcp_task()
159 context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] = in init_default_nvmetcp_task()
[all …]
/Linux-v5.15/arch/s390/include/asm/
mmu_context.h
24 spin_lock_init(&mm->context.lock); in init_new_context()
25 INIT_LIST_HEAD(&mm->context.pgtable_list); in init_new_context()
26 INIT_LIST_HEAD(&mm->context.gmap_list); in init_new_context()
27 cpumask_clear(&mm->context.cpu_attach_mask); in init_new_context()
28 atomic_set(&mm->context.flush_count, 0); in init_new_context()
29 atomic_set(&mm->context.is_protected, 0); in init_new_context()
30 mm->context.gmap_asce = 0; in init_new_context()
31 mm->context.flush_mm = 0; in init_new_context()
33 mm->context.alloc_pgste = page_table_allocate_pgste || in init_new_context()
35 (current->mm && current->mm->context.alloc_pgste); in init_new_context()
[all …]
/Linux-v5.15/arch/powerpc/mm/book3s64/
mmu_context.c
96 mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context), in hash__init_new_context()
98 if (!mm->context.hash_context) in hash__init_new_context()
115 if (mm->context.id == 0) { in hash__init_new_context()
116 memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context)); in hash__init_new_context()
120 …memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context)… in hash__init_new_context()
123 if (current->mm->context.hash_context->spt) { in hash__init_new_context()
124 mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table), in hash__init_new_context()
126 if (!mm->context.hash_context->spt) { in hash__init_new_context()
127 kfree(mm->context.hash_context); in hash__init_new_context()
134 index = realloc_context_ids(&mm->context); in hash__init_new_context()
[all …]
/Linux-v5.15/arch/sparc/mm/
tsb.c
124 spin_lock_irqsave(&mm->context.lock, flags); in flush_tsb_user()
127 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; in flush_tsb_user()
128 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; in flush_tsb_user()
140 else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { in flush_tsb_user()
141 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; in flush_tsb_user()
142 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; in flush_tsb_user()
149 spin_unlock_irqrestore(&mm->context.lock, flags); in flush_tsb_user()
157 spin_lock_irqsave(&mm->context.lock, flags); in flush_tsb_user_page()
160 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; in flush_tsb_user_page()
161 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; in flush_tsb_user_page()
[all …]
/Linux-v5.15/drivers/usb/image/
microtek.c
187 MTS_DEBUG("transfer = 0x%x context = 0x%x\n",(int)transfer,(int)context ); \
188 …MTS_DEBUG("status = 0x%x data-length = 0x%x sent = 0x%x\n",transfer->status,(int)context->data_len…
189 mts_debug_dump(context->instance);\
204 struct mts_transfer_context* context = (struct mts_transfer_context*)transfer->context; \
381 context->instance->usb_dev, in mts_int_submit_urb()
386 context in mts_int_submit_urb()
392 set_host_byte(context->srb, DID_ERROR); in mts_int_submit_urb()
403 if ( likely(context->final_callback != NULL) ) in mts_transfer_cleanup()
404 context->final_callback(context->srb); in mts_transfer_cleanup()
411 context->srb->result &= MTS_SCSI_ERR_MASK; in mts_transfer_done()
[all …]
/Linux-v5.15/drivers/infiniband/hw/hns/
hns_roce_restrack.c
13 struct hns_roce_v2_cq_context *context) in hns_roce_fill_cq() argument
16 roce_get_field(context->byte_4_pg_ceqn, in hns_roce_fill_cq()
22 roce_get_field(context->byte_4_pg_ceqn, in hns_roce_fill_cq()
28 roce_get_field(context->byte_8_cqn, in hns_roce_fill_cq()
34 roce_get_field(context->byte_16_hop_addr, in hns_roce_fill_cq()
41 roce_get_field(context->byte_28_cq_pi, in hns_roce_fill_cq()
48 roce_get_field(context->byte_32_cq_ci, in hns_roce_fill_cq()
55 roce_get_field(context->byte_56_cqe_period_maxcnt, in hns_roce_fill_cq()
62 roce_get_field(context->byte_56_cqe_period_maxcnt, in hns_roce_fill_cq()
68 roce_get_field(context->byte_52_cqe_cnt, in hns_roce_fill_cq()
[all …]
hns_roce_cmd.c
89 struct hns_roce_cmd_context *context = in hns_roce_cmd_event() local
90 &hr_dev->cmd.context[token % hr_dev->cmd.max_cmds]; in hns_roce_cmd_event()
92 if (unlikely(token != context->token)) { in hns_roce_cmd_event()
95 token, context->token); in hns_roce_cmd_event()
99 context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : (-EIO); in hns_roce_cmd_event()
100 context->out_param = out_param; in hns_roce_cmd_event()
101 complete(&context->done); in hns_roce_cmd_event()
110 struct hns_roce_cmd_context *context; in __hns_roce_cmd_mbox_wait() local
117 context = &cmd->context[cmd->free_head]; in __hns_roce_cmd_mbox_wait()
118 cmd->free_head = context->next; in __hns_roce_cmd_mbox_wait()
[all …]
/Linux-v5.15/fs/ocfs2/
move_extents.c
50 struct ocfs2_move_extents_context *context, in __ocfs2_move_extent() argument
55 struct inode *inode = context->inode; in __ocfs2_move_extent()
60 u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci); in __ocfs2_move_extent()
76 path = ocfs2_new_path_from_et(&context->et); in __ocfs2_move_extent()
109 context->et.et_root_bh, in __ocfs2_move_extent()
116 ret = ocfs2_split_extent(handle, &context->et, path, index, in __ocfs2_move_extent()
117 &replace_rec, context->meta_ac, in __ocfs2_move_extent()
118 &context->dealloc); in __ocfs2_move_extent()
124 ocfs2_journal_dirty(handle, context->et.et_root_bh); in __ocfs2_move_extent()
126 context->new_phys_cpos = new_p_cpos; in __ocfs2_move_extent()
[all …]
/Linux-v5.15/arch/ia64/include/asm/
mmu_context.h
78 nv_mm_context_t context = mm->context; in get_mmu_context() local
80 if (likely(context)) in get_mmu_context()
85 context = mm->context; in get_mmu_context()
86 if (context == 0) { in get_mmu_context()
96 mm->context = context = ia64_ctx.next++; in get_mmu_context()
97 __set_bit(context, ia64_ctx.bitmap); in get_mmu_context()
107 return context; in get_mmu_context()
118 mm->context = 0; in init_new_context()
123 reload_context (nv_mm_context_t context) in reload_context() argument
130 rid = context << 3; /* make space for encoding the region number */ in reload_context()
[all …]
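
get_mmu_context() above follows a lazy-allocation pattern: check mm->context without the lock, then re-check under the lock and assign a fresh ID from a bitmap only if it is still zero. The sketch below reproduces that double-checked allocation in plain C with hypothetical names (demo_mm, get_demo_context); it deliberately ignores ID exhaustion and the flush path.

```c
/* Illustrative sketch of lazy context-ID allocation -- hypothetical names. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long   ctx_bitmap;      /* bit i set => ID i is in use    */
static unsigned        ctx_next = 1;    /* 0 is reserved for "no context" */

struct demo_mm { unsigned context; };

static unsigned get_demo_context(struct demo_mm *mm)
{
	unsigned ctx = mm->context;

	if (ctx)                        /* fast path: already assigned */
		return ctx;

	pthread_mutex_lock(&ctx_lock);
	ctx = mm->context;              /* re-check: someone may have raced us */
	if (ctx == 0) {
		ctx = ctx_next++;       /* sketch ignores wraparound/flush */
		ctx_bitmap |= 1UL << ctx;
		mm->context = ctx;
	}
	pthread_mutex_unlock(&ctx_lock);
	return ctx;
}

int main(void)
{
	struct demo_mm mm = { 0 };

	printf("first call:  %u\n", get_demo_context(&mm));  /* allocates */
	printf("second call: %u\n", get_demo_context(&mm));  /* reuses    */
	return 0;
}
```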
/Linux-v5.15/drivers/gpu/drm/i915/selftests/
i915_syncmap.c
163 static int check_one(struct i915_syncmap **sync, u64 context, u32 seqno) in check_one() argument
167 err = i915_syncmap_set(sync, context, seqno); in check_one()
173 context, (*sync)->height, (*sync)->prefix); in check_one()
179 context); in check_one()
193 if (!i915_syncmap_is_later(sync, context, seqno)) { in check_one()
195 context, seqno); in check_one()
217 u64 context = i915_prandom_u64_state(&prng); in igt_syncmap_one() local
225 err = check_one(&sync, context, in igt_syncmap_one()
238 static int check_leaf(struct i915_syncmap **sync, u64 context, u32 seqno) in check_leaf() argument
242 err = i915_syncmap_set(sync, context, seqno); in check_leaf()
[all …]
/Linux-v5.15/drivers/pci/hotplug/
acpiphp_glue.c
50 static void hotplug_event(u32 type, struct acpiphp_context *context);
61 struct acpiphp_context *context; in acpiphp_init_context() local
63 context = kzalloc(sizeof(*context), GFP_KERNEL); in acpiphp_init_context()
64 if (!context) in acpiphp_init_context()
67 context->refcount = 1; in acpiphp_init_context()
68 context->hp.notify = acpiphp_hotplug_notify; in acpiphp_init_context()
69 context->hp.fixup = acpiphp_post_dock_fixup; in acpiphp_init_context()
70 acpi_set_hp_context(adev, &context->hp); in acpiphp_init_context()
71 return context; in acpiphp_init_context()
82 struct acpiphp_context *context; in acpiphp_get_context() local
[all …]
