Lines Matching +full:bypass +full:- +full:slot +full:- +full:no
At the top of the file:

// SPDX-License-Identifier: GPL-2.0
...
 * per-cpu buffer of outstanding multicalls. When you want to queue a
 * multicall for issuing, you can allocate a multicall slot for the
 * ...
 * when explicitly requested. There's no way to get per-multicall ...
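These hits all come from the kernel's Xen hypercall-batching code (the functions below appear to be from arch/x86/xen/multicalls.c). Every fragment indexes a per-CPU buffer through a pointer b; the sketch below paraphrases that buffer so the b->mcidx / b->argidx / b->cbidx bookkeeping is easier to follow. Field order, the MC_BATCH/MC_ARGS values and the debug-only members are assumptions and may differ between kernel versions.

/* Sketch of the per-cpu multicall buffer (paraphrased, not verbatim). */
#include <linux/percpu.h>
#include <xen/interface/xen.h>                  /* struct multicall_entry */

#define MC_BATCH        32                      /* assumed number of entry slots */
#define MC_ARGS         (MC_BATCH * 16)         /* assumed bytes of argument scratch space */

struct mc_buffer {
        unsigned mcidx, argidx, cbidx;          /* fill levels of the three arrays */
        struct multicall_entry entries[MC_BATCH];       /* queued hypercalls */
        unsigned char args[MC_ARGS];            /* scratch space the entries point into */
        struct callback {
                void (*fn)(void *);
                void *data;
        } callbacks[MC_BATCH];                  /* run once the batch has been issued */
        struct multicall_entry debug[MC_BATCH]; /* MC_DEBUG builds: copy kept for failure reports */
        void *caller[MC_BATCH];                 /* MC_DEBUG builds: who queued each entry */
};

static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);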
In xen_mc_flush():

        trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
        ...
        /* keep a copy of the entries for the failure report (MC_DEBUG) */
        memcpy(b->debug, b->entries,
               b->mcidx * sizeof(struct multicall_entry));
        ...
        switch (b->mcidx) {
        case 0:
                /* no-op */
                BUG_ON(b->argidx != 0);
                break;

        case 1:
                /* Singleton multicall - bypass multicall machinery
                   ... */
                mc = &b->entries[0];

                mc->result = xen_single_call(mc->op, mc->args[0], mc->args[1],
                                             mc->args[2], mc->args[3],
                                             mc->args[4]);
                ret = mc->result < 0;
                break;

        default:
                if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
                        BUG();
                for (i = 0; i < b->mcidx; i++)
                        if (b->entries[i].result < 0)
                                ret++;
        }

        /* on failure (ret != 0), print a per-cpu summary ... */
        ...
               ret, b->mcidx, smp_processor_id());
        for (i = 0; i < b->mcidx; i++) {
                if (b->entries[i].result < 0) {
                        /* ... and each failed call (MC_DEBUG builds also
                           show the saved copy and the caller) */
                        ...
                               b->debug[i].op,
                               b->debug[i].args[0],
                               b->entries[i].result,
                               b->caller[i]);
                        ...
                               b->entries[i].op,
                               b->entries[i].args[0],
                               b->entries[i].result);
                        ...
                }
        }

        b->mcidx = 0;
        b->argidx = 0;

        for (i = 0; i < b->cbidx; i++) {
                struct callback *cb = &b->callbacks[i];

                (*cb->fn)(cb->data);
        }
        b->cbidx = 0;
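xen_mc_flush() runs either because one of the buffers filled up or because a caller explicitly issued the batch. For context, here is a minimal caller-side sketch of the queue-then-issue pattern, loosely modelled on the local TLB-flush path in the Xen MMU code. xen_mc_entry() and xen_mc_issue() are assumed to come from the companion multicalls.h header and MULTI_mmuext_op() from the hypercall headers; the lazy-mode constant has been renamed across kernel versions (PARAVIRT_LAZY_MMU in older trees, XEN_LAZY_MMU in newer ones), so treat these names as assumptions rather than a verbatim quote.

/* Caller-side sketch (not verbatim kernel code): reserve an entry plus
 * scratch space, fill in the hypercall, then issue the batch. */
#include <linux/preempt.h>
#include <xen/interface/xen.h>          /* struct mmuext_op, MMUEXT_TLB_FLUSH_LOCAL, DOMID_SELF */
#include <asm/xen/hypercall.h>          /* MULTI_mmuext_op() */
#include "multicalls.h"                 /* xen_mc_entry(), xen_mc_issue() */

static void flush_local_tlb_batched(void)       /* illustrative name */
{
        struct multicall_space mcs;
        struct mmuext_op *op;

        preempt_disable();

        mcs = xen_mc_entry(sizeof(*op));        /* may flush first if a buffer is full */
        op = mcs.args;                          /* scratch space inside b->args[] */
        op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        /* Flushes immediately unless this CPU is in a lazy-MMU section,
         * in which case the entry stays queued for a later flush. */
        xen_mc_issue(PARAVIRT_LAZY_MMU);        /* XEN_LAZY_MMU on newer kernels */

        preempt_enable();
}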
In __xen_mc_entry():

        unsigned argidx = roundup(b->argidx, sizeof(u64));
        ...
        BUG_ON(b->argidx >= MC_ARGS);

        /* out of entry slots or argument space - flush and start over */
        if (unlikely(b->mcidx == MC_BATCH ||
                     (argidx + args) >= MC_ARGS)) {
                trace_xen_mc_flush_reason((b->mcidx == MC_BATCH) ?
                                          XEN_MC_FL_BATCH : XEN_MC_FL_ARGS);
                xen_mc_flush();
                argidx = roundup(b->argidx, sizeof(u64));
        }

        ret.mc = &b->entries[b->mcidx];
        b->caller[b->mcidx] = __builtin_return_address(0);      /* MC_DEBUG builds record the caller */
        b->mcidx++;
        ret.args = &b->args[argidx];
        b->argidx = argidx + args;

        BUG_ON(b->argidx >= MC_ARGS);
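A small worked example of the argument-space accounting above, with illustrative numbers (MC_BATCH and MC_ARGS are whatever the file defines them to be):

/* Illustration only.  Argument scratch space is carved out of b->args[]
 * in u64-aligned chunks:
 *
 *   b->argidx == 20, request args == 12
 *      argidx    = roundup(20, sizeof(u64)) = 24   -> ret.args = &b->args[24]
 *      b->argidx = 24 + 12                  = 36   -> the next request rounds up to 40
 *
 * If the entry slots (MC_BATCH) or the scratch space (MC_ARGS) would be
 * exhausted, the pending batch is flushed first and the indices restart at 0. */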
In xen_mc_extend_args():

        BUG_ON(b->argidx >= MC_ARGS);

        if (unlikely(b->mcidx == 0 ||
                     b->entries[b->mcidx - 1].op != op)) {
                ...     /* nothing queued, or last entry is a different hypercall op */
        }
        if (unlikely((b->argidx + size) >= MC_ARGS)) {
                ...     /* no argument space left to extend into */
        }

        ret.mc = &b->entries[b->mcidx - 1];
        ret.args = &b->args[b->argidx];
        b->argidx += size;

        BUG_ON(b->argidx >= MC_ARGS);
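Both bail-out branches above leave the batch untouched (in mainline they return a multicall_space whose .mc is NULL), so callers fall back to queueing a fresh entry. The sketch below shows that pattern, loosely modelled on how the Xen MMU code batches mmu_update requests; it assumes it runs inside an already-open batch (between xen_mc_batch() and xen_mc_issue()), and the function name is illustrative.

/* Sketch only: append one mmu_update to the current batch if the last queued
 * entry is already an mmu_update hypercall, otherwise start a new entry.
 * Assumes xen_mc_extend_args() hands back .mc == NULL when it cannot extend. */
#include <xen/interface/xen.h>          /* struct mmu_update, __HYPERVISOR_mmu_update, DOMID_SELF */
#include <asm/xen/hypercall.h>          /* MULTI_mmu_update() */
#include "multicalls.h"

static void extend_or_queue_mmu_update(const struct mmu_update *update)
{
        struct multicall_space mcs;
        struct mmu_update *u;

        mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
        if (mcs.mc != NULL) {
                mcs.mc->args[1]++;      /* one more request for the existing call */
        } else {
                mcs = __xen_mc_entry(sizeof(*u));
                MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
        }

        u = mcs.args;
        *u = *update;                   /* lands in the scratch space just reserved */
}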
In xen_mc_callback():

        if (b->cbidx == MC_BATCH) {
                xen_mc_flush();         /* callback slots exhausted - drain them */
        }
        cb = &b->callbacks[b->cbidx++];
        cb->fn = fn;
        cb->data = data;
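Callbacks registered here are invoked by xen_mc_flush() once the batch has been handed to the hypervisor (the loop at the end of the flush fragment above). A hedged usage sketch follows, deferring cleanup of a scratch page until the hypercalls that reference it have been issued; the callback, the caller, and the mode value passed to xen_mc_issue() are illustrative assumptions, not taken from the file.

/* Sketch only: free a scratch page after the queued hypercalls that refer to
 * it have actually been issued.  Error handling is omitted for brevity. */
#include <linux/gfp.h>
#include "multicalls.h"                 /* xen_mc_batch(), xen_mc_callback(), xen_mc_issue() */

static void scratch_release(void *data)         /* hypothetical callback */
{
        free_page((unsigned long)data);
}

static void queue_work_with_scratch(void)       /* hypothetical caller */
{
        unsigned long scratch = __get_free_page(GFP_ATOMIC);

        xen_mc_batch();
        /* ... queue entries that reference 'scratch' via __xen_mc_entry() ... */
        xen_mc_callback(scratch_release, (void *)scratch);
        xen_mc_issue(0);        /* 0 matches no lazy mode, so the batch is flushed right away */
}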