1 /* SPDX-License-Identifier: MIT */
2 /*
3  ****************************************************************************
4  * (C) 2006 - Cambridge University
5  * (C) 2021-2022 - EPAM Systems
6  ****************************************************************************
7  *
8  *        File: gnttab.c
9  *      Author: Steven Smith (sos22@cam.ac.uk)
10  *     Changes: Grzegorz Milos (gm281@cam.ac.uk)
11  *
12  *        Date: July 2006
13  *
14  * Environment: Xen Minimal OS
15  * Description: Simple grant tables implementation. About as stupid as it's
16  *  possible to be and still work.
17  *
18  ****************************************************************************
19  */
20 #include <zephyr/arch/arm64/hypercall.h>
21 #include <zephyr/xen/generic.h>
22 #include <zephyr/xen/gnttab.h>
23 #include <zephyr/xen/public/grant_table.h>
24 #include <zephyr/xen/public/memory.h>
25 #include <zephyr/xen/public/xen.h>
26 #include <zephyr/sys/barrier.h>
27 
28 #include <zephyr/init.h>
29 #include <zephyr/kernel.h>
30 #include <zephyr/logging/log.h>
31 #include <zephyr/sys/device_mmio.h>
32 
33 LOG_MODULE_REGISTER(xen_gnttab);
34 
35 /* Timeout for grant table ops retrying */
36 #define GOP_RETRY_DELAY 200
37 
38 #define GNTTAB_SIZE DT_REG_SIZE_BY_IDX(DT_INST(0, xen_xen), 0)
39 BUILD_ASSERT(!(GNTTAB_SIZE % XEN_PAGE_SIZE), "Size of gnttab have to be aligned on XEN_PAGE_SIZE");
40 
41 /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
42 #define NR_GRANT_FRAMES (GNTTAB_SIZE / XEN_PAGE_SIZE)
43 #define NR_GRANT_ENTRIES \
44 	(NR_GRANT_FRAMES * XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
45 
46 BUILD_ASSERT(GNTTAB_SIZE <= CONFIG_KERNEL_VM_SIZE);
47 DEVICE_MMIO_TOPLEVEL_STATIC(grant_tables, DT_INST(0, xen_xen));
48 
/*
 * Driver-global grant table state.
 *
 * gref_list implements a singly-linked free list of grant references
 * threaded through the array itself: gref_list[0] is the head, and
 * gref_list[gref] holds the next free gref. Entries below
 * GNTTAB_NR_RESERVED_ENTRIES are never handed out.
 */
static struct gnttab {
	struct k_sem sem;		/* counts free entries; pairs with get/put_free_entry */
	grant_entry_v1_t *table;	/* v1 grant table, mapped in gnttab_init() */
	grant_ref_t gref_list[NR_GRANT_ENTRIES];
} gnttab;
54 
get_free_entry(void)55 static grant_ref_t get_free_entry(void)
56 {
57 	grant_ref_t gref;
58 	unsigned int flags;
59 
60 	k_sem_take(&gnttab.sem, K_FOREVER);
61 
62 	flags = irq_lock();
63 	gref = gnttab.gref_list[0];
64 	__ASSERT((gref >= GNTTAB_NR_RESERVED_ENTRIES &&
65 		gref < NR_GRANT_ENTRIES), "Invalid gref = %d", gref);
66 	gnttab.gref_list[0] = gnttab.gref_list[gref];
67 	irq_unlock(flags);
68 
69 	return gref;
70 }
71 
/*
 * Push @gref back onto the head of the free list and signal one
 * waiter via the counting semaphore.
 */
static void put_free_entry(grant_ref_t gref)
{
	unsigned int key;

	key = irq_lock();
	/* Link the returned entry in front of the current head */
	gnttab.gref_list[gref] = gnttab.gref_list[0];
	gnttab.gref_list[0] = gref;
	irq_unlock(key);

	k_sem_give(&gnttab.sem);
}
84 
/*
 * Fill in grant entry @gref so that domain @domid may access frame @gfn.
 *
 * The write ordering here matters: the remote end considers the entry
 * valid as soon as GTF_permit_access appears in flags, so frame/domid
 * must be globally visible first — hence the full memory barrier before
 * the final flags store.
 */
static void gnttab_grant_permit_access(grant_ref_t gref, domid_t domid,
		unsigned long gfn, bool readonly)
{
	uint16_t flags = GTF_permit_access;

	if (readonly) {
		flags |= GTF_readonly;
	}

	gnttab.table[gref].frame = gfn;
	gnttab.table[gref].domid = domid;
	/* Need to be sure that gfn and domid will be set before flags */
	barrier_dmem_fence_full();

	gnttab.table[gref].flags = flags;
}
101 
gnttab_grant_access(domid_t domid,unsigned long gfn,bool readonly)102 grant_ref_t gnttab_grant_access(domid_t domid, unsigned long gfn,
103 		bool readonly)
104 {
105 	grant_ref_t gref = get_free_entry();
106 
107 	gnttab_grant_permit_access(gref, domid, gfn, readonly);
108 
109 	return gref;
110 }
111 
/*
 * Reset flags to zero in order to stop using the grant.
 *
 * Uses a compare-and-swap loop: the clear only succeeds if the remote
 * domain has not set GTF_reading/GTF_writing between our read and the
 * exchange. Returns 0 on success, 1 if the grant is still in use and
 * was therefore left untouched.
 */
static int gnttab_reset_flags(grant_ref_t gref)
{
	uint16_t flags, nflags;
	uint16_t *pflags;

	pflags = &gnttab.table[gref].flags;
	nflags = *pflags;

	do {
		flags = nflags;
		if (flags & (GTF_reading | GTF_writing)) {
			/* Remote end still has the frame mapped — bail out */
			LOG_WRN("gref = %u still in use! (0x%x)\n",
				gref, flags);
			return 1;
		}
		/* Retry if flags changed under us since the last read */
		nflags = synch_cmpxchg(pflags, flags, 0);
	} while (nflags != flags);

	return 0;
}
133 
/*
 * Stop granting access through @gref and return the entry to the free list.
 *
 * Returns 0 on success. Returns non-zero when the remote domain is still
 * reading/writing through the grant; in that case the entry is NOT
 * recycled and the caller may retry later.
 */
int gnttab_end_access(grant_ref_t gref)
{
	int rc;

	__ASSERT((gref >= GNTTAB_NR_RESERVED_ENTRIES &&
		gref < NR_GRANT_ENTRIES), "Invalid gref = %d", gref);

	rc = gnttab_reset_flags(gref);
	if (rc) {
		/*
		 * Grant still in use (GTF_reading/GTF_writing set).
		 * Recycling it now would hand the reference out again while
		 * the remote domain still has the frame mapped, so report
		 * the failure instead.
		 *
		 * NOTE: the previous check was inverted (`if (!rc)`), which
		 * leaked every successfully-ended gref and recycled in-use
		 * ones.
		 */
		return rc;
	}

	/* Flags cleared — safe to make the reference available again */
	put_free_entry(gref);

	return 0;
}
150 
gnttab_alloc_and_grant(void ** map,bool readonly)151 int32_t gnttab_alloc_and_grant(void **map, bool readonly)
152 {
153 	void *page;
154 	unsigned long gfn;
155 	grant_ref_t gref;
156 
157 	__ASSERT_NO_MSG(map != NULL);
158 
159 	page = k_aligned_alloc(XEN_PAGE_SIZE, XEN_PAGE_SIZE);
160 	if (page == NULL) {
161 		return -ENOMEM;
162 	}
163 
164 	gfn = xen_virt_to_gfn(page);
165 	gref = gnttab_grant_access(0, gfn, readonly);
166 
167 	*map = page;
168 
169 	return gref;
170 }
171 
gop_eagain_retry(int cmd,struct gnttab_map_grant_ref * gref)172 static void gop_eagain_retry(int cmd, struct gnttab_map_grant_ref *gref)
173 {
174 	unsigned int step = 10, delay = step;
175 	int16_t *status = &gref->status;
176 
177 	do {
178 		HYPERVISOR_grant_table_op(cmd, gref, 1);
179 		if (*status == GNTST_eagain) {
180 			k_sleep(K_MSEC(delay));
181 		}
182 
183 		delay += step;
184 	} while ((*status == GNTST_eagain) && (delay < GOP_RETRY_DELAY));
185 
186 	if (delay >= GOP_RETRY_DELAY) {
187 		LOG_ERR("Failed to map grant, timeout reached\n");
188 		*status = GNTST_bad_page;
189 	}
190 }
191 
gnttab_get_page(void)192 void *gnttab_get_page(void)
193 {
194 	int ret;
195 	void *page_addr;
196 	struct xen_remove_from_physmap rfpm;
197 
198 	page_addr = k_aligned_alloc(XEN_PAGE_SIZE, XEN_PAGE_SIZE);
199 	if (!page_addr) {
200 		LOG_WRN("Failed to allocate memory for gnttab page!\n");
201 		return NULL;
202 	}
203 
204 	rfpm.domid = DOMID_SELF;
205 	rfpm.gpfn = xen_virt_to_gfn(page_addr);
206 
207 	/*
208 	 * GNTTABOP_map_grant_ref will simply replace the entry in the P2M
209 	 * and not release any RAM that may have been associated with
210 	 * page_addr, so we release this memory before mapping.
211 	 */
212 	ret = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &rfpm);
213 	if (ret) {
214 		LOG_WRN("Failed to remove gnttab page from physmap, ret = %d\n", ret);
215 		return NULL;
216 	}
217 
218 	return page_addr;
219 }
220 
gnttab_put_page(void * page_addr)221 void gnttab_put_page(void *page_addr)
222 {
223 	int ret, nr_extents = 1;
224 	struct xen_memory_reservation reservation;
225 	xen_pfn_t page = xen_virt_to_gfn(page_addr);
226 
227 	/*
228 	 * After unmapping there will be a 4Kb holes in address space
229 	 * at 'page_addr' positions. To keep it contiguous and be able
230 	 * to return such addresses to memory allocator we need to
231 	 * populate memory on unmapped positions here.
232 	 */
233 	memset(&reservation, 0, sizeof(reservation));
234 	reservation.domid = DOMID_SELF;
235 	reservation.extent_order = 0;
236 	reservation.nr_extents = nr_extents;
237 	set_xen_guest_handle(reservation.extent_start, &page);
238 
239 	ret = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
240 	if (ret != nr_extents) {
241 		LOG_WRN("failed to populate physmap on gfn = 0x%llx, ret = %d\n",
242 			page, ret);
243 		return;
244 	}
245 
246 	k_free(page_addr);
247 }
248 
gnttab_map_refs(struct gnttab_map_grant_ref * map_ops,unsigned int count)249 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, unsigned int count)
250 {
251 	int i, ret;
252 
253 	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
254 	if (ret) {
255 		return ret;
256 	}
257 
258 	for (i = 0; i < count; i++) {
259 		switch (map_ops[i].status) {
260 		case GNTST_no_device_space:
261 			LOG_WRN("map_grant_ref failed, no device space for page #%d\n", i);
262 			break;
263 
264 		case GNTST_eagain:
265 			/* Operation not done; need to try again */
266 			gop_eagain_retry(GNTTABOP_map_grant_ref, &map_ops[i]);
267 			/* Need to re-check status for current page */
268 			i--;
269 
270 			break;
271 
272 		default:
273 			break;
274 		}
275 	}
276 
277 	return 0;
278 }
279 
gnttab_unmap_refs(struct gnttab_map_grant_ref * unmap_ops,unsigned int count)280 int gnttab_unmap_refs(struct gnttab_map_grant_ref *unmap_ops, unsigned int count)
281 {
282 	return HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
283 }
284 
285 
286 static const char * const gnttab_error_msgs[] = GNTTABOP_error_msgs;
287 
gnttabop_error(int16_t status)288 const char *gnttabop_error(int16_t status)
289 {
290 	status = -status;
291 	if (status < 0 || (uint16_t) status >= ARRAY_SIZE(gnttab_error_msgs)) {
292 		return "bad status";
293 	} else {
294 		return gnttab_error_msgs[status];
295 	}
296 }
297 
gnttab_init(void)298 static int gnttab_init(void)
299 {
300 	grant_ref_t gref;
301 	struct xen_add_to_physmap xatp;
302 	struct gnttab_setup_table setup;
303 	xen_pfn_t frames[NR_GRANT_FRAMES];
304 	int rc = 0, i;
305 
306 	/* Will be taken/given during gnt_refs allocation/release */
307 	k_sem_init(&gnttab.sem, 0, NR_GRANT_ENTRIES - GNTTAB_NR_RESERVED_ENTRIES);
308 
309 	for (
310 		gref = GNTTAB_NR_RESERVED_ENTRIES;
311 		gref < NR_GRANT_ENTRIES;
312 		gref++
313 	    ) {
314 		put_free_entry(gref);
315 	}
316 
317 	for (i = 0; i < NR_GRANT_FRAMES; i++) {
318 		xatp.domid = DOMID_SELF;
319 		xatp.size = 0;
320 		xatp.space = XENMAPSPACE_grant_table;
321 		xatp.idx = i;
322 		xatp.gpfn = xen_virt_to_gfn(Z_TOPLEVEL_ROM_NAME(grant_tables).phys_addr) + i;
323 		rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
324 		__ASSERT(!rc, "add_to_physmap failed; status = %d\n", rc);
325 	}
326 
327 	setup.dom = DOMID_SELF;
328 	setup.nr_frames = NR_GRANT_FRAMES;
329 	set_xen_guest_handle(setup.frame_list, frames);
330 	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
331 	__ASSERT((!rc) && (!setup.status), "Table setup failed; status = %s\n",
332 		gnttabop_error(setup.status));
333 
334 	DEVICE_MMIO_TOPLEVEL_MAP(grant_tables, K_MEM_CACHE_WB | K_MEM_PERM_RW);
335 	gnttab.table = (grant_entry_v1_t *)DEVICE_MMIO_TOPLEVEL_GET(grant_tables);
336 
337 	LOG_DBG("%s: grant table mapped\n", __func__);
338 
339 	return 0;
340 }
341 
342 SYS_INIT(gnttab_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE);
343