// SPDX-License-Identifier: GPL-2.0-only
/*
 * Ultra Wide Band Radio Control
 * Event Size Tables management
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * FIXME: docs
 *
 * Infrastructure, code and data tables for guessing the size of
 * events received on the notification endpoints of UWB radio
 * controllers.
 *
 * You define a table of events and, for each, its size and how to
 * obtain the extra size (see the usage sketch below).
 *
 * ENTRY POINTS:
 *
 * uwb_est_{create/destroy}(): To initialize/release the EST subsystem.
 *
 * uwb_est_[un]register(): To register/unregister event size tables
 *   uwb_est_grow()
 *
 * uwb_est_find_size(): Get the size of an event
 *   uwb_est_get_size()
 */
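/*
 * Usage sketch (illustrative only; not code that runs from this file):
 * the expected call order from the rest of the UWB stack is roughly
 *
 *	result = uwb_est_create();		(at subsystem init)
 *	...
 *	size = uwb_est_find_size(rc, rceb, rceb_size);
 *	...					(per received event)
 *	uwb_est_destroy();			(at subsystem exit)
 *
 * where 'rc', 'rceb' and 'rceb_size' are whatever the caller's
 * notification handling code has at hand.
 */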
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/export.h>

#include "uwb-internal.h"

struct uwb_est {
	u16 type_event_high;
	u16 vendor, product;
	u8 entries;
	const struct uwb_est_entry *entry;
};

static struct uwb_est *uwb_est;
static u8 uwb_est_size;
static u8 uwb_est_used;
static DEFINE_RWLOCK(uwb_est_lock);

/**
 * WUSB Standard Event Size Table, HWA-RC interface
 *
 * Sizes for events and notifications type 0 (general), high nibble 0.
 */
static
struct uwb_est_entry uwb_est_00_00xx[] = {
	[UWB_RC_EVT_IE_RCV] = {
		.size = sizeof(struct uwb_rc_evt_ie_rcv),
		.offset = 1 + offsetof(struct uwb_rc_evt_ie_rcv, wIELength),
	},
	[UWB_RC_EVT_BEACON] = {
		.size = sizeof(struct uwb_rc_evt_beacon),
		.offset = 1 + offsetof(struct uwb_rc_evt_beacon, wBeaconInfoLength),
	},
	[UWB_RC_EVT_BEACON_SIZE] = {
		.size = sizeof(struct uwb_rc_evt_beacon_size),
	},
	[UWB_RC_EVT_BPOIE_CHANGE] = {
		.size = sizeof(struct uwb_rc_evt_bpoie_change),
		.offset = 1 + offsetof(struct uwb_rc_evt_bpoie_change,
				       wBPOIELength),
	},
	[UWB_RC_EVT_BP_SLOT_CHANGE] = {
		.size = sizeof(struct uwb_rc_evt_bp_slot_change),
	},
	[UWB_RC_EVT_BP_SWITCH_IE_RCV] = {
		.size = sizeof(struct uwb_rc_evt_bp_switch_ie_rcv),
		.offset = 1 + offsetof(struct uwb_rc_evt_bp_switch_ie_rcv, wIELength),
	},
	[UWB_RC_EVT_DEV_ADDR_CONFLICT] = {
		.size = sizeof(struct uwb_rc_evt_dev_addr_conflict),
	},
	[UWB_RC_EVT_DRP_AVAIL] = {
		.size = sizeof(struct uwb_rc_evt_drp_avail),
	},
	[UWB_RC_EVT_DRP] = {
		.size = sizeof(struct uwb_rc_evt_drp),
		.offset = 1 + offsetof(struct uwb_rc_evt_drp, ie_length),
	},
	[UWB_RC_EVT_BP_SWITCH_STATUS] = {
		.size = sizeof(struct uwb_rc_evt_bp_switch_status),
	},
	[UWB_RC_EVT_CMD_FRAME_RCV] = {
		.size = sizeof(struct uwb_rc_evt_cmd_frame_rcv),
		.offset = 1 + offsetof(struct uwb_rc_evt_cmd_frame_rcv, dataLength),
	},
	[UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV] = {
		.size = sizeof(struct uwb_rc_evt_channel_change_ie_rcv),
		.offset = 1 + offsetof(struct uwb_rc_evt_channel_change_ie_rcv, wIELength),
	},
	[UWB_RC_CMD_CHANNEL_CHANGE] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_DEV_ADDR_MGMT] = {
		.size = sizeof(struct uwb_rc_evt_dev_addr_mgmt),
	},
	[UWB_RC_CMD_GET_IE] = {
		.size = sizeof(struct uwb_rc_evt_get_ie),
		.offset = 1 + offsetof(struct uwb_rc_evt_get_ie, wIELength),
	},
	[UWB_RC_CMD_RESET] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SCAN] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SET_BEACON_FILTER] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SET_DRP_IE] = {
		.size = sizeof(struct uwb_rc_evt_set_drp_ie),
	},
	[UWB_RC_CMD_SET_IE] = {
		.size = sizeof(struct uwb_rc_evt_set_ie),
	},
	[UWB_RC_CMD_SET_NOTIFICATION_FILTER] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SET_TX_POWER] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SLEEP] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_START_BEACON] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_STOP_BEACON] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_BP_MERGE] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SEND_COMMAND_FRAME] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
	[UWB_RC_CMD_SET_ASIE_NOTIF] = {
		.size = sizeof(struct uwb_rc_evt_confirm),
	},
};
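/*
 * Note on the .offset encoding above: offsets of the extra-length
 * fields are stored as 1 + offsetof(...) so that a zero .offset can
 * mean "fixed size event, no extra-length field".  uwb_est_get_size()
 * subtracts that 1 again before reading the field.
 */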

static
struct uwb_est_entry uwb_est_01_00xx[] = {
	[UWB_RC_DAA_ENERGY_DETECTED] = {
		.size = sizeof(struct uwb_rc_evt_daa_energy_detected),
	},
	[UWB_RC_SET_DAA_ENERGY_MASK] = {
		.size = sizeof(struct uwb_rc_evt_set_daa_energy_mask),
	},
	[UWB_RC_SET_NOTIFICATION_FILTER_EX] = {
		.size = sizeof(struct uwb_rc_evt_set_notification_filter_ex),
	},
};

/**
 * Initialize the EST subsystem
 *
 * Register the standard tables also.
 *
 * FIXME: tag init
 */
int uwb_est_create(void)
{
	int result;

	uwb_est_size = 2;
	uwb_est_used = 0;
	uwb_est = kcalloc(uwb_est_size, sizeof(uwb_est[0]), GFP_KERNEL);
	if (uwb_est == NULL)
		return -ENOMEM;

	result = uwb_est_register(UWB_RC_CET_GENERAL, 0, 0xffff, 0xffff,
				  uwb_est_00_00xx, ARRAY_SIZE(uwb_est_00_00xx));
	if (result < 0)
		goto out;
	result = uwb_est_register(UWB_RC_CET_EX_TYPE_1, 0, 0xffff, 0xffff,
				  uwb_est_01_00xx, ARRAY_SIZE(uwb_est_01_00xx));
out:
	return result;
}


/** Clean it up */
void uwb_est_destroy(void)
{
	kfree(uwb_est);
	uwb_est = NULL;
	uwb_est_size = uwb_est_used = 0;
}

/**
 * Double the capacity of the EST table
 *
 * @returns 0 if ok, < 0 errno on error.
 */
static
int uwb_est_grow(void)
{
	size_t actual_size = uwb_est_size * sizeof(uwb_est[0]);
	void *new = kmalloc_array(2, actual_size, GFP_ATOMIC);
	if (new == NULL)
		return -ENOMEM;
	memcpy(new, uwb_est, actual_size);
	memset(new + actual_size, 0, actual_size);
	kfree(uwb_est);
	uwb_est = new;
	uwb_est_size *= 2;
	return 0;
}

/**
 * Register an event size table
 *
 * Makes room for it if the table is full, and then inserts it in the
 * right position (entries are sorted by type, event_high, vendor and
 * then product).
 *
 * @vendor:  vendor code for matching against the device (0x0000 and
 *           0xffff mean any); use 0x0000 to force all to match without
 *           checking possible vendor specific ones, 0xffff to match
 *           after checking vendor specific ones.
 *
 * @product: product code from that vendor; same matching rules, use
 *           0x0000 for not allowing vendor specific matches, 0xffff
 *           for allowing.
 *
 * This arrangement just makes the tables sort differently. Because the
 * table is sorted by growing type-event_high-vendor-product, a zero
 * vendor will match before a 0x456a vendor, which in turn will match
 * before a 0xffff vendor.
 *
 * @returns 0 if ok, < 0 errno on error (-ENOMEM if the table cannot
 *          be grown).
 */
/* FIXME: add bus type to vendor/product code */
int uwb_est_register(u8 type, u8 event_high, u16 vendor, u16 product,
		     const struct uwb_est_entry *entry, size_t entries)
{
	unsigned long flags;
	unsigned itr;
	int result = 0;

	write_lock_irqsave(&uwb_est_lock, flags);
	if (uwb_est_used == uwb_est_size) {
		result = uwb_est_grow();
		if (result < 0)
			goto out;
	}
	/* Find the right spot to insert it in */
	for (itr = 0; itr < uwb_est_used; itr++)
		if (uwb_est[itr].type_event_high < type
		    && uwb_est[itr].vendor < vendor
		    && uwb_est[itr].product < product)
			break;

	/* Shift others to make room for the new one? */
	if (itr < uwb_est_used)
		memmove(&uwb_est[itr + 1], &uwb_est[itr],
			(uwb_est_used - itr) * sizeof(uwb_est[0]));
	uwb_est[itr].type_event_high = type << 8 | event_high;
	uwb_est[itr].vendor = vendor;
	uwb_est[itr].product = product;
	uwb_est[itr].entry = entry;
	uwb_est[itr].entries = entries;
	uwb_est_used++;
out:
	write_unlock_irqrestore(&uwb_est_lock, flags);
	return result;
}
EXPORT_SYMBOL_GPL(uwb_est_register);
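
/*
 * Example (a minimal sketch; the event type code, vendor/product IDs
 * and the my_* names are made up for illustration): a driver with two
 * vendor specific events, the second carrying a 16-bit length field at
 * byte offset 4, could register
 *
 *	static const struct uwb_est_entry my_vendor_est[] = {
 *		[0x00] = { .size = 4 },
 *		[0x01] = { .size = 6, .offset = 1 + 4, .type = UWB_EST_16 },
 *	};
 *
 *	result = uwb_est_register(0xfd, 0x00, 0x1234, 0x5678,
 *				  my_vendor_est, ARRAY_SIZE(my_vendor_est));
 *
 * The standard tables registered in uwb_est_create() pass
 * 0xffff/0xffff instead of concrete vendor/product codes.
 */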


/**
 * Unregister an event size table
 *
 * This just removes the specified entry and moves the ones after it
 * to fill in the gap. This is needed to keep the list sorted; no
 * reallocation is done to reduce the size of the table.
 *
 * We unregister by all the data we used to register instead of by
 * pointer to the @entry array because we might have used the same
 * table for a bunch of IDs (for example).
 *
 * @returns 0 if ok, < 0 errno on error (-ENOENT if not found).
 */
int uwb_est_unregister(u8 type, u8 event_high, u16 vendor, u16 product,
		       const struct uwb_est_entry *entry, size_t entries)
{
	unsigned long flags;
	unsigned itr;
	struct uwb_est est_cmp = {
		.type_event_high = type << 8 | event_high,
		.vendor = vendor,
		.product = product,
		.entry = entry,
		.entries = entries
	};
	write_lock_irqsave(&uwb_est_lock, flags);
	for (itr = 0; itr < uwb_est_used; itr++)
		if (!memcmp(&uwb_est[itr], &est_cmp, sizeof(est_cmp)))
			goto found;
	write_unlock_irqrestore(&uwb_est_lock, flags);
	return -ENOENT;

found:
	if (itr < uwb_est_used - 1)	/* Not last one? move ones above */
		memmove(&uwb_est[itr], &uwb_est[itr + 1],
			(uwb_est_used - itr - 1) * sizeof(uwb_est[0]));
	uwb_est_used--;
	write_unlock_irqrestore(&uwb_est_lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(uwb_est_unregister);
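
/*
 * Example (sketch, continuing the hypothetical registration shown
 * after uwb_est_register() above): teardown has to pass back exactly
 * the same values, since the lookup memcmp()s the whole registration.
 *
 *	uwb_est_unregister(0xfd, 0x00, 0x1234, 0x5678,
 *			   my_vendor_est, ARRAY_SIZE(my_vendor_est));
 */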


/**
 * Get the size of an event from a table
 *
 * @rceb: pointer to the buffer with the event
 * @rceb_size: size of the area pointed to by @rceb in bytes.
 * @returns: > 0      Size of the event
 *	     -ENOSPC  An area big enough was not provided to look
 *		      ahead into the event's guts and guess the size.
 *	     -ENOENT  Unknown event code (wEvent).
 *
 * This will look at the received RCEB and guess the total size. For
 * variable sized events, it will look further ahead into their length
 * field to see how much data should be read.
 *
 * Note this size is *not* final--the neh (Notification/Event Handle)
 * might specify an extra size to add.
 */
static
ssize_t uwb_est_get_size(struct uwb_rc *uwb_rc, struct uwb_est *est,
			 u8 event_low, const struct uwb_rceb *rceb,
			 size_t rceb_size)
{
	unsigned offset;
	ssize_t size;
	struct device *dev = &uwb_rc->uwb_dev.dev;
	const struct uwb_est_entry *entry;

	size = -ENOENT;
	if (event_low >= est->entries) {	/* in range? */
		dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u out of range\n",
			est, est->type_event_high, est->vendor, est->product,
			est->entries, event_low);
		goto out;
	}
	size = -ENOENT;
	entry = &est->entry[event_low];
	if (entry->size == 0 && entry->offset == 0) {	/* unknown? */
		dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: event %u unknown\n",
			est, est->type_event_high, est->vendor, est->product,
			est->entries, event_low);
		goto out;
	}
	offset = entry->offset;	/* extra fries with that? */
	if (offset == 0)
		size = entry->size;
	else {
		/* Oops, got an extra size field at 'offset'--read it */
		const void *ptr = rceb;
		size_t type_size = 0;
		offset--;
		size = -ENOSPC;			/* enough data for more? */
		switch (entry->type) {
		case UWB_EST_16:  type_size = sizeof(__le16); break;
		case UWB_EST_8:   type_size = sizeof(u8);     break;
		default:	  BUG();
		}
		if (offset + type_size > rceb_size) {
			dev_err(dev, "EST %p 0x%04x/%04x/%04x[%u]: "
				"not enough data to read extra size\n",
				est, est->type_event_high, est->vendor,
				est->product, est->entries);
			goto out;
		}
		size = entry->size;
		ptr += offset;
		switch (entry->type) {
		case UWB_EST_16:  size += le16_to_cpu(*(__le16 *)ptr); break;
		case UWB_EST_8:   size += *(u8 *)ptr;                  break;
		default:	  BUG();
		}
	}
out:
	return size;
}
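
/*
 * Worked example (the received length value is hypothetical): for
 * UWB_RC_EVT_IE_RCV the table stores the position of the 16-bit
 * wIELength field, so with wIELength == 24 this returns
 *
 *	sizeof(struct uwb_rc_evt_ie_rcv) + 24
 *
 * and it returns -ENOSPC instead if rceb_size does not reach far
 * enough to cover the wIELength field itself.
 */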


/**
 * Guesses the size of a WA event
 *
 * @rceb: pointer to the buffer with the event
 * @rceb_size: size of the area pointed to by @rceb in bytes.
 * @returns: > 0      Size of the event
 *	     -ENOSPC  An area big enough was not provided to look
 *		      ahead into the event's guts and guess the size.
 *	     -ENOENT  Unknown event code (wEvent).
 *
 * This will look at the received RCEB and guess the total size by
 * checking all the tables registered with uwb_est_register(). For
 * variable sized events, it will look further ahead into their length
 * field to see how much data should be read.
 *
 * Note this size is *not* final--the neh (Notification/Event Handle)
 * might specify an extra size to add or replace.
 */
ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
			  size_t rceb_size)
{
	/* FIXME: add vendor/product data */
	ssize_t size;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned long flags;
	unsigned itr;
	u16 type_event_high, event;

	read_lock_irqsave(&uwb_est_lock, flags);
	size = -ENOSPC;
	if (rceb_size < sizeof(*rceb))
		goto out;
	event = le16_to_cpu(rceb->wEvent);
	type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8;
	for (itr = 0; itr < uwb_est_used; itr++) {
		if (uwb_est[itr].type_event_high != type_event_high)
			continue;
		size = uwb_est_get_size(rc, &uwb_est[itr],
					event & 0x00ff, rceb, rceb_size);
		/* try more tables that might handle the same type */
		if (size != -ENOENT)
			goto out;
	}
	dev_dbg(dev,
		"event 0x%02x/%04x/%02x: no handlers available; RCEB %4ph\n",
		(unsigned) rceb->bEventType,
		(unsigned) le16_to_cpu(rceb->wEvent),
		(unsigned) rceb->bEventContext,
		rceb);
	size = -ENOENT;
out:
	read_unlock_irqrestore(&uwb_est_lock, flags);
	return size;
}
EXPORT_SYMBOL_GPL(uwb_est_find_size);
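
/*
 * Example caller (an illustrative sketch only; the real notification
 * path does more buffer management than this): with 'len' bytes of
 * notification data at 'buf',
 *
 *	const struct uwb_rceb *rceb = buf;
 *	ssize_t size = uwb_est_find_size(rc, rceb, len);
 *
 *	if (size < 0)
 *		return size;
 *	if (size > len)
 *		wait for more data before dispatching;
 *
 * and only then hand the first 'size' bytes to the event handler.
 */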