/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Ultra Wide Band
 * UWB API
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * FIXME: doc: overview of the API, different parts and pointers
 */

#ifndef __LINUX__UWB_H__
#define __LINUX__UWB_H__

#include <linux/limits.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <asm/page.h>
#include "include/spec.h"

struct uwb_dev;
struct uwb_beca_e;
struct uwb_rc;
struct uwb_rsv;
struct uwb_dbg;

/**
 * struct uwb_dev - a UWB Device
 * @rc: UWB Radio Controller that discovered the device (kind of its
 *     parent).
 * @bce: a beacon cache entry for this device; or NULL if the device
 *     is a local radio controller.
 * @mac_addr: the EUI-48 address of this device.
 * @dev_addr: the current DevAddr used by this device.
 * @beacon_slot: the slot number the beacon is using.
 * @streams: bitmap of streams allocated to reservations targeted at
 *     this device. For an RC, this is the streams allocated for
 *     reservations targeted at DevAddrs.
 *
 * A UWB device may either be a neighbor or part of a local radio
 * controller.
 */
struct uwb_dev {
        struct mutex mutex;
        struct list_head list_node;
        struct device dev;
        struct uwb_rc *rc;              /* radio controller */
        struct uwb_beca_e *bce;         /* Beacon Cache Entry */

        struct uwb_mac_addr mac_addr;
        struct uwb_dev_addr dev_addr;
        int beacon_slot;
        DECLARE_BITMAP(streams, UWB_NUM_STREAMS);
        DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS);
};
#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev)

/**
 * UWB HWA/WHCI Radio Control {Command|Event} Block context IDs
 *
 * RC[CE]Bs have a 'context ID' field that matches the command with
 * the event received to confirm it.
 *
 * Maximum number of context IDs
 */
enum { UWB_RC_CTX_MAX = 256 };


/** Notification chain head for UWB generated events to listeners */
struct uwb_notifs_chain {
        struct list_head list;
        struct mutex mutex;
};

/* Beacon cache list */
struct uwb_beca {
        struct list_head list;
        size_t entries;
        struct mutex mutex;
};

/* Event handling thread. */
struct uwbd {
        int pid;
        struct task_struct *task;
        wait_queue_head_t wq;
        struct list_head event_list;
        spinlock_t event_list_lock;
};

/**
 * struct uwb_mas_bm - a bitmap of all MAS in a superframe
 * @bm: a bitmap of length #UWB_NUM_MAS
 */
struct uwb_mas_bm {
        DECLARE_BITMAP(bm, UWB_NUM_MAS);
        DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS);
        int safe;
        int unsafe;
};

/**
 * uwb_rsv_state - UWB Reservation state.
 *
 * NONE - reservation is not active (no DRP IE being transmitted).
 *
 * Owner reservation states:
 *
 * INITIATED - owner has sent an initial DRP request.
 * PENDING - target responded with pending Reason Code.
 * MODIFIED - reservation manager is modifying an established
 *     reservation with a different MAS allocation.
 * ESTABLISHED - the reservation has been successfully negotiated.
 *
 * Target reservation states:
 *
 * DENIED - request is denied.
 * ACCEPTED - request is accepted.
 * PENDING - the PAL has yet to decide whether to accept or deny the
 *     request.
 *
 * FIXME: further target states TBD.
 */
enum uwb_rsv_state {
        UWB_RSV_STATE_NONE = 0,
        UWB_RSV_STATE_O_INITIATED,
        UWB_RSV_STATE_O_PENDING,
        UWB_RSV_STATE_O_MODIFIED,
        UWB_RSV_STATE_O_ESTABLISHED,
        UWB_RSV_STATE_O_TO_BE_MOVED,
        UWB_RSV_STATE_O_MOVE_EXPANDING,
        UWB_RSV_STATE_O_MOVE_COMBINING,
        UWB_RSV_STATE_O_MOVE_REDUCING,
        UWB_RSV_STATE_T_ACCEPTED,
        UWB_RSV_STATE_T_DENIED,
        UWB_RSV_STATE_T_CONFLICT,
        UWB_RSV_STATE_T_PENDING,
        UWB_RSV_STATE_T_EXPANDING_ACCEPTED,
        UWB_RSV_STATE_T_EXPANDING_CONFLICT,
        UWB_RSV_STATE_T_EXPANDING_PENDING,
        UWB_RSV_STATE_T_EXPANDING_DENIED,
        UWB_RSV_STATE_T_RESIZED,

        UWB_RSV_STATE_LAST,
};

enum uwb_rsv_target_type {
        UWB_RSV_TARGET_DEV,
        UWB_RSV_TARGET_DEVADDR,
};

/**
 * struct uwb_rsv_target - the target of a reservation.
 *
 * A reservation is either targeted at a single, specific device
 * (UWB_RSV_TARGET_DEV); or (e.g., in the case of WUSB) targeted at a
 * specific (private) DevAddr (UWB_RSV_TARGET_DEVADDR).
 */
struct uwb_rsv_target {
        enum uwb_rsv_target_type type;
        union {
                struct uwb_dev *dev;
                struct uwb_dev_addr devaddr;
        };
};

struct uwb_rsv_move {
        struct uwb_mas_bm final_mas;
        struct uwb_ie_drp *companion_drp_ie;
        struct uwb_mas_bm companion_mas;
};

/*
 * Number of streams reserved for reservations targeted at DevAddrs.
 */
#define UWB_NUM_GLOBAL_STREAMS 1

typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv);

/**
 * struct uwb_rsv - a DRP reservation
 *
 * Data structure management:
 *
 * @rc: the radio controller this reservation is for
 *     (as target or owner)
 * @rc_node: a list node for the RC
 * @pal_node: a list node for the PAL
 *
 * Owner and target parameters:
 *
 * @owner: the UWB device owning this reservation
 * @target: the target UWB device
 * @type: reservation type
 *
 * Owner parameters:
 *
 * @max_mas: maximum number of MAS
 * @min_mas: minimum number of MAS
 * @max_interval: owner-selected sparsity (maximum interval between
 *     allocated MAS)
 * @is_multicast: true iff multicast
 *
 * @callback: callback function when the reservation completes
 * @pal_priv: private data for the PAL making the reservation
 *
 * Reservation status:
 *
 * @state: negotiation state
 * @stream: stream index allocated for this reservation
 * @tiebreaker: conflict tiebreaker for this reservation
 * @mas: reserved MAS
 * @drp_ie: the DRP IE
 * @ie_valid: true iff the DRP IE matches the reservation parameters
 *
 * DRP reservations are uniquely identified by the owner, target and
 * stream index. However, when using a DevAddr as a target (e.g., for
 * a WUSB cluster reservation) the responses may be received from
 * devices with different DevAddrs. In this case, reservations are
 * uniquely identified by just the stream index. A number of stream
 * indexes (UWB_NUM_GLOBAL_STREAMS) are reserved for this.
 */
struct uwb_rsv {
        struct uwb_rc *rc;
        struct list_head rc_node;
        struct list_head pal_node;
        struct kref kref;

        struct uwb_dev *owner;
        struct uwb_rsv_target target;
        enum uwb_drp_type type;
        int max_mas;
        int min_mas;
        int max_interval;
        bool is_multicast;

        uwb_rsv_cb_f callback;
        void *pal_priv;

        enum uwb_rsv_state state;
        bool needs_release_companion_mas;
        u8 stream;
        u8 tiebreaker;
        struct uwb_mas_bm mas;
        struct uwb_ie_drp *drp_ie;
        struct uwb_rsv_move mv;
        bool ie_valid;
        struct timer_list timer;
        struct work_struct handle_timeout_work;
};

static const
struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } };

static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas)
{
        bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS);
}

/**
 * struct uwb_drp_avail - a radio controller's view of MAS usage
 * @global: MAS unused by neighbors (excluding reservations targeted
 *     or owned by the local radio controller) or the beacon period
 * @local: MAS unused by local established reservations
 * @pending: MAS unused by local pending reservations
 * @ie: DRP Availability IE to be included in the beacon
 * @ie_valid: true iff @ie is valid and does not need to be regenerated
 *     from @global and @local
 *
 * Each radio controller maintains a view of MAS usage or
 * availability. MAS available for a new reservation are determined
 * from the intersection of @global, @local, and @pending.
 *
 * The radio controller must transmit a DRP Availability IE that's the
 * intersection of @global and @local.
 *
 * A set bit indicates the MAS is unused and available.
 *
 * rc->rsvs_mutex should be held before accessing this data structure.
 *
 * [ECMA-368] section 17.4.3.
 */
struct uwb_drp_avail {
        DECLARE_BITMAP(global, UWB_NUM_MAS);
        DECLARE_BITMAP(local, UWB_NUM_MAS);
        DECLARE_BITMAP(pending, UWB_NUM_MAS);
        struct uwb_ie_drp_avail ie;
        bool ie_valid;
};
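
/*
 * Illustrative sketch (not part of the API): how the three availability
 * views above combine into the MAS a new reservation may use.  The
 * helper name is made up for the example and it assumes the generic
 * bitmap ops from <linux/bitmap.h> are available; the real computation
 * lives in the reservation manager.
 */
static inline void uwb_drp_avail_intersect_sketch(const struct uwb_drp_avail *avail,
                                                  struct uwb_mas_bm *result)
{
        /* start from the MAS our neighbors leave free... */
        bitmap_copy(result->bm, avail->global, UWB_NUM_MAS);
        /* ...drop MAS used by our own established reservations... */
        bitmap_and(result->bm, result->bm, avail->local, UWB_NUM_MAS);
        /* ...and MAS tentatively claimed by our pending reservations */
        bitmap_and(result->bm, result->bm, avail->pending, UWB_NUM_MAS);
}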

struct uwb_drp_backoff_win {
        u8 window;
        u8 n;
        int total_expired;
        struct timer_list timer;
        bool can_reserve_extra_mases;
};

const char *uwb_rsv_state_str(enum uwb_rsv_state state);
const char *uwb_rsv_type_str(enum uwb_drp_type type);

struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb,
                               void *pal_priv);
void uwb_rsv_destroy(struct uwb_rsv *rsv);

int uwb_rsv_establish(struct uwb_rsv *rsv);
int uwb_rsv_modify(struct uwb_rsv *rsv,
                   int max_mas, int min_mas, int sparsity);
void uwb_rsv_terminate(struct uwb_rsv *rsv);

void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv);

void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas);
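
/*
 * Minimal usage sketch of the reservation API above (illustrative only;
 * the example_* names, the parameter values and the DRP type choice are
 * assumptions, and error handling is trimmed).  A PAL creates a
 * reservation, fills in the owner parameters and calls
 * uwb_rsv_establish(); the callback fires as negotiation progresses and
 * the PAL inspects rsv->state there.
 */
static inline void example_rsv_cb(struct uwb_rsv *rsv)
{
        if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
                dev_info(&rsv->rc->uwb_dev.dev, "reservation established\n");
}

static inline int example_reserve(struct uwb_rc *rc, struct uwb_dev *peer)
{
        struct uwb_rsv *rsv;

        rsv = uwb_rsv_create(rc, example_rsv_cb, NULL);
        if (rsv == NULL)
                return -ENOMEM;
        rsv->owner = &rc->uwb_dev;              /* we are the owner */
        rsv->target.type = UWB_RSV_TARGET_DEV;
        rsv->target.dev = peer;
        rsv->type = UWB_DRP_TYPE_HARD;          /* assumed DRP type from spec.h */
        rsv->max_mas = 256;                     /* example values only */
        rsv->min_mas = 16;
        rsv->max_interval = 2;
        return uwb_rsv_establish(rsv);          /* result reported via callback */
}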

/**
 * Radio Control Interface instance
 *
 * Life cycle rules: those of the UWB Device.
 *
 * @index: an index number for this radio controller, as used in the
 *     device name.
 * @version: version of protocol supported by this device
 * @priv: Backend implementation; rw with uwb_dev.dev.sem taken.
 * @cmd: Backend implementation to execute commands; rw and call
 *     only with uwb_dev.dev.sem taken.
 * @reset: Hardware reset of radio controller and any PAL controllers.
 * @filter: Backend implementation to manipulate data to and from the
 *     device so that it is compliant with the specification assumed
 *     by the driver (WHCI 0.95).
 *
 *     uwb_dev.dev.mutex is used to execute commands and update
 *     the corresponding structures; can't use a spinlock
 *     because rc->cmd() can sleep.
 * @ies: This is a dynamically allocated array caching the
 *     IEs (settable by the host) that the beacon of this
 *     radio controller is currently sending.
 *
 *     In reality, we store here the full command we send to
 *     the radio controller (which is basically a command
 *     prefix followed by all the IEs the beacon currently
 *     contains). This way we don't have to realloc and
 *     memcpy when setting it.
 *
 *     We set this up in uwb_rc_ie_setup(), where we allocate
 *     this struct and call get_ie() [so we know which IEs are
 *     currently being sent, if any].
 *
 * @ies_capacity: Amount of space (in bytes) allocated in @ies. The
 *     amount used is given by sizeof(*ies) plus ies->wIELength
 *     (which is a little endian quantity all the time).
 * @ies_mutex: protect the IE cache
 * @dbg: information for the debug interface
 */
struct uwb_rc {
        struct uwb_dev uwb_dev;
        int index;
        u16 version;

        struct module *owner;
        void *priv;
        int (*start)(struct uwb_rc *rc);
        void (*stop)(struct uwb_rc *rc);
        int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t);
        int (*reset)(struct uwb_rc *rc);
        int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *);
        int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t,
                            size_t *, size_t *);

        spinlock_t neh_lock;            /* protects neh_* and ctx_* */
        struct list_head neh_list;      /* Open NE handles */
        unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)];
        u8 ctx_roll;

        int beaconing;                  /* Beaconing state [channel number] */
        int beaconing_forced;
        int scanning;
        enum uwb_scan_type scan_type:3;
        unsigned ready:1;
        struct uwb_notifs_chain notifs_chain;
        struct uwb_beca uwb_beca;

        struct uwbd uwbd;

        struct uwb_drp_backoff_win bow;
        struct uwb_drp_avail drp_avail;
        struct list_head reservations;
        struct list_head cnflt_alien_list;
        struct uwb_mas_bm cnflt_alien_bitmap;
        struct mutex rsvs_mutex;
        spinlock_t rsvs_lock;
        struct workqueue_struct *rsv_workq;

        struct delayed_work rsv_update_work;
        struct delayed_work rsv_alien_bp_work;
        int set_drp_ie_pending;
        struct mutex ies_mutex;
        struct uwb_rc_cmd_set_ie *ies;
        size_t ies_capacity;

        struct list_head pals;
        int active_pals;

        struct uwb_dbg *dbg;
};


/**
 * struct uwb_pal - a UWB PAL
 * @name: descriptive name for this PAL (wusbhc, wlp, etc.).
 * @device: a device for the PAL. Used to link the PAL and the radio
 *     controller in sysfs.
 * @rc: the radio controller the PAL uses.
 * @channel_changed: called when the channel used by the radio changes.
 *     A channel of -1 means the channel has been stopped.
 * @new_rsv: called when a peer requests a reservation (may be NULL if
 *     the PAL cannot accept reservation requests).
 * @channel: channel being used by the PAL; 0 if the PAL isn't using
 *     the radio; -1 if the PAL wishes to use the radio but
 *     cannot.
 * @debugfs_dir: a debugfs directory which the PAL can use for its own
 *     debugfs files.
 *
 * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB
 * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP).
 *
 * The PALs using a radio controller must register themselves to
 * permit the UWB stack to coordinate usage of the radio between the
 * various PALs and to allow PALs to respond to certain requests from
 * peers.
 *
 * A struct uwb_pal should be embedded in a containing structure
 * belonging to the PAL and initialized with uwb_pal_init(). Fields
 * should be set appropriately by the PAL before registering the PAL
 * with uwb_pal_register().
 */
struct uwb_pal {
        struct list_head node;
        const char *name;
        struct device *device;
        struct uwb_rc *rc;

        void (*channel_changed)(struct uwb_pal *pal, int channel);
        void (*new_rsv)(struct uwb_pal *pal, struct uwb_rsv *rsv);

        int channel;
        struct dentry *debugfs_dir;
};

void uwb_pal_init(struct uwb_pal *pal);
int uwb_pal_register(struct uwb_pal *pal);
void uwb_pal_unregister(struct uwb_pal *pal);

int uwb_radio_start(struct uwb_pal *pal);
void uwb_radio_stop(struct uwb_pal *pal);

/*
 * General public API
 *
 * This API can be used by UWB device drivers or by those implementing
 * UWB Radio Controllers
 */
struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
                                       const struct uwb_dev_addr *devaddr);
struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *);
static inline void uwb_dev_get(struct uwb_dev *uwb_dev)
{
        get_device(&uwb_dev->dev);
}
static inline void uwb_dev_put(struct uwb_dev *uwb_dev)
{
        put_device(&uwb_dev->dev);
}
struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev);

/**
 * Callback function for 'uwb_{dev,rc}_foreach()'.
 *
 * @dev: Linux device instance
 *     'uwb_dev = container_of(dev, struct uwb_dev, dev)'
 * @priv: Data passed by the caller to 'uwb_{dev,rc}_foreach()'.
 *
 * @returns: 0 to continue iterating, any other value to stop
 *     iterating; that value is returned to the caller of
 *     _foreach().
 */
typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv);
int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv);
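
/*
 * Illustrative sketch of a uwb_dev_for_each() callback (the *_sketch
 * names are made up for the example): count the devices currently known
 * to a radio controller.  Returning 0 keeps the iteration going.
 */
static inline int uwb_dev_count_cb_sketch(struct device *dev, void *priv)
{
        struct uwb_dev *uwb_dev = to_uwb_dev(dev);
        unsigned int *count = priv;

        (*count)++;
        dev_dbg(&uwb_dev->dev, "device %u\n", *count);
        return 0;               /* 0 == keep iterating */
}

static inline unsigned int uwb_dev_count_sketch(struct uwb_rc *rc)
{
        unsigned int count = 0;

        uwb_dev_for_each(rc, uwb_dev_count_cb_sketch, &count);
        return count;
}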

struct uwb_rc *uwb_rc_alloc(void);
struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *);
struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *);
void uwb_rc_put(struct uwb_rc *rc);

typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg,
                                struct uwb_rceb *reply, ssize_t reply_size);

int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
                     struct uwb_rccb *cmd, size_t cmd_size,
                     u8 expected_type, u16 expected_event,
                     uwb_rc_cmd_cb_f cb, void *arg);
ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
                   struct uwb_rccb *cmd, size_t cmd_size,
                   struct uwb_rceb *reply, size_t reply_size);
ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name,
                    struct uwb_rccb *cmd, size_t cmd_size,
                    u8 expected_type, u16 expected_event,
                    struct uwb_rceb **preply);

size_t __uwb_addr_print(char *, size_t, const unsigned char *, int);

int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *);
int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *);
int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *);
int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *);
int __uwb_mac_addr_assigned_check(struct device *, void *);
int __uwb_dev_addr_assigned_check(struct device *, void *);

/* Print in @buf a pretty repr of @addr */
static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size,
                                        const struct uwb_dev_addr *addr)
{
        return __uwb_addr_print(buf, buf_size, addr->data, 0);
}

/* Print in @buf a pretty repr of @addr */
static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size,
                                        const struct uwb_mac_addr *addr)
{
        return __uwb_addr_print(buf, buf_size, addr->data, 1);
}

/* @returns 0 if device addresses @addr2 and @addr1 are equal */
static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr *addr1,
                                   const struct uwb_dev_addr *addr2)
{
        return memcmp(addr1, addr2, sizeof(*addr1));
}

/* @returns 0 if MAC addresses @addr2 and @addr1 are equal */
static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1,
                                   const struct uwb_mac_addr *addr2)
{
        return memcmp(addr1, addr2, sizeof(*addr1));
}

/* @returns !0 if a MAC @addr is a broadcast address */
static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr)
{
        struct uwb_mac_addr bcast = {
                .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
        };
        return !uwb_mac_addr_cmp(addr, &bcast);
}

/* @returns !0 if a MAC @addr is all zeroes */
static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr)
{
        struct uwb_mac_addr unset = {
                .data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
        };
        return !uwb_mac_addr_cmp(addr, &unset);
}

/* @returns !0 if the address is in use. */
static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc,
                                               struct uwb_dev_addr *addr)
{
        return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr);
}

/*
 * UWB Radio Controller API
 *
 * This API is used (in addition to the general API) to implement UWB
 * Radio Controllers.
 */
void uwb_rc_init(struct uwb_rc *);
int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv);
void uwb_rc_rm(struct uwb_rc *);
void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t);
void uwb_rc_neh_error(struct uwb_rc *, int);
void uwb_rc_reset_all(struct uwb_rc *rc);
void uwb_rc_pre_reset(struct uwb_rc *rc);
int uwb_rc_post_reset(struct uwb_rc *rc);
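
/*
 * Minimal sketch of how a radio controller backend wires itself up
 * (illustrative only; the example_* names and the probe context are
 * assumptions, and a real driver also sets ->owner, ->start/->stop and
 * the filter_cmd/filter_event hooks it needs).
 */
static inline int example_rc_probe(struct device *parent, void *backend_priv,
                                   int (*cmd)(struct uwb_rc *,
                                              const struct uwb_rccb *, size_t),
                                   int (*reset)(struct uwb_rc *))
{
        struct uwb_rc *rc;
        int ret;

        rc = uwb_rc_alloc();            /* refcounted; release with uwb_rc_put() */
        if (rc == NULL)
                return -ENOMEM;
        rc->cmd = cmd;                  /* backend command submission */
        rc->reset = reset;              /* backend hardware reset */

        ret = uwb_rc_add(rc, parent, backend_priv);
        if (ret < 0)
                uwb_rc_put(rc);         /* tear down on failure */
        return ret;
}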

/**
 * uwb_rsv_is_owner - is the owner of this reservation the RC?
 * @rsv: the reservation
 */
static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv)
{
        return rsv->owner == &rsv->rc->uwb_dev;
}

/**
 * enum uwb_notifs - UWB events that can be passed to any listeners
 * @UWB_NOTIF_ONAIR: a new neighbour has joined the beacon group.
 * @UWB_NOTIF_OFFAIR: a neighbour has left the beacon group.
 *
 * Higher layers can register callback functions with the radio
 * controller using uwb_notifs_register(). The radio controller
 * maintains a list of all registered handlers and will notify all
 * nodes when an event occurs.
 */
enum uwb_notifs {
        UWB_NOTIF_ONAIR,
        UWB_NOTIF_OFFAIR,
};

/* Callback function registered with UWB */
struct uwb_notifs_handler {
        struct list_head list_node;
        void (*cb)(void *, struct uwb_dev *, enum uwb_notifs);
        void *data;
};

int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *);
int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *);
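
/*
 * Illustrative sketch of listening for ONAIR/OFFAIR events (the
 * example_* names are made up).  The handler must stay allocated until
 * uwb_notifs_deregister() is called on it.
 */
static inline void example_uwb_notif_cb(void *data, struct uwb_dev *uwb_dev,
                                        enum uwb_notifs event)
{
        if (event == UWB_NOTIF_ONAIR)
                dev_info(&uwb_dev->dev, "neighbor joined the beacon group\n");
        else if (event == UWB_NOTIF_OFFAIR)
                dev_info(&uwb_dev->dev, "neighbor left the beacon group\n");
}

static inline int example_uwb_notifs_setup(struct uwb_rc *rc,
                                           struct uwb_notifs_handler *handler)
{
        handler->cb = example_uwb_notif_cb;
        handler->data = NULL;   /* passed back as the first argument of ->cb */
        return uwb_notifs_register(rc, handler);
}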


/**
 * UWB radio controller Event Size Entry (for creating entry tables)
 *
 * WUSB and WHCI define events and notifications, and they might have
 * fixed or variable size.
 *
 * Each event/notification has a size which is not necessarily known
 * in advance based on the event code. As well, vendor specific
 * events/notifications will have a size impossible to determine
 * unless we know about the device's specific details.
 *
 * Very clever of the spec writers not to realize that, without a
 * LENGTH field in the HEADER of each message, a generic driver has no
 * way to skip over vendor specific events/notifications...and the
 * transaction size cannot be counted on either, as the spec does not
 * forbid packing more than one event in a single transaction.
 *
 * Thus, we guess sizes with tables (or for events, when you know the
 * size ahead of time you can use uwb_rc_neh_extra_size*()). We
 * register tables with the known events and their sizes, and then we
 * traverse those tables. For those with variable length, we provide a
 * way to look up the size inside the event/notification's
 * payload. This allows device-specific event size tables to be
 * registered.
 *
 * @size: Size of the payload
 *
 * @offset: if != 0, at offset @offset-1 starts a field with a length
 *     that has to be added to @size. The format of the field is
 *     given by @type.
 *
 * @type: Type and length of the offset field. Most common is LE 16
 *     bits (that's why that is zero); others are there mostly to
 *     cover for bugs and weirdos.
 */
struct uwb_est_entry {
        size_t size;
        unsigned offset;
        enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type;
};

int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product,
                     const struct uwb_est_entry *, size_t entries);
int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product,
                       const struct uwb_est_entry *, size_t entries);
ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
                          size_t len);
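
/*
 * Illustrative sketch of an event size table (the event codes, vendor
 * and product IDs below are made up for the example).  Entries are
 * indexed by the low byte of the event code; a variable-length event
 * points at the field holding the extra length via @offset/@type.
 */
static inline int example_est_setup(void)
{
        static const struct uwb_est_entry example_est[] = {
                /* hypothetical fixed-size event 0x00: just the header */
                [0x00] = { .size = sizeof(struct uwb_rceb) },
                /* hypothetical event 0x01: header + 16-bit LE length field */
                [0x01] = { .size = sizeof(struct uwb_rceb) + 2,
                           .offset = sizeof(struct uwb_rceb) + 1,
                           .type = UWB_EST_16 },
        };

        /* 0xfe/0xff and the IDs are hypothetical vendor-specific values */
        return uwb_est_register(0xfe, 0xff, 0x1234, 0x5678,
                                example_est, ARRAY_SIZE(example_est));
}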

/* -- Misc */

enum {
        EDC_MAX_ERRORS = 10,
        EDC_ERROR_TIMEFRAME = HZ,
};

/* error density counter */
struct edc {
        unsigned long timestart;
        u16 errorcount;
};

static inline
void edc_init(struct edc *edc)
{
        edc->timestart = jiffies;
}

/* Called when an error occurs.
 * This is a way to determine whether the number of acceptable errors per
 * time period has been exceeded. It is not accurate, as there are cases
 * in which this scheme will not work, for example if there are periodic
 * occurrences of errors that straddle updates to the start time. This
 * scheme is sufficient for our usage.
 *
 * @returns 1 if the maximum acceptable number of errors per timeframe has
 * been exceeded.
 */
static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe)
{
        unsigned long now;

        now = jiffies;
        if (now - err_hist->timestart > timeframe) {
                err_hist->errorcount = 1;
                err_hist->timestart = now;
        } else if (++err_hist->errorcount > max_err) {
                err_hist->errorcount = 0;
                err_hist->timestart = now;
                return 1;
        }
        return 0;
}
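
/*
 * Illustrative use of the error density counter (the example_* name is
 * made up): bail out of an error path once errors arrive faster than
 * EDC_MAX_ERRORS per EDC_ERROR_TIMEFRAME.
 */
static inline int example_handle_hw_error(struct edc *err_hist)
{
        if (edc_inc(err_hist, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
                /* too many errors too quickly: caller should reset/give up */
                return -EIO;
        }
        return 0;               /* isolated error; caller may retry */
}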


/* Information Element handling */

struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len);
int uwb_rc_ie_add(struct uwb_rc *uwb_rc, const struct uwb_ie_hdr *ies, size_t size);
int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id);

/*
 * Transmission statistics
 *
 * UWB uses LQI and RSSI (one byte values) for reporting radio signal
 * strength and line quality indication. We do quick and dirty
 * averages of those. They are signed values, btw.
 *
 * For 8 bit quantities, we keep the min, the max, an accumulator
 * (@sigma) and a # of samples. When @samples gets to 255, we compute
 * the average (@sigma / @samples), place it in @sigma and reset
 * @samples to 1 (so we use it as the first sample).
 *
 * Now, statistically speaking, probably I am kicking the kidneys of
 * some books I have in my shelves collecting dust, but I just want to
 * get an approx, not the Nobel.
 *
 * LOCKING: there is no locking per se, but we try to keep a lockless
 * schema. Only _add_sample() modifies the values--as long as you
 * have other locking on top that makes sure that no two calls of
 * _add_sample() happen at the same time, then we are fine. Now, for
 * resetting the values we just set @samples to 0 and that makes the
 * next _add_sample() start with defaults. Reading the values in
 * _show() currently can race, so you need to make sure the calls are
 * under the same lock that protects calls to _add_sample(). FIXME:
 * currently unlocked (It is not ultraprecise but does the trick. Bite
 * me).
 */
struct stats {
        s8 min, max;
        s16 sigma;
        atomic_t samples;
};

static inline
void stats_init(struct stats *stats)
{
        atomic_set(&stats->samples, 0);
        wmb();
}

static inline
void stats_add_sample(struct stats *stats, s8 sample)
{
        s8 min, max;
        s16 sigma;
        unsigned samples = atomic_read(&stats->samples);
        if (samples == 0) {     /* it was zero before, so we initialize */
                min = 127;
                max = -128;
                sigma = 0;
        } else {
                min = stats->min;
                max = stats->max;
                sigma = stats->sigma;
        }

        if (sample < min)       /* compute new values */
                min = sample;
        else if (sample > max)
                max = sample;
        sigma += sample;

        stats->min = min;       /* commit */
        stats->max = max;
        stats->sigma = sigma;
        if (atomic_add_return(1, &stats->samples) > 255) {
                /* wrapped around! reset */
                stats->sigma = sigma / 256;
                atomic_set(&stats->samples, 1);
        }
}

static inline ssize_t stats_show(struct stats *stats, char *buf)
{
        int min, max, avg;
        int samples = atomic_read(&stats->samples);
        if (samples == 0)
                min = max = avg = 0;
        else {
                min = stats->min;
                max = stats->max;
                avg = stats->sigma / samples;
        }
        return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg);
}

static inline ssize_t stats_store(struct stats *stats, const char *buf,
                                  size_t size)
{
        stats_init(stats);
        return size;
}

#endif /* #ifndef __LINUX__UWB_H__ */