/*
 * Ultra Wide Band
 * UWB API
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * FIXME: doc: overview of the API, different parts and pointers
 */

#ifndef __LINUX__UWB_H__
#define __LINUX__UWB_H__

#include <linux/limits.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/uwb/spec.h>
#include <asm/page.h>

struct uwb_dev;
struct uwb_beca_e;
struct uwb_rc;
struct uwb_rsv;
struct uwb_dbg;

/**
 * struct uwb_dev - a UWB Device
 * @rc: UWB Radio Controller that discovered the device (kind of its
 *     parent).
 * @bce: a beacon cache entry for this device; or NULL if the device
 *     is a local radio controller.
 * @mac_addr: the EUI-48 address of this device.
 * @dev_addr: the current DevAddr used by this device.
 * @beacon_slot: the slot number the beacon is using.
 * @streams: bitmap of streams allocated to reservations targeted at
 *     this device.  For an RC, this is the streams allocated for
 *     reservations targeted at DevAddrs.
 *
 * A UWB device may either be a neighbor or part of a local radio
 * controller.
 */
struct uwb_dev {
	struct mutex mutex;
	struct list_head list_node;
	struct device dev;
	struct uwb_rc *rc;		/* radio controller */
	struct uwb_beca_e *bce;		/* Beacon Cache Entry */

	struct uwb_mac_addr mac_addr;
	struct uwb_dev_addr dev_addr;
	int beacon_slot;
	DECLARE_BITMAP(streams, UWB_NUM_STREAMS);
	DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS);
};
#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev)
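
/*
 * Example (a minimal sketch, not part of the API proper; the function name
 * is made up): code that is handed a plain &struct device known to belong
 * to the UWB bus -- for instance from a sysfs attribute or a
 * uwb_dev_for_each() callback -- can recover the enclosing struct uwb_dev
 * with to_uwb_dev():
 *
 *	static void example_print_slot(struct device *dev)
 *	{
 *		struct uwb_dev *uwb_dev = to_uwb_dev(dev);
 *
 *		dev_info(dev, "beaconing in slot %d\n", uwb_dev->beacon_slot);
 *	}
 */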

/**
 * UWB HWA/WHCI Radio Control {Command|Event} Block context IDs
 *
 * RC[CE]Bs have a 'context ID' field that matches the command with
 * the event received to confirm it.
 *
 * Maximum number of context IDs
 */
enum { UWB_RC_CTX_MAX = 256 };


/** Notification chain head for UWB generated events to listeners */
struct uwb_notifs_chain {
	struct list_head list;
	struct mutex mutex;
};

/* Beacon cache list */
struct uwb_beca {
	struct list_head list;
	size_t entries;
	struct mutex mutex;
};

/* Event handling thread. */
struct uwbd {
	int pid;
	struct task_struct *task;
	wait_queue_head_t wq;
	struct list_head event_list;
	spinlock_t event_list_lock;
};

/**
 * struct uwb_mas_bm - a bitmap of all MAS in a superframe
 * @bm: a bitmap of length #UWB_NUM_MAS
 */
struct uwb_mas_bm {
	DECLARE_BITMAP(bm, UWB_NUM_MAS);
	DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS);
	int safe;
	int unsafe;
};
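
/*
 * Example (an illustrative sketch; the helper name is made up): since @bm
 * is a regular kernel bitmap of UWB_NUM_MAS bits, the usual
 * <linux/bitmap.h> helpers apply, e.g. to count how many MAS a
 * reservation actually got:
 *
 *	static unsigned example_mas_count(const struct uwb_mas_bm *mas)
 *	{
 *		return bitmap_weight(mas->bm, UWB_NUM_MAS);
 *	}
 */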

/**
 * uwb_rsv_state - UWB Reservation state.
 *
 * NONE - reservation is not active (no DRP IE being transmitted).
 *
 * Owner reservation states:
 *
 * INITIATED - owner has sent an initial DRP request.
 * PENDING - target responded with pending Reason Code.
 * MODIFIED - reservation manager is modifying an established
 * reservation with a different MAS allocation.
 * ESTABLISHED - the reservation has been successfully negotiated.
 *
 * Target reservation states:
 *
 * DENIED - request is denied.
 * ACCEPTED - request is accepted.
 * PENDING - PAL has yet to decide whether to accept or deny.
 *
 * FIXME: further target states TBD.
 */
enum uwb_rsv_state {
	UWB_RSV_STATE_NONE = 0,
	UWB_RSV_STATE_O_INITIATED,
	UWB_RSV_STATE_O_PENDING,
	UWB_RSV_STATE_O_MODIFIED,
	UWB_RSV_STATE_O_ESTABLISHED,
	UWB_RSV_STATE_O_TO_BE_MOVED,
	UWB_RSV_STATE_O_MOVE_EXPANDING,
	UWB_RSV_STATE_O_MOVE_COMBINING,
	UWB_RSV_STATE_O_MOVE_REDUCING,
	UWB_RSV_STATE_T_ACCEPTED,
	UWB_RSV_STATE_T_DENIED,
	UWB_RSV_STATE_T_CONFLICT,
	UWB_RSV_STATE_T_PENDING,
	UWB_RSV_STATE_T_EXPANDING_ACCEPTED,
	UWB_RSV_STATE_T_EXPANDING_CONFLICT,
	UWB_RSV_STATE_T_EXPANDING_PENDING,
	UWB_RSV_STATE_T_EXPANDING_DENIED,
	UWB_RSV_STATE_T_RESIZED,

	UWB_RSV_STATE_LAST,
};

enum uwb_rsv_target_type {
	UWB_RSV_TARGET_DEV,
	UWB_RSV_TARGET_DEVADDR,
};

/**
 * struct uwb_rsv_target - the target of a reservation.
 *
 * Reservations are either unicast and targeted at a single device
 * (UWB_RSV_TARGET_DEV); or (e.g., in the case of WUSB) targeted at a
 * specific (private) DevAddr (UWB_RSV_TARGET_DEVADDR).
 */
struct uwb_rsv_target {
	enum uwb_rsv_target_type type;
	union {
		struct uwb_dev *dev;
		struct uwb_dev_addr devaddr;
	};
};

struct uwb_rsv_move {
	struct uwb_mas_bm final_mas;
	struct uwb_ie_drp *companion_drp_ie;
	struct uwb_mas_bm companion_mas;
};

/*
 * Number of streams reserved for reservations targeted at DevAddrs.
 */
#define UWB_NUM_GLOBAL_STREAMS 1

typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv);

/**
 * struct uwb_rsv - a DRP reservation
 *
 * Data structure management:
 *
 * @rc:             the radio controller this reservation is for
 *                  (as target or owner)
 * @rc_node:        a list node for the RC
 * @pal_node:       a list node for the PAL
 *
 * Owner and target parameters:
 *
 * @owner:          the UWB device owning this reservation
 * @target:         the target UWB device
 * @type:           reservation type
 *
 * Owner parameters:
 *
 * @max_mas:        maximum number of MAS
 * @min_mas:        minimum number of MAS
 * @max_interval:   maximum interval between allocated MAS (the
 *                  owner-selected sparsity of the allocation)
 * @is_multicast:   true iff multicast
 *
 * @callback:       callback function when the reservation completes
 * @pal_priv:       private data for the PAL making the reservation
 *
 * Reservation status:
 *
 * @state:          negotiation state
 * @stream:         stream index allocated for this reservation
 * @tiebreaker:     conflict tiebreaker for this reservation
 * @mas:            reserved MAS
 * @drp_ie:         the DRP IE
 * @ie_valid:       true iff the DRP IE matches the reservation parameters
 *
 * DRP reservations are uniquely identified by the owner, target and
 * stream index.  However, when using a DevAddr as a target (e.g., for
 * a WUSB cluster reservation) the responses may be received from
 * devices with different DevAddrs.  In this case, reservations are
 * uniquely identified by just the stream index.  A number of stream
 * indexes (UWB_NUM_GLOBAL_STREAMS) are reserved for this.
 */
struct uwb_rsv {
	struct uwb_rc *rc;
	struct list_head rc_node;
	struct list_head pal_node;
	struct kref kref;

	struct uwb_dev *owner;
	struct uwb_rsv_target target;
	enum uwb_drp_type type;
	int max_mas;
	int min_mas;
	int max_interval;
	bool is_multicast;

	uwb_rsv_cb_f callback;
	void *pal_priv;

	enum uwb_rsv_state state;
	bool needs_release_companion_mas;
	u8 stream;
	u8 tiebreaker;
	struct uwb_mas_bm mas;
	struct uwb_ie_drp *drp_ie;
	struct uwb_rsv_move mv;
	bool ie_valid;
	struct timer_list timer;
	struct work_struct handle_timeout_work;
};

static const
struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } };

static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas)
{
	bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS);
}

/**
 * struct uwb_drp_avail - a radio controller's view of MAS usage
 * @global:   MAS unused by neighbors (excluding reservations targeted
 *            or owned by the local radio controller) or the beacon period
 * @local:    MAS unused by local established reservations
 * @pending:  MAS unused by local pending reservations
 * @ie:       DRP Availability IE to be included in the beacon
 * @ie_valid: true iff @ie is valid and does not need to be regenerated from
 *            @global and @local
 *
 * Each radio controller maintains a view of MAS usage or
 * availability. MAS available for a new reservation are determined
 * from the intersection of @global, @local, and @pending.
 *
 * The radio controller must transmit a DRP Availability IE that's the
 * intersection of @global and @local.
 *
 * A set bit indicates the MAS is unused and available.
 *
 * rc->rsvs_mutex should be held before accessing this data structure.
 *
 * [ECMA-368] section 17.4.3.
 */
struct uwb_drp_avail {
	DECLARE_BITMAP(global, UWB_NUM_MAS);
	DECLARE_BITMAP(local, UWB_NUM_MAS);
	DECLARE_BITMAP(pending, UWB_NUM_MAS);
	struct uwb_ie_drp_avail ie;
	bool ie_valid;
};
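
/*
 * Example (a sketch of the rule described above, not a function the stack
 * exports; the name is made up): the MAS a new reservation may use are
 * the bits set in all three bitmaps at once, which can be computed with
 * the standard bitmap helpers:
 *
 *	static void example_available_mas(const struct uwb_drp_avail *avail,
 *					  struct uwb_mas_bm *result)
 *	{
 *		bitmap_and(result->bm, avail->global, avail->local, UWB_NUM_MAS);
 *		bitmap_and(result->bm, result->bm, avail->pending, UWB_NUM_MAS);
 *	}
 */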

struct uwb_drp_backoff_win {
	u8 window;
	u8 n;
	int total_expired;
	struct timer_list timer;
	bool can_reserve_extra_mases;
};

const char *uwb_rsv_state_str(enum uwb_rsv_state state);
const char *uwb_rsv_type_str(enum uwb_drp_type type);

struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb,
			       void *pal_priv);
void uwb_rsv_destroy(struct uwb_rsv *rsv);

int uwb_rsv_establish(struct uwb_rsv *rsv);
int uwb_rsv_modify(struct uwb_rsv *rsv,
		   int max_mas, int min_mas, int sparsity);
void uwb_rsv_terminate(struct uwb_rsv *rsv);

void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv);

void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas);
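
/*
 * Example (a minimal, hypothetical sketch of the owner-side flow; the
 * "example_*" names and the parameter values are illustrative, and
 * reference counting of the target device is omitted): a PAL typically
 * allocates a reservation with uwb_rsv_create(), fills in the target and
 * the desired MAS parameters, and then calls uwb_rsv_establish().
 * Negotiation is asynchronous; the callback passed to uwb_rsv_create()
 * is invoked on state changes and should inspect rsv->state:
 *
 *	static void example_rsv_cb(struct uwb_rsv *rsv)
 *	{
 *		if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
 *			pr_info("reservation established, stream %u\n",
 *				rsv->stream);
 *	}
 *
 *	static int example_reserve(struct uwb_rc *rc, struct uwb_dev *peer,
 *				   void *pal_priv)
 *	{
 *		struct uwb_rsv *rsv;
 *
 *		rsv = uwb_rsv_create(rc, example_rsv_cb, pal_priv);
 *		if (rsv == NULL)
 *			return -ENOMEM;
 *		rsv->target.type  = UWB_RSV_TARGET_DEV;
 *		rsv->target.dev   = peer;
 *		rsv->type         = UWB_DRP_TYPE_HARD;
 *		rsv->max_mas      = 256;
 *		rsv->min_mas      = 16;
 *		rsv->max_interval = 2;
 *		rsv->is_multicast = false;
 *		return uwb_rsv_establish(rsv);
 *	}
 */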

/**
 * Radio Control Interface instance
 *
 *
 * Life cycle rules: those of the UWB Device.
 *
 * @index:    an index number for this radio controller, as used in the
 *            device name.
 * @version:  version of protocol supported by this device
 * @priv:     Backend implementation; rw with uwb_dev.dev.sem taken.
 * @cmd:      Backend implementation to execute commands; rw and call
 *            only with uwb_dev.dev.sem taken.
 * @reset:    Hardware reset of radio controller and any PAL controllers.
 * @filter:   Backend implementation to manipulate data to and from the
 *            device so that it complies with the specification assumed
 *            by the driver (WHCI 0.95).
 *
 *            uwb_dev.dev.mutex is used to execute commands and update
 *            the corresponding structures; can't use a spinlock
 *            because rc->cmd() can sleep.
 * @ies:         This is a dynamically allocated array caching the
 *               IEs (settable by the host) that the beacon of this
 *               radio controller is currently sending.
 *
 *               In reality, we store here the full command we send to
 *               the radio controller (which is basically a command
 *               prefix followed by all the IEs the beacon currently
 *               contains). This way we don't have to realloc and
 *               memcpy when setting it.
 *
 *               We set this up in uwb_rc_ie_setup(), where we alloc
 *               this struct and call get_ie() [so we know which IEs are
 *               currently being sent, if any].
 *
 * @ies_capacity: Amount of space (in bytes) allocated in @ies. The
 *               amount used is given by sizeof(*ies) plus ies->wIELength
 *               (which is a little endian quantity all the time).
 * @ies_mutex:   protect the IE cache
 * @dbg:         information for the debug interface
 */
struct uwb_rc {
	struct uwb_dev uwb_dev;
	int index;
	u16 version;

	struct module *owner;
	void *priv;
	int (*start)(struct uwb_rc *rc);
	void (*stop)(struct uwb_rc *rc);
	int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t);
	int (*reset)(struct uwb_rc *rc);
	int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *);
	int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t,
			    size_t *, size_t *);

	spinlock_t neh_lock;		/* protects neh_* and ctx_* */
	struct list_head neh_list;	/* Open NE handles */
	unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)];
	u8 ctx_roll;

	int beaconing;			/* Beaconing state [channel number] */
	int beaconing_forced;
	int scanning;
	enum uwb_scan_type scan_type:3;
	unsigned ready:1;
	struct uwb_notifs_chain notifs_chain;
	struct uwb_beca uwb_beca;

	struct uwbd uwbd;

	struct uwb_drp_backoff_win bow;
	struct uwb_drp_avail drp_avail;
	struct list_head reservations;
	struct list_head cnflt_alien_list;
	struct uwb_mas_bm cnflt_alien_bitmap;
	struct mutex rsvs_mutex;
	spinlock_t rsvs_lock;
	struct workqueue_struct *rsv_workq;

	struct delayed_work rsv_update_work;
	struct delayed_work rsv_alien_bp_work;
	int set_drp_ie_pending;
	struct mutex ies_mutex;
	struct uwb_rc_cmd_set_ie *ies;
	size_t ies_capacity;

	struct list_head pals;
	int active_pals;

	struct uwb_dbg *dbg;
};


/**
 * struct uwb_pal - a UWB PAL
 * @name:    descriptive name for this PAL (wusbhc, wlp, etc.).
 * @device:  a device for the PAL.  Used to link the PAL and the radio
 *           controller in sysfs.
 * @rc:      the radio controller the PAL uses.
 * @channel_changed: called when the channel used by the radio changes.
 *           A channel of -1 means the channel has been stopped.
 * @new_rsv: called when a peer requests a reservation (may be NULL if
 *           the PAL cannot accept reservation requests).
 * @channel: channel being used by the PAL; 0 if the PAL isn't using
 *           the radio; -1 if the PAL wishes to use the radio but
 *           cannot.
 * @debugfs_dir: a debugfs directory which the PAL can use for its own
 *           debugfs files.
 *
 * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB
 * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP).
 *
 * The PALs using a radio controller must register themselves to
 * permit the UWB stack to coordinate usage of the radio between the
 * various PALs or to allow PALs to respond to certain requests from
 * peers.
 *
 * A struct uwb_pal should be embedded in a containing structure
 * belonging to the PAL and initialized with uwb_pal_init().  Fields
 * should be set appropriately by the PAL before registering the PAL
 * with uwb_pal_register().
 */
struct uwb_pal {
	struct list_head node;
	const char *name;
	struct device *device;
	struct uwb_rc *rc;

	void (*channel_changed)(struct uwb_pal *pal, int channel);
	void (*new_rsv)(struct uwb_pal *pal, struct uwb_rsv *rsv);

	int channel;
	struct dentry *debugfs_dir;
};

void uwb_pal_init(struct uwb_pal *pal);
int uwb_pal_register(struct uwb_pal *pal);
void uwb_pal_unregister(struct uwb_pal *pal);

int uwb_radio_start(struct uwb_pal *pal);
void uwb_radio_stop(struct uwb_pal *pal);
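
/*
 * Example (a minimal, hypothetical PAL skeleton; all "example_*" names are
 * illustrative): a PAL embeds a struct uwb_pal in its own state, initializes
 * it with uwb_pal_init(), fills in the fields it cares about, registers it
 * and then asks for the radio to be started:
 *
 *	struct example_pal {
 *		struct uwb_pal pal;
 *	};
 *
 *	static void example_channel_changed(struct uwb_pal *pal, int channel)
 *	{
 *		pr_info("example PAL: channel is now %d\n", channel);
 *	}
 *
 *	static int example_pal_start(struct example_pal *epal, struct uwb_rc *rc)
 *	{
 *		int ret;
 *
 *		uwb_pal_init(&epal->pal);
 *		epal->pal.name = "example";
 *		epal->pal.rc = rc;
 *		epal->pal.channel_changed = example_channel_changed;
 *		ret = uwb_pal_register(&epal->pal);
 *		if (ret < 0)
 *			return ret;
 *		return uwb_radio_start(&epal->pal);
 *	}
 */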

/*
 * General public API
 *
 * This API can be used by UWB device drivers or by those implementing
 * UWB Radio Controllers
 */
struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
				       const struct uwb_dev_addr *devaddr);
struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *);
static inline void uwb_dev_get(struct uwb_dev *uwb_dev)
{
	get_device(&uwb_dev->dev);
}
static inline void uwb_dev_put(struct uwb_dev *uwb_dev)
{
	put_device(&uwb_dev->dev);
}
struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev);

/**
 * Callback function for 'uwb_{dev,rc}_foreach()'.
 *
 * @dev:  Linux device instance
 *        'uwb_dev = container_of(dev, struct uwb_dev, dev)'
 * @priv: Data passed by the caller to 'uwb_{dev,rc}_foreach()'.
 *
 * @returns: 0 to continue the iterations, any other value to stop
 *           iterating and return that value to the caller of
 *           _foreach().
 */
typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv);
int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv);
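
/*
 * Example (an illustrative sketch; the names are made up): counting the
 * devices a radio controller currently knows about with uwb_dev_for_each():
 *
 *	static int example_count_cb(struct device *dev, void *priv)
 *	{
 *		unsigned *count = priv;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	static unsigned example_count_devices(struct uwb_rc *rc)
 *	{
 *		unsigned count = 0;
 *
 *		uwb_dev_for_each(rc, example_count_cb, &count);
 *		return count;
 *	}
 */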

struct uwb_rc *uwb_rc_alloc(void);
struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *);
struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *);
void uwb_rc_put(struct uwb_rc *rc);

typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg,
				struct uwb_rceb *reply, ssize_t reply_size);

int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
		     struct uwb_rccb *cmd, size_t cmd_size,
		     u8 expected_type, u16 expected_event,
		     uwb_rc_cmd_cb_f cb, void *arg);
ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
		   struct uwb_rccb *cmd, size_t cmd_size,
		   struct uwb_rceb *reply, size_t reply_size);
ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name,
		    struct uwb_rccb *cmd, size_t cmd_size,
		    u8 expected_type, u16 expected_event,
		    struct uwb_rceb **preply);
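
/*
 * Example (a hedged sketch of the synchronous path, assuming the uwb_rccb
 * and uwb_rc_evt_confirm layouts and the UWB_RC_CET_GENERAL,
 * UWB_RC_CMD_RESET and UWB_RC_RES_SUCCESS constants from
 * <linux/uwb/spec.h>): issuing a general RESET command and checking the
 * confirm event's result code.  uwb_rc_cmd() takes the expected event
 * type/code from the reply header, so those are filled in beforehand:
 *
 *	static int example_reset(struct uwb_rc *rc)
 *	{
 *		struct uwb_rccb cmd = {};
 *		struct uwb_rc_evt_confirm reply;
 *		ssize_t ret;
 *
 *		cmd.bCommandType = UWB_RC_CET_GENERAL;
 *		cmd.wCommand = cpu_to_le16(UWB_RC_CMD_RESET);
 *		reply.rceb.bEventType = UWB_RC_CET_GENERAL;
 *		reply.rceb.wEvent = UWB_RC_CMD_RESET;
 *		ret = uwb_rc_cmd(rc, "RESET", &cmd, sizeof(cmd),
 *				 &reply.rceb, sizeof(reply));
 *		if (ret < 0)
 *			return ret;
 *		return reply.bResultCode == UWB_RC_RES_SUCCESS ? 0 : -EIO;
 *	}
 */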

size_t __uwb_addr_print(char *, size_t, const unsigned char *, int);

int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *);
int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *);
int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *);
int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *);
int __uwb_mac_addr_assigned_check(struct device *, void *);
int __uwb_dev_addr_assigned_check(struct device *, void *);

/* Print in @buf a pretty repr of @addr */
static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size,
					const struct uwb_dev_addr *addr)
{
	return __uwb_addr_print(buf, buf_size, addr->data, 0);
}

/* Print in @buf a pretty repr of @addr */
static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size,
					const struct uwb_mac_addr *addr)
{
	return __uwb_addr_print(buf, buf_size, addr->data, 1);
}

/* @returns 0 if device addresses @addr2 and @addr1 are equal */
static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr *addr1,
				   const struct uwb_dev_addr *addr2)
{
	return memcmp(addr1, addr2, sizeof(*addr1));
}

/* @returns 0 if MAC addresses @addr2 and @addr1 are equal */
static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1,
				   const struct uwb_mac_addr *addr2)
{
	return memcmp(addr1, addr2, sizeof(*addr1));
}

/* @returns !0 if a MAC @addr is a broadcast address */
static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr)
{
	struct uwb_mac_addr bcast = {
		.data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
	};
	return !uwb_mac_addr_cmp(addr, &bcast);
}

/* @returns !0 if a MAC @addr is all zeroes */
static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr)
{
	struct uwb_mac_addr unset = {
		.data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
	};
	return !uwb_mac_addr_cmp(addr, &unset);
}

/* @returns !0 if the address is in use. */
static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc,
					       struct uwb_dev_addr *addr)
{
	return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr);
}

/*
 * UWB Radio Controller API
 *
 * This API is used (in addition to the general API) to implement UWB
 * Radio Controllers.
 */
void uwb_rc_init(struct uwb_rc *);
int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv);
void uwb_rc_rm(struct uwb_rc *);
void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t);
void uwb_rc_neh_error(struct uwb_rc *, int);
void uwb_rc_reset_all(struct uwb_rc *rc);
void uwb_rc_pre_reset(struct uwb_rc *rc);
int uwb_rc_post_reset(struct uwb_rc *rc);
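
/*
 * Example (a hypothetical radio controller driver skeleton; error handling
 * is trimmed and all "example_*" names, including the undefined backend
 * ops, are illustrative): an RC backend allocates the structure with
 * uwb_rc_alloc(), fills in its operations and hands it to the stack with
 * uwb_rc_add():
 *
 *	static int example_rc_probe(struct device *dev, void *backend_priv)
 *	{
 *		struct uwb_rc *rc;
 *		int ret;
 *
 *		rc = uwb_rc_alloc();
 *		if (rc == NULL)
 *			return -ENOMEM;
 *		rc->owner = THIS_MODULE;
 *		rc->start = example_rc_start;
 *		rc->stop  = example_rc_stop;
 *		rc->cmd   = example_rc_cmd;
 *		rc->reset = example_rc_reset;
 *		ret = uwb_rc_add(rc, dev, backend_priv);
 *		if (ret < 0)
 *			uwb_rc_put(rc);
 *		return ret;
 *	}
 *
 * Received notifications/events are then fed back to the stack with
 * uwb_rc_neh_grok(), and transport errors reported with uwb_rc_neh_error().
 */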

/**
 * uwb_rsv_is_owner - is the owner of this reservation the RC?
 * @rsv: the reservation
 */
static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv)
{
	return rsv->owner == &rsv->rc->uwb_dev;
}

/**
 * enum uwb_notifs - UWB events that can be passed to any listeners
 * @UWB_NOTIF_ONAIR: a new neighbour has joined the beacon group.
 * @UWB_NOTIF_OFFAIR: a neighbour has left the beacon group.
 *
 * Higher layers can register callback functions with the radio
 * controller using uwb_notifs_register(). The radio controller
 * maintains a list of all registered handlers and will notify all
 * registered handlers when an event occurs.
 */
enum uwb_notifs {
	UWB_NOTIF_ONAIR,
	UWB_NOTIF_OFFAIR,
};

/* Callback function registered with UWB */
struct uwb_notifs_handler {
	struct list_head list_node;
	void (*cb)(void *, struct uwb_dev *, enum uwb_notifs);
	void *data;
};

int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *);
int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *);
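
/*
 * Example (an illustrative sketch; the names are made up): registering a
 * notification handler so a higher layer learns when neighbours appear or
 * disappear:
 *
 *	static void example_notif_cb(void *data, struct uwb_dev *uwb_dev,
 *				     enum uwb_notifs event)
 *	{
 *		if (event == UWB_NOTIF_ONAIR)
 *			dev_info(&uwb_dev->dev, "neighbour joined\n");
 *		else if (event == UWB_NOTIF_OFFAIR)
 *			dev_info(&uwb_dev->dev, "neighbour left\n");
 *	}
 *
 *	static struct uwb_notifs_handler example_handler = {
 *		.cb   = example_notif_cb,
 *		.data = NULL,
 *	};
 *
 * The handler is attached with uwb_notifs_register(rc, &example_handler)
 * and detached with uwb_notifs_deregister(rc, &example_handler).
 */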


/**
 * UWB radio controller Event Size Entry (for creating entry tables)
 *
 * WUSB and WHCI define events and notifications, and they might have
 * fixed or variable size.
 *
 * Each event/notification has a size which is not necessarily known
 * in advance based on the event code. As well, vendor specific
 * events/notifications will have a size impossible to determine
 * unless we know about the device's specific details.
 *
 * It was way too smart of the spec writers not to consider that it
 * would be impossible for a generic driver to skip over vendor
 * specific events/notifications if there are no LENGTH fields in the
 * HEADER of each message...the transaction size cannot be counted on,
 * as the spec does not forbid packing more than one event in a single
 * transaction.
 *
 * Thus, we guess sizes with tables (or for events, when you know the
 * size ahead of time you can use uwb_rc_neh_extra_size*()). We
 * register tables with the known events and their sizes, and then we
 * traverse those tables. For those with variable length, we provide a
 * way to look up the size inside the event/notification's
 * payload. This allows device-specific event size tables to be
 * registered.
 *
 * @size:   Size of the payload
 *
 * @offset: if != 0, at offset @offset-1 starts a field with a length
 *          that has to be added to @size. The format of the field is
 *          given by @type.
 *
 * @type:   Type and length of the offset field. Most common is LE 16
 *          bits (that's why that is zero); others are there mostly to
 *          cover for bugs and weirdos.
 */
struct uwb_est_entry {
	size_t size;
	unsigned offset;
	enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type;
};

int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product,
		     const struct uwb_est_entry *, size_t entries);
int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product,
		       const struct uwb_est_entry *, size_t entries);
ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
			  size_t len);
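
/*
 * Example (a hypothetical sketch: the event family, vendor/product IDs,
 * payload struct and table contents are made up for illustration): a
 * driver that knows about a vendor specific event family describes the
 * event sizes in a table indexed by the low byte of the event code.  A
 * fixed-size event only needs @size; a variable-size one also points
 * @offset at the length field in its payload:
 *
 *	struct example_evt {
 *		struct uwb_rceb rceb;
 *		__le16 wDataLength;
 *		u8 data[];
 *	} __packed;
 *
 *	static const struct uwb_est_entry example_est[] = {
 *		[0x00] = { .size = sizeof(struct uwb_rceb) + 4 },
 *		[0x01] = { .size = sizeof(struct example_evt),
 *			   .offset = 1 + offsetof(struct example_evt,
 *						  wDataLength) },
 *	};
 *
 * registered with something like
 *
 *	uwb_est_register(UWB_RC_CET_EX_TYPE_1, 0x00, 0x1234, 0x5678,
 *			 example_est, ARRAY_SIZE(example_est));
 */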

/* -- Misc */

enum {
	EDC_MAX_ERRORS = 10,
	EDC_ERROR_TIMEFRAME = HZ,
};

/* error density counter */
struct edc {
	unsigned long timestart;
	u16 errorcount;
};

static inline
void edc_init(struct edc *edc)
{
	edc->timestart = jiffies;
}

/* Called when an error occurs.
 * This is a way to determine if the number of acceptable errors per time
 * period has been exceeded. It is not accurate as there are cases in which
 * this scheme will not work, for example if there are periodic occurrences
 * of errors that straddle updates to the start time. This scheme is
 * sufficient for our usage.
 *
 * @returns 1 if the maximum acceptable number of errors per timeframe has
 * been exceeded.
 */
static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe)
{
	unsigned long now;

	now = jiffies;
	if (now - err_hist->timestart > timeframe) {
		err_hist->errorcount = 1;
		err_hist->timestart = now;
	} else if (++err_hist->errorcount > max_err) {
		err_hist->errorcount = 0;
		err_hist->timestart = now;
		return 1;
	}
	return 0;
}
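
/*
 * Example (an illustrative sketch; the function name is made up): a driver
 * typically keeps one struct edc per error source, initialized once with
 * edc_init(), and gives up (e.g. resets the hardware) when errors come in
 * too densely:
 *
 *	static void example_handle_rx_error(struct uwb_rc *rc, struct edc *err)
 *	{
 *		if (edc_inc(err, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
 *			uwb_rc_reset_all(rc);
 *	}
 */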


/* Information Element handling */

struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len);
int uwb_rc_ie_add(struct uwb_rc *uwb_rc, const struct uwb_ie_hdr *ies, size_t size);
int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id);

/*
 * Transmission statistics
 *
 * UWB uses LQI and RSSI (one byte values) for reporting radio signal
 * strength and line quality indication. We do quick and dirty
 * averages of those. They are signed values, btw.
 *
 * For 8 bit quantities, we keep the min, the max, an accumulator
 * (@sigma) and a # of samples. When @samples gets to 255, we compute
 * the average (@sigma / @samples), place it in @sigma and reset
 * @samples to 1 (so we use it as the first sample).
 *
 * Now, statistically speaking, probably I am kicking the kidneys of
 * some books I have in my shelves collecting dust, but I just want to
 * get an approx, not the Nobel.
 *
 * LOCKING: there is no locking per se, but we try to keep a lockless
 * schema. Only _add_sample() modifies the values--as long as you
 * have other locking on top that makes sure that no two calls of
 * _add_sample() happen at the same time, then we are fine. Now, for
 * resetting the values we just set @samples to 0 and that makes the
 * next _add_sample() start with defaults. Reading the values in
 * _show() currently can race, so you need to make sure the calls are
 * under the same lock that protects calls to _add_sample(). FIXME:
 * currently unlocked (It is not ultraprecise but does the trick. Bite
 * me).
 */
struct stats {
	s8 min, max;
	s16 sigma;
	atomic_t samples;
};

static inline
void stats_init(struct stats *stats)
{
	atomic_set(&stats->samples, 0);
	wmb();
}

static inline
void stats_add_sample(struct stats *stats, s8 sample)
{
	s8 min, max;
	s16 sigma;
	unsigned samples = atomic_read(&stats->samples);
	if (samples == 0) {	/* it was zero before, so we initialize */
		min = 127;
		max = -128;
		sigma = 0;
	} else {
		min = stats->min;
		max = stats->max;
		sigma = stats->sigma;
	}

	if (sample < min)	/* compute new values */
		min = sample;
	else if (sample > max)
		max = sample;
	sigma += sample;

	stats->min = min;	/* commit */
	stats->max = max;
	stats->sigma = sigma;
	if (atomic_add_return(1, &stats->samples) > 255) {
		/* wrapped around! reset */
		stats->sigma = sigma / 256;
		atomic_set(&stats->samples, 1);
	}
}

static inline ssize_t stats_show(struct stats *stats, char *buf)
{
	int min, max, avg;
	int samples = atomic_read(&stats->samples);
	if (samples == 0)
		min = max = avg = 0;
	else {
		min = stats->min;
		max = stats->max;
		avg = stats->sigma / samples;
	}
	return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg);
}

static inline ssize_t stats_store(struct stats *stats, const char *buf,
				  size_t size)
{
	stats_init(stats);
	return size;
}
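
/*
 * Example (an illustrative sketch; the names are made up and stats_init()
 * is assumed to have been called once at setup time): a typical use is one
 * struct stats per link-quality metric, fed from the receive path and
 * exposed through a sysfs attribute:
 *
 *	static struct stats example_rssi_stats;
 *
 *	static void example_rx_frame(s8 rssi)
 *	{
 *		stats_add_sample(&example_rssi_stats, rssi);
 *	}
 *
 *	static ssize_t rssi_show(struct device *dev,
 *				 struct device_attribute *attr, char *buf)
 *	{
 *		return stats_show(&example_rssi_stats, buf);
 *	}
 */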

#endif /* #ifndef __LINUX__UWB_H__ */