1 /*
2  *
3  *  Bluetooth HCI Three-wire UART driver
4  *
5  *  Copyright (C) 2012  Intel Corporation
6  *
7  *
8  *  This program is free software; you can redistribute it and/or modify
9  *  it under the terms of the GNU General Public License as published by
10  *  the Free Software Foundation; either version 2 of the License, or
11  *  (at your option) any later version.
12  *
13  *  This program is distributed in the hope that it will be useful,
14  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *  GNU General Public License for more details.
17  *
18  *  You should have received a copy of the GNU General Public License
19  *  along with this program; if not, write to the Free Software
20  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21  *
22  */
23 
24 #include <linux/acpi.h>
25 #include <linux/errno.h>
26 #include <linux/gpio/consumer.h>
27 #include <linux/kernel.h>
28 #include <linux/mod_devicetable.h>
29 #include <linux/serdev.h>
30 #include <linux/skbuff.h>
31 
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 
35 #include "btrtl.h"
36 #include "hci_uart.h"
37 
/* Three-wire packet types not known to the HCI core */
#define HCI_3WIRE_ACK_PKT	0
#define HCI_3WIRE_LINK_PKT	15

/* Sliding window size */
#define H5_TX_WIN_MAX		4

/* Retransmit un-acked reliable packets after this long */
#define H5_ACK_TIMEOUT	msecs_to_jiffies(250)
/* Re-send sync/config messages at this interval until the link is up */
#define H5_SYNC_TIMEOUT	msecs_to_jiffies(100)

/*
 * Maximum Three-wire packet:
 *     4 byte header + max value for 12-bit length + 2 bytes for CRC
 */
#define H5_MAX_LEN (4 + 0xfff + 2)

/* Convenience macros for reading Three-wire header values */
#define H5_HDR_SEQ(hdr)		((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr)		(((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr)		(((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr)	(((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr)	((hdr)[1] & 0x0f)
/* 12-bit payload length: low nibble in byte 1 (bits 4-7), high byte in byte 2 */
#define H5_HDR_LEN(hdr)		((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))

/* SLIP framing: packets are delimited by 0xc0; occurrences of the
 * delimiter or escape byte inside a packet are sent escaped.
 */
#define SLIP_DELIMITER	0xc0
#define SLIP_ESC	0xdb
#define SLIP_ESC_DELIM	0xdc
#define SLIP_ESC_ESC	0xdd

/* H5 state flags */
enum {
	H5_RX_ESC,	/* SLIP escape mode */
	H5_TX_ACK_REQ,	/* Pending ack to send */
};
71 
struct h5 {
	/* Must be the first member, hci_serdev.c expects this. */
	struct hci_uart		serdev_hu;

	struct sk_buff_head	unack;		/* Unack'ed packets queue */
	struct sk_buff_head	rel;		/* Reliable packets queue */
	struct sk_buff_head	unrel;		/* Unreliable packets queue */

	unsigned long		flags;		/* H5_RX_ESC / H5_TX_ACK_REQ */

	struct sk_buff		*rx_skb;	/* Receive buffer */
	size_t			rx_pending;	/* Expecting more bytes */
	u8			rx_ack;		/* Last ack number received */

	/* Current RX state handler; fed one byte at a time by h5_recv() */
	int			(*rx_func)(struct hci_uart *hu, u8 c);

	struct timer_list	timer;		/* Retransmission timer */
	struct hci_uart		*hu;		/* Parent HCI UART */

	u8			tx_seq;		/* Next seq number to send */
	u8			tx_ack;		/* Next ack number to send */
	u8			tx_win;		/* Sliding window size */

	/* Link establishment state (sync -> config -> active) */
	enum {
		H5_UNINITIALIZED,
		H5_INITIALIZED,
		H5_ACTIVE,
	} state;

	/* Low-power state of the peer device */
	enum {
		H5_AWAKE,
		H5_SLEEPING,
		H5_WAKING_UP,
	} sleep;

	const struct h5_vnd *vnd;	/* Optional vendor hooks (serdev path) */
	const char *id;			/* ACPI id matched at probe time */

	struct gpio_desc *enable_gpio;
	struct gpio_desc *device_wake_gpio;
};
113 
/* Vendor-specific hooks, supplied via ACPI match driver_data */
struct h5_vnd {
	int (*setup)(struct h5 *h5);	/* Called from h5_setup() */
	void (*open)(struct h5 *h5);	/* Power-up, called from h5_open() */
	void (*close)(struct h5 *h5);	/* Power-down, called from h5_close() */
	const struct acpi_gpio_mapping *acpi_gpio_map;
};
120 
121 static void h5_reset_rx(struct h5 *h5);
122 
/* Queue a three-wire link control message (sync/config/sleep family)
 * on the unreliable queue.  Allocation failure is silently ignored;
 * the sync/retransmission timer will try again.
 */
static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *skb;

	/* Link control payloads are at most 3 bytes */
	skb = alloc_skb(3, GFP_ATOMIC);
	if (!skb)
		return;

	hci_skb_pkt_type(skb) = HCI_3WIRE_LINK_PKT;
	skb_put_data(skb, data, len);

	/* Link control traffic is always sent unreliably */
	skb_queue_tail(&h5->unrel, skb);
}
138 
h5_cfg_field(struct h5 * h5)139 static u8 h5_cfg_field(struct h5 *h5)
140 {
141 	/* Sliding window size (first 3 bits) */
142 	return h5->tx_win & 0x07;
143 }
144 
/* Sync/retransmission timer callback.
 *
 * Before the link is active this periodically re-sends the appropriate
 * link establishment message (sync or config).  Once active, an expiry
 * either marks the peer as sleeping (if a wakeup exchange was pending)
 * or moves all un-acked reliable packets back to the head of the
 * reliable queue for retransmission, rewinding tx_seq accordingly.
 */
static void h5_timed_event(struct timer_list *t)
{
	const unsigned char sync_req[] = { 0x01, 0x7e };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	struct h5 *h5 = from_timer(h5, t, timer);
	struct hci_uart *hu = h5->hu;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("%s", hu->hdev->name);

	/* Still syncing: re-send the sync request */
	if (h5->state == H5_UNINITIALIZED)
		h5_link_control(hu, sync_req, sizeof(sync_req));

	/* Synced but not configured: re-send the config request */
	if (h5->state == H5_INITIALIZED) {
		conf_req[2] = h5_cfg_field(h5);
		h5_link_control(hu, conf_req, sizeof(conf_req));
	}

	if (h5->state != H5_ACTIVE) {
		mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
		goto wakeup;
	}

	/* Wakeup exchange did not complete in time; treat peer as asleep */
	if (h5->sleep != H5_AWAKE) {
		h5->sleep = H5_SLEEPING;
		goto wakeup;
	}

	BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

	/* unack.lock and rel.lock share the skb-queue lockdep class and
	 * rel.lock is taken below while unack.lock is held, hence the
	 * nested annotation.
	 */
	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	/* Requeue newest-first so the reliable queue keeps its order,
	 * stepping tx_seq back once per un-acked packet.
	 */
	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
		h5->tx_seq = (h5->tx_seq - 1) & 0x07;
		skb_queue_head(&h5->rel, skb);
	}

	spin_unlock_irqrestore(&h5->unack.lock, flags);

wakeup:
	hci_uart_tx_wakeup(hu);
}
188 
/* The peer re-sent a sync message while the link was active, meaning the
 * controller was reset behind our back.  Drop all queued traffic, restart
 * the link establishment state machine and ask the HCI core to reset the
 * device.
 */
static void h5_peer_reset(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	BT_ERR("Peer device has reset");

	h5->state = H5_UNINITIALIZED;

	del_timer(&h5->timer);

	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);
	skb_queue_purge(&h5->unack);

	h5->tx_seq = 0;
	h5->tx_ack = 0;

	/* Send reset request to upper stack */
	hci_reset_dev(hu->hdev);
}
209 
/* Protocol open: set up driver state and start link establishment.
 *
 * In the serdev case the h5 struct was devm-allocated at probe time and
 * is reused; in the line-discipline case it is allocated here and freed
 * in h5_close().
 */
static int h5_open(struct hci_uart *hu)
{
	struct h5 *h5;
	const unsigned char sync[] = { 0x01, 0x7e };

	BT_DBG("hu %p", hu);

	if (hu->serdev) {
		h5 = serdev_device_get_drvdata(hu->serdev);
	} else {
		h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
		if (!h5)
			return -ENOMEM;
	}

	hu->priv = h5;
	h5->hu = hu;

	skb_queue_head_init(&h5->unack);
	skb_queue_head_init(&h5->rel);
	skb_queue_head_init(&h5->unrel);

	h5_reset_rx(h5);

	timer_setup(&h5->timer, h5_timed_event, 0);

	h5->tx_win = H5_TX_WIN_MAX;

	/* Vendor power-up hook (e.g. toggling enable GPIOs) */
	if (h5->vnd && h5->vnd->open)
		h5->vnd->open(h5);

	/* Defer HCI init until the three-wire link becomes active
	 * (hci_uart_init_ready() is called from h5_handle_internal_rx()).
	 */
	set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);

	/* Send initial sync request */
	h5_link_control(hu, sync, sizeof(sync));
	mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);

	return 0;
}
249 
/* Protocol close: stop the timer, drop all queued traffic and power down
 * via the vendor hook.  The h5 struct itself is freed only in the
 * line-discipline case; in the serdev case it is devm-managed and reused
 * across open/close cycles.
 */
static int h5_close(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	/* Synchronous: the timer callback must not run after this point */
	del_timer_sync(&h5->timer);

	skb_queue_purge(&h5->unack);
	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);

	kfree_skb(h5->rx_skb);
	h5->rx_skb = NULL;

	if (h5->vnd && h5->vnd->close)
		h5->vnd->close(h5);

	if (!hu->serdev)
		kfree(h5);

	return 0;
}
271 
h5_setup(struct hci_uart * hu)272 static int h5_setup(struct hci_uart *hu)
273 {
274 	struct h5 *h5 = hu->priv;
275 
276 	if (h5->vnd && h5->vnd->setup)
277 		return h5->vnd->setup(h5);
278 
279 	return 0;
280 }
281 
/* Drop packets from the unack queue that the peer's last received ack
 * number (rx_ack) confirms.  Walking seq back from tx_seq towards rx_ack
 * determines how many of the oldest queued packets remain UNacked; the
 * rest (the front of the queue) can be freed.  If rx_ack is not reachable
 * within the queue the ack is bogus and nothing is removed.
 */
static void h5_pkt_cull(struct h5 *h5)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	int i, to_remove;
	u8 seq;

	spin_lock_irqsave(&h5->unack.lock, flags);

	to_remove = skb_queue_len(&h5->unack);
	if (to_remove == 0)
		goto unlock;

	seq = h5->tx_seq;

	/* Count back from the next seq to send until we hit the acked seq */
	while (to_remove > 0) {
		if (h5->rx_ack == seq)
			break;

		to_remove--;
		seq = (seq - 1) & 0x07;
	}

	if (seq != h5->rx_ack)
		BT_ERR("Controller acked invalid packet");

	/* Free the first to_remove (oldest, now acked) packets */
	i = 0;
	skb_queue_walk_safe(&h5->unack, skb, tmp) {
		if (i++ >= to_remove)
			break;

		__skb_unlink(skb, &h5->unack);
		dev_kfree_skb_irq(skb);
	}

	/* No retransmission needed once everything is acked */
	if (skb_queue_empty(&h5->unack))
		del_timer(&h5->timer);

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);
}
323 
/* Process a received three-wire link control packet and drive the link
 * establishment / sleep state machines:
 *
 *   sync req/rsp  -> (re)start establishment, answer with sync rsp/conf req
 *   conf req/rsp  -> answer, adopt peer's window size, go active
 *   sleep/woken/wakeup -> track the peer's low-power state
 *
 * Unrecognized messages are logged and ignored.
 */
static void h5_handle_internal_rx(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char sync_req[] = { 0x01, 0x7e };
	const unsigned char sync_rsp[] = { 0x02, 0x7d };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	const unsigned char conf_rsp[] = { 0x04, 0x7b };
	const unsigned char wakeup_req[] = { 0x05, 0xfa };
	const unsigned char woken_req[] = { 0x06, 0xf9 };
	const unsigned char sleep_req[] = { 0x07, 0x78 };
	const unsigned char *hdr = h5->rx_skb->data;
	const unsigned char *data = &h5->rx_skb->data[4];

	BT_DBG("%s", hu->hdev->name);

	if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
		return;

	/* All link control messages are at least 2 bytes */
	if (H5_HDR_LEN(hdr) < 2)
		return;

	conf_req[2] = h5_cfg_field(h5);

	if (memcmp(data, sync_req, 2) == 0) {
		/* A sync request on an active link means the peer reset */
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5_link_control(hu, sync_rsp, 2);
	} else if (memcmp(data, sync_rsp, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5->state = H5_INITIALIZED;
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_req, 2) == 0) {
		h5_link_control(hu, conf_rsp, 2);
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_rsp, 2) == 0) {
		/* Optional third byte carries the peer's window size */
		if (H5_HDR_LEN(hdr) > 2)
			h5->tx_win = (data[2] & 0x07);
		BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
		h5->state = H5_ACTIVE;
		hci_uart_init_ready(hu);
		return;
	} else if (memcmp(data, sleep_req, 2) == 0) {
		BT_DBG("Peer went to sleep");
		h5->sleep = H5_SLEEPING;
		return;
	} else if (memcmp(data, woken_req, 2) == 0) {
		BT_DBG("Peer woke up");
		h5->sleep = H5_AWAKE;
	} else if (memcmp(data, wakeup_req, 2) == 0) {
		BT_DBG("Peer requested wakeup");
		h5_link_control(hu, woken_req, 2);
		h5->sleep = H5_AWAKE;
	} else {
		BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
		return;
	}

	/* A response was queued above; push it out */
	hci_uart_tx_wakeup(hu);
}
384 
/* A complete packet has been collected in rx_skb.
 *
 * For reliable packets, advance tx_ack and schedule an acknowledgment.
 * The ack number carried in every header culls our unack queue.  HCI
 * event/ACL/SCO payloads are handed to the core (ownership of rx_skb
 * transfers to hci_recv_frame()); everything else is treated as an
 * internal three-wire packet.  Finally the RX state machine is rearmed.
 */
static void h5_complete_rx_pkt(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_RELIABLE(hdr)) {
		h5->tx_ack = (h5->tx_ack + 1) % 8;
		set_bit(H5_TX_ACK_REQ, &h5->flags);
		hci_uart_tx_wakeup(hu);
	}

	h5->rx_ack = H5_HDR_ACK(hdr);

	h5_pkt_cull(h5);

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
		hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);

		/* Remove Three-wire header */
		skb_pull(h5->rx_skb, 4);

		/* NOTE(review): if the packet carried a CRC, the two CRC
		 * bytes are still at the tail here and are passed up —
		 * confirm whether peers ever negotiate CRC mode.
		 */
		hci_recv_frame(hu->hdev, h5->rx_skb);
		h5->rx_skb = NULL;

		break;

	default:
		h5_handle_internal_rx(hu);
		break;
	}

	h5_reset_rx(h5);
}
421 
/* RX state entered after both CRC bytes were collected into rx_skb.
 * Called with the first byte following them, which is not consumed.
 *
 * NOTE(review): the 16-bit CRC is received but never actually verified
 * before the packet is completed — confirm this matches the intended
 * data-integrity behavior.
 */
static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
	h5_complete_rx_pkt(hu);

	return 0;
}
428 
h5_rx_payload(struct hci_uart * hu,unsigned char c)429 static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
430 {
431 	struct h5 *h5 = hu->priv;
432 	const unsigned char *hdr = h5->rx_skb->data;
433 
434 	if (H5_HDR_CRC(hdr)) {
435 		h5->rx_func = h5_rx_crc;
436 		h5->rx_pending = 2;
437 	} else {
438 		h5_complete_rx_pkt(hu);
439 	}
440 
441 	return 0;
442 }
443 
/* RX state entered once the 4-byte three-wire header is in rx_skb.
 * Validates the header checksum, the sequence number of reliable packets
 * and the link state, then arms collection of the payload.  Consumes no
 * bytes itself; any failure discards the frame via h5_reset_rx().
 */
static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	/* Byte 3 is chosen so all four header bytes sum to 0xff */
	if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
		BT_ERR("Invalid header checksum");
		h5_reset_rx(h5);
		return 0;
	}

	/* Reliable packets must arrive in sequence (seq == our next ack) */
	if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
		BT_ERR("Out-of-order packet arrived (%u != %u)",
		       H5_HDR_SEQ(hdr), h5->tx_ack);
		h5_reset_rx(h5);
		return 0;
	}

	/* Only link control traffic is allowed before the link is active */
	if (h5->state != H5_ACTIVE &&
	    H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
		BT_ERR("Non-link packet received in non-active state");
		h5_reset_rx(h5);
		return 0;
	}

	h5->rx_func = h5_rx_payload;
	h5->rx_pending = H5_HDR_LEN(hdr);

	return 0;
}
479 
h5_rx_pkt_start(struct hci_uart * hu,unsigned char c)480 static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
481 {
482 	struct h5 *h5 = hu->priv;
483 
484 	if (c == SLIP_DELIMITER)
485 		return 1;
486 
487 	h5->rx_func = h5_rx_3wire_hdr;
488 	h5->rx_pending = 4;
489 
490 	h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
491 	if (!h5->rx_skb) {
492 		BT_ERR("Can't allocate mem for new packet");
493 		h5_reset_rx(h5);
494 		return -ENOMEM;
495 	}
496 
497 	h5->rx_skb->dev = (void *)hu->hdev;
498 
499 	return 0;
500 }
501 
h5_rx_delimiter(struct hci_uart * hu,unsigned char c)502 static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
503 {
504 	struct h5 *h5 = hu->priv;
505 
506 	if (c == SLIP_DELIMITER)
507 		h5->rx_func = h5_rx_pkt_start;
508 
509 	return 1;
510 }
511 
h5_unslip_one_byte(struct h5 * h5,unsigned char c)512 static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
513 {
514 	const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
515 	const u8 *byte = &c;
516 
517 	if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
518 		set_bit(H5_RX_ESC, &h5->flags);
519 		return;
520 	}
521 
522 	if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
523 		switch (c) {
524 		case SLIP_ESC_DELIM:
525 			byte = &delim;
526 			break;
527 		case SLIP_ESC_ESC:
528 			byte = &esc;
529 			break;
530 		default:
531 			BT_ERR("Invalid esc byte 0x%02hhx", c);
532 			h5_reset_rx(h5);
533 			return;
534 		}
535 	}
536 
537 	skb_put_data(h5->rx_skb, byte, 1);
538 	h5->rx_pending--;
539 
540 	BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
541 }
542 
h5_reset_rx(struct h5 * h5)543 static void h5_reset_rx(struct h5 *h5)
544 {
545 	if (h5->rx_skb) {
546 		kfree_skb(h5->rx_skb);
547 		h5->rx_skb = NULL;
548 	}
549 
550 	h5->rx_func = h5_rx_delimiter;
551 	h5->rx_pending = 0;
552 	clear_bit(H5_RX_ESC, &h5->flags);
553 }
554 
/* Feed @count received bytes into the RX state machine.
 *
 * While a packet body is pending (rx_pending > 0), bytes are SLIP-decoded
 * straight into rx_skb; an unexpected delimiter mid-packet aborts the
 * frame.  Otherwise each byte is offered to the current rx_func, which
 * reports how many bytes it consumed, or a negative error that aborts
 * processing of the remaining data.
 */
static int h5_recv(struct hci_uart *hu, const void *data, int count)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *ptr = data;

	BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
	       count);

	while (count > 0) {
		int processed;

		if (h5->rx_pending > 0) {
			if (*ptr == SLIP_DELIMITER) {
				BT_ERR("Too short H5 packet");
				/* Reset clears rx_pending, so this same
				 * delimiter is reconsidered below as the
				 * start of the next frame.
				 */
				h5_reset_rx(h5);
				continue;
			}

			h5_unslip_one_byte(h5, *ptr);

			ptr++; count--;
			continue;
		}

		processed = h5->rx_func(hu, *ptr);
		if (processed < 0)
			return processed;

		ptr += processed;
		count -= processed;
	}

	return 0;
}
589 
h5_enqueue(struct hci_uart * hu,struct sk_buff * skb)590 static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
591 {
592 	struct h5 *h5 = hu->priv;
593 
594 	if (skb->len > 0xfff) {
595 		BT_ERR("Packet too long (%u bytes)", skb->len);
596 		kfree_skb(skb);
597 		return 0;
598 	}
599 
600 	if (h5->state != H5_ACTIVE) {
601 		BT_ERR("Ignoring HCI data in non-active state");
602 		kfree_skb(skb);
603 		return 0;
604 	}
605 
606 	switch (hci_skb_pkt_type(skb)) {
607 	case HCI_ACLDATA_PKT:
608 	case HCI_COMMAND_PKT:
609 		skb_queue_tail(&h5->rel, skb);
610 		break;
611 
612 	case HCI_SCODATA_PKT:
613 		skb_queue_tail(&h5->unrel, skb);
614 		break;
615 
616 	default:
617 		BT_ERR("Unknown packet type %u", hci_skb_pkt_type(skb));
618 		kfree_skb(skb);
619 		break;
620 	}
621 
622 	return 0;
623 }
624 
h5_slip_delim(struct sk_buff * skb)625 static void h5_slip_delim(struct sk_buff *skb)
626 {
627 	const char delim = SLIP_DELIMITER;
628 
629 	skb_put_data(skb, &delim, 1);
630 }
631 
/* SLIP-encode one byte into @skb: bytes that collide with the framing
 * (delimiter, escape) are replaced by their two-byte escape sequence,
 * everything else passes through unchanged.
 */
static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
{
	const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
	const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };

	if (c == SLIP_DELIMITER)
		skb_put_data(skb, esc_delim, sizeof(esc_delim));
	else if (c == SLIP_ESC)
		skb_put_data(skb, esc_esc, sizeof(esc_esc));
	else
		skb_put_data(skb, &c, 1);
}
648 
valid_packet_type(u8 type)649 static bool valid_packet_type(u8 type)
650 {
651 	switch (type) {
652 	case HCI_ACLDATA_PKT:
653 	case HCI_COMMAND_PKT:
654 	case HCI_SCODATA_PKT:
655 	case HCI_3WIRE_LINK_PKT:
656 	case HCI_3WIRE_ACK_PKT:
657 		return true;
658 	default:
659 		return false;
660 	}
661 }
662 
/* Build a SLIP-framed three-wire packet around @data.
 *
 * The result is: delimiter, escaped 4-byte header, escaped payload,
 * delimiter.  Every packet carries the current tx_ack, so sending
 * anything also acknowledges received traffic (the pending-ack flag is
 * cleared here).  Reliable types (ACL/command) consume and advance
 * tx_seq.  No payload CRC is ever appended (the CRC header bit is never
 * set on TX).
 *
 * Returns NULL on unknown packet type or allocation failure.
 */
static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
				      const u8 *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;
	u8 hdr[4];
	int i;

	if (!valid_packet_type(pkt_type)) {
		BT_ERR("Unknown packet type %u", pkt_type);
		return NULL;
	}

	/*
	 * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
	 * (because bytes 0xc0 and 0xdb are escaped, worst case is when
	 * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
	 * delimiters at start and end).
	 */
	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_skb_pkt_type(nskb) = pkt_type;

	h5_slip_delim(nskb);

	/* Ack number in bits 3-5; sending it satisfies the pending ack */
	hdr[0] = h5->tx_ack << 3;
	clear_bit(H5_TX_ACK_REQ, &h5->flags);

	/* Reliable packet? */
	if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
		hdr[0] |= 1 << 7;
		hdr[0] |= h5->tx_seq;
		h5->tx_seq = (h5->tx_seq + 1) % 8;
	}

	/* 12-bit length split across bytes 1 and 2; byte 3 makes the
	 * four header bytes sum to 0xff.
	 */
	hdr[1] = pkt_type | ((len & 0x0f) << 4);
	hdr[2] = len >> 4;
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	for (i = 0; i < 4; i++)
		h5_slip_one_byte(nskb, hdr[i]);

	for (i = 0; i < len; i++)
		h5_slip_one_byte(nskb, data[i]);

	h5_slip_delim(nskb);

	return nskb;
}
719 
/* Hand the UART layer the next fully framed packet to transmit.
 *
 * Priority order: wakeup handshake (when the peer is asleep, nothing
 * else may be sent), then the unreliable queue, then the reliable queue
 * (bounded by the sliding window; transmitted reliable packets are
 * parked on the unack queue until acknowledged), and finally a
 * standalone ack packet if one is pending.  Returns NULL when there is
 * nothing to send.
 */
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	unsigned long flags;
	struct sk_buff *skb, *nskb;

	if (h5->sleep != H5_AWAKE) {
		const unsigned char wakeup_req[] = { 0x05, 0xfa };

		/* Wakeup request already in flight */
		if (h5->sleep == H5_WAKING_UP)
			return NULL;

		h5->sleep = H5_WAKING_UP;
		BT_DBG("Sending wakeup request");

		/* Short timer: retry/abort the wakeup if no response */
		mod_timer(&h5->timer, jiffies + HZ / 100);
		return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
	}

	skb = skb_dequeue(&h5->unrel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			kfree_skb(skb);
			return nskb;
		}

		/* Framing alloc failed; put the packet back for later */
		skb_queue_head(&h5->unrel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

	/* _nested: unack.lock shares the skb-queue lockdep class with the
	 * rel queue lock taken below.
	 */
	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	/* Sliding window full: wait for acks before sending more */
	if (h5->unack.qlen >= h5->tx_win)
		goto unlock;

	skb = skb_dequeue(&h5->rel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			/* Keep the original until it is acked */
			__skb_queue_tail(&h5->unack, skb);
			mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
			spin_unlock_irqrestore(&h5->unack.lock, flags);
			return nskb;
		}

		skb_queue_head(&h5->rel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);

	/* Nothing else to send but an ack is owed: send an empty ack packet */
	if (test_bit(H5_TX_ACK_REQ, &h5->flags))
		return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

	return NULL;
}
780 
/* Flush callback: nothing buffered outside the queues handled by
 * h5_close(), so this is a no-op.
 */
static int h5_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);
	return 0;
}
786 
/* Protocol operations registered with the HCI UART core */
static const struct hci_uart_proto h5p = {
	.id		= HCI_UART_3WIRE,
	.name		= "Three-wire (H5)",
	.open		= h5_open,
	.close		= h5_close,
	.setup		= h5_setup,
	.recv		= h5_recv,
	.enqueue	= h5_enqueue,
	.dequeue	= h5_dequeue,
	.flush		= h5_flush,
};
798 
/* serdev probe: allocate driver state, look up vendor hooks from the
 * ACPI match data (if any), claim the optional enable/device-wake GPIOs
 * and register the H5 protocol on the serdev device.
 */
static int h5_serdev_probe(struct serdev_device *serdev)
{
	const struct acpi_device_id *match;
	struct device *dev = &serdev->dev;
	struct h5 *h5;

	h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
	if (!h5)
		return -ENOMEM;

	/* NOTE(review): upstream later moved HCI_UART_RESET_ON_INIT from
	 * hdev_flags to flags (the bit values overlap between the two
	 * fields); confirm which field the hci_serdev code in this tree
	 * tests before changing this line.
	 */
	set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.hdev_flags);

	h5->hu = &h5->serdev_hu;
	h5->serdev_hu.serdev = serdev;
	serdev_device_set_drvdata(serdev, h5);

	if (has_acpi_companion(dev)) {
		match = acpi_match_device(dev->driver->acpi_match_table, dev);
		if (!match)
			return -ENODEV;

		h5->vnd = (const struct h5_vnd *)match->driver_data;
		h5->id  = (char *)match->id;

		if (h5->vnd->acpi_gpio_map)
			devm_acpi_dev_add_driver_gpios(dev,
						       h5->vnd->acpi_gpio_map);
	}

	/* Both GPIOs are optional; NULL descriptors are tolerated later */
	h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(h5->enable_gpio))
		return PTR_ERR(h5->enable_gpio);

	h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake",
						       GPIOD_OUT_LOW);
	if (IS_ERR(h5->device_wake_gpio))
		return PTR_ERR(h5->device_wake_gpio);

	return hci_uart_register_device(&h5->serdev_hu, &h5p);
}
839 
/* serdev remove: unregister the HCI UART device; the h5 struct itself
 * is devm-managed and freed automatically.
 */
static void h5_serdev_remove(struct serdev_device *serdev)
{
	struct h5 *h5 = serdev_device_get_drvdata(serdev);

	hci_uart_unregister_device(&h5->serdev_hu);
}
846 
847 #ifdef CONFIG_BT_HCIUART_RTL
/* Realtek vendor setup: query the firmware config for the required UART
 * settings, switch the controller (vendor command 0xfc17) and then the
 * local serdev to the new baudrate, and download the firmware.
 */
static int h5_btrtl_setup(struct h5 *h5)
{
	struct btrtl_device_info *btrtl_dev;
	struct sk_buff *skb;
	__le32 baudrate_data;
	u32 device_baudrate;
	unsigned int controller_baudrate;
	bool flow_control;
	int err;

	btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id);
	if (IS_ERR(btrtl_dev))
		return PTR_ERR(btrtl_dev);

	err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev,
				      &controller_baudrate, &device_baudrate,
				      &flow_control);
	if (err)
		goto out_free;

	/* Vendor command 0xfc17: set the controller's baudrate */
	baudrate_data = cpu_to_le32(device_baudrate);
	skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data),
			     &baudrate_data, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n");
		err = PTR_ERR(skb);
		goto out_free;
	} else {
		kfree_skb(skb);
	}
	/* Give the device some time to set up the new baudrate. */
	usleep_range(10000, 20000);

	serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate);
	serdev_device_set_flow_control(h5->hu->serdev, flow_control);

	err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev);
	/* Give the device some time before the hci-core sends it a reset */
	usleep_range(10000, 20000);

	/* Enable controller to do both LE scan and BR/EDR inquiry
	 * simultaneously.
	 */
	set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &h5->hu->hdev->quirks);

out_free:
	btrtl_free(btrtl_dev);

	return err;
}
898 
/* Realtek vendor power-up: program the fixed boot-time UART parameters
 * and raise the enable/device-wake GPIOs.
 */
static void h5_btrtl_open(struct h5 *h5)
{
	/* Devices always start with these fixed parameters */
	serdev_device_set_flow_control(h5->hu->serdev, false);
	serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
	serdev_device_set_baudrate(h5->hu->serdev, 115200);

	/* The controller needs up to 500ms to wakeup */
	gpiod_set_value_cansleep(h5->enable_gpio, 1);
	gpiod_set_value_cansleep(h5->device_wake_gpio, 1);
	msleep(500);
}
911 
/* Realtek vendor power-down: drop the wake and enable GPIOs (no-ops for
 * NULL optional descriptors).
 */
static void h5_btrtl_close(struct h5 *h5)
{
	gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
	gpiod_set_value_cansleep(h5->enable_gpio, 0);
}
917 
/* ACPI GPIO resource indices (crs_entry_index 0/1/2) for Realtek parts */
static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false };
static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false };
static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false };
static const struct acpi_gpio_mapping acpi_btrtl_gpios[] = {
	{ "device-wake-gpios", &btrtl_device_wake_gpios, 1 },
	{ "enable-gpios", &btrtl_enable_gpios, 1 },
	{ "host-wake-gpios", &btrtl_host_wake_gpios, 1 },
	{},
};
927 
/* Vendor hooks for Realtek (OBDA8723) devices */
static struct h5_vnd rtl_vnd = {
	.setup		= h5_btrtl_setup,
	.open		= h5_btrtl_open,
	.close		= h5_btrtl_close,
	.acpi_gpio_map	= acpi_btrtl_gpios,
};
934 #endif
935 
936 #ifdef CONFIG_ACPI
/* ACPI ids; driver_data selects the matching h5_vnd hooks */
static const struct acpi_device_id h5_acpi_match[] = {
#ifdef CONFIG_BT_HCIUART_RTL
	{ "OBDA8723", (kernel_ulong_t)&rtl_vnd },
#endif
	{ },
};
MODULE_DEVICE_TABLE(acpi, h5_acpi_match);
944 #endif
945 
/* serdev driver for ACPI-enumerated three-wire controllers */
static struct serdev_device_driver h5_serdev_driver = {
	.probe = h5_serdev_probe,
	.remove = h5_serdev_remove,
	.driver = {
		.name = "hci_uart_h5",
		.acpi_match_table = ACPI_PTR(h5_acpi_match),
	},
};
954 
h5_init(void)955 int __init h5_init(void)
956 {
957 	serdev_device_driver_register(&h5_serdev_driver);
958 	return hci_uart_register_proto(&h5p);
959 }
960 
/* Module exit: mirror h5_init() — drop the serdev driver, then the
 * line-discipline protocol registration.
 */
int __exit h5_deinit(void)
{
	serdev_device_driver_unregister(&h5_serdev_driver);
	return hci_uart_unregister_proto(&h5p);
}
966