/*
 * Thunderbolt DMA configuration based mailbox support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "dma_port.h"
#include "tb_regs.h"

#define DMA_PORT_CAP			0x3e

#define MAIL_DATA			1
#define MAIL_DATA_DWORDS		16

#define MAIL_IN				17
#define MAIL_IN_CMD_SHIFT		28
#define MAIL_IN_CMD_MASK		GENMASK(31, 28)
#define MAIL_IN_CMD_FLASH_WRITE		0x0
#define MAIL_IN_CMD_FLASH_UPDATE_AUTH	0x1
#define MAIL_IN_CMD_FLASH_READ		0x2
#define MAIL_IN_CMD_POWER_CYCLE		0x4
#define MAIL_IN_DWORDS_SHIFT		24
#define MAIL_IN_DWORDS_MASK		GENMASK(27, 24)
#define MAIL_IN_ADDRESS_SHIFT		2
#define MAIL_IN_ADDRESS_MASK		GENMASK(23, 2)
#define MAIL_IN_CSS			BIT(1)
#define MAIL_IN_OP_REQUEST		BIT(0)

#define MAIL_OUT			18
#define MAIL_OUT_STATUS_RESPONSE	BIT(29)
#define MAIL_OUT_STATUS_CMD_SHIFT	4
#define MAIL_OUT_STATUS_CMD_MASK	GENMASK(7, 4)
#define MAIL_OUT_STATUS_MASK		GENMASK(3, 0)
#define MAIL_OUT_STATUS_COMPLETED	0
#define MAIL_OUT_STATUS_ERR_AUTH	1
#define MAIL_OUT_STATUS_ERR_ACCESS	2

#define DMA_PORT_TIMEOUT		5000 /* ms */
#define DMA_PORT_RETRIES		3
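
/*
 * The MAIL_IN dword, built from the masks above, is laid out roughly as
 * follows (an illustrative summary inferred from the code below, not
 * taken from official documentation):
 *
 *   [31:28] command (flash write/update auth/read, power cycle)
 *   [27:24] number of dwords (reads: 0 means all 16; writes: dwords - 1)
 *   [23:2]  dword address within the flash
 *   [1]     CSS header write
 *   [0]     operation request, cleared by the firmware when it is done
 *
 * For example, a read of 4 dwords starting at dword address 0x10 would
 * use the value (0x2 << 28) | (4 << 24) | (0x10 << 2) | BIT(0).
 */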

/**
 * struct tb_dma_port - DMA control port
 * @sw: Switch the DMA port belongs to
 * @port: Switch port number where DMA capability is found
 * @base: Start offset of the mailbox registers
 * @buf: Temporary buffer to store a single block
 */
struct tb_dma_port {
	struct tb_switch *sw;
	u8 port;
	u32 base;
	u8 *buf;
};

/*
 * When the switch is in safe mode it supports very little functionality
 * so we don't validate that much here.
 */
static bool dma_port_match(const struct tb_cfg_request *req,
			   const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;
	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	return true;
}

static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	return true;
}

static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
			 u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_write_pkg reply;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_READ;
	req->response = &reply;
	req->response_size = 12 + 4 * length;
	req->response_type = TB_CFG_PKG_READ;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	if (res.err)
		return res.err;

	memcpy(buffer, &reply.data, 4 * length);
	return 0;
}

static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
			  u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_read_pkg reply;
	struct tb_cfg_result res;

	memcpy(&request.data, buffer, length * 4);

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	req->request_size = 12 + 4 * length;
	req->request_type = TB_CFG_PKG_WRITE;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_WRITE;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err;
}

static int dma_find_port(struct tb_switch *sw)
{
	static const int ports[] = { 3, 5, 7 };
	int i;

	/*
	 * The DMA (NHI) port is either 3, 5 or 7 depending on the
	 * controller. Try all of them.
	 */
	for (i = 0; i < ARRAY_SIZE(ports); i++) {
		u32 type;
		int ret;

		ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i],
				    2, 1, DMA_PORT_TIMEOUT);
		if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
			return ports[i];
	}

	return -ENODEV;
}

/**
 * dma_port_alloc() - Finds DMA control port from a switch pointed by route
 * @sw: Switch from which to find the DMA port
 *
 * The function checks if the switch NHI port supports DMA configuration
 * based mailbox capability and if it does, allocates and initializes
 * the DMA port structure. Returns %NULL if the capability was not found.
 *
 * The DMA control port is functional also when the switch is in safe
 * mode.
 */
struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
{
	struct tb_dma_port *dma;
	int port;

	port = dma_find_port(sw);
	if (port < 0)
		return NULL;

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
	if (!dma->buf) {
		kfree(dma);
		return NULL;
	}

	dma->sw = sw;
	dma->port = port;
	dma->base = DMA_PORT_CAP;

	return dma;
}

/**
 * dma_port_free() - Release DMA control port structure
 * @dma: DMA control port
 */
void dma_port_free(struct tb_dma_port *dma)
{
	if (dma) {
		kfree(dma->buf);
		kfree(dma);
	}
}
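
/*
 * Example (an illustrative sketch, not called anywhere in the driver):
 * typical lifecycle of the DMA port. A %NULL return from dma_port_alloc()
 * simply means the switch has no DMA configuration based mailbox.
 * dma_port_flash_read() used here is declared in dma_port.h.
 */
static int __maybe_unused dma_port_example_lifecycle(struct tb_switch *sw)
{
	struct tb_dma_port *dma;
	u32 data;
	int ret;

	dma = dma_port_alloc(sw);
	if (!dma)
		return -ENODEV;

	/* Read one dword from the start of the active NVM */
	ret = dma_port_flash_read(dma, 0, &data, sizeof(data));

	dma_port_free(dma);
	return ret;
}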

static int dma_port_wait_for_completion(struct tb_dma_port *dma,
					unsigned int timeout)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout);
	struct tb_switch *sw = dma->sw;

	do {
		int ret;
		u32 in;

		ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
				    dma->base + MAIL_IN, 1, 50);
		if (ret) {
			if (ret != -ETIMEDOUT)
				return ret;
		} else if (!(in & MAIL_IN_OP_REQUEST)) {
			return 0;
		}

		usleep_range(50, 100);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

static int status_to_errno(u32 status)
{
	switch (status & MAIL_OUT_STATUS_MASK) {
	case MAIL_OUT_STATUS_COMPLETED:
		return 0;
	case MAIL_OUT_STATUS_ERR_AUTH:
		return -EINVAL;
	case MAIL_OUT_STATUS_ERR_ACCESS:
		return -EACCES;
	}

	return -EIO;
}

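/*
 * Issues a single mailbox command: writes @in to the MAIL_IN register,
 * polls until the firmware clears the request bit and then reads the
 * result from the MAIL_OUT register. Returns %0 on success or a
 * negative errno on failure.
 */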
static int dma_port_request(struct tb_dma_port *dma, u32 in,
			    unsigned int timeout)
{
	struct tb_switch *sw = dma->sw;
	u32 out;
	int ret;

	ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
			     dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	ret = dma_port_wait_for_completion(dma, timeout);
	if (ret)
		return ret;

	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	return status_to_errno(out);
}

static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address,
				     void *buf, u32 size)
{
	struct tb_switch *sw = dma->sw;
	u32 in, dwaddress, dwords;
	int ret;

	dwaddress = address / 4;
	dwords = size / 4;

	in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
	if (dwords < MAIL_DATA_DWORDS)
		in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
}

static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
				      const void *buf, u32 size)
{
	struct tb_switch *sw = dma->sw;
	u32 in, dwaddress, dwords;
	int ret;

	dwords = size / 4;

	/* Write the block to MAIL_DATA registers */
	ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;

	/* CSS header write is always done to the same magic address */
	if (address >= DMA_PORT_CSS_ADDRESS) {
		dwaddress = DMA_PORT_CSS_ADDRESS;
		in |= MAIL_IN_CSS;
	} else {
		dwaddress = address / 4;
	}

	in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
}

/**
 * dma_port_flash_read() - Read from active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of active region
 * @buf: Buffer where the data is read
 * @size: Size of the buffer
 */
int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
			void *buf, size_t size)
{
	unsigned int retries = DMA_PORT_RETRIES;

	do {
		unsigned int offset;
		size_t nbytes;
		int ret;

		offset = address & 3;
		nbytes = min_t(size_t, size + offset, MAIL_DATA_DWORDS * 4);

		ret = dma_port_flash_read_block(dma, address, dma->buf,
						ALIGN(nbytes, 4));
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		nbytes -= offset;
		memcpy(buf, dma->buf + offset, nbytes);

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}
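
/*
 * Example (an illustrative sketch, not called anywhere in the driver):
 * dump the first 64 bytes of the active NVM. Reads may start at an
 * unaligned address; the block helpers above take care of the alignment.
 */
static int __maybe_unused dma_port_example_dump(struct tb_dma_port *dma)
{
	u8 data[64];
	int ret;

	ret = dma_port_flash_read(dma, 0, data, sizeof(data));
	if (ret)
		return ret;

	print_hex_dump(KERN_DEBUG, "nvm: ", DUMP_PREFIX_OFFSET, 16, 1,
		       data, sizeof(data), false);
	return 0;
}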

/**
 * dma_port_flash_write() - Write to non-active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of non-active region
 * @buf: Data to write
 * @size: Size of the buffer
 *
 * Writes a block of data to the non-active flash region of the switch.
 * If the address is given as %DMA_PORT_CSS_ADDRESS the block is written
 * using the CSS command.
 */
int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
			 const void *buf, size_t size)
{
	unsigned int retries = DMA_PORT_RETRIES;
	unsigned int offset;

	if (address >= DMA_PORT_CSS_ADDRESS) {
		offset = 0;
		if (size > DMA_PORT_CSS_MAX_SIZE)
			return -E2BIG;
	} else {
		offset = address & 3;
		address = address & ~3;
	}

	do {
		u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
		int ret;

		memcpy(dma->buf + offset, buf, nbytes);

		ret = dma_port_flash_write_block(dma, address, buf, nbytes);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}

/**
 * dma_port_flash_update_auth() - Starts flash authenticate cycle
 * @dma: DMA control port
 *
 * Starts the flash update authentication cycle. If the image in the
 * non-active area was valid, the switch starts the upgrade process
 * where the active and non-active areas get swapped in the end. The
 * caller should call dma_port_flash_update_auth_status() to get the
 * status of this command. This is because if the switch in question
 * is the root switch, the Thunderbolt host controller gets reset as
 * well.
 */
int dma_port_flash_update_auth(struct tb_dma_port *dma)
{
	u32 in;

	in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, 150);
}

/**
 * dma_port_flash_update_auth_status() - Reads status of update auth command
 * @dma: DMA control port
 * @status: Status code of the operation
 *
 * The function checks if there is status available from the last update
 * auth command. Returns %0 if there is no status and no further
 * action is required. If there is status, %1 is returned instead and
 * @status holds the failure code.
 *
 * Negative return means there was an error reading status from the
 * switch.
 */
int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
{
	struct tb_switch *sw = dma->sw;
	u32 out, cmd;
	int ret;

	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	/* Check if the status relates to flash update auth */
	cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
	if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
		if (status)
			*status = out & MAIL_OUT_STATUS_MASK;

		/* Reset is needed in any case */
		return 1;
	}

	return 0;
}
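
/*
 * Example (an illustrative sketch): how a caller could interpret the
 * three possible outcomes of dma_port_flash_update_auth_status(). The
 * mapping of a non-zero @status to -EINVAL is this example's choice,
 * not something the hardware mandates.
 */
static int __maybe_unused dma_port_example_auth_result(struct tb_dma_port *dma)
{
	u32 status;
	int ret;

	ret = dma_port_flash_update_auth_status(dma, &status);
	if (ret < 0)
		return ret;	/* Reading MAIL_OUT failed */
	if (!ret)
		return 0;	/* No status pending, nothing to do */

	/* Status is available; %0 means the image was accepted */
	return status ? -EINVAL : 0;
}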

/**
 * dma_port_power_cycle() - Power cycles the switch
 * @dma: DMA control port
 *
 * Triggers a power cycle of the switch.
 */
int dma_port_power_cycle(struct tb_dma_port *dma)
{
	u32 in;

	in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, 150);
}
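
/*
 * Example (an illustrative sketch, not part of the driver): a complete
 * firmware upgrade sequence using the mailbox. The image is assumed to
 * be already validated; real callers typically get it from userspace
 * through the NVMem interface.
 */
static int __maybe_unused dma_port_example_upgrade(struct tb_dma_port *dma,
						   const void *image,
						   size_t image_size)
{
	int ret;

	/* Write the new image to the non-active flash region */
	ret = dma_port_flash_write(dma, 0, image, image_size);
	if (ret)
		return ret;

	/*
	 * Ask the firmware to authenticate the image and swap the
	 * regions. The switch may reset here, so a timeout does not
	 * necessarily mean failure; the result is read separately with
	 * dma_port_flash_update_auth_status() once the switch is
	 * reachable again.
	 */
	ret = dma_port_flash_update_auth(dma);
	if (ret && ret != -ETIMEDOUT)
		return ret;

	return 0;
}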