1 /*
2  * Intel MIC Platform Software Stack (MPSS)
3  *
4  * Copyright(c) 2015 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License, version 2, as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13  * General Public License for more details.
14  *
15  * Intel SCIF driver.
16  *
17  */
18 #include "scif_main.h"
19 #include "scif_map.h"
20 
21 /*
22  * struct scif_dma_comp_cb - SCIF DMA completion callback
23  *
24  * @dma_completion_func: DMA completion callback
25  * @cb_cookie: DMA completion callback cookie
26  * @temp_buf: Temporary buffer
27  * @temp_buf_to_free: Temporary buffer to be freed
28  * @is_cache: Is a kmem_cache allocated buffer
29  * @dst_offset: Destination registration offset
30  * @dst_window: Destination registration window
31  * @len: Length of the temp buffer
32  * @temp_phys: DMA address of the temp buffer
33  * @sdev: The SCIF device
34  * @header_padding: padding for cache line alignment
35  */
36 struct scif_dma_comp_cb {
37 	void (*dma_completion_func)(void *cookie);
38 	void *cb_cookie;
39 	u8 *temp_buf;
40 	u8 *temp_buf_to_free;
41 	bool is_cache;
42 	s64 dst_offset;
43 	struct scif_window *dst_window;
44 	size_t len;
45 	dma_addr_t temp_phys;
46 	struct scif_dev *sdev;
47 	int header_padding;
48 };
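
/*
 * Note: scif_rma_list_dma_copy_wrapper() below sets up a scif_dma_comp_cb for
 * unaligned transfers; scif_rma_completion_cb() consumes it when the
 * bounce-buffer DMA completes and releases the temporary buffer.
 */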
49 
50 /**
51  * struct scif_copy_work - Work for DMA copy
52  *
53  * @src_offset: Starting source offset
54  * @dst_offset: Starting destination offset
55  * @src_window: Starting src registered window
56  * @dst_window: Starting dst registered window
57  * @loopback: true if this is a loopback DMA transfer
58  * @len: Length of the transfer
59  * @comp_cb: DMA copy completion callback
60  * @remote_dev: The remote SCIF peer device
61  * @fence_type: polling or interrupt based
62  * @ordered: is this a tail byte ordered DMA transfer
63  */
64 struct scif_copy_work {
65 	s64 src_offset;
66 	s64 dst_offset;
67 	struct scif_window *src_window;
68 	struct scif_window *dst_window;
69 	int loopback;
70 	size_t len;
71 	struct scif_dma_comp_cb   *comp_cb;
72 	struct scif_dev	*remote_dev;
73 	int fence_type;
74 	bool ordered;
75 };
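
/*
 * Illustrative sketch (not a call site in this file): scif_rma_copy() below
 * fills in a scif_copy_work roughly as follows for a local-to-remote
 * transfer, assuming the local and remote windows have already been found:
 *
 *	struct scif_copy_work copy_work;
 *
 *	copy_work.len = len;
 *	copy_work.loopback = loopback;
 *	copy_work.remote_dev = ep->remote_dev;
 *	copy_work.ordered = !!((flags & SCIF_RMA_ORDERED) && last_chunk);
 *	copy_work.src_offset = loffset;
 *	copy_work.src_window = local_window;
 *	copy_work.dst_offset = roffset;
 *	copy_work.dst_window = remote_window;
 */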
76 
77 /**
78  * scif_reserve_dma_chan:
79  * @ep: Endpoint Descriptor.
80  *
81  * This routine reserves a DMA channel for a particular
82  * endpoint. All DMA transfers for an endpoint are always
83  * programmed on the same DMA channel.
84  */
int scif_reserve_dma_chan(struct scif_endpt *ep)
86 {
87 	int err = 0;
88 	struct scif_dev *scifdev;
89 	struct scif_hw_dev *sdev;
90 	struct dma_chan *chan;
91 
92 	/* Loopback DMAs are not supported on the management node */
93 	if (!scif_info.nodeid && scifdev_self(ep->remote_dev))
94 		return 0;
95 	if (scif_info.nodeid)
96 		scifdev = &scif_dev[0];
97 	else
98 		scifdev = ep->remote_dev;
99 	sdev = scifdev->sdev;
100 	if (!sdev->num_dma_ch)
101 		return -ENODEV;
102 	chan = sdev->dma_ch[scifdev->dma_ch_idx];
103 	scifdev->dma_ch_idx = (scifdev->dma_ch_idx + 1) % sdev->num_dma_ch;
104 	mutex_lock(&ep->rma_info.rma_lock);
105 	ep->rma_info.dma_chan = chan;
106 	mutex_unlock(&ep->rma_info.rma_lock);
107 	return err;
108 }
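
/*
 * Usage sketch (hypothetical caller, not taken from this file): an endpoint
 * reserves its channel once before issuing RMA transfers and then reads it
 * back from its RMA state:
 *
 *	err = scif_reserve_dma_chan(ep);
 *	if (err)
 *		return err;
 *	chan = ep->rma_info.dma_chan;
 */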
109 
110 #ifdef CONFIG_MMU_NOTIFIER
/**
 * __scif_rma_destroy_tcw:
 * @mmn: MMU notifier entry whose cached windows are examined
 * @start: starting virtual address of the range being invalidated
 * @len: length of the range
 *
 * This routine destroys temporary cached windows which overlap the range.
 */
static
void __scif_rma_destroy_tcw(struct scif_mmu_notif *mmn,
			    u64 start, u64 len)
119 {
120 	struct list_head *item, *tmp;
121 	struct scif_window *window;
122 	u64 start_va, end_va;
123 	u64 end = start + len;
124 
125 	if (end <= start)
126 		return;
127 
128 	list_for_each_safe(item, tmp, &mmn->tc_reg_list) {
129 		window = list_entry(item, struct scif_window, list);
130 		if (!len)
131 			break;
132 		start_va = window->va_for_temp;
133 		end_va = start_va + (window->nr_pages << PAGE_SHIFT);
134 		if (start < start_va && end <= start_va)
135 			break;
136 		if (start >= end_va)
137 			continue;
138 		__scif_rma_destroy_tcw_helper(window);
139 	}
140 }
141 
static void scif_rma_destroy_tcw(struct scif_mmu_notif *mmn, u64 start, u64 len)
143 {
144 	struct scif_endpt *ep = mmn->ep;
145 
146 	spin_lock(&ep->rma_info.tc_lock);
147 	__scif_rma_destroy_tcw(mmn, start, len);
148 	spin_unlock(&ep->rma_info.tc_lock);
149 }
150 
static void scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
152 {
153 	struct list_head *item, *tmp;
154 	struct scif_mmu_notif *mmn;
155 
156 	list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
157 		mmn = list_entry(item, struct scif_mmu_notif, list);
158 		scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
159 	}
160 }
161 
static void __scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
163 {
164 	struct list_head *item, *tmp;
165 	struct scif_mmu_notif *mmn;
166 
167 	spin_lock(&ep->rma_info.tc_lock);
168 	list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
169 		mmn = list_entry(item, struct scif_mmu_notif, list);
170 		__scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
171 	}
172 	spin_unlock(&ep->rma_info.tc_lock);
173 }
174 
static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
176 {
177 	if ((cur_bytes >> PAGE_SHIFT) > scif_info.rma_tc_limit)
178 		return false;
179 	if ((atomic_read(&ep->rma_info.tcw_total_pages)
180 			+ (cur_bytes >> PAGE_SHIFT)) >
181 			scif_info.rma_tc_limit) {
182 		dev_info(scif_info.mdev.this_device,
183 			 "%s %d total=%d, current=%zu reached max\n",
184 			 __func__, __LINE__,
185 			 atomic_read(&ep->rma_info.tcw_total_pages),
186 			 (1 + (cur_bytes >> PAGE_SHIFT)));
187 		scif_rma_destroy_tcw_invalid();
188 		__scif_rma_destroy_tcw_ep(ep);
189 	}
190 	return true;
191 }
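
/*
 * Note: scif_info.rma_tc_limit is expressed in pages, so both the size of a
 * single request and the endpoint's running total of cached pages are
 * compared against it above before a new window may be cached.
 */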
192 
static void scif_mmu_notifier_release(struct mmu_notifier *mn,
				      struct mm_struct *mm)
195 {
196 	struct scif_mmu_notif	*mmn;
197 
198 	mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
199 	scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
200 	schedule_work(&scif_info.misc_work);
201 }
202 
static int scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end,
						    bool blockable)
208 {
209 	struct scif_mmu_notif	*mmn;
210 
211 	mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
212 	scif_rma_destroy_tcw(mmn, start, end - start);
213 
214 	return 0;
215 }
216 
static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						   struct mm_struct *mm,
						   unsigned long start,
						   unsigned long end)
221 {
222 	/*
223 	 * Nothing to do here, everything needed was done in
224 	 * invalidate_range_start.
225 	 */
226 }
227 
228 static const struct mmu_notifier_ops scif_mmu_notifier_ops = {
229 	.release = scif_mmu_notifier_release,
230 	.clear_flush_young = NULL,
231 	.invalidate_range_start = scif_mmu_notifier_invalidate_range_start,
232 	.invalidate_range_end = scif_mmu_notifier_invalidate_range_end};
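
/*
 * The callbacks above tear down temporary cached windows when the process
 * address space changes: .release destroys every cached window for the
 * notifier and kicks the misc work, while .invalidate_range_start destroys
 * only the windows overlapping the invalidated range.
 */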
233 
static void scif_ep_unregister_mmu_notifier(struct scif_endpt *ep)
235 {
236 	struct scif_endpt_rma_info *rma = &ep->rma_info;
237 	struct scif_mmu_notif *mmn = NULL;
238 	struct list_head *item, *tmp;
239 
240 	mutex_lock(&ep->rma_info.mmn_lock);
241 	list_for_each_safe(item, tmp, &rma->mmn_list) {
242 		mmn = list_entry(item, struct scif_mmu_notif, list);
243 		mmu_notifier_unregister(&mmn->ep_mmu_notifier, mmn->mm);
244 		list_del(item);
245 		kfree(mmn);
246 	}
247 	mutex_unlock(&ep->rma_info.mmn_lock);
248 }
249 
static void scif_init_mmu_notifier(struct scif_mmu_notif *mmn,
				   struct mm_struct *mm, struct scif_endpt *ep)
252 {
253 	mmn->ep = ep;
254 	mmn->mm = mm;
255 	mmn->ep_mmu_notifier.ops = &scif_mmu_notifier_ops;
256 	INIT_LIST_HEAD(&mmn->list);
257 	INIT_LIST_HEAD(&mmn->tc_reg_list);
258 }
259 
static struct scif_mmu_notif *
scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma)
262 {
263 	struct scif_mmu_notif *mmn;
264 
265 	list_for_each_entry(mmn, &rma->mmn_list, list)
266 		if (mmn->mm == mm)
267 			return mmn;
268 	return NULL;
269 }
270 
static struct scif_mmu_notif *
scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
273 {
274 	struct scif_mmu_notif *mmn
275 		 = kzalloc(sizeof(*mmn), GFP_KERNEL);
276 
277 	if (!mmn)
278 		return ERR_PTR(-ENOMEM);
279 
280 	scif_init_mmu_notifier(mmn, current->mm, ep);
281 	if (mmu_notifier_register(&mmn->ep_mmu_notifier, current->mm)) {
282 		kfree(mmn);
283 		return ERR_PTR(-EBUSY);
284 	}
285 	list_add(&mmn->list, &ep->rma_info.mmn_list);
286 	return mmn;
287 }
288 
289 /*
290  * Called from the misc thread to destroy temporary cached windows and
291  * unregister the MMU notifier for the SCIF endpoint.
292  */
void scif_mmu_notif_handler(struct work_struct *work)
294 {
295 	struct list_head *pos, *tmpq;
296 	struct scif_endpt *ep;
297 restart:
298 	scif_rma_destroy_tcw_invalid();
299 	spin_lock(&scif_info.rmalock);
300 	list_for_each_safe(pos, tmpq, &scif_info.mmu_notif_cleanup) {
301 		ep = list_entry(pos, struct scif_endpt, mmu_list);
302 		list_del(&ep->mmu_list);
303 		spin_unlock(&scif_info.rmalock);
304 		scif_rma_destroy_tcw_ep(ep);
305 		scif_ep_unregister_mmu_notifier(ep);
306 		goto restart;
307 	}
308 	spin_unlock(&scif_info.rmalock);
309 }
310 
static bool scif_is_set_reg_cache(int flags)
312 {
313 	return !!(flags & SCIF_RMA_USECACHE);
314 }
315 #else
static struct scif_mmu_notif *
scif_find_mmu_notifier(struct mm_struct *mm,
		       struct scif_endpt_rma_info *rma)
319 {
320 	return NULL;
321 }
322 
static struct scif_mmu_notif *
scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
325 {
326 	return NULL;
327 }
328 
void scif_mmu_notif_handler(struct work_struct *work)
330 {
331 }
332 
static bool scif_is_set_reg_cache(int flags)
334 {
335 	return false;
336 }
337 
static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
339 {
340 	return false;
341 }
342 #endif
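
/*
 * Without CONFIG_MMU_NOTIFIER the stubs above disable registration caching:
 * scif_is_set_reg_cache() and scif_rma_tc_can_cache() always return false,
 * so scif_rma_copy() always registers a fresh temporary window for user
 * buffers.
 */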
343 
/**
 * scif_register_temp:
 * @epd: End Point Descriptor.
 * @addr: virtual address to/from which to copy
 * @len: length of range to copy
 * @prot: read/write protection requested for the pinned pages
 * @out_offset: computed offset returned by reference.
 * @out_window: allocated registered window returned by reference.
 *
 * Create a temporary registered window. The peer will not know about this
 * window. This API is used by the scif_vreadfrom()/scif_vwriteto() APIs.
 */
static int
scif_register_temp(scif_epd_t epd, unsigned long addr, size_t len, int prot,
		   off_t *out_offset, struct scif_window **out_window)
358 {
359 	struct scif_endpt *ep = (struct scif_endpt *)epd;
360 	int err;
361 	scif_pinned_pages_t pinned_pages;
362 	size_t aligned_len;
363 
364 	aligned_len = ALIGN(len, PAGE_SIZE);
365 
366 	err = __scif_pin_pages((void *)(addr & PAGE_MASK),
367 			       aligned_len, &prot, 0, &pinned_pages);
368 	if (err)
369 		return err;
370 
371 	pinned_pages->prot = prot;
372 
373 	/* Compute the offset for this registration */
374 	err = scif_get_window_offset(ep, 0, 0,
375 				     aligned_len >> PAGE_SHIFT,
376 				     (s64 *)out_offset);
377 	if (err)
378 		goto error_unpin;
379 
380 	/* Allocate and prepare self registration window */
381 	*out_window = scif_create_window(ep, aligned_len >> PAGE_SHIFT,
382 					*out_offset, true);
383 	if (!*out_window) {
384 		scif_free_window_offset(ep, NULL, *out_offset);
385 		err = -ENOMEM;
386 		goto error_unpin;
387 	}
388 
389 	(*out_window)->pinned_pages = pinned_pages;
390 	(*out_window)->nr_pages = pinned_pages->nr_pages;
391 	(*out_window)->prot = pinned_pages->prot;
392 
393 	(*out_window)->va_for_temp = addr & PAGE_MASK;
394 	err = scif_map_window(ep->remote_dev, *out_window);
395 	if (err) {
396 		/* Something went wrong! Rollback */
397 		scif_destroy_window(ep, *out_window);
398 		*out_window = NULL;
399 	} else {
400 		*out_offset |= (addr - (*out_window)->va_for_temp);
401 	}
402 	return err;
403 error_unpin:
404 	if (err)
405 		dev_err(&ep->remote_dev->sdev->dev,
406 			"%s %d err %d\n", __func__, __LINE__, err);
407 	scif_unpin_pages(pinned_pages);
408 	return err;
409 }
410 
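/* Timeout for blocking on DMA completion: 3 seconds expressed in jiffies */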
411 #define SCIF_DMA_TO (3 * HZ)
412 
/*
 * scif_sync_dma - Program a DMA without an interrupt descriptor
 *
 * @sdev - The SCIF hardware device used for DMA registration.
 * @chan - DMA channel to be used.
 * @sync_wait - Poll for DMA completion before returning?
 *
 * Return 0 on success and -errno on error.
 */
static int scif_sync_dma(struct scif_hw_dev *sdev, struct dma_chan *chan,
			 bool sync_wait)
425 {
426 	int err = 0;
427 	struct dma_async_tx_descriptor *tx = NULL;
428 	enum dma_ctrl_flags flags = DMA_PREP_FENCE;
429 	dma_cookie_t cookie;
430 	struct dma_device *ddev;
431 
432 	if (!chan) {
433 		err = -EIO;
434 		dev_err(&sdev->dev, "%s %d err %d\n",
435 			__func__, __LINE__, err);
436 		return err;
437 	}
438 	ddev = chan->device;
439 
440 	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
441 	if (!tx) {
442 		err = -ENOMEM;
443 		dev_err(&sdev->dev, "%s %d err %d\n",
444 			__func__, __LINE__, err);
445 		goto release;
446 	}
447 	cookie = tx->tx_submit(tx);
448 
449 	if (dma_submit_error(cookie)) {
450 		err = -ENOMEM;
451 		dev_err(&sdev->dev, "%s %d err %d\n",
452 			__func__, __LINE__, err);
453 		goto release;
454 	}
455 	if (!sync_wait) {
456 		dma_async_issue_pending(chan);
457 	} else {
458 		if (dma_sync_wait(chan, cookie) == DMA_COMPLETE) {
459 			err = 0;
460 		} else {
461 			err = -EIO;
462 			dev_err(&sdev->dev, "%s %d err %d\n",
463 				__func__, __LINE__, err);
464 		}
465 	}
466 release:
467 	return err;
468 }
469 
static void scif_dma_callback(void *arg)
471 {
472 	struct completion *done = (struct completion *)arg;
473 
474 	complete(done);
475 }
476 
477 #define SCIF_DMA_SYNC_WAIT true
478 #define SCIF_DMA_POLL BIT(0)
479 #define SCIF_DMA_INTR BIT(1)
480 
/*
 * scif_async_dma - Program a DMA with an interrupt descriptor
 *
 * @sdev - The SCIF hardware device used for DMA registration.
 * @chan - DMA channel to be used.
 *
 * Return 0 on success and -errno on error.
 */
static int scif_async_dma(struct scif_hw_dev *sdev, struct dma_chan *chan)
490 {
491 	int err = 0;
492 	struct dma_device *ddev;
493 	struct dma_async_tx_descriptor *tx = NULL;
494 	enum dma_ctrl_flags flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
495 	DECLARE_COMPLETION_ONSTACK(done_wait);
496 	dma_cookie_t cookie;
497 	enum dma_status status;
498 
499 	if (!chan) {
500 		err = -EIO;
501 		dev_err(&sdev->dev, "%s %d err %d\n",
502 			__func__, __LINE__, err);
503 		return err;
504 	}
505 	ddev = chan->device;
506 
507 	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
508 	if (!tx) {
509 		err = -ENOMEM;
510 		dev_err(&sdev->dev, "%s %d err %d\n",
511 			__func__, __LINE__, err);
512 		goto release;
513 	}
514 	reinit_completion(&done_wait);
515 	tx->callback = scif_dma_callback;
516 	tx->callback_param = &done_wait;
517 	cookie = tx->tx_submit(tx);
518 
519 	if (dma_submit_error(cookie)) {
520 		err = -ENOMEM;
521 		dev_err(&sdev->dev, "%s %d err %d\n",
522 			__func__, __LINE__, err);
523 		goto release;
524 	}
525 	dma_async_issue_pending(chan);
526 
527 	err = wait_for_completion_timeout(&done_wait, SCIF_DMA_TO);
528 	if (!err) {
529 		err = -EIO;
530 		dev_err(&sdev->dev, "%s %d err %d\n",
531 			__func__, __LINE__, err);
532 		goto release;
533 	}
534 	err = 0;
535 	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
536 	if (status != DMA_COMPLETE) {
537 		err = -EIO;
538 		dev_err(&sdev->dev, "%s %d err %d\n",
539 			__func__, __LINE__, err);
540 		goto release;
541 	}
542 release:
543 	return err;
544 }
545 
546 /*
547  * scif_drain_dma_poll - Drain all outstanding DMA operations for a particular
548  * DMA channel via polling.
549  *
550  * @sdev - The SCIF device
551  * @chan - DMA channel
552  * Return 0 on success and -errno on error.
553  */
static int scif_drain_dma_poll(struct scif_hw_dev *sdev, struct dma_chan *chan)
555 {
556 	if (!chan)
557 		return -EINVAL;
558 	return scif_sync_dma(sdev, chan, SCIF_DMA_SYNC_WAIT);
559 }
560 
561 /*
562  * scif_drain_dma_intr - Drain all outstanding DMA operations for a particular
563  * DMA channel via interrupt based blocking wait.
564  *
565  * @sdev - The SCIF device
566  * @chan - DMA channel
567  * Return 0 on success and -errno on error.
568  */
int scif_drain_dma_intr(struct scif_hw_dev *sdev, struct dma_chan *chan)
570 {
571 	if (!chan)
572 		return -EINVAL;
573 	return scif_async_dma(sdev, chan);
574 }
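
/*
 * Illustrative sketch, mirroring how the copy routines below use the drain
 * helpers: before CPU-copying unaligned tail bytes, outstanding DMA on the
 * channel is drained so the tail is written last:
 *
 *	if (work->ordered) {
 *		err = scif_drain_dma_poll(rdev->sdev, chan);
 *		if (err)
 *			return err;
 *	}
 */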
575 
576 /**
577  * scif_rma_destroy_windows:
578  *
579  * This routine destroys all windows queued for cleanup
580  */
void scif_rma_destroy_windows(void)
582 {
583 	struct list_head *item, *tmp;
584 	struct scif_window *window;
585 	struct scif_endpt *ep;
586 	struct dma_chan *chan;
587 
588 	might_sleep();
589 restart:
590 	spin_lock(&scif_info.rmalock);
591 	list_for_each_safe(item, tmp, &scif_info.rma) {
592 		window = list_entry(item, struct scif_window,
593 				    list);
594 		ep = (struct scif_endpt *)window->ep;
595 		chan = ep->rma_info.dma_chan;
596 
597 		list_del_init(&window->list);
598 		spin_unlock(&scif_info.rmalock);
599 		if (!chan || !scifdev_alive(ep) ||
600 		    !scif_drain_dma_intr(ep->remote_dev->sdev,
601 					 ep->rma_info.dma_chan))
602 			/* Remove window from global list */
603 			window->unreg_state = OP_COMPLETED;
604 		else
605 			dev_warn(&ep->remote_dev->sdev->dev,
606 				 "DMA engine hung?\n");
607 		if (window->unreg_state == OP_COMPLETED) {
608 			if (window->type == SCIF_WINDOW_SELF)
609 				scif_destroy_window(ep, window);
610 			else
611 				scif_destroy_remote_window(window);
612 			atomic_dec(&ep->rma_info.tw_refcount);
613 		}
614 		goto restart;
615 	}
616 	spin_unlock(&scif_info.rmalock);
617 }
618 
/**
 * scif_rma_destroy_tcw_invalid:
 *
 * This routine destroys temporary cached registered windows
 * which have been queued for cleanup.
 */
void scif_rma_destroy_tcw_invalid(void)
626 {
627 	struct list_head *item, *tmp;
628 	struct scif_window *window;
629 	struct scif_endpt *ep;
630 	struct dma_chan *chan;
631 
632 	might_sleep();
633 restart:
634 	spin_lock(&scif_info.rmalock);
635 	list_for_each_safe(item, tmp, &scif_info.rma_tc) {
636 		window = list_entry(item, struct scif_window, list);
637 		ep = (struct scif_endpt *)window->ep;
638 		chan = ep->rma_info.dma_chan;
639 		list_del_init(&window->list);
640 		spin_unlock(&scif_info.rmalock);
641 		mutex_lock(&ep->rma_info.rma_lock);
642 		if (!chan || !scifdev_alive(ep) ||
643 		    !scif_drain_dma_intr(ep->remote_dev->sdev,
644 					 ep->rma_info.dma_chan)) {
645 			atomic_sub(window->nr_pages,
646 				   &ep->rma_info.tcw_total_pages);
647 			scif_destroy_window(ep, window);
648 			atomic_dec(&ep->rma_info.tcw_refcount);
649 		} else {
650 			dev_warn(&ep->remote_dev->sdev->dev,
651 				 "DMA engine hung?\n");
652 		}
653 		mutex_unlock(&ep->rma_info.rma_lock);
654 		goto restart;
655 	}
656 	spin_unlock(&scif_info.rmalock);
657 }
658 
static inline
void *_get_local_va(off_t off, struct scif_window *window, size_t len)
661 {
662 	int page_nr = (off - window->offset) >> PAGE_SHIFT;
663 	off_t page_off = off & ~PAGE_MASK;
664 	void *va = NULL;
665 
666 	if (window->type == SCIF_WINDOW_SELF) {
667 		struct page **pages = window->pinned_pages->pages;
668 
669 		va = page_address(pages[page_nr]) + page_off;
670 	}
671 	return va;
672 }
673 
static inline
void *ioremap_remote(off_t off, struct scif_window *window,
		     size_t len, struct scif_dev *dev,
		     struct scif_window_iter *iter)
678 {
679 	dma_addr_t phys = scif_off_to_dma_addr(window, off, NULL, iter);
680 
681 	/*
682 	 * If the DMA address is not card relative then we need the DMA
683 	 * addresses to be an offset into the bar. The aperture base was already
684 	 * added so subtract it here since scif_ioremap is going to add it again
685 	 */
686 	if (!scifdev_self(dev) && window->type == SCIF_WINDOW_PEER &&
687 	    dev->sdev->aper && !dev->sdev->card_rel_da)
688 		phys = phys - dev->sdev->aper->pa;
689 	return scif_ioremap(phys, len, dev);
690 }
691 
static inline void
iounmap_remote(void *virt, size_t size, struct scif_copy_work *work)
694 {
695 	scif_iounmap(virt, size, work->remote_dev);
696 }
697 
/*
 * Takes care of ordering issues caused by:
 * 1. Hardware: CPU copies from the mgmt node to the card go through
 *    write-combining (WC) memory, which is weakly ordered.
 * 2. Software: memcpy() may reorder copy instructions for optimization.
 *    This can happen on both the mgmt node and the card.
 */
static inline void
scif_ordered_memcpy_toio(char *dst, const char *src, size_t count)
707 {
708 	if (!count)
709 		return;
710 
711 	memcpy_toio((void __iomem __force *)dst, src, --count);
712 	/* Order the last byte with the previous stores */
713 	wmb();
714 	*(dst + count) = *(src + count);
715 }
716 
static inline void scif_unaligned_cpy_toio(char *dst, const char *src,
					   size_t count, bool ordered)
719 {
720 	if (ordered)
721 		scif_ordered_memcpy_toio(dst, src, count);
722 	else
723 		memcpy_toio((void __iomem __force *)dst, src, count);
724 }
725 
static inline
void scif_ordered_memcpy_fromio(char *dst, const char *src, size_t count)
728 {
729 	if (!count)
730 		return;
731 
732 	memcpy_fromio(dst, (void __iomem __force *)src, --count);
733 	/* Order the last byte with the previous loads */
734 	rmb();
735 	*(dst + count) = *(src + count);
736 }
737 
static inline void scif_unaligned_cpy_fromio(char *dst, const char *src,
					     size_t count, bool ordered)
740 {
741 	if (ordered)
742 		scif_ordered_memcpy_fromio(dst, src, count);
743 	else
744 		memcpy_fromio(dst, (void __iomem __force *)src, count);
745 }
746 
747 #define SCIF_RMA_ERROR_CODE (~(dma_addr_t)0x0)
748 
/*
 * scif_off_to_dma_addr:
 * Obtain the DMA address for a given window and offset.
 * @window: Registered window.
 * @off: Window offset.
 * @nr_bytes: Return the number of contiguous bytes till the next DMA address.
 * @iter: Window iterator, updated with the index and start offset of the
 *	  contiguous chunk that was found.
 *
 * The nr_bytes value gives the caller an estimate of the maximum possible
 * DMA transfer, while the updated iterator provides a faster lookup for the
 * next iteration.
 */
dma_addr_t scif_off_to_dma_addr(struct scif_window *window, s64 off,
				size_t *nr_bytes, struct scif_window_iter *iter)
763 {
764 	int i, page_nr;
765 	s64 start, end;
766 	off_t page_off;
767 
768 	if (window->nr_pages == window->nr_contig_chunks) {
769 		page_nr = (off - window->offset) >> PAGE_SHIFT;
770 		page_off = off & ~PAGE_MASK;
771 
772 		if (nr_bytes)
773 			*nr_bytes = PAGE_SIZE - page_off;
774 		return window->dma_addr[page_nr] | page_off;
775 	}
776 	if (iter) {
777 		i = iter->index;
778 		start = iter->offset;
779 	} else {
780 		i =  0;
781 		start =  window->offset;
782 	}
783 	for (; i < window->nr_contig_chunks; i++) {
784 		end = start + (window->num_pages[i] << PAGE_SHIFT);
785 		if (off >= start && off < end) {
786 			if (iter) {
787 				iter->index = i;
788 				iter->offset = start;
789 			}
790 			if (nr_bytes)
791 				*nr_bytes = end - off;
792 			return (window->dma_addr[i] + (off - start));
793 		}
794 		start += (window->num_pages[i] << PAGE_SHIFT);
795 	}
796 	dev_err(scif_info.mdev.this_device,
797 		"%s %d BUG. Addr not found? window %p off 0x%llx\n",
798 		__func__, __LINE__, window, off);
799 	return SCIF_RMA_ERROR_CODE;
800 }
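
/*
 * Usage sketch (illustrative, mirroring the copy loops below): walking a
 * window with an iterator to obtain contiguous DMA chunks:
 *
 *	struct scif_window_iter iter;
 *	size_t nr_contig_bytes;
 *	dma_addr_t dma;
 *
 *	scif_init_window_iter(window, &iter);
 *	dma = scif_off_to_dma_addr(window, offset, &nr_contig_bytes, &iter);
 *	if (dma == SCIF_RMA_ERROR_CODE)
 *		return -ENXIO;	// error handling here is hypothetical
 *	loop_len = min(nr_contig_bytes, remaining_len);
 */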
801 
802 /*
803  * Copy between rma window and temporary buffer
804  */
static void scif_rma_local_cpu_copy(s64 offset, struct scif_window *window,
				    u8 *temp, size_t rem_len, bool to_temp)
807 {
808 	void *window_virt;
809 	size_t loop_len;
810 	int offset_in_page;
811 	s64 end_offset;
812 
813 	offset_in_page = offset & ~PAGE_MASK;
814 	loop_len = PAGE_SIZE - offset_in_page;
815 
816 	if (rem_len < loop_len)
817 		loop_len = rem_len;
818 
819 	window_virt = _get_local_va(offset, window, loop_len);
820 	if (!window_virt)
821 		return;
822 	if (to_temp)
823 		memcpy(temp, window_virt, loop_len);
824 	else
825 		memcpy(window_virt, temp, loop_len);
826 
827 	offset += loop_len;
828 	temp += loop_len;
829 	rem_len -= loop_len;
830 
831 	end_offset = window->offset +
832 		(window->nr_pages << PAGE_SHIFT);
833 	while (rem_len) {
834 		if (offset == end_offset) {
835 			window = list_next_entry(window, list);
836 			end_offset = window->offset +
837 				(window->nr_pages << PAGE_SHIFT);
838 		}
839 		loop_len = min(PAGE_SIZE, rem_len);
840 		window_virt = _get_local_va(offset, window, loop_len);
841 		if (!window_virt)
842 			return;
843 		if (to_temp)
844 			memcpy(temp, window_virt, loop_len);
845 		else
846 			memcpy(window_virt, temp, loop_len);
847 		offset	+= loop_len;
848 		temp	+= loop_len;
849 		rem_len	-= loop_len;
850 	}
851 }
852 
853 /**
854  * scif_rma_completion_cb:
855  * @data: RMA cookie
856  *
857  * RMA interrupt completion callback.
858  */
static void scif_rma_completion_cb(void *data)
860 {
861 	struct scif_dma_comp_cb *comp_cb = data;
862 
	/* Copy back from the temporary buffer if needed, then unmap and free it */
864 	if (comp_cb->dst_window)
865 		scif_rma_local_cpu_copy(comp_cb->dst_offset,
866 					comp_cb->dst_window,
867 					comp_cb->temp_buf +
868 					comp_cb->header_padding,
869 					comp_cb->len, false);
870 	scif_unmap_single(comp_cb->temp_phys, comp_cb->sdev,
871 			  SCIF_KMEM_UNALIGNED_BUF_SIZE);
872 	if (comp_cb->is_cache)
873 		kmem_cache_free(unaligned_cache,
874 				comp_cb->temp_buf_to_free);
875 	else
876 		kfree(comp_cb->temp_buf_to_free);
877 }
878 
/*
 * Copies between the temporary buffer and the offsets provided in work.
 * The transfer is split into an unaligned head (CPU copy via an ioremap'ed
 * mapping), a cache-line aligned body (DMA via the bounce buffer) and an
 * unaligned tail (CPU copy, after draining prior DMA when ordering is
 * requested).
 */
static int
scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
				 u8 *temp, struct dma_chan *chan,
				 bool src_local)
884 {
885 	struct scif_dma_comp_cb *comp_cb = work->comp_cb;
886 	dma_addr_t window_dma_addr, temp_dma_addr;
887 	dma_addr_t temp_phys = comp_cb->temp_phys;
888 	size_t loop_len, nr_contig_bytes = 0, remaining_len = work->len;
889 	int offset_in_ca, ret = 0;
890 	s64 end_offset, offset;
891 	struct scif_window *window;
892 	void *window_virt_addr;
893 	size_t tail_len;
894 	struct dma_async_tx_descriptor *tx;
895 	struct dma_device *dev = chan->device;
896 	dma_cookie_t cookie;
897 
898 	if (src_local) {
899 		offset = work->dst_offset;
900 		window = work->dst_window;
901 	} else {
902 		offset = work->src_offset;
903 		window = work->src_window;
904 	}
905 
906 	offset_in_ca = offset & (L1_CACHE_BYTES - 1);
907 	if (offset_in_ca) {
908 		loop_len = L1_CACHE_BYTES - offset_in_ca;
909 		loop_len = min(loop_len, remaining_len);
910 		window_virt_addr = ioremap_remote(offset, window,
911 						  loop_len,
912 						  work->remote_dev,
913 						  NULL);
914 		if (!window_virt_addr)
915 			return -ENOMEM;
916 		if (src_local)
917 			scif_unaligned_cpy_toio(window_virt_addr, temp,
918 						loop_len,
919 						work->ordered &&
920 						!(remaining_len - loop_len));
921 		else
922 			scif_unaligned_cpy_fromio(temp, window_virt_addr,
923 						  loop_len, work->ordered &&
924 						  !(remaining_len - loop_len));
925 		iounmap_remote(window_virt_addr, loop_len, work);
926 
927 		offset += loop_len;
928 		temp += loop_len;
929 		temp_phys += loop_len;
930 		remaining_len -= loop_len;
931 	}
932 
933 	offset_in_ca = offset & ~PAGE_MASK;
934 	end_offset = window->offset +
935 		(window->nr_pages << PAGE_SHIFT);
936 
937 	tail_len = remaining_len & (L1_CACHE_BYTES - 1);
938 	remaining_len -= tail_len;
939 	while (remaining_len) {
940 		if (offset == end_offset) {
941 			window = list_next_entry(window, list);
942 			end_offset = window->offset +
943 				(window->nr_pages << PAGE_SHIFT);
944 		}
945 		if (scif_is_mgmt_node())
946 			temp_dma_addr = temp_phys;
947 		else
948 			/* Fix if we ever enable IOMMU on the card */
949 			temp_dma_addr = (dma_addr_t)virt_to_phys(temp);
950 		window_dma_addr = scif_off_to_dma_addr(window, offset,
951 						       &nr_contig_bytes,
952 						       NULL);
953 		loop_len = min(nr_contig_bytes, remaining_len);
954 		if (src_local) {
955 			if (work->ordered && !tail_len &&
956 			    !(remaining_len - loop_len) &&
957 			    loop_len != L1_CACHE_BYTES) {
958 				/*
959 				 * Break up the last chunk of the transfer into
				 * two steps if there is no tail, to guarantee
961 				 * DMA ordering. SCIF_DMA_POLLING inserts
962 				 * a status update descriptor in step 1 which
963 				 * acts as a double sided synchronization fence
964 				 * for the DMA engine to ensure that the last
965 				 * cache line in step 2 is updated last.
966 				 */
967 				/* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
968 				tx =
969 				dev->device_prep_dma_memcpy(chan,
970 							    window_dma_addr,
971 							    temp_dma_addr,
972 							    loop_len -
973 							    L1_CACHE_BYTES,
974 							    DMA_PREP_FENCE);
975 				if (!tx) {
976 					ret = -ENOMEM;
977 					goto err;
978 				}
979 				cookie = tx->tx_submit(tx);
980 				if (dma_submit_error(cookie)) {
981 					ret = -ENOMEM;
982 					goto err;
983 				}
984 				dma_async_issue_pending(chan);
985 				offset += (loop_len - L1_CACHE_BYTES);
986 				temp_dma_addr += (loop_len - L1_CACHE_BYTES);
987 				window_dma_addr += (loop_len - L1_CACHE_BYTES);
988 				remaining_len -= (loop_len - L1_CACHE_BYTES);
989 				loop_len = remaining_len;
990 
991 				/* Step 2) DMA: L1_CACHE_BYTES */
992 				tx =
993 				dev->device_prep_dma_memcpy(chan,
994 							    window_dma_addr,
995 							    temp_dma_addr,
996 							    loop_len, 0);
997 				if (!tx) {
998 					ret = -ENOMEM;
999 					goto err;
1000 				}
1001 				cookie = tx->tx_submit(tx);
1002 				if (dma_submit_error(cookie)) {
1003 					ret = -ENOMEM;
1004 					goto err;
1005 				}
1006 				dma_async_issue_pending(chan);
1007 			} else {
1008 				tx =
1009 				dev->device_prep_dma_memcpy(chan,
1010 							    window_dma_addr,
1011 							    temp_dma_addr,
1012 							    loop_len, 0);
1013 				if (!tx) {
1014 					ret = -ENOMEM;
1015 					goto err;
1016 				}
1017 				cookie = tx->tx_submit(tx);
1018 				if (dma_submit_error(cookie)) {
1019 					ret = -ENOMEM;
1020 					goto err;
1021 				}
1022 				dma_async_issue_pending(chan);
1023 			}
1024 		} else {
1025 			tx = dev->device_prep_dma_memcpy(chan, temp_dma_addr,
1026 					window_dma_addr, loop_len, 0);
1027 			if (!tx) {
1028 				ret = -ENOMEM;
1029 				goto err;
1030 			}
1031 			cookie = tx->tx_submit(tx);
1032 			if (dma_submit_error(cookie)) {
1033 				ret = -ENOMEM;
1034 				goto err;
1035 			}
1036 			dma_async_issue_pending(chan);
1037 		}
1038 		if (ret < 0)
1039 			goto err;
1040 		offset += loop_len;
1041 		temp += loop_len;
1042 		temp_phys += loop_len;
1043 		remaining_len -= loop_len;
1044 		offset_in_ca = 0;
1045 	}
1046 	if (tail_len) {
1047 		if (offset == end_offset) {
1048 			window = list_next_entry(window, list);
1049 			end_offset = window->offset +
1050 				(window->nr_pages << PAGE_SHIFT);
1051 		}
1052 		window_virt_addr = ioremap_remote(offset, window, tail_len,
1053 						  work->remote_dev,
1054 						  NULL);
1055 		if (!window_virt_addr)
1056 			return -ENOMEM;
1057 		/*
1058 		 * The CPU copy for the tail bytes must be initiated only once
1059 		 * previous DMA transfers for this endpoint have completed
1060 		 * to guarantee ordering.
1061 		 */
1062 		if (work->ordered) {
1063 			struct scif_dev *rdev = work->remote_dev;
1064 
1065 			ret = scif_drain_dma_intr(rdev->sdev, chan);
1066 			if (ret)
1067 				return ret;
1068 		}
1069 		if (src_local)
1070 			scif_unaligned_cpy_toio(window_virt_addr, temp,
1071 						tail_len, work->ordered);
1072 		else
1073 			scif_unaligned_cpy_fromio(temp, window_virt_addr,
1074 						  tail_len, work->ordered);
1075 		iounmap_remote(window_virt_addr, tail_len, work);
1076 	}
1077 	tx = dev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_INTERRUPT);
1078 	if (!tx) {
1079 		ret = -ENOMEM;
1080 		return ret;
1081 	}
1082 	tx->callback = &scif_rma_completion_cb;
1083 	tx->callback_param = comp_cb;
1084 	cookie = tx->tx_submit(tx);
1085 
1086 	if (dma_submit_error(cookie)) {
1087 		ret = -ENOMEM;
1088 		return ret;
1089 	}
1090 	dma_async_issue_pending(chan);
1091 	return 0;
1092 err:
1093 	dev_err(scif_info.mdev.this_device,
1094 		"%s %d Desc Prog Failed ret %d\n",
1095 		__func__, __LINE__, ret);
1096 	return ret;
1097 }
1098 
1099 /*
1100  * _scif_rma_list_dma_copy_aligned:
1101  *
1102  * Traverse all the windows and perform DMA copy.
1103  */
static int _scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
					   struct dma_chan *chan)
1106 {
1107 	dma_addr_t src_dma_addr, dst_dma_addr;
1108 	size_t loop_len, remaining_len, src_contig_bytes = 0;
1109 	size_t dst_contig_bytes = 0;
1110 	struct scif_window_iter src_win_iter;
1111 	struct scif_window_iter dst_win_iter;
1112 	s64 end_src_offset, end_dst_offset;
1113 	struct scif_window *src_window = work->src_window;
1114 	struct scif_window *dst_window = work->dst_window;
1115 	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
1116 	int ret = 0;
1117 	struct dma_async_tx_descriptor *tx;
1118 	struct dma_device *dev = chan->device;
1119 	dma_cookie_t cookie;
1120 
1121 	remaining_len = work->len;
1122 
1123 	scif_init_window_iter(src_window, &src_win_iter);
1124 	scif_init_window_iter(dst_window, &dst_win_iter);
1125 	end_src_offset = src_window->offset +
1126 		(src_window->nr_pages << PAGE_SHIFT);
1127 	end_dst_offset = dst_window->offset +
1128 		(dst_window->nr_pages << PAGE_SHIFT);
1129 	while (remaining_len) {
1130 		if (src_offset == end_src_offset) {
1131 			src_window = list_next_entry(src_window, list);
1132 			end_src_offset = src_window->offset +
1133 				(src_window->nr_pages << PAGE_SHIFT);
1134 			scif_init_window_iter(src_window, &src_win_iter);
1135 		}
1136 		if (dst_offset == end_dst_offset) {
1137 			dst_window = list_next_entry(dst_window, list);
1138 			end_dst_offset = dst_window->offset +
1139 				(dst_window->nr_pages << PAGE_SHIFT);
1140 			scif_init_window_iter(dst_window, &dst_win_iter);
1141 		}
1142 
1143 		/* compute dma addresses for transfer */
1144 		src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
1145 						    &src_contig_bytes,
1146 						    &src_win_iter);
1147 		dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
1148 						    &dst_contig_bytes,
1149 						    &dst_win_iter);
1150 		loop_len = min(src_contig_bytes, dst_contig_bytes);
1151 		loop_len = min(loop_len, remaining_len);
1152 		if (work->ordered && !(remaining_len - loop_len)) {
1153 			/*
1154 			 * Break up the last chunk of the transfer into two
1155 			 * steps to ensure that the last byte in step 2 is
1156 			 * updated last.
1157 			 */
1158 			/* Step 1) DMA: Body Length - 1 */
1159 			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1160 							 src_dma_addr,
1161 							 loop_len - 1,
1162 							 DMA_PREP_FENCE);
1163 			if (!tx) {
1164 				ret = -ENOMEM;
1165 				goto err;
1166 			}
1167 			cookie = tx->tx_submit(tx);
1168 			if (dma_submit_error(cookie)) {
1169 				ret = -ENOMEM;
1170 				goto err;
1171 			}
1172 			src_offset += (loop_len - 1);
1173 			dst_offset += (loop_len - 1);
1174 			src_dma_addr += (loop_len - 1);
1175 			dst_dma_addr += (loop_len - 1);
1176 			remaining_len -= (loop_len - 1);
1177 			loop_len = remaining_len;
1178 
			/* Step 2) DMA: 1 byte */
1180 			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1181 					src_dma_addr, loop_len, 0);
1182 			if (!tx) {
1183 				ret = -ENOMEM;
1184 				goto err;
1185 			}
1186 			cookie = tx->tx_submit(tx);
1187 			if (dma_submit_error(cookie)) {
1188 				ret = -ENOMEM;
1189 				goto err;
1190 			}
1191 			dma_async_issue_pending(chan);
1192 		} else {
1193 			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1194 					src_dma_addr, loop_len, 0);
1195 			if (!tx) {
1196 				ret = -ENOMEM;
1197 				goto err;
1198 			}
1199 			cookie = tx->tx_submit(tx);
1200 			if (dma_submit_error(cookie)) {
1201 				ret = -ENOMEM;
1202 				goto err;
1203 			}
1204 		}
1205 		src_offset += loop_len;
1206 		dst_offset += loop_len;
1207 		remaining_len -= loop_len;
1208 	}
1209 	return ret;
1210 err:
1211 	dev_err(scif_info.mdev.this_device,
1212 		"%s %d Desc Prog Failed ret %d\n",
1213 		__func__, __LINE__, ret);
1214 	return ret;
1215 }
1216 
1217 /*
1218  * scif_rma_list_dma_copy_aligned:
1219  *
1220  * Traverse all the windows and perform DMA copy.
1221  */
static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
					  struct dma_chan *chan)
1224 {
1225 	dma_addr_t src_dma_addr, dst_dma_addr;
1226 	size_t loop_len, remaining_len, tail_len, src_contig_bytes = 0;
1227 	size_t dst_contig_bytes = 0;
1228 	int src_cache_off;
1229 	s64 end_src_offset, end_dst_offset;
1230 	struct scif_window_iter src_win_iter;
1231 	struct scif_window_iter dst_win_iter;
1232 	void *src_virt, *dst_virt;
1233 	struct scif_window *src_window = work->src_window;
1234 	struct scif_window *dst_window = work->dst_window;
1235 	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
1236 	int ret = 0;
1237 	struct dma_async_tx_descriptor *tx;
1238 	struct dma_device *dev = chan->device;
1239 	dma_cookie_t cookie;
1240 
1241 	remaining_len = work->len;
1242 	scif_init_window_iter(src_window, &src_win_iter);
1243 	scif_init_window_iter(dst_window, &dst_win_iter);
1244 
1245 	src_cache_off = src_offset & (L1_CACHE_BYTES - 1);
1246 	if (src_cache_off != 0) {
1247 		/* Head */
1248 		loop_len = L1_CACHE_BYTES - src_cache_off;
1249 		loop_len = min(loop_len, remaining_len);
1250 		src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
1251 		dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
1252 		if (src_window->type == SCIF_WINDOW_SELF)
1253 			src_virt = _get_local_va(src_offset, src_window,
1254 						 loop_len);
1255 		else
1256 			src_virt = ioremap_remote(src_offset, src_window,
1257 						  loop_len,
1258 						  work->remote_dev, NULL);
1259 		if (!src_virt)
1260 			return -ENOMEM;
1261 		if (dst_window->type == SCIF_WINDOW_SELF)
1262 			dst_virt = _get_local_va(dst_offset, dst_window,
1263 						 loop_len);
1264 		else
1265 			dst_virt = ioremap_remote(dst_offset, dst_window,
1266 						  loop_len,
1267 						  work->remote_dev, NULL);
1268 		if (!dst_virt) {
1269 			if (src_window->type != SCIF_WINDOW_SELF)
1270 				iounmap_remote(src_virt, loop_len, work);
1271 			return -ENOMEM;
1272 		}
1273 		if (src_window->type == SCIF_WINDOW_SELF)
1274 			scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
1275 						remaining_len == loop_len ?
1276 						work->ordered : false);
1277 		else
1278 			scif_unaligned_cpy_fromio(dst_virt, src_virt, loop_len,
1279 						  remaining_len == loop_len ?
1280 						  work->ordered : false);
1281 		if (src_window->type != SCIF_WINDOW_SELF)
1282 			iounmap_remote(src_virt, loop_len, work);
1283 		if (dst_window->type != SCIF_WINDOW_SELF)
1284 			iounmap_remote(dst_virt, loop_len, work);
1285 		src_offset += loop_len;
1286 		dst_offset += loop_len;
1287 		remaining_len -= loop_len;
1288 	}
1289 
1290 	end_src_offset = src_window->offset +
1291 		(src_window->nr_pages << PAGE_SHIFT);
1292 	end_dst_offset = dst_window->offset +
1293 		(dst_window->nr_pages << PAGE_SHIFT);
1294 	tail_len = remaining_len & (L1_CACHE_BYTES - 1);
1295 	remaining_len -= tail_len;
1296 	while (remaining_len) {
1297 		if (src_offset == end_src_offset) {
1298 			src_window = list_next_entry(src_window, list);
1299 			end_src_offset = src_window->offset +
1300 				(src_window->nr_pages << PAGE_SHIFT);
1301 			scif_init_window_iter(src_window, &src_win_iter);
1302 		}
1303 		if (dst_offset == end_dst_offset) {
1304 			dst_window = list_next_entry(dst_window, list);
1305 			end_dst_offset = dst_window->offset +
1306 				(dst_window->nr_pages << PAGE_SHIFT);
1307 			scif_init_window_iter(dst_window, &dst_win_iter);
1308 		}
1309 
1310 		/* compute dma addresses for transfer */
1311 		src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
1312 						    &src_contig_bytes,
1313 						    &src_win_iter);
1314 		dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
1315 						    &dst_contig_bytes,
1316 						    &dst_win_iter);
1317 		loop_len = min(src_contig_bytes, dst_contig_bytes);
1318 		loop_len = min(loop_len, remaining_len);
1319 		if (work->ordered && !tail_len &&
1320 		    !(remaining_len - loop_len)) {
1321 			/*
		 * Break up the last chunk of the transfer into two
		 * steps if there is no tail, to guarantee DMA ordering.
1324 			 * Passing SCIF_DMA_POLLING inserts a status update
1325 			 * descriptor in step 1 which acts as a double sided
1326 			 * synchronization fence for the DMA engine to ensure
1327 			 * that the last cache line in step 2 is updated last.
1328 			 */
1329 			/* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
1330 			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1331 							 src_dma_addr,
1332 							 loop_len -
1333 							 L1_CACHE_BYTES,
1334 							 DMA_PREP_FENCE);
1335 			if (!tx) {
1336 				ret = -ENOMEM;
1337 				goto err;
1338 			}
1339 			cookie = tx->tx_submit(tx);
1340 			if (dma_submit_error(cookie)) {
1341 				ret = -ENOMEM;
1342 				goto err;
1343 			}
1344 			dma_async_issue_pending(chan);
1345 			src_offset += (loop_len - L1_CACHE_BYTES);
1346 			dst_offset += (loop_len - L1_CACHE_BYTES);
1347 			src_dma_addr += (loop_len - L1_CACHE_BYTES);
1348 			dst_dma_addr += (loop_len - L1_CACHE_BYTES);
1349 			remaining_len -= (loop_len - L1_CACHE_BYTES);
1350 			loop_len = remaining_len;
1351 
1352 			/* Step 2) DMA: L1_CACHE_BYTES */
1353 			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1354 							 src_dma_addr,
1355 							 loop_len, 0);
1356 			if (!tx) {
1357 				ret = -ENOMEM;
1358 				goto err;
1359 			}
1360 			cookie = tx->tx_submit(tx);
1361 			if (dma_submit_error(cookie)) {
1362 				ret = -ENOMEM;
1363 				goto err;
1364 			}
1365 			dma_async_issue_pending(chan);
1366 		} else {
1367 			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
1368 							 src_dma_addr,
1369 							 loop_len, 0);
1370 			if (!tx) {
1371 				ret = -ENOMEM;
1372 				goto err;
1373 			}
1374 			cookie = tx->tx_submit(tx);
1375 			if (dma_submit_error(cookie)) {
1376 				ret = -ENOMEM;
1377 				goto err;
1378 			}
1379 			dma_async_issue_pending(chan);
1380 		}
1381 		src_offset += loop_len;
1382 		dst_offset += loop_len;
1383 		remaining_len -= loop_len;
1384 	}
1385 	remaining_len = tail_len;
1386 	if (remaining_len) {
1387 		loop_len = remaining_len;
1388 		if (src_offset == end_src_offset)
1389 			src_window = list_next_entry(src_window, list);
1390 		if (dst_offset == end_dst_offset)
1391 			dst_window = list_next_entry(dst_window, list);
1392 
1393 		src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
1394 		dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
1395 		/*
1396 		 * The CPU copy for the tail bytes must be initiated only once
1397 		 * previous DMA transfers for this endpoint have completed to
1398 		 * guarantee ordering.
1399 		 */
1400 		if (work->ordered) {
1401 			struct scif_dev *rdev = work->remote_dev;
1402 
1403 			ret = scif_drain_dma_poll(rdev->sdev, chan);
1404 			if (ret)
1405 				return ret;
1406 		}
1407 		if (src_window->type == SCIF_WINDOW_SELF)
1408 			src_virt = _get_local_va(src_offset, src_window,
1409 						 loop_len);
1410 		else
1411 			src_virt = ioremap_remote(src_offset, src_window,
1412 						  loop_len,
1413 						  work->remote_dev, NULL);
1414 		if (!src_virt)
1415 			return -ENOMEM;
1416 
1417 		if (dst_window->type == SCIF_WINDOW_SELF)
1418 			dst_virt = _get_local_va(dst_offset, dst_window,
1419 						 loop_len);
1420 		else
1421 			dst_virt = ioremap_remote(dst_offset, dst_window,
1422 						  loop_len,
1423 						  work->remote_dev, NULL);
1424 		if (!dst_virt) {
1425 			if (src_window->type != SCIF_WINDOW_SELF)
1426 				iounmap_remote(src_virt, loop_len, work);
1427 			return -ENOMEM;
1428 		}
1429 
1430 		if (src_window->type == SCIF_WINDOW_SELF)
1431 			scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
1432 						work->ordered);
1433 		else
1434 			scif_unaligned_cpy_fromio(dst_virt, src_virt,
1435 						  loop_len, work->ordered);
1436 		if (src_window->type != SCIF_WINDOW_SELF)
1437 			iounmap_remote(src_virt, loop_len, work);
1438 
1439 		if (dst_window->type != SCIF_WINDOW_SELF)
1440 			iounmap_remote(dst_virt, loop_len, work);
1441 		remaining_len -= loop_len;
1442 	}
1443 	return ret;
1444 err:
1445 	dev_err(scif_info.mdev.this_device,
1446 		"%s %d Desc Prog Failed ret %d\n",
1447 		__func__, __LINE__, ret);
1448 	return ret;
1449 }
1450 
1451 /*
1452  * scif_rma_list_cpu_copy:
1453  *
1454  * Traverse all the windows and perform CPU copy.
1455  */
static int scif_rma_list_cpu_copy(struct scif_copy_work *work)
1457 {
1458 	void *src_virt, *dst_virt;
1459 	size_t loop_len, remaining_len;
1460 	int src_page_off, dst_page_off;
1461 	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
1462 	struct scif_window *src_window = work->src_window;
1463 	struct scif_window *dst_window = work->dst_window;
1464 	s64 end_src_offset, end_dst_offset;
1465 	int ret = 0;
1466 	struct scif_window_iter src_win_iter;
1467 	struct scif_window_iter dst_win_iter;
1468 
1469 	remaining_len = work->len;
1470 
1471 	scif_init_window_iter(src_window, &src_win_iter);
1472 	scif_init_window_iter(dst_window, &dst_win_iter);
1473 	while (remaining_len) {
1474 		src_page_off = src_offset & ~PAGE_MASK;
1475 		dst_page_off = dst_offset & ~PAGE_MASK;
1476 		loop_len = min(PAGE_SIZE -
1477 			       max(src_page_off, dst_page_off),
1478 			       remaining_len);
1479 
1480 		if (src_window->type == SCIF_WINDOW_SELF)
1481 			src_virt = _get_local_va(src_offset, src_window,
1482 						 loop_len);
1483 		else
1484 			src_virt = ioremap_remote(src_offset, src_window,
1485 						  loop_len,
1486 						  work->remote_dev,
1487 						  &src_win_iter);
1488 		if (!src_virt) {
1489 			ret = -ENOMEM;
1490 			goto error;
1491 		}
1492 
1493 		if (dst_window->type == SCIF_WINDOW_SELF)
1494 			dst_virt = _get_local_va(dst_offset, dst_window,
1495 						 loop_len);
1496 		else
1497 			dst_virt = ioremap_remote(dst_offset, dst_window,
1498 						  loop_len,
1499 						  work->remote_dev,
1500 						  &dst_win_iter);
1501 		if (!dst_virt) {
1502 			if (src_window->type == SCIF_WINDOW_PEER)
1503 				iounmap_remote(src_virt, loop_len, work);
1504 			ret = -ENOMEM;
1505 			goto error;
1506 		}
1507 
1508 		if (work->loopback) {
1509 			memcpy(dst_virt, src_virt, loop_len);
1510 		} else {
1511 			if (src_window->type == SCIF_WINDOW_SELF)
1512 				memcpy_toio((void __iomem __force *)dst_virt,
1513 					    src_virt, loop_len);
1514 			else
1515 				memcpy_fromio(dst_virt,
1516 					      (void __iomem __force *)src_virt,
1517 					      loop_len);
1518 		}
1519 		if (src_window->type == SCIF_WINDOW_PEER)
1520 			iounmap_remote(src_virt, loop_len, work);
1521 
1522 		if (dst_window->type == SCIF_WINDOW_PEER)
1523 			iounmap_remote(dst_virt, loop_len, work);
1524 
1525 		src_offset += loop_len;
1526 		dst_offset += loop_len;
1527 		remaining_len -= loop_len;
1528 		if (remaining_len) {
1529 			end_src_offset = src_window->offset +
1530 				(src_window->nr_pages << PAGE_SHIFT);
1531 			end_dst_offset = dst_window->offset +
1532 				(dst_window->nr_pages << PAGE_SHIFT);
1533 			if (src_offset == end_src_offset) {
1534 				src_window = list_next_entry(src_window, list);
1535 				scif_init_window_iter(src_window,
1536 						      &src_win_iter);
1537 			}
1538 			if (dst_offset == end_dst_offset) {
1539 				dst_window = list_next_entry(dst_window, list);
1540 				scif_init_window_iter(dst_window,
1541 						      &dst_win_iter);
1542 			}
1543 		}
1544 	}
1545 error:
1546 	return ret;
1547 }
1548 
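/*
 * scif_rma_list_dma_copy_wrapper:
 *
 * Dispatches a copy based on alignment: if the DMA engine can copy with byte
 * alignment, or if source and destination share the same cache-line offset,
 * the aligned DMA paths above are used; loopback transfers fall back to a CPU
 * copy; otherwise the transfer is bounced through a cache-line aligned
 * temporary buffer via scif_rma_list_dma_copy_unaligned().
 */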
static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd,
					  struct scif_copy_work *work,
					  struct dma_chan *chan, off_t loffset)
1552 {
1553 	int src_cache_off, dst_cache_off;
1554 	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
1555 	u8 *temp = NULL;
1556 	bool src_local = true, dst_local = false;
1557 	struct scif_dma_comp_cb *comp_cb;
1558 	dma_addr_t src_dma_addr, dst_dma_addr;
1559 	int err;
1560 
1561 	if (is_dma_copy_aligned(chan->device, 1, 1, 1))
1562 		return _scif_rma_list_dma_copy_aligned(work, chan);
1563 
1564 	src_cache_off = src_offset & (L1_CACHE_BYTES - 1);
1565 	dst_cache_off = dst_offset & (L1_CACHE_BYTES - 1);
1566 
1567 	if (dst_cache_off == src_cache_off)
1568 		return scif_rma_list_dma_copy_aligned(work, chan);
1569 
1570 	if (work->loopback)
1571 		return scif_rma_list_cpu_copy(work);
1572 	src_dma_addr = __scif_off_to_dma_addr(work->src_window, src_offset);
1573 	dst_dma_addr = __scif_off_to_dma_addr(work->dst_window, dst_offset);
1574 	src_local = work->src_window->type == SCIF_WINDOW_SELF;
1575 	dst_local = work->dst_window->type == SCIF_WINDOW_SELF;
1576 
1577 	dst_local = dst_local;
1578 	/* Allocate dma_completion cb */
1579 	comp_cb = kzalloc(sizeof(*comp_cb), GFP_KERNEL);
1580 	if (!comp_cb)
1581 		goto error;
1582 
1583 	work->comp_cb = comp_cb;
1584 	comp_cb->cb_cookie = comp_cb;
1585 	comp_cb->dma_completion_func = &scif_rma_completion_cb;
1586 
1587 	if (work->len + (L1_CACHE_BYTES << 1) < SCIF_KMEM_UNALIGNED_BUF_SIZE) {
1588 		comp_cb->is_cache = false;
1589 		/* Allocate padding bytes to align to a cache line */
1590 		temp = kmalloc(work->len + (L1_CACHE_BYTES << 1),
1591 			       GFP_KERNEL);
1592 		if (!temp)
1593 			goto free_comp_cb;
1594 		comp_cb->temp_buf_to_free = temp;
1595 		/* kmalloc(..) does not guarantee cache line alignment */
1596 		if (!IS_ALIGNED((u64)temp, L1_CACHE_BYTES))
1597 			temp = PTR_ALIGN(temp, L1_CACHE_BYTES);
1598 	} else {
1599 		comp_cb->is_cache = true;
1600 		temp = kmem_cache_alloc(unaligned_cache, GFP_KERNEL);
1601 		if (!temp)
1602 			goto free_comp_cb;
1603 		comp_cb->temp_buf_to_free = temp;
1604 	}
1605 
1606 	if (src_local) {
1607 		temp += dst_cache_off;
1608 		scif_rma_local_cpu_copy(work->src_offset, work->src_window,
1609 					temp, work->len, true);
1610 	} else {
1611 		comp_cb->dst_window = work->dst_window;
1612 		comp_cb->dst_offset = work->dst_offset;
1613 		work->src_offset = work->src_offset - src_cache_off;
1614 		comp_cb->len = work->len;
1615 		work->len = ALIGN(work->len + src_cache_off, L1_CACHE_BYTES);
1616 		comp_cb->header_padding = src_cache_off;
1617 	}
1618 	comp_cb->temp_buf = temp;
1619 
1620 	err = scif_map_single(&comp_cb->temp_phys, temp,
1621 			      work->remote_dev, SCIF_KMEM_UNALIGNED_BUF_SIZE);
1622 	if (err)
1623 		goto free_temp_buf;
1624 	comp_cb->sdev = work->remote_dev;
1625 	if (scif_rma_list_dma_copy_unaligned(work, temp, chan, src_local) < 0)
1626 		goto free_temp_buf;
1627 	if (!src_local)
1628 		work->fence_type = SCIF_DMA_INTR;
1629 	return 0;
1630 free_temp_buf:
1631 	if (comp_cb->is_cache)
1632 		kmem_cache_free(unaligned_cache, comp_cb->temp_buf_to_free);
1633 	else
1634 		kfree(comp_cb->temp_buf_to_free);
1635 free_comp_cb:
1636 	kfree(comp_cb);
1637 error:
1638 	return -ENOMEM;
1639 }
1640 
1641 /**
1642  * scif_rma_copy:
1643  * @epd: end point descriptor.
1644  * @loffset: offset in local registered address space to/from which to copy
1645  * @addr: user virtual address to/from which to copy
1646  * @len: length of range to copy
1647  * @roffset: offset in remote registered address space to/from which to copy
1648  * @flags: flags
1649  * @dir: LOCAL->REMOTE or vice versa.
1650  * @last_chunk: true if this is the last chunk of a larger transfer
1651  *
1652  * Validate parameters, check if src/dst registered ranges requested for copy
1653  * are valid and initiate either CPU or DMA copy.
1654  */
static int scif_rma_copy(scif_epd_t epd, off_t loffset, unsigned long addr,
			 size_t len, off_t roffset, int flags,
			 enum scif_rma_dir dir, bool last_chunk)
1658 {
1659 	struct scif_endpt *ep = (struct scif_endpt *)epd;
1660 	struct scif_rma_req remote_req;
1661 	struct scif_rma_req req;
1662 	struct scif_window *local_window = NULL;
1663 	struct scif_window *remote_window = NULL;
1664 	struct scif_copy_work copy_work;
1665 	bool loopback;
1666 	int err = 0;
1667 	struct dma_chan *chan;
1668 	struct scif_mmu_notif *mmn = NULL;
1669 	bool cache = false;
1670 	struct device *spdev;
1671 
1672 	err = scif_verify_epd(ep);
1673 	if (err)
1674 		return err;
1675 
1676 	if (flags && !(flags & (SCIF_RMA_USECPU | SCIF_RMA_USECACHE |
1677 				SCIF_RMA_SYNC | SCIF_RMA_ORDERED)))
1678 		return -EINVAL;
1679 
1680 	loopback = scifdev_self(ep->remote_dev) ? true : false;
1681 	copy_work.fence_type = ((flags & SCIF_RMA_SYNC) && last_chunk) ?
1682 				SCIF_DMA_POLL : 0;
1683 	copy_work.ordered = !!((flags & SCIF_RMA_ORDERED) && last_chunk);
1684 
1685 	/* Use CPU for Mgmt node <-> Mgmt node copies */
1686 	if (loopback && scif_is_mgmt_node()) {
1687 		flags |= SCIF_RMA_USECPU;
1688 		copy_work.fence_type = 0x0;
1689 	}
1690 
	cache = scif_is_set_reg_cache(flags);

	remote_req.out_window = &remote_window;
	remote_req.offset = roffset;
	remote_req.nr_bytes = len;
	/*
	 * If transfer is from local to remote then the remote window
	 * must be writeable and vice versa.
	 */
	remote_req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_WRITE : VM_READ;
	remote_req.type = SCIF_WINDOW_PARTIAL;
	remote_req.head = &ep->rma_info.remote_reg_list;

	spdev = scif_get_peer_dev(ep->remote_dev);
	if (IS_ERR(spdev)) {
		err = PTR_ERR(spdev);
		return err;
	}

	if (addr && cache) {
		mutex_lock(&ep->rma_info.mmn_lock);
		mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info);
		if (!mmn)
			mmn = scif_add_mmu_notifier(current->mm, ep);
		mutex_unlock(&ep->rma_info.mmn_lock);
		if (IS_ERR(mmn)) {
			scif_put_peer_dev(spdev);
			return PTR_ERR(mmn);
		}
		cache = cache && !scif_rma_tc_can_cache(ep, len);
	}
	mutex_lock(&ep->rma_info.rma_lock);
	if (addr) {
		req.out_window = &local_window;
		req.nr_bytes = ALIGN(len + (addr & ~PAGE_MASK),
				     PAGE_SIZE);
		req.va_for_temp = addr & PAGE_MASK;
		req.prot = (dir == SCIF_LOCAL_TO_REMOTE ?
			    VM_READ : VM_WRITE | VM_READ);
		/* Does a valid local window exist? */
		if (mmn) {
			spin_lock(&ep->rma_info.tc_lock);
			req.head = &mmn->tc_reg_list;
			err = scif_query_tcw(ep, &req);
			spin_unlock(&ep->rma_info.tc_lock);
		}
		if (!mmn || err) {
			err = scif_register_temp(epd, req.va_for_temp,
						 req.nr_bytes, req.prot,
						 &loffset, &local_window);
			if (err) {
				mutex_unlock(&ep->rma_info.rma_lock);
				goto error;
			}
			if (!cache)
				goto skip_cache;
			atomic_inc(&ep->rma_info.tcw_refcount);
			atomic_add_return(local_window->nr_pages,
					  &ep->rma_info.tcw_total_pages);
			if (mmn) {
				spin_lock(&ep->rma_info.tc_lock);
				scif_insert_tcw(local_window,
						&mmn->tc_reg_list);
				spin_unlock(&ep->rma_info.tc_lock);
			}
		}
skip_cache:
		loffset = local_window->offset +
				(addr - local_window->va_for_temp);
	} else {
		req.out_window = &local_window;
		req.offset = loffset;
		/*
		 * If transfer is from local to remote then the self window
		 * must be readable and vice versa.
		 */
		req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_READ : VM_WRITE;
		req.nr_bytes = len;
		req.type = SCIF_WINDOW_PARTIAL;
		req.head = &ep->rma_info.reg_list;
		/* Does a valid local window exist? */
		err = scif_query_window(&req);
		if (err) {
			mutex_unlock(&ep->rma_info.rma_lock);
			goto error;
		}
	}

	/* Does a valid remote window exist? */
	err = scif_query_window(&remote_req);
	if (err) {
		mutex_unlock(&ep->rma_info.rma_lock);
		goto error;
	}

	/*
	 * Prepare copy_work for submitting work to the DMA kernel thread
	 * or CPU copy routine.
	 */
	copy_work.len = len;
	copy_work.loopback = loopback;
	copy_work.remote_dev = ep->remote_dev;
	if (dir == SCIF_LOCAL_TO_REMOTE) {
		copy_work.src_offset = loffset;
		copy_work.src_window = local_window;
		copy_work.dst_offset = roffset;
		copy_work.dst_window = remote_window;
	} else {
		copy_work.src_offset = roffset;
		copy_work.src_window = remote_window;
		copy_work.dst_offset = loffset;
		copy_work.dst_window = local_window;
	}

	if (flags & SCIF_RMA_USECPU) {
		scif_rma_list_cpu_copy(&copy_work);
	} else {
		chan = ep->rma_info.dma_chan;
		err = scif_rma_list_dma_copy_wrapper(epd, &copy_work,
						     chan, loffset);
	}
	if (addr && !cache)
		atomic_inc(&ep->rma_info.tw_refcount);

	mutex_unlock(&ep->rma_info.rma_lock);

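	/*
	 * For the final chunk, wait for the DMA engine to drain if a fence
	 * was requested, so the transfer has completed before returning to
	 * the caller.
	 */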
	if (last_chunk) {
		struct scif_dev *rdev = ep->remote_dev;

		if (copy_work.fence_type == SCIF_DMA_POLL)
			err = scif_drain_dma_poll(rdev->sdev,
						  ep->rma_info.dma_chan);
		else if (copy_work.fence_type == SCIF_DMA_INTR)
			err = scif_drain_dma_intr(rdev->sdev,
						  ep->rma_info.dma_chan);
	}

	if (addr && !cache)
		scif_queue_for_cleanup(local_window, &scif_info.rma);
	scif_put_peer_dev(spdev);
	return err;
error:
	if (err) {
		if (addr && local_window && !cache)
			scif_destroy_window(ep, local_window);
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d len 0x%lx\n",
			__func__, __LINE__, err, len);
	}
	scif_put_peer_dev(spdev);
	return err;
}

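/*
 * The exported RMA entry points below split transfers with cache line
 * unaligned source/destination into chunks of at most
 * SCIF_MAX_UNALIGNED_BUF_SIZE and pass last_chunk = true only for the
 * final scif_rma_copy() call, so fences and ordering apply once per
 * transfer.
 */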
int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len,
		  off_t roffset, int flags)
{
	int err;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI readfrom: ep %p loffset 0x%lx len 0x%lx offset 0x%lx flags 0x%x\n",
		epd, loffset, len, roffset, flags);
	if (scif_unaligned(loffset, roffset)) {
		while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
			err = scif_rma_copy(epd, loffset, 0x0,
					    SCIF_MAX_UNALIGNED_BUF_SIZE,
					    roffset, flags,
					    SCIF_REMOTE_TO_LOCAL, false);
			if (err)
				goto readfrom_err;
			loffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
		}
	}
	err = scif_rma_copy(epd, loffset, 0x0, len,
			    roffset, flags, SCIF_REMOTE_TO_LOCAL, true);
readfrom_err:
	return err;
}
EXPORT_SYMBOL_GPL(scif_readfrom);
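/*
 * Example (illustrative sketch, not part of the driver): a SCIF client
 * that has registered a local window can pull data from the peer's
 * registered address space with a synchronous DMA read. The endpoint
 * epd and the offsets/length below are assumptions for the sketch.
 *
 *	off_t loff = 0x100000, roff = 0x200000;
 *	size_t len = 0x10000;
 *	int err;
 *
 *	err = scif_readfrom(epd, loff, len, roff, SCIF_RMA_SYNC);
 *	if (err < 0)
 *		pr_err("scif_readfrom failed: %d\n", err);
 */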

int scif_writeto(scif_epd_t epd, off_t loffset, size_t len,
		 off_t roffset, int flags)
{
	int err;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI writeto: ep %p loffset 0x%lx len 0x%lx roffset 0x%lx flags 0x%x\n",
		epd, loffset, len, roffset, flags);
	if (scif_unaligned(loffset, roffset)) {
		while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
			err = scif_rma_copy(epd, loffset, 0x0,
					    SCIF_MAX_UNALIGNED_BUF_SIZE,
					    roffset, flags,
					    SCIF_LOCAL_TO_REMOTE, false);
			if (err)
				goto writeto_err;
			loffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
		}
	}
	err = scif_rma_copy(epd, loffset, 0x0, len,
			    roffset, flags, SCIF_LOCAL_TO_REMOTE, true);
writeto_err:
	return err;
}
EXPORT_SYMBOL_GPL(scif_writeto);

int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len,
		   off_t roffset, int flags)
{
	int err;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI vreadfrom: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n",
		epd, addr, len, roffset, flags);
	if (scif_unaligned((off_t __force)addr, roffset)) {
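		/*
		 * Transfers larger than SCIF_MAX_UNALIGNED_BUF_SIZE are
		 * carved into chunks below; registration caching is not
		 * used for the chunked path.
		 */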
		if (len > SCIF_MAX_UNALIGNED_BUF_SIZE)
			flags &= ~SCIF_RMA_USECACHE;

		while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
			err = scif_rma_copy(epd, 0, (u64)addr,
					    SCIF_MAX_UNALIGNED_BUF_SIZE,
					    roffset, flags,
					    SCIF_REMOTE_TO_LOCAL, false);
			if (err)
				goto vreadfrom_err;
			addr += SCIF_MAX_UNALIGNED_BUF_SIZE;
			roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
		}
	}
	err = scif_rma_copy(epd, 0, (u64)addr, len,
			    roffset, flags, SCIF_REMOTE_TO_LOCAL, true);
vreadfrom_err:
	return err;
}
EXPORT_SYMBOL_GPL(scif_vreadfrom);

int scif_vwriteto(scif_epd_t epd, void *addr, size_t len,
		  off_t roffset, int flags)
{
	int err;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI vwriteto: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n",
		epd, addr, len, roffset, flags);
	if (scif_unaligned((off_t __force)addr, roffset)) {
		if (len > SCIF_MAX_UNALIGNED_BUF_SIZE)
			flags &= ~SCIF_RMA_USECACHE;

		while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) {
			err = scif_rma_copy(epd, 0, (u64)addr,
					    SCIF_MAX_UNALIGNED_BUF_SIZE,
					    roffset, flags,
					    SCIF_LOCAL_TO_REMOTE, false);
			if (err)
				goto vwriteto_err;
			addr += SCIF_MAX_UNALIGNED_BUF_SIZE;
			roffset += SCIF_MAX_UNALIGNED_BUF_SIZE;
			len -= SCIF_MAX_UNALIGNED_BUF_SIZE;
		}
	}
	err = scif_rma_copy(epd, 0, (u64)addr, len,
			    roffset, flags, SCIF_LOCAL_TO_REMOTE, true);
vwriteto_err:
	return err;
}
EXPORT_SYMBOL_GPL(scif_vwriteto);
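/*
 * Example (illustrative sketch, not part of the driver): pushing a local
 * virtual buffer to the peer's registered address space without an
 * explicit scif_register() call; a temporary window is created (and
 * optionally cached) for the duration of the copy. The endpoint epd,
 * buffer and offset below are assumptions for the sketch.
 *
 *	void *buf = ...;	/* address valid in the caller's context *​/
 *	off_t roff = 0x200000;
 *	int err;
 *
 *	err = scif_vwriteto(epd, buf, len, roff,
 *			    SCIF_RMA_USECACHE | SCIF_RMA_SYNC);
 *	if (err < 0)
 *		pr_err("scif_vwriteto failed: %d\n", err);
 */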