/******************************************************************************
 * grant_table.h
 *
 * Two sets of functionality:
 * 1. Granting foreign access to our memory reservation.
 * 2. Accessing others' memory reservations via grant references.
 * (i.e., mechanisms for both sender and recipient of grant references)
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __ASM_GNTTAB_H__
#define __ASM_GNTTAB_H__

#include <asm/page.h>

#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>

#include <asm/xen/hypervisor.h>

#include <xen/features.h>
#include <xen/page.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/kernel.h>

#define GNTTAB_RESERVED_XENSTORE 1

/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
#define NR_GRANT_FRAMES 4

struct gnttab_free_callback {
        struct gnttab_free_callback *next;
        void (*fn)(void *);
        void *arg;
        u16 count;
};

struct gntab_unmap_queue_data;

typedef void (*gnttab_unmap_refs_done)(int result, struct gntab_unmap_queue_data *data);

struct gntab_unmap_queue_data
{
        struct delayed_work gnttab_work;
        void *data;
        gnttab_unmap_refs_done done;
        struct gnttab_unmap_grant_ref *unmap_ops;
        struct gnttab_unmap_grant_ref *kunmap_ops;
        struct page **pages;
        unsigned int count;
        unsigned int age;
};

int gnttab_init(void);
int gnttab_suspend(void);
int gnttab_resume(void);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
                                int readonly);

/*
 * End access through the given grant reference, iff the grant entry is no
 * longer in use. Return 1 if the grant entry was freed, 0 if it is still in
 * use.
 */
int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);

/*
 * Eventually end access through the given grant reference, and once that
 * access has been ended, free the given page too. Access will be ended
 * immediately iff the grant entry is not in use, otherwise it will happen
 * some time later. page may be 0, in which case no freeing will occur.
 * Note that the granted page might still be accessed (read or write) by the
 * other side after gnttab_end_foreign_access() returns, so even if page was
 * specified as 0 it is not allowed to just reuse the page for other
 * purposes immediately. gnttab_end_foreign_access() will take an additional
 * reference to the granted page in this case, which is dropped only after
 * the grant is no longer in use.
 * This requires that multi page allocations for areas subject to
 * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing
 * via free_pages_exact()) in order to avoid high order pages.
 */
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
                               unsigned long page);
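
/*
 * Usage sketch (illustrative only, not part of this API; "otherend_id" is a
 * placeholder for the remote domain id and error handling is abbreviated):
 * grant a freshly allocated page read-write, and later revoke access and
 * free the page in one call.
 *
 *      void *ring = alloc_pages_exact(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
 *      int ref = gnttab_grant_foreign_access(otherend_id,
 *                                            virt_to_gfn(ring), 0);
 *      if (ref < 0)
 *              goto err;
 *      ...
 *      gnttab_end_foreign_access(ref, 0, (unsigned long)ring);
 */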

/*
 * End access through the given grant reference, iff the grant entry is
 * no longer in use. If access was ended successfully, the grant reference
 * is deallocated as well.
 * Return 1 if the grant entry was freed, 0 if it is still in use.
 */
int gnttab_try_end_foreign_access(grant_ref_t ref);
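
/*
 * Typical pattern (a minimal sketch; "vaddr" is a placeholder for the
 * granted page's address): try to revoke the grant and, if the other side
 * still holds it, leak the backing page rather than reusing it.
 *
 *      if (gnttab_try_end_foreign_access(ref))
 *              free_page((unsigned long)vaddr);
 *      else
 *              pr_warn("gref %u still in use, leaking page\n", ref);
 */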

int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);

/*
 * operations on reserved batches of grant references
 */
int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);

void gnttab_free_grant_reference(grant_ref_t ref);

void gnttab_free_grant_references(grant_ref_t head);

int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);

int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);

void gnttab_release_grant_reference(grant_ref_t *private_head,
                                    grant_ref_t release);
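
/*
 * Example flow for a reserved batch (a sketch modelled on typical frontend
 * drivers; "RING_SIZE", "otherend_id" and "page" are placeholders): reserve
 * a batch up front, claim references from it as pages are granted, and free
 * the whole batch on teardown.
 *
 *      grant_ref_t gref_head, ref;
 *
 *      if (gnttab_alloc_grant_references(RING_SIZE, &gref_head) < 0)
 *              goto busy;
 *      ref = gnttab_claim_grant_reference(&gref_head);
 *      gnttab_grant_foreign_access_ref(ref, otherend_id,
 *                                      xen_page_to_gfn(page), 0);
 *      ...
 *      gnttab_free_grant_references(gref_head);
 */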

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
                                  void (*fn)(void *), void *arg, u16 count);
void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);
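
/*
 * Sketch of the free-callback pattern (names are illustrative): when a
 * batch allocation fails because the grant table is exhausted, ask to be
 * called back once at least "count" references are free again.
 *
 *      static struct gnttab_free_callback cb;
 *
 *      if (gnttab_alloc_grant_references(count, &gref_head) < 0) {
 *              gnttab_request_free_callback(&cb, my_refill_fn, my_data, count);
 *              return -EBUSY;
 *      }
 */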

void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
                                     unsigned long frame, int readonly);

/* Give access to the first 4K of the page */
static inline void gnttab_page_grant_foreign_access_ref_one(
        grant_ref_t ref, domid_t domid,
        struct page *page, int readonly)
{
        gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page),
                                        readonly);
}

void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
                                       unsigned long pfn);

static inline void
gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
                  uint32_t flags, grant_ref_t ref, domid_t domid)
{
        if (flags & GNTMAP_contains_pte)
                map->host_addr = addr;
        else if (xen_feature(XENFEAT_auto_translated_physmap))
                map->host_addr = __pa(addr);
        else
                map->host_addr = addr;

        map->flags = flags;
        map->ref = ref;
        map->dom = domid;
        map->status = 1; /* arbitrary positive value */
}

static inline void
gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
                    uint32_t flags, grant_handle_t handle)
{
        if (flags & GNTMAP_contains_pte)
                unmap->host_addr = addr;
        else if (xen_feature(XENFEAT_auto_translated_physmap))
                unmap->host_addr = __pa(addr);
        else
                unmap->host_addr = addr;

        unmap->handle = handle;
        unmap->dev_bus_addr = 0;
}
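
/*
 * Mapping sketch (illustrative; gnttab_map_refs()/gnttab_unmap_refs() are
 * declared further down, and "gref" and "otherend_id" are placeholders):
 * build a map op for a locally allocated page, map the remote grant into
 * it, then undo the mapping using the handle returned in the op.
 *
 *      struct gnttab_map_grant_ref map;
 *      struct gnttab_unmap_grant_ref unmap;
 *      struct page *page;
 *      unsigned long addr;
 *
 *      gnttab_alloc_pages(1, &page);
 *      addr = (unsigned long)page_address(page);
 *      gnttab_set_map_op(&map, addr, GNTMAP_host_map, gref, otherend_id);
 *      if (gnttab_map_refs(&map, NULL, &page, 1) || map.status != GNTST_okay)
 *              goto err;
 *      ...
 *      gnttab_set_unmap_op(&unmap, addr, GNTMAP_host_map, map.handle);
 *      gnttab_unmap_refs(&unmap, NULL, &page, 1);
 *      gnttab_free_pages(1, &page);
 */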

int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
                           unsigned long max_nr_gframes,
                           void **__shared);
int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
                           unsigned long max_nr_gframes,
                           grant_status_t **__shared);
void arch_gnttab_unmap(void *shared, unsigned long nr_gframes);

struct grant_frames {
        xen_pfn_t *pfn;
        unsigned int count;
        void *vaddr;
};
extern struct grant_frames xen_auto_xlat_grant_frames;
unsigned int gnttab_max_grant_frames(void);
int gnttab_setup_auto_xlat_frames(phys_addr_t addr);
void gnttab_free_auto_xlat_frames(void);

#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))

int gnttab_alloc_pages(int nr_pages, struct page **pages);
void gnttab_free_pages(int nr_pages, struct page **pages);

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
struct gnttab_dma_alloc_args {
        /* Device for which DMA memory will be/was allocated. */
        struct device *dev;
        /* If set, the DMA buffer is coherent; otherwise it is write-combine. */
        bool coherent;

        int nr_pages;
        struct page **pages;
        xen_pfn_t *frames;
        void *vaddr;
        dma_addr_t dev_bus_addr;
};

int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args);
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args);
#endif
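
/*
 * Allocation sketch (illustrative; the caller-provided "pages" and "frames"
 * arrays and "dev" are assumptions of this example): fill in the request
 * fields, then gnttab_dma_alloc_pages() populates vaddr and dev_bus_addr.
 *
 *      struct gnttab_dma_alloc_args args = {
 *              .dev = dev,
 *              .coherent = true,
 *              .nr_pages = nr_pages,
 *              .pages = pages,
 *              .frames = frames,
 *      };
 *
 *      if (gnttab_dma_alloc_pages(&args))
 *              goto err;
 *      ...
 *      gnttab_dma_free_pages(&args);
 */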

int gnttab_pages_set_private(int nr_pages, struct page **pages);
void gnttab_pages_clear_private(int nr_pages, struct page **pages);

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                    struct gnttab_map_grant_ref *kmap_ops,
                    struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
                      struct gnttab_unmap_grant_ref *kunmap_ops,
                      struct page **pages, unsigned int count);
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);
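
/*
 * Asynchronous unmap sketch (illustrative; "my_unmap_done", "unmap_data",
 * "unmap_ops", "pages" and "ctx" are placeholders): queue the unmap and be
 * notified with the gnttab_unmap_refs() result once the pages are no longer
 * in use.
 *
 *      static void my_unmap_done(int result, struct gntab_unmap_queue_data *d)
 *      {
 *              ...
 *      }
 *
 *      unmap_data.unmap_ops = unmap_ops;
 *      unmap_data.kunmap_ops = NULL;
 *      unmap_data.pages = pages;
 *      unmap_data.count = count;
 *      unmap_data.done = my_unmap_done;
 *      unmap_data.data = ctx;
 *      gnttab_unmap_refs_async(&unmap_data);
 */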


/* Perform a batch of grant map/copy operations. Retry every batch slot
 * for which the hypervisor returns GNTST_eagain. This is typically due
 * to paged out target frames.
 *
 * Will retry for 1, 2, ... 255 ms, i.e. 256 times during 32 seconds.
 *
 * The return value in each and every status field of the batch is
 * guaranteed not to be GNTST_eagain.
 */
void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
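
/*
 * Copy-batch sketch (illustrative; "remote_gref", "otherend_id" and
 * "local_buf" are placeholders): pull one grant's worth of data from the
 * remote domain into a local buffer and check the per-op status afterwards.
 *
 *      struct gnttab_copy op = {
 *              .source.u.ref = remote_gref,
 *              .source.domid = otherend_id,
 *              .dest.u.gmfn = virt_to_gfn(local_buf),
 *              .dest.domid = DOMID_SELF,
 *              .len = XEN_PAGE_SIZE,
 *              .flags = GNTCOPY_source_gref,
 *      };
 *
 *      gnttab_batch_copy(&op, 1);
 *      if (op.status != GNTST_okay)
 *              goto err;
 */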


struct xen_page_foreign {
        domid_t domid;
        grant_ref_t gref;
};

static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
{
        if (!PageForeign(page))
                return NULL;
#if BITS_PER_LONG < 64
        return (struct xen_page_foreign *)page->private;
#else
        BUILD_BUG_ON(sizeof(struct xen_page_foreign) > BITS_PER_LONG);
        return (struct xen_page_foreign *)&page->private;
#endif
}
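
/*
 * Usage sketch: a backend that mapped this page via gnttab_map_refs() can
 * recover who granted it; NULL means the page is not a foreign mapping.
 *
 *      struct xen_page_foreign *foreign = xen_page_foreign(page);
 *
 *      if (foreign)
 *              pr_debug("page from dom%u, gref %u\n",
 *                       foreign->domid, foreign->gref);
 */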

/* Split a Linux page into chunks of grant size and call fn on each chunk.
 *
 * Parameters of fn:
 *	gfn: guest frame number
 *	offset: offset in the grant
 *	len: length of the data in the grant
 *	data: internal information
 */
typedef void (*xen_grant_fn_t)(unsigned long gfn, unsigned int offset,
                               unsigned int len, void *data);

void gnttab_foreach_grant_in_range(struct page *page,
                                   unsigned int offset,
                                   unsigned int len,
                                   xen_grant_fn_t fn,
                                   void *data);
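
/*
 * Iteration sketch (illustrative; "count_chunk" is a made-up callback):
 * count how many grant-sized chunks a byte range of a (possibly larger)
 * Linux page spans.
 *
 *      static void count_chunk(unsigned long gfn, unsigned int offset,
 *                              unsigned int len, void *data)
 *      {
 *              (*(unsigned int *)data)++;
 *      }
 *
 *      unsigned int chunks = 0;
 *      gnttab_foreach_grant_in_range(page, offset, len, count_chunk, &chunks);
 */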

/* Helper to call fn only on the first "grant chunk" */
static inline void gnttab_for_one_grant(struct page *page, unsigned int offset,
                                        unsigned len, xen_grant_fn_t fn,
                                        void *data)
{
        /* The first request is limited to the size of one grant */
        len = min_t(unsigned int, XEN_PAGE_SIZE - (offset & ~XEN_PAGE_MASK),
                    len);

        gnttab_foreach_grant_in_range(page, offset, len, fn, data);
}

/* Get @nr_grefs grants from an array of pages and call fn for each grant */
void gnttab_foreach_grant(struct page **pages,
                          unsigned int nr_grefs,
                          xen_grant_fn_t fn,
                          void *data);

/* Get the number of grants in a specified region
 *
 * start: Offset from the beginning of the first page
 * len: total length of data (can cross multiple pages)
 */
static inline unsigned int gnttab_count_grant(unsigned int start,
                                              unsigned int len)
{
        return XEN_PFN_UP(xen_offset_in_page(start) + len);
}
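
/*
 * Worked example: with 4 KiB Xen pages, a 5000-byte buffer that starts 100
 * bytes into its first page spans XEN_PFN_UP(100 + 5000) = 2 grants, so
 * gnttab_count_grant(100, 5000) returns 2.
 */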

#endif /* __ASM_GNTTAB_H__ */