/******************************************************************************
 * ring.h
 *
 * Shared producer-consumer ring macros.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Tim Deegan and Andrew Warfield November 2004.
 */

#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__

/*
 * When #include'ing this header, you need to provide the following
 * declarations upfront:
 * - standard integer types (uint8_t, uint16_t, etc.)
 * They are provided by stdint.h of the standard headers.
 *
 * In addition, if you intend to use the FLEX macros, you also need to
 * provide the following, before invoking the FLEX macros:
 * - size_t
 * - memcpy
 * - grant_ref_t
 * These declarations are provided by string.h of the standard headers,
 * and grant_table.h from the Xen public headers.
 */

#include <xen/interface/grant_table.h>

typedef unsigned int RING_IDX;

/* Round a 32-bit unsigned constant down to the nearest power of two. */
#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))

/*
 * Calculate the size of a shared ring, given the total available space for
 * the ring and indexes (_sz), and the name tag of the request/response
 * structure. A ring contains as many entries as will fit, rounded down to
 * the nearest power of two (so we can mask with (size-1) to loop around).
 */
#define __CONST_RING_SIZE(_s, _sz) \
    (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
            sizeof(((struct _s##_sring *)0)->ring[0])))
/*
 * The same for passing in an actual pointer instead of a name tag.
 */
#define __RING_SIZE(_s, _sz) \
    (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
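/*
 * Worked example (illustrative only, not part of the interface): assume a
 * hypothetical "mytag" protocol whose request and response structures are
 * each 32 bytes, and a 4-byte RING_IDX.  The shared ring header created by
 * DEFINE_RING_TYPES() below (four RING_IDX fields plus 48 bytes of padding)
 * occupies 64 bytes, so on a 4096-byte page:
 *
 *     __CONST_RING_SIZE(mytag, 4096)
 *         == __RD32((4096 - 64) / 32)
 *         == __RD32(126)
 *         == 64
 *
 * That is, 126 entries would fit, and rounding down to a power of two gives
 * a 64-entry ring that can be indexed with "idx & (64 - 1)".
 */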
/*
 * Macros to make the correct C datatypes for a new kind of ring.
 *
 * To make a new ring datatype, you need to have two message structures,
 * let's say request_t, and response_t already defined.
 *
 * In a header where you want the ring datatype declared, you then do:
 *
 *     DEFINE_RING_TYPES(mytag, request_t, response_t);
 *
 * These expand out to give you a set of types, as you can see below.
 * The most important of these are:
 *
 *     struct mytag_sring      - The shared ring.
 *     struct mytag_front_ring - The 'front' half of the ring.
 *     struct mytag_back_ring  - The 'back' half of the ring.
 *
 * To initialize a ring in your code you need to know the location and size
 * of the shared memory area (PAGE_SIZE, for instance). To initialize
 * the front half:
 *
 *     struct mytag_front_ring front_ring;
 *     SHARED_RING_INIT((struct mytag_sring *)shared_page);
 *     FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
 *                     PAGE_SIZE);
 *
 * Initializing the back follows similarly (note that only the front
 * initializes the shared ring):
 *
 *     struct mytag_back_ring back_ring;
 *     BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
 *                    PAGE_SIZE);
 */

#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
\
/* Shared ring entry */ \
union __name##_sring_entry { \
    __req_t req; \
    __rsp_t rsp; \
}; \
\
/* Shared ring page */ \
struct __name##_sring { \
    RING_IDX req_prod, req_event; \
    RING_IDX rsp_prod, rsp_event; \
    uint8_t __pad[48]; \
    union __name##_sring_entry ring[1]; /* variable-length */ \
}; \
\
/* "Front" end's private variables */ \
struct __name##_front_ring { \
    RING_IDX req_prod_pvt; \
    RING_IDX rsp_cons; \
    unsigned int nr_ents; \
    struct __name##_sring *sring; \
}; \
\
/* "Back" end's private variables */ \
struct __name##_back_ring { \
    RING_IDX rsp_prod_pvt; \
    RING_IDX req_cons; \
    unsigned int nr_ents; \
    struct __name##_sring *sring; \
}

/*
 * Macros for manipulating rings.
 *
 * FRONT_RING_whatever works on the "front end" of a ring: here
 * requests are pushed on to the ring and responses taken off it.
 *
 * BACK_RING_whatever works on the "back end" of a ring: here
 * requests are taken off the ring and responses put on.
 *
 * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
 * This is OK in 1-for-1 request-response situations where the
 * requestor (front end) never has more than RING_SIZE()-1
 * outstanding requests.
 */

/* Initializing empty rings */
#define SHARED_RING_INIT(_s) do { \
    (_s)->req_prod = (_s)->rsp_prod = 0; \
    (_s)->req_event = (_s)->rsp_event = 1; \
    (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
} while (0)

#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
    (_r)->req_prod_pvt = (_i); \
    (_r)->rsp_cons = (_i); \
    (_r)->nr_ents = __RING_SIZE(_s, __size); \
    (_r)->sring = (_s); \
} while (0)

#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)

#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
    (_r)->rsp_prod_pvt = (_i); \
    (_r)->req_cons = (_i); \
    (_r)->nr_ents = __RING_SIZE(_s, __size); \
    (_r)->sring = (_s); \
} while (0)

#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)

/* How big is this ring? */
#define RING_SIZE(_r) \
    ((_r)->nr_ents)

/* Number of free requests (for use on front side only). */
#define RING_FREE_REQUESTS(_r) \
    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))

/*
 * Test if there is an empty slot available on the front ring.
 * (This is only meaningful from the front.)
 */
#define RING_FULL(_r) \
    (RING_FREE_REQUESTS(_r) == 0)
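/*
 * Illustrative front-end producer sketch (an example under assumptions, not
 * part of the interface).  "mytag", struct mytag_request and
 * mytag_queue_request() are hypothetical names; RING_GET_REQUEST() and
 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() are defined further below.  The
 * function returns -1 if the ring is full, otherwise whether the back end
 * needs an event-channel notification (which the caller is assumed to send).
 *
 *     static int mytag_queue_request(struct mytag_front_ring *ring,
 *                                    const struct mytag_request *src)
 *     {
 *         struct mytag_request *req;
 *         int notify;
 *
 *         if (RING_FULL(ring))
 *             return -1;
 *
 *         req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
 *         *req = *src;
 *         ring->req_prod_pvt++;
 *
 *         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
 *         return notify;
 *     }
 */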
/* Test if there are outstanding messages to be processed on a ring. */
#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
    ((_r)->sring->rsp_prod - (_r)->rsp_cons)

#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
    unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
    unsigned int rsp = RING_SIZE(_r) - \
        ((_r)->req_cons - (_r)->rsp_prod_pvt); \
    req < rsp ? req : rsp; \
})

/* Direct access to individual ring elements, by index. */
#define RING_GET_REQUEST(_r, _idx) \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))

#define RING_GET_RESPONSE(_r, _idx) \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))

/*
 * Get a local copy of a request/response.
 *
 * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing
 * is done on a local copy that cannot be modified by the other end.
 *
 * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
 * to be ineffective where dest is a struct which consists of only bitfields.
 */
#define RING_COPY_(type, r, idx, dest) do { \
    /* Use volatile to force the copy into dest. */ \
    *(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx); \
} while (0)

#define RING_COPY_REQUEST(r, idx, req)  RING_COPY_(REQUEST, r, idx, req)
#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)

/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))

/* Ill-behaved frontend determination: Can there be this many requests? */
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
    (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))

/* Ill-behaved backend determination: Can there be this many responses? */
#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \
    (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))

#define RING_PUSH_REQUESTS(_r) do { \
    virt_wmb(); /* back sees requests /before/ updated producer index */ \
    (_r)->sring->req_prod = (_r)->req_prod_pvt; \
} while (0)

#define RING_PUSH_RESPONSES(_r) do { \
    virt_wmb(); /* front sees resps /before/ updated producer index */ \
    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
} while (0)
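/*
 * Illustrative back-end sketch for servicing a single request (hypothetical
 * names, not part of the interface).  The request is copied out of the
 * shared page with RING_COPY_REQUEST() before being inspected, and the
 * response is built directly in its shared slot before RING_PUSH_RESPONSES()
 * publishes it.  mytag_handle() is an assumed handler that turns a request
 * into a response.
 *
 *     static void mytag_service_one(struct mytag_back_ring *ring)
 *     {
 *         struct mytag_request req;
 *         struct mytag_response *rsp;
 *
 *         if (!RING_HAS_UNCONSUMED_REQUESTS(ring))
 *             return;
 *         if (RING_REQUEST_CONS_OVERFLOW(ring, ring->req_cons))
 *             return;
 *
 *         RING_COPY_REQUEST(ring, ring->req_cons, &req);
 *         ring->req_cons++;
 *
 *         rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
 *         mytag_handle(&req, rsp);
 *         ring->rsp_prod_pvt++;
 *
 *         RING_PUSH_RESPONSES(ring);
 *     }
 */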
/*
 * Notification hold-off (req_event and rsp_event):
 *
 * When queueing requests or responses on a shared ring, it may not always be
 * necessary to notify the remote end. For example, if requests are in flight
 * in a backend, the front may be able to queue further requests without
 * notifying the back (if the back checks for new requests when it queues
 * responses).
 *
 * When enqueuing requests or responses:
 *
 * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
 * is a boolean return value. True indicates that the receiver requires an
 * asynchronous notification.
 *
 * After dequeuing requests or responses (before sleeping the connection):
 *
 * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
 * The second argument is a boolean return value. True indicates that there
 * are pending messages on the ring (i.e., the connection should not be put
 * to sleep).
 *
 * These macros will set the req_event/rsp_event field to trigger a
 * notification on the very next message that is enqueued. If you want to
 * create batches of work (i.e., only receive a notification after several
 * messages have been enqueued) then you will need to create a customized
 * version of the FINAL_CHECK macro in your own code, which sets the event
 * field appropriately.
 */

#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
    RING_IDX __old = (_r)->sring->req_prod; \
    RING_IDX __new = (_r)->req_prod_pvt; \
    virt_wmb(); /* back sees requests /before/ updated producer index */ \
    (_r)->sring->req_prod = __new; \
    virt_mb(); /* back sees new requests /before/ we check req_event */ \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
                 (RING_IDX)(__new - __old)); \
} while (0)

#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
    RING_IDX __old = (_r)->sring->rsp_prod; \
    RING_IDX __new = (_r)->rsp_prod_pvt; \
    virt_wmb(); /* front sees resps /before/ updated producer index */ \
    (_r)->sring->rsp_prod = __new; \
    virt_mb(); /* front sees new resps /before/ we check rsp_event */ \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
                 (RING_IDX)(__new - __old)); \
} while (0)

#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
    if (_work_to_do) break; \
    (_r)->sring->req_event = (_r)->req_cons + 1; \
    virt_mb(); \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
} while (0)

#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
    if (_work_to_do) break; \
    (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
    virt_mb(); \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
} while (0)
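/*
 * Illustrative back-end service loop showing the notification hold-off
 * pattern (hypothetical names, not part of the interface).  The loop drains
 * all pending requests, then uses RING_FINAL_CHECK_FOR_REQUESTS() to re-arm
 * req_event and close the race with a request that arrives just as the
 * backend is about to block.  mytag_service_one() is the per-request handler
 * sketched above; blocking and wakeup are assumed to be driven by an event
 * channel.
 *
 *     static void mytag_backend_work(struct mytag_back_ring *ring)
 *     {
 *         int more_to_do = 1;
 *
 *         while (more_to_do) {
 *             while (RING_HAS_UNCONSUMED_REQUESTS(ring))
 *                 mytag_service_one(ring);
 *             RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do);
 *         }
 *     }
 */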
/*
 * DEFINE_XEN_FLEX_RING_AND_INTF defines two unidirectional rings and
 * functions to check if there is data on the ring, and to read from and
 * write to them.
 *
 * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but
 * does not define the indexes page. As different protocols can have
 * extensions to the basic format, this macro allows them to define their
 * own struct.
 *
 * XEN_FLEX_RING_SIZE
 *   Convenience macro to calculate the size of one of the two rings
 *   from the overall order (the two rings split the allocation in half,
 *   hence the "- 1" in the shift).
 *
 * $NAME_mask
 *   Function to apply the size mask to an index, to reduce the index
 *   into the range [0, size-1].
 *
 * $NAME_read_packet
 *   Function to read data from the ring. The amount of data to read is
 *   specified by the "size" argument.
 *
 * $NAME_write_packet
 *   Function to write data to the ring. The amount of data to write is
 *   specified by the "size" argument.
 *
 * $NAME_get_ring_ptr
 *   Convenience function that returns a pointer into the ring at the
 *   right location for reading or writing.
 *
 * $NAME_data_intf
 *   Indexes page, shared between frontend and backend. It also
 *   contains the array of grant refs.
 *
 * $NAME_queued
 *   Function to calculate how many bytes are currently on the ring,
 *   ready to be read. It can also be used to calculate how much free
 *   space is currently on the ring (XEN_FLEX_RING_SIZE() -
 *   $NAME_queued()).
 */

#ifndef XEN_PAGE_SHIFT
/*
 * The PAGE_SIZE used for ring protocols and hypercall interfaces is always
 * 4K, regardless of the architecture and the page granularity chosen by the
 * operating system.
 */
#define XEN_PAGE_SHIFT 12
#endif
#define XEN_FLEX_RING_SIZE(order) \
    (1UL << ((order) + XEN_PAGE_SHIFT - 1))

#define DEFINE_XEN_FLEX_RING(name) \
static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size) \
{ \
    return idx & (ring_size - 1); \
} \
\
static inline unsigned char *name##_get_ring_ptr(unsigned char *buf, \
                                                 RING_IDX idx, \
                                                 RING_IDX ring_size) \
{ \
    return buf + name##_mask(idx, ring_size); \
} \
\
static inline void name##_read_packet(void *opaque, \
                                      const unsigned char *buf, \
                                      size_t size, \
                                      RING_IDX masked_prod, \
                                      RING_IDX *masked_cons, \
                                      RING_IDX ring_size) \
{ \
    if (*masked_cons < masked_prod || \
        size <= ring_size - *masked_cons) { \
        memcpy(opaque, buf + *masked_cons, size); \
    } else { \
        memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons); \
        memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf, \
               size - (ring_size - *masked_cons)); \
    } \
    *masked_cons = name##_mask(*masked_cons + size, ring_size); \
} \
\
static inline void name##_write_packet(unsigned char *buf, \
                                       const void *opaque, \
                                       size_t size, \
                                       RING_IDX *masked_prod, \
                                       RING_IDX masked_cons, \
                                       RING_IDX ring_size) \
{ \
    if (*masked_prod < masked_cons || \
        size <= ring_size - *masked_prod) { \
        memcpy(buf + *masked_prod, opaque, size); \
    } else { \
        memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod); \
        memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod), \
               size - (ring_size - *masked_prod)); \
    } \
    *masked_prod = name##_mask(*masked_prod + size, ring_size); \
} \
\
static inline RING_IDX name##_queued(RING_IDX prod, \
                                     RING_IDX cons, \
                                     RING_IDX ring_size) \
{ \
    RING_IDX size; \
\
    if (prod == cons) \
        return 0; \
\
    prod = name##_mask(prod, ring_size); \
    cons = name##_mask(cons, ring_size); \
\
    if (prod == cons) \
        return ring_size; \
\
    if (prod > cons) \
        size = prod - cons; \
    else \
        size = ring_size - (cons - prod); \
    return size; \
} \
\
struct name##_data { \
    unsigned char *in; /* half of the allocation */ \
    unsigned char *out; /* half of the allocation */ \
}

#define DEFINE_XEN_FLEX_RING_AND_INTF(name) \
struct name##_data_intf { \
    RING_IDX in_cons, in_prod; \
\
    uint8_t pad1[56]; \
\
    RING_IDX out_cons, out_prod; \
\
    uint8_t pad2[56]; \
\
    RING_IDX ring_order; \
    grant_ref_t ref[]; \
}; \
DEFINE_XEN_FLEX_RING(name)

#endif /* __XEN_PUBLIC_IO_RING_H__ */
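/*
 * Illustrative FLEX ring usage sketch (hypothetical "example" protocol, not
 * part of the interface).  A hypothetical instantiation
 *
 *     DEFINE_XEN_FLEX_RING_AND_INTF(example);
 *
 * provides struct example_data_intf plus the example_mask/example_queued/
 * example_write_packet helpers used below.  The sketch appends "len" bytes
 * to the "out" ring; it assumes the shared data area was allocated with the
 * order stored in intf->ring_order, and the caller sends any event-channel
 * notification itself.
 *
 *     static int example_send(struct example_data_intf *intf,
 *                             struct example_data *data,
 *                             const void *buf, size_t len)
 *     {
 *         RING_IDX ring_size = XEN_FLEX_RING_SIZE(intf->ring_order);
 *         RING_IDX cons = intf->out_cons;
 *         RING_IDX prod = intf->out_prod;
 *         RING_IDX masked_prod, masked_cons;
 *
 *         virt_mb();
 *         if (ring_size - example_queued(prod, cons, ring_size) < len)
 *             return -1;
 *
 *         masked_prod = example_mask(prod, ring_size);
 *         masked_cons = example_mask(cons, ring_size);
 *         example_write_packet(data->out, buf, len,
 *                              &masked_prod, masked_cons, ring_size);
 *         virt_wmb();
 *         intf->out_prod += len;
 *         return 0;
 *     }
 */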