/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>

#include "xenbus_probe.h"
/* Bookkeeping for one shared page mapped for a device; kept on
 * xenbus_valloc_pages under xenbus_valloc_lock. */
struct xenbus_map_node {
        struct list_head next;
        union {
                struct vm_struct *area; /* PV */
                struct page *page;      /* HVM */
        };
        grant_handle_t handle;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

/* Ring map/unmap differs between PV and HVM guests; the implementation
 * is selected at init time by xenbus_ring_ops_init(). */
struct xenbus_ring_ops {
        int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
        int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;
const char *xenbus_strstate(enum xenbus_state state)
{
        static const char *const name[] = {
                [XenbusStateUnknown]       = "Unknown",
                [XenbusStateInitialising]  = "Initialising",
                [XenbusStateInitWait]      = "InitWait",
                [XenbusStateInitialised]   = "Initialised",
                [XenbusStateConnected]     = "Connected",
                [XenbusStateClosing]       = "Closing",
                [XenbusStateClosed]        = "Closed",
                [XenbusStateReconfiguring] = "Reconfiguring",
                [XenbusStateReconfigured]  = "Reconfigured",
        };
        return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);
/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
                      struct xenbus_watch *watch,
                      void (*callback)(struct xenbus_watch *,
                                       const char **, unsigned int))
{
        int err;

        watch->node = path;
        watch->callback = callback;

        err = register_xenbus_watch(watch);

        if (err) {
                watch->node = NULL;
                watch->callback = NULL;
                xenbus_dev_fatal(dev, err, "adding watch on %s", path);
        }

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
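
/*
 * Illustrative sketch (not part of the original file): a driver typically
 * watches a node under the peer's XenStore directory.  The names info,
 * info->path and backend_changed() are hypothetical driver-private names;
 * info->path is a caller-owned string, which is saved as watch->node and
 * remains the caller's to free.
 *
 *      static void backend_changed(struct xenbus_watch *watch,
 *                                  const char **vec, unsigned int len)
 *      {
 *              ... vec[XS_WATCH_PATH] names the node that changed ...
 *      }
 *
 *      err = xenbus_watch_path(dev, info->path, &info->watch,
 *                              backend_changed);
 *      if (err)
 *              return err;     (xenbus_watch_path already reported it)
 */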
/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from @pathfmt and its arguments, using
 * the given xenbus_watch structure for storage, and the given @callback
 * function as the callback.  Return 0 on success, or -errno on error.  On
 * success, the generated path will be saved as @watch->node, and becomes the
 * caller's to kfree().  On error, watch->node will be NULL, so the caller has
 * nothing to free, the device will switch to %XenbusStateClosing, and the
 * error will be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
                         struct xenbus_watch *watch,
                         void (*callback)(struct xenbus_watch *,
                                          const char **, unsigned int),
                         const char *pathfmt, ...)
{
        int err;
        va_list ap;
        char *path;

        va_start(ap, pathfmt);
        path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
        va_end(ap);

        if (!path) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
                return -ENOMEM;
        }
        err = xenbus_watch_path(dev, path, watch, callback);

        if (err)
                kfree(path);
        return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
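
/*
 * Illustrative sketch (not part of the original file): here the path is
 * built and kvasprintf-allocated for you; info and ring_changed() are
 * hypothetical.  On success, kfree(watch->node) when done with the watch.
 *
 *      err = xenbus_watch_pathfmt(dev, &info->watch, ring_changed,
 *                                 "%s/ring-ref", dev->otherend);
 */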
static void xenbus_switch_fatal(struct xenbus_device *, int, int,
                                const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
                      enum xenbus_state state, int depth)
{
        /* We check whether the state is currently set to the given value, and
           if not, then the state is set.  We don't want to unconditionally
           write the given state, because we don't want to fire watches
           unnecessarily.  Furthermore, if the node has gone, we don't write
           to it, as the device will be tearing down, and we don't want to
           resurrect that directory.

           Note that, because of this cached value of our state, this
           function will not take a caller's Xenstore transaction
           (something it attempted in the past) because dev->state
           would not get reset if the transaction was aborted.
         */

        struct xenbus_transaction xbt;
        int current_state;
        int err, abort;

        if (state == dev->state)
                return 0;

again:
        abort = 1;

        err = xenbus_transaction_start(&xbt);
        if (err) {
                xenbus_switch_fatal(dev, depth, err, "starting transaction");
                return 0;
        }

        err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
        if (err != 1)
                goto abort;

        err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
        if (err) {
                xenbus_switch_fatal(dev, depth, err, "writing new state");
                goto abort;
        }

        abort = 0;
abort:
        err = xenbus_transaction_end(xbt, abort);
        if (err) {
                if (err == -EAGAIN && !abort)
                        goto again;
                xenbus_switch_fatal(dev, depth, err, "ending transaction");
        } else
                dev->state = state;

        return 0;
}
/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given new
 * @state.  Return 0 on success, or -errno on error.  On error, the device
 * will switch to XenbusStateClosing, and the error will be saved in the
 * store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
        return __xenbus_switch_state(dev, state, 0);
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);
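
/*
 * Illustrative sketch (not part of the original file): the usual call in a
 * driver's connect path, after its XenStore entries have been written:
 *
 *      err = xenbus_switch_state(dev, XenbusStateInitWait);
 *      if (err)
 *              return err;
 *
 * Switching to the state already advertised is a no-op (the cached
 * dev->state is checked first), so callers need not track whether the
 * state was already written.
 */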
int xenbus_frontend_closed(struct xenbus_device *dev)
{
        xenbus_switch_state(dev, XenbusStateClosed);
        complete(&dev->down);
        return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

/*
 * Return the path to the error node for the given device, or NULL on failure.
 * If the value returned is non-NULL, then it is the caller's to kfree.
 */
static char *error_path(struct xenbus_device *dev)
{
        return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
                                const char *fmt, va_list ap)
{
        int ret;
        unsigned int len;
        char *printf_buffer = NULL;
        char *path_buffer = NULL;

#define PRINTF_BUFFER_SIZE 4096
        printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
        if (printf_buffer == NULL)
                goto fail;

        len = sprintf(printf_buffer, "%i ", -err);
        ret = vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

        BUG_ON(len + ret > PRINTF_BUFFER_SIZE - 1);

        dev_err(&dev->dev, "%s\n", printf_buffer);

        path_buffer = error_path(dev);

        if (path_buffer == NULL) {
                dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
                        dev->nodename, printf_buffer);
                goto fail;
        }

        if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
                dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
                        dev->nodename, printf_buffer);
                goto fail;
        }

fail:
        kfree(printf_buffer);
        kfree(path_buffer);
}
/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);
/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);

        xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
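
/*
 * Illustrative sketch (not part of the original file): the common error
 * idiom in a driver's setup path; the message text is an example, and is
 * written to the store after a "<errno> " prefix:
 *
 *      err = xenbus_transaction_start(&xbt);
 *      if (err) {
 *              xenbus_dev_fatal(dev, err, "starting transaction");
 *              return err;
 *      }
 */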
/*
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoid recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
                                const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);

        if (!depth)
                __xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @ring_mfn: mfn of ring to grant
 *
 * Grant access to the given @ring_mfn to the peer of the given device.
 * Return a grant reference on success, or -errno on error.  On error, the
 * device will switch to XenbusStateClosing, and the error will be saved in
 * the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
        int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
        if (err < 0)
                xenbus_dev_fatal(dev, err, "granting access to ring page");
        return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
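
/*
 * Illustrative sketch (not part of the original file): granting a freshly
 * allocated shared page to the peer.  info->ring is a hypothetical page
 * obtained with e.g. get_zeroed_page(); the returned grant reference is
 * what the peer later passes to xenbus_map_ring_valloc().
 *
 *      int ref = xenbus_grant_ring(dev, virt_to_mfn(info->ring));
 *      if (ref < 0)
 *              return ref;     (the error is already in the store)
 *      ... advertise ref under dev->nodename, e.g. as "ring-ref" ...
 */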
/*
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
        struct evtchn_alloc_unbound alloc_unbound;
        int err;

        alloc_unbound.dom = DOMID_SELF;
        alloc_unbound.remote_dom = dev->otherend_id;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                          &alloc_unbound);
        if (err)
                xenbus_dev_fatal(dev, err, "allocating event channel");
        else
                *port = alloc_unbound.port;

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
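
/*
 * Illustrative sketch (not part of the original file): allocating the event
 * channel for a shared ring and advertising it to the peer; info->evtchn is
 * a hypothetical field.
 *
 *      err = xenbus_alloc_evtchn(dev, &info->evtchn);
 *      if (err)
 *              return err;
 *      err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
 *                          info->evtchn);
 */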
/*
 * Bind to an existing interdomain event channel in another domain.  Returns 0
 * on success and stores the local port in *port.  On error, returns -errno,
 * switches the device to XenbusStateClosing, and saves the error in XenStore.
 */
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
{
        struct evtchn_bind_interdomain bind_interdomain;
        int err;

        bind_interdomain.remote_dom = dev->otherend_id;
        bind_interdomain.remote_port = remote_port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                          &bind_interdomain);
        if (err)
                xenbus_dev_fatal(dev, err,
                                 "binding to event channel %d from domain %d",
                                 remote_port, dev->otherend_id);
        else
                *port = bind_interdomain.local_port;

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
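
/*
 * Illustrative sketch (not part of the original file): a backend reading
 * the port its frontend advertised, then binding to it; remote_port and
 * port are hypothetical locals.
 *
 *      err = xenbus_scanf(XBT_NIL, dev->otherend, "event-channel", "%u",
 *                         &remote_port);
 *      if (err != 1)
 *              return -EINVAL;
 *      err = xenbus_bind_evtchn(dev, remote_port, &port);
 */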
/*
 * Free an existing event channel.  Returns 0 on success, or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
        struct evtchn_close close;
        int err;

        close.port = port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
        if (err)
                xenbus_dev_error(dev, err, "freeing event channel %d", port);

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Based on Rusty Russell's skeleton driver's map_page.
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
 * page to that address, and sets *vaddr to that address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
        return ring_ops->map(dev, gnt_ref, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
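
/*
 * Illustrative sketch (not part of the original file): a backend mapping
 * the ring page its frontend granted.  ring_ref would have been read from
 * XenStore; info->ring_area is a hypothetical field.  Pair with
 * xenbus_unmap_ring_vfree() on teardown.
 *
 *      err = xenbus_map_ring_valloc(dev, ring_ref, &info->ring_area);
 *      if (err)
 *              return err;
 */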
static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
                                     int gnt_ref, void **vaddr)
{
        struct gnttab_map_grant_ref op = {
                .flags = GNTMAP_host_map | GNTMAP_contains_pte,
                .ref   = gnt_ref,
                .dom   = dev->otherend_id,
        };
        struct xenbus_map_node *node;
        struct vm_struct *area;
        pte_t *pte;

        *vaddr = NULL;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        area = alloc_vm_area(PAGE_SIZE, &pte);
        if (!area) {
                kfree(node);
                return -ENOMEM;
        }

        op.host_addr = arbitrary_virt_to_machine(pte).maddr;

        gnttab_batch_map(&op, 1);

        if (op.status != GNTST_okay) {
                free_vm_area(area);
                kfree(node);
                xenbus_dev_fatal(dev, op.status,
                                 "mapping in shared page %d from domain %d",
                                 gnt_ref, dev->otherend_id);
                return op.status;
        }

        node->handle = op.handle;
        node->area = area;

        spin_lock(&xenbus_valloc_lock);
        list_add(&node->next, &xenbus_valloc_pages);
        spin_unlock(&xenbus_valloc_lock);

        *vaddr = area->addr;
        return 0;
}

static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
                                      int gnt_ref, void **vaddr)
{
        struct xenbus_map_node *node;
        int err;
        void *addr;

        *vaddr = NULL;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
        if (err)
                goto out_err;

        addr = pfn_to_kaddr(page_to_pfn(node->page));

        err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
        if (err)
                goto out_err_free_ballooned_pages;

        spin_lock(&xenbus_valloc_lock);
        list_add(&node->next, &xenbus_valloc_pages);
        spin_unlock(&xenbus_valloc_lock);

        *vaddr = addr;
        return 0;

out_err_free_ballooned_pages:
        free_xenballooned_pages(1, &node->page);
out_err:
        kfree(node);
        return err;
}
/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!).  It only maps in the page to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
                    grant_handle_t *handle, void *vaddr)
{
        struct gnttab_map_grant_ref op;

        gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
                          dev->otherend_id);

        gnttab_batch_map(&op, 1);

        if (op.status != GNTST_okay) {
                xenbus_dev_fatal(dev, op.status,
                                 "mapping in shared page %d from domain %d",
                                 gnt_ref, dev->otherend_id);
        } else
                *handle = op.handle;

        return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);
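
/*
 * Illustrative sketch (not part of the original file): mapping into an
 * address the caller already allocated; info->addr and info->handle are
 * hypothetical.  Keep the handle: xenbus_unmap_ring() needs it.
 *
 *      err = xenbus_map_ring(dev, ring_ref, &info->handle, info->addr);
 *      if (err)
 *              return err;
 */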
/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success, or GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
        return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
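
/*
 * Illustrative sketch (not part of the original file): the teardown mirror
 * of xenbus_map_ring_valloc(); info->ring_area is hypothetical.
 *
 *      if (info->ring_area) {
 *              xenbus_unmap_ring_vfree(dev, info->ring_area);
 *              info->ring_area = NULL;
 *      }
 */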
static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
        struct xenbus_map_node *node;
        struct gnttab_unmap_grant_ref op = {
                .host_addr = (unsigned long)vaddr,
        };
        unsigned int level;

        spin_lock(&xenbus_valloc_lock);
        list_for_each_entry(node, &xenbus_valloc_pages, next) {
                if (node->area->addr == vaddr) {
                        list_del(&node->next);
                        goto found;
                }
        }
        node = NULL;
found:
        spin_unlock(&xenbus_valloc_lock);

        if (!node) {
                xenbus_dev_error(dev, -ENOENT,
                                 "can't find mapped virtual address %p", vaddr);
                return GNTST_bad_virt_addr;
        }

        op.handle = node->handle;
        op.host_addr = arbitrary_virt_to_machine(
                lookup_address((unsigned long)vaddr, &level)).maddr;

        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
                BUG();

        if (op.status == GNTST_okay)
                free_vm_area(node->area);
        else
                xenbus_dev_error(dev, op.status,
                                 "unmapping page at handle %d error %d",
                                 node->handle, op.status);

        kfree(node);
        return op.status;
}

static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
{
        int rv;
        struct xenbus_map_node *node;
        void *addr;

        spin_lock(&xenbus_valloc_lock);
        list_for_each_entry(node, &xenbus_valloc_pages, next) {
                addr = pfn_to_kaddr(page_to_pfn(node->page));
                if (addr == vaddr) {
                        list_del(&node->next);
                        goto found;
                }
        }
        node = addr = NULL;
found:
        spin_unlock(&xenbus_valloc_lock);

        if (!node) {
                xenbus_dev_error(dev, -ENOENT,
                                 "can't find mapped virtual address %p", vaddr);
                return GNTST_bad_virt_addr;
        }

        rv = xenbus_unmap_ring(dev, node->handle, addr);

        if (!rv)
                free_xenballooned_pages(1, &node->page);
        else
                WARN(1, "Leaking %p\n", vaddr);

        kfree(node);
        return rv;
}
/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another domain.
 * Returns 0 on success, or GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
                      grant_handle_t handle, void *vaddr)
{
        struct gnttab_unmap_grant_ref op;

        gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle);

        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
                BUG();

        if (op.status != GNTST_okay)
                xenbus_dev_error(dev, op.status,
                                 "unmapping page at handle %d error %d",
                                 handle, op.status);

        return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
        enum xenbus_state result;
        int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
        if (err)
                result = XenbusStateUnknown;

        return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
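
/*
 * Illustrative sketch (not part of the original file): polling the peer's
 * state, as drivers do while waiting for the other end to close:
 *
 *      enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
 *      if (state == XenbusStateClosed)
 *              ... the peer has finished its teardown ...
 */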
static const struct xenbus_ring_ops ring_ops_pv = {
        .map = xenbus_map_ring_valloc_pv,
        .unmap = xenbus_unmap_ring_vfree_pv,
};

static const struct xenbus_ring_ops ring_ops_hvm = {
        .map = xenbus_map_ring_valloc_hvm,
        .unmap = xenbus_unmap_ring_vfree_hvm,
};

void __init xenbus_ring_ops_init(void)
{
        if (xen_pv_domain())
                ring_ops = &ring_ops_pv;
        else
                ring_ops = &ring_ops_hvm;
}