
/*
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 * (Excerpted from drivers/xen/xenbus/xenbus_client.c.)
 */
/**
 * xenbus_watch_path - register a watch
 *
 * Register a @watch on the given @path with the given @callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
        /* xenbus_watch_path() body: */
        watch->node = path;
        watch->will_handle = will_handle;
        watch->callback = callback;

        err = register_xenbus_watch(watch);
        if (err) {
                /* Registration failed: detach so the caller can free @path. */
                watch->node = NULL;
                watch->will_handle = NULL;
                watch->callback = NULL;
        }
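/*
 * Illustrative sketch (not from this file): a frontend watching the backend's
 * "state" node via xenbus_watch_path().  The otherend_watch variable and
 * backend_changed() callback are hypothetical.
 */
static void backend_changed(struct xenbus_watch *watch,
                            const char *path, const char *token)
{
        /* react to the backend's state change */
}

static int watch_backend(struct xenbus_device *dev,
                         struct xenbus_watch *otherend_watch)
{
        char *path = kasprintf(GFP_KERNEL, "%s/state", dev->otherend);
        int err;

        if (!path)
                return -ENOMEM;
        err = xenbus_watch_path(dev, path, otherend_watch, NULL,
                                backend_changed);
        if (err)
                kfree(path);    /* on error the watch did not keep @path */
        return err;
}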
/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 *
 * Return 0 on success, or -errno on error.  On success, the watched path
 * (@path/@path2) will be saved as @watch->node, and becomes the caller's to
 * kfree().  On error, @watch->node will be NULL, so the caller has nothing to
 * free, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
        /* In xenbus_watch_pathfmt(), when allocating the formatted path fails: */
        xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
        return -ENOMEM;
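/*
 * Illustrative sketch (not from this file): the same watch as above without
 * building the path by hand; backend_changed() is the hypothetical callback
 * from the previous sketch.
 */
        err = xenbus_watch_pathfmt(dev, &otherend_watch, NULL, backend_changed,
                                   "%s/%s", dev->otherend, "state");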
        /*
         * __xenbus_switch_state(): because of the cached dev->state, this
         * function will not take a caller's Xenstore transaction (something
         * it was trying to in the past), as dev->state would not get reset
         * if the transaction were aborted.
         */
        if (state == dev->state)
                return 0;
        ...
        err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
        ...
        err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
        ...
        if (err == -EAGAIN && !abort)
                goto again;     /* transaction conflict: retry from the top */
        ...
        dev->state = state;
/**
 * xenbus_switch_state - save the new state of a driver
 *
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
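/*
 * Illustrative sketch (not from this file): a frontend advertising readiness.
 * Calling with the current state is a no-op thanks to the dev->state check
 * above, and failures are reported to Xenstore internally.
 */
        err = xenbus_switch_state(dev, XenbusStateConnected);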
        /* xenbus_frontend_closed(): wake anyone waiting for the device to go down. */
        complete(&dev->down);
        /* xenbus_va_dev_error(): format "<errno> <message>", log it, and
         * mirror it into the device's error node in Xenstore. */
        len = sprintf(printf_buffer, "%i ", -err);
        vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

        dev_err(&dev->dev, "%s\n", printf_buffer);

        path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
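/*
 * Illustrative sketch (not from this file): drivers report failures through
 * xenbus_dev_error()/xenbus_dev_fatal(), which funnel into
 * xenbus_va_dev_error() above.  The "setting up ring" message is made up.
 */
        if (err)
                xenbus_dev_fatal(dev, err, "setting up ring");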
/**
 * xenbus_grant_ring - grant access to the given ring pages from the otherend
 * of the given device, filling in the caller's grant-reference array.  Return
 * 0 on success, or -errno on error.  On error, the device will switch to
 * XenbusStateClosing, and the error will be saved in the store.
 */
        /* One grant per page; the final 0 makes the mapping writable: */
        err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
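/*
 * Illustrative sketch (not from this file), assuming the multi-page
 * xenbus_grant_ring(dev, vaddr, nr_pages, grefs) form that the line above
 * belongs to.  The sring and gref names are hypothetical.
 */
        void *sring = (void *)get_zeroed_page(GFP_KERNEL);
        grant_ref_t gref;
        int err;

        if (!sring)
                return -ENOMEM;
        err = xenbus_grant_ring(dev, sring, 1, &gref);
        if (err) {
                free_page((unsigned long)sring);
                return err;
        }
        /* gref is then published to the other end, typically via xenbus_printf(). */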
/**
 * xenbus_alloc_evtchn - allocate an event channel for the given xenbus_device,
 * assigning the newly created local port to *port.  Return 0 on success, or
 * -errno on error.  On error, the device will switch to XenbusStateClosing,
 * and the error will be saved in the store.
 */
        /* The channel is unbound; the other end binds to it later: */
        alloc_unbound.dom = DOMID_SELF;
        alloc_unbound.remote_dom = dev->otherend_id;
/* xenbus_free_evtchn() - free an existing event channel.  Returns 0 on
 * success or -errno on error. */
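/*
 * Illustrative sketch (not from this file): allocate an unbound channel on
 * connect and release it on disconnect.  Real drivers also bind an IRQ
 * handler to the port; that step is omitted here.
 */
        evtchn_port_t evtchn;
        int err;

        err = xenbus_alloc_evtchn(dev, &evtchn);
        if (err)
                return err;
        /* ... publish the port, bind an irq, exchange notifications ... */
        xenbus_free_evtchn(dev, evtchn);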
/**
 * xenbus_map_ring_valloc - allocate nr_grefs pages of virtual address space,
 * map the other domain's grant pages into that space, and set *vaddr to that
 * address.  Returns 0 on success, and -errno on error.  If an error is
 * returned, the device will switch to XenbusStateClosing and the error
 * message will be saved in XenStore.
 */
        if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
        if (!info->node)
                err = -ENOMEM;
        else
                err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);

        kfree(info->node);
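/*
 * Illustrative sketch (not from this file): a backend mapping a single-page
 * frontend ring.  Reading "ring-ref" from dev->otherend is the conventional
 * pattern, but the names here are assumptions.
 */
        grant_ref_t ring_ref;
        void *addr;
        int err;

        if (xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref) != 1)
                return -ENOENT;
        err = xenbus_map_ring_valloc(dev, &ring_ref, 1, &addr);
        if (err)
                return err;     /* already reported via xenbus_dev_fatal() */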
/* N.B. sizeof(phys_addr_t) doesn't always equal to sizeof(unsigned
 * long), e.g. 32-on-64.  Caller is responsible for preparing the
 * right array to feed into this function. */
        /* __xenbus_map_ring(): */
        if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;

        for (i = 0; i < nr_grefs; i++)
                gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
                                  gnt_refs[i], dev->otherend_id);

        gnttab_batch_map(info->map, i);

        for (i = 0; i < nr_grefs; i++) {
                if (info->map[i].status != GNTST_okay) {
                        xenbus_dev_fatal(dev, info->map[i].status,
                                         "mapping in shared page %d from domain %d",
                                         gnt_refs[i], dev->otherend_id);
                        goto fail;
                }
                handles[i] = info->map[i].handle;
        }
        return 0;

 fail:
        /* Unwind whatever did map. */
        for (i = j = 0; i < nr_grefs; i++)
                if (handles[i] != INVALID_GRANT_HANDLE)
                        gnttab_set_unmap_op(&info->unmap[j++],
                                            info->phys_addrs[i],
                                            GNTMAP_host_map, handles[i]);

        BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j));

        for (i = 0; i < j; i++)
                if (info->unmap[i].status != GNTST_okay)
                        *leaked = true; /* a grant is stuck; pages must not be reused */

        return -ENOENT;
        /* xenbus_unmap_ring(): */
        if (nr_handles > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;
        /* xenbus_map_ring_setup_grant_hvm(): record where each grant maps. */
        info->phys_addrs[info->idx] = vaddr;
        info->addrs[info->idx] = vaddr;
        info->idx++;
        /* xenbus_map_ring_hvm(): back the mapping with unpopulated pages. */
        struct xenbus_map_node *node = info->node;

        err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages);
        ...
        gnttab_foreach_grant(node->hvm.pages, nr_grefs,
                             xenbus_map_ring_setup_grant_hvm, info);

        err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
                                info, GNTMAP_host_map, &leaked);
        node->nr_handles = nr_grefs;
        ...
        addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
                    PAGE_KERNEL);
        if (!addr)
                err = -ENOMEM;
        ...
        node->hvm.addr = addr;
        list_add(&node->next, &xenbus_valloc_pages);
        info->node = NULL;      /* ownership moved to the valloc list */
        ...
        /* error unwinding, when no grant leaked: */
        xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
        ...
        xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
        /* xenbus_unmap_ring_vfree(): dispatch to the PV or HVM unmap path. */
        return ring_ops->unmap(dev, vaddr);
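/*
 * Illustrative sketch (not from this file): teardown mirror of the backend
 * mapping sketch above; addr must be the pointer that
 * xenbus_map_ring_valloc() returned.
 */
        xenbus_unmap_ring_vfree(dev, addr);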
        /* map_ring_apply() (PV): stash each PTE's machine address; the
         * hypervisor fills the PTE when GNTMAP_contains_pte is set. */
        info->phys_addrs[info->idx++] = arbitrary_virt_to_machine(pte).maddr;
        /* xenbus_map_ring_pv(): reserve VA space, then map grants into its PTEs. */
        struct xenbus_map_node *node = info->node;
        int err = -ENOMEM;

        area = get_vm_area(XEN_PAGE_SIZE * nr_grefs, VM_IOREMAP);
        if (!area)
                return -ENOMEM;
        if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
                                XEN_PAGE_SIZE * nr_grefs, map_ring_apply, info))
                goto failed;
        err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
                                info, GNTMAP_host_map | GNTMAP_contains_pte,
                                &leaked);
        ...
        node->nr_handles = nr_grefs;
        node->pv.area = area;
        list_add(&node->next, &xenbus_valloc_pages);
        *vaddr = area->addr;
        info->node = NULL;
        /* xenbus_unmap_ring_pv(): find the node registered for this vaddr. */
        if (node->pv.area->addr == vaddr) {
                list_del(&node->next);
                ...
        }
        ...
        xenbus_dev_error(dev, -ENOENT,
                         "can't find mapped virtual address %p", vaddr);
        ...
        for (i = 0; i < node->nr_handles; i++)
                unmap[i].handle = node->handles[i];
        ...
        for (i = 0; i < node->nr_handles; i++)
                if (unmap[i].status != GNTST_okay)
                        xenbus_dev_error(dev, unmap[i].status,
                                         "unmapping page at handle %d error %d",
                                         node->handles[i], unmap[i].status);
        ...
        if (!leaked)
                free_vm_area(node->pv.area);
        else
                pr_alert("leaking VM area %p size %u page(s)",
                         node->pv.area, node->nr_handles);
        /* xenbus_unmap_ring_setup_grant_hvm(): */
        info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);
        info->idx++;
        /* xenbus_unmap_ring_hvm(): locate the node whose node->hvm.addr == vaddr
         * and remove it with list_del(&node->next); if none is found, report: */
        xenbus_dev_error(dev, -ENOENT,
                         "can't find mapped virtual address %p", vaddr);
        ...
        nr_pages = XENBUS_PAGES(node->nr_handles);
        gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
                             xenbus_unmap_ring_setup_grant_hvm, &info);
        rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
                               info.addrs);
        if (!rv) {
                vunmap(vaddr);
                xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
        }
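/*
 * Illustrative end-to-end sketch (not from this file): the client-facing
 * calls above combined into a hypothetical frontend setup path.  All names
 * (setup_ring, ring, ring_ref, evtchn) are assumptions, and error unwinding
 * (ending the grant, freeing the channel and page) is only noted, not shown.
 */
static int setup_ring(struct xenbus_device *dev)
{
        struct xenbus_transaction xbt;
        grant_ref_t ring_ref;
        evtchn_port_t evtchn;
        void *ring;
        int err;

        ring = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        err = xenbus_grant_ring(dev, ring, 1, &ring_ref);
        if (err)
                goto fail;
        err = xenbus_alloc_evtchn(dev, &evtchn);
        if (err)
                goto fail;

again:
        err = xenbus_transaction_start(&xbt);
        if (err)
                goto fail;
        err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", ring_ref);
        if (!err)
                err = xenbus_printf(xbt, dev->nodename, "event-channel",
                                    "%u", evtchn);
        if (err) {
                xenbus_transaction_end(xbt, 1);         /* abort */
                goto fail;
        }
        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;                             /* conflict: retry */
        if (err)
                goto fail;

        return xenbus_switch_state(dev, XenbusStateInitialised);
fail:
        /* Real code must unwind here: end the grant, free the channel and
         * the page.  Most errors were already reported via xenbus_dev_fatal(). */
        return err;
}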