Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-5.19-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

- decouple the PV interface from kernel internals in the Xen
scsifront/scsiback PV drivers

- harden the Xen scsifront PV driver against a malicious backend driver

- simplify Xen PV frontend driver ring page setup

- support Xen setups with multiple domains created at boot time to
tolerate Xenstore coming up late

- two small cleanup patches

* tag 'for-linus-5.19-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (29 commits)
xen: add support for initializing xenstore later as HVM domain
xen: sync xs_wire.h header with upstream xen
x86: xen: remove STACK_FRAME_NON_STANDARD from xen_cpuid
xen-blk{back,front}: Update contact points for buffer_squeeze_duration_ms and feature_persistent
xen/xenbus: eliminate xenbus_grant_ring()
xen/sndfront: use xenbus_setup_ring() and xenbus_teardown_ring()
xen/usbfront: use xenbus_setup_ring() and xenbus_teardown_ring()
xen/scsifront: use xenbus_setup_ring() and xenbus_teardown_ring()
xen/pcifront: use xenbus_setup_ring() and xenbus_teardown_ring()
xen/drmfront: use xenbus_setup_ring() and xenbus_teardown_ring()
xen/tpmfront: use xenbus_setup_ring() and xenbus_teardown_ring()
xen/netfront: use xenbus_setup_ring() and xenbus_teardown_ring()
xen/blkfront: use xenbus_setup_ring() and xenbus_teardown_ring()
xen/xenbus: add xenbus_setup_ring() service function
xen: update ring.h
xen/shbuf: switch xen-front-pgdir-shbuf to use INVALID_GRANT_REF
xen/dmabuf: switch gntdev-dmabuf to use INVALID_GRANT_REF
xen/sound: switch xen_snd_front to use INVALID_GRANT_REF
xen/drm: switch xen_drm_front to use INVALID_GRANT_REF
xen/usb: switch xen-hcd to use INVALID_GRANT_REF
...

+736 -478
+2 -2
Documentation/ABI/testing/sysfs-driver-xen-blkback
··· 29 29 What: /sys/module/xen_blkback/parameters/buffer_squeeze_duration_ms 30 30 Date: December 2019 31 31 KernelVersion: 5.6 32 - Contact: SeongJae Park <sj@kernel.org> 32 + Contact: Maximilian Heyne <mheyne@amazon.de> 33 33 Description: 34 34 When memory pressure is reported to blkback this option 35 35 controls the duration in milliseconds that blkback will not ··· 39 39 What: /sys/module/xen_blkback/parameters/feature_persistent 40 40 Date: September 2020 41 41 KernelVersion: 5.10 42 - Contact: SeongJae Park <sj@kernel.org> 42 + Contact: Maximilian Heyne <mheyne@amazon.de> 43 43 Description: 44 44 Whether to enable the persistent grants feature or not. Note 45 45 that this option only takes effect on newly created backends.
+1 -1
Documentation/ABI/testing/sysfs-driver-xen-blkfront
··· 12 12 What: /sys/module/xen_blkfront/parameters/feature_persistent 13 13 Date: September 2020 14 14 KernelVersion: 5.10 15 - Contact: SeongJae Park <sj@kernel.org> 15 + Contact: Maximilian Heyne <mheyne@amazon.de> 16 16 Description: 17 17 Whether to enable the persistent grants feature or not. Note 18 18 that this option only takes effect on newly created frontends.
-2
arch/x86/xen/enlighten_pv.c
··· 30 30 #include <linux/pci.h> 31 31 #include <linux/gfp.h> 32 32 #include <linux/edd.h> 33 - #include <linux/objtool.h> 34 33 35 34 #include <xen/xen.h> 36 35 #include <xen/events.h> ··· 164 165 165 166 *bx &= maskebx; 166 167 } 167 - STACK_FRAME_NON_STANDARD(xen_cpuid); /* XEN_EMULATE_PREFIX */ 168 168 169 169 static bool __init xen_check_mwait(void) 170 170 {
+17 -40
drivers/block/xen-blkfront.c
··· 229 229 static unsigned long *minors; 230 230 static DEFINE_SPINLOCK(minor_lock); 231 231 232 - #define GRANT_INVALID_REF 0 233 - 234 232 #define PARTS_PER_DISK 16 235 233 #define PARTS_PER_EXT_DISK 256 236 234 ··· 319 321 gnt_list_entry->page = granted_page; 320 322 } 321 323 322 - gnt_list_entry->gref = GRANT_INVALID_REF; 324 + gnt_list_entry->gref = INVALID_GRANT_REF; 323 325 list_add(&gnt_list_entry->node, &rinfo->grants); 324 326 i++; 325 327 } ··· 348 350 node); 349 351 list_del(&gnt_list_entry->node); 350 352 351 - if (gnt_list_entry->gref != GRANT_INVALID_REF) 353 + if (gnt_list_entry->gref != INVALID_GRANT_REF) 352 354 rinfo->persistent_gnts_c--; 353 355 354 356 return gnt_list_entry; ··· 370 372 struct grant *gnt_list_entry = get_free_grant(rinfo); 371 373 struct blkfront_info *info = rinfo->dev_info; 372 374 373 - if (gnt_list_entry->gref != GRANT_INVALID_REF) 375 + if (gnt_list_entry->gref != INVALID_GRANT_REF) 374 376 return gnt_list_entry; 375 377 376 378 /* Assign a gref to this page */ ··· 394 396 struct grant *gnt_list_entry = get_free_grant(rinfo); 395 397 struct blkfront_info *info = rinfo->dev_info; 396 398 397 - if (gnt_list_entry->gref != GRANT_INVALID_REF) 399 + if (gnt_list_entry->gref != INVALID_GRANT_REF) 398 400 return gnt_list_entry; 399 401 400 402 /* Assign a gref to this page */ ··· 1219 1221 list_for_each_entry_safe(persistent_gnt, n, 1220 1222 &rinfo->grants, node) { 1221 1223 list_del(&persistent_gnt->node); 1222 - if (persistent_gnt->gref != GRANT_INVALID_REF) { 1224 + if (persistent_gnt->gref != INVALID_GRANT_REF) { 1223 1225 gnttab_end_foreign_access(persistent_gnt->gref, 1224 1226 0UL); 1225 1227 rinfo->persistent_gnts_c--; ··· 1280 1282 flush_work(&rinfo->work); 1281 1283 1282 1284 /* Free resources associated with old device channel. 
*/ 1283 - for (i = 0; i < info->nr_ring_pages; i++) { 1284 - if (rinfo->ring_ref[i] != GRANT_INVALID_REF) { 1285 - gnttab_end_foreign_access(rinfo->ring_ref[i], 0); 1286 - rinfo->ring_ref[i] = GRANT_INVALID_REF; 1287 - } 1288 - } 1289 - free_pages_exact(rinfo->ring.sring, 1290 - info->nr_ring_pages * XEN_PAGE_SIZE); 1291 - rinfo->ring.sring = NULL; 1285 + xenbus_teardown_ring((void **)&rinfo->ring.sring, info->nr_ring_pages, 1286 + rinfo->ring_ref); 1292 1287 1293 1288 if (rinfo->irq) 1294 1289 unbind_from_irqhandler(rinfo->irq, rinfo); ··· 1466 1475 * to the tail of the list, so it will not be picked 1467 1476 * again unless we run out of persistent grants. 1468 1477 */ 1469 - s->grants_used[i]->gref = GRANT_INVALID_REF; 1478 + s->grants_used[i]->gref = INVALID_GRANT_REF; 1470 1479 list_add_tail(&s->grants_used[i]->node, &rinfo->grants); 1471 1480 } 1472 1481 } ··· 1491 1500 indirect_page = s->indirect_grants[i]->page; 1492 1501 list_add(&indirect_page->lru, &rinfo->indirect_pages); 1493 1502 } 1494 - s->indirect_grants[i]->gref = GRANT_INVALID_REF; 1503 + s->indirect_grants[i]->gref = INVALID_GRANT_REF; 1495 1504 list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants); 1496 1505 } 1497 1506 } ··· 1672 1681 struct blkfront_ring_info *rinfo) 1673 1682 { 1674 1683 struct blkif_sring *sring; 1675 - int err, i; 1684 + int err; 1676 1685 struct blkfront_info *info = rinfo->dev_info; 1677 1686 unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE; 1678 - grant_ref_t gref[XENBUS_MAX_RING_GRANTS]; 1679 1687 1680 - for (i = 0; i < info->nr_ring_pages; i++) 1681 - rinfo->ring_ref[i] = GRANT_INVALID_REF; 1682 - 1683 - sring = alloc_pages_exact(ring_size, GFP_NOIO); 1684 - if (!sring) { 1685 - xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); 1686 - return -ENOMEM; 1687 - } 1688 - SHARED_RING_INIT(sring); 1689 - FRONT_RING_INIT(&rinfo->ring, sring, ring_size); 1690 - 1691 - err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref); 1692 - 
if (err < 0) { 1693 - free_pages_exact(sring, ring_size); 1694 - rinfo->ring.sring = NULL; 1688 + err = xenbus_setup_ring(dev, GFP_NOIO, (void **)&sring, 1689 + info->nr_ring_pages, rinfo->ring_ref); 1690 + if (err) 1695 1691 goto fail; 1696 - } 1697 - for (i = 0; i < info->nr_ring_pages; i++) 1698 - rinfo->ring_ref[i] = gref[i]; 1692 + 1693 + XEN_FRONT_RING_INIT(&rinfo->ring, sring, ring_size); 1699 1694 1700 1695 err = xenbus_alloc_evtchn(dev, &rinfo->evtchn); 1701 1696 if (err) ··· 2521 2544 2522 2545 list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants, 2523 2546 node) { 2524 - if (gnt_list_entry->gref == GRANT_INVALID_REF || 2547 + if (gnt_list_entry->gref == INVALID_GRANT_REF || 2525 2548 !gnttab_try_end_foreign_access(gnt_list_entry->gref)) 2526 2549 continue; 2527 2550 2528 2551 list_del(&gnt_list_entry->node); 2529 2552 rinfo->persistent_gnts_c--; 2530 - gnt_list_entry->gref = GRANT_INVALID_REF; 2553 + gnt_list_entry->gref = INVALID_GRANT_REF; 2531 2554 list_add_tail(&gnt_list_entry->node, &grants); 2532 2555 } 2533 2556
+3 -15
drivers/char/tpm/xen-tpmfront.c
··· 253 253 struct xenbus_transaction xbt; 254 254 const char *message = NULL; 255 255 int rv; 256 - grant_ref_t gref; 257 256 258 - priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO); 259 - if (!priv->shr) { 260 - xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); 261 - return -ENOMEM; 262 - } 263 - 264 - rv = xenbus_grant_ring(dev, priv->shr, 1, &gref); 257 + rv = xenbus_setup_ring(dev, GFP_KERNEL, (void **)&priv->shr, 1, 258 + &priv->ring_ref); 265 259 if (rv < 0) 266 260 return rv; 267 - 268 - priv->ring_ref = gref; 269 261 270 262 rv = xenbus_alloc_evtchn(dev, &priv->evtchn); 271 263 if (rv) ··· 323 331 if (!priv) 324 332 return; 325 333 326 - if (priv->ring_ref) 327 - gnttab_end_foreign_access(priv->ring_ref, 328 - (unsigned long)priv->shr); 329 - else 330 - free_page((unsigned long)priv->shr); 334 + xenbus_teardown_ring((void **)&priv->shr, 1, &priv->ring_ref); 331 335 332 336 if (priv->irq) 333 337 unbind_from_irqhandler(priv->irq, priv);
-9
drivers/gpu/drm/xen/xen_drm_front.h
··· 80 80 /* timeout in ms to wait for backend to respond */ 81 81 #define XEN_DRM_FRONT_WAIT_BACK_MS 3000 82 82 83 - #ifndef GRANT_INVALID_REF 84 - /* 85 - * Note on usage of grant reference 0 as invalid grant reference: 86 - * grant reference 0 is valid, but never exposed to a PV driver, 87 - * because of the fact it is already in use/reserved by the PV console. 88 - */ 89 - #define GRANT_INVALID_REF 0 90 - #endif 91 - 92 83 struct xen_drm_front_info { 93 84 struct xenbus_device *xb_dev; 94 85 struct xen_drm_front_drm_info *drm_info;
+11 -32
drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
··· 123 123 static void evtchnl_free(struct xen_drm_front_info *front_info, 124 124 struct xen_drm_front_evtchnl *evtchnl) 125 125 { 126 - unsigned long page = 0; 126 + void *page = NULL; 127 127 128 128 if (evtchnl->type == EVTCHNL_TYPE_REQ) 129 - page = (unsigned long)evtchnl->u.req.ring.sring; 129 + page = evtchnl->u.req.ring.sring; 130 130 else if (evtchnl->type == EVTCHNL_TYPE_EVT) 131 - page = (unsigned long)evtchnl->u.evt.page; 131 + page = evtchnl->u.evt.page; 132 132 if (!page) 133 133 return; 134 134 ··· 147 147 xenbus_free_evtchn(front_info->xb_dev, evtchnl->port); 148 148 149 149 /* end access and free the page */ 150 - if (evtchnl->gref != GRANT_INVALID_REF) 151 - gnttab_end_foreign_access(evtchnl->gref, page); 150 + xenbus_teardown_ring(&page, 1, &evtchnl->gref); 152 151 153 152 memset(evtchnl, 0, sizeof(*evtchnl)); 154 153 } ··· 157 158 enum xen_drm_front_evtchnl_type type) 158 159 { 159 160 struct xenbus_device *xb_dev = front_info->xb_dev; 160 - unsigned long page; 161 - grant_ref_t gref; 161 + void *page; 162 162 irq_handler_t handler; 163 163 int ret; 164 164 ··· 166 168 evtchnl->index = index; 167 169 evtchnl->front_info = front_info; 168 170 evtchnl->state = EVTCHNL_STATE_DISCONNECTED; 169 - evtchnl->gref = GRANT_INVALID_REF; 170 171 171 - page = get_zeroed_page(GFP_NOIO | __GFP_HIGH); 172 - if (!page) { 173 - ret = -ENOMEM; 172 + ret = xenbus_setup_ring(xb_dev, GFP_NOIO | __GFP_HIGH, &page, 173 + 1, &evtchnl->gref); 174 + if (ret) 174 175 goto fail; 175 - } 176 176 177 177 if (type == EVTCHNL_TYPE_REQ) { 178 178 struct xen_displif_sring *sring; 179 179 180 180 init_completion(&evtchnl->u.req.completion); 181 181 mutex_init(&evtchnl->u.req.req_io_lock); 182 - sring = (struct xen_displif_sring *)page; 183 - SHARED_RING_INIT(sring); 184 - FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE); 185 - 186 - ret = xenbus_grant_ring(xb_dev, sring, 1, &gref); 187 - if (ret < 0) { 188 - evtchnl->u.req.ring.sring = NULL; 189 - free_page(page); 190 
- goto fail; 191 - } 182 + sring = page; 183 + XEN_FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE); 192 184 193 185 handler = evtchnl_interrupt_ctrl; 194 186 } else { 195 - ret = gnttab_grant_foreign_access(xb_dev->otherend_id, 196 - virt_to_gfn((void *)page), 0); 197 - if (ret < 0) { 198 - free_page(page); 199 - goto fail; 200 - } 201 - 202 - evtchnl->u.evt.page = (struct xendispl_event_page *)page; 203 - gref = ret; 187 + evtchnl->u.evt.page = page; 204 188 handler = evtchnl_interrupt_evt; 205 189 } 206 - evtchnl->gref = gref; 207 190 208 191 ret = xenbus_alloc_evtchn(xb_dev, &evtchnl->port); 209 192 if (ret < 0)
+27 -58
drivers/net/xen-netfront.c
··· 78 78 79 79 #define RX_COPY_THRESHOLD 256 80 80 81 - #define GRANT_INVALID_REF 0 82 - 83 81 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE) 84 82 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE) 85 83 ··· 222 224 { 223 225 int i = xennet_rxidx(ri); 224 226 grant_ref_t ref = queue->grant_rx_ref[i]; 225 - queue->grant_rx_ref[i] = GRANT_INVALID_REF; 227 + queue->grant_rx_ref[i] = INVALID_GRANT_REF; 226 228 return ref; 227 229 } 228 230 ··· 430 432 } 431 433 gnttab_release_grant_reference( 432 434 &queue->gref_tx_head, queue->grant_tx_ref[id]); 433 - queue->grant_tx_ref[id] = GRANT_INVALID_REF; 435 + queue->grant_tx_ref[id] = INVALID_GRANT_REF; 434 436 queue->grant_tx_page[id] = NULL; 435 437 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id); 436 438 dev_kfree_skb_irq(skb); ··· 866 868 867 869 spin_lock_irqsave(&queue->rx_cons_lock, flags); 868 870 queue->rx.rsp_cons = val; 869 - queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx); 871 + queue->rx_rsp_unconsumed = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx); 870 872 spin_unlock_irqrestore(&queue->rx_cons_lock, flags); 871 873 } 872 874 ··· 1019 1021 * the backend driver. In future this should flag the bad 1020 1022 * situation to the system controller to reboot the backend. 
1021 1023 */ 1022 - if (ref == GRANT_INVALID_REF) { 1024 + if (ref == INVALID_GRANT_REF) { 1023 1025 if (net_ratelimit()) 1024 1026 dev_warn(dev, "Bad rx response id %d.\n", 1025 1027 rx->id); ··· 1388 1390 gnttab_end_foreign_access(queue->grant_tx_ref[i], 1389 1391 (unsigned long)page_address(queue->grant_tx_page[i])); 1390 1392 queue->grant_tx_page[i] = NULL; 1391 - queue->grant_tx_ref[i] = GRANT_INVALID_REF; 1393 + queue->grant_tx_ref[i] = INVALID_GRANT_REF; 1392 1394 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i); 1393 1395 dev_kfree_skb_irq(skb); 1394 1396 } ··· 1409 1411 continue; 1410 1412 1411 1413 ref = queue->grant_rx_ref[id]; 1412 - if (ref == GRANT_INVALID_REF) 1414 + if (ref == INVALID_GRANT_REF) 1413 1415 continue; 1414 1416 1415 1417 page = skb_frag_page(&skb_shinfo(skb)->frags[0]); ··· 1420 1422 get_page(page); 1421 1423 gnttab_end_foreign_access(ref, 1422 1424 (unsigned long)page_address(page)); 1423 - queue->grant_rx_ref[id] = GRANT_INVALID_REF; 1425 + queue->grant_rx_ref[id] = INVALID_GRANT_REF; 1424 1426 1425 1427 kfree_skb(skb); 1426 1428 } ··· 1498 1500 return false; 1499 1501 1500 1502 spin_lock_irqsave(&queue->rx_cons_lock, flags); 1501 - work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx); 1503 + work_queued = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx); 1502 1504 if (work_queued > queue->rx_rsp_unconsumed) { 1503 1505 queue->rx_rsp_unconsumed = work_queued; 1504 1506 *eoi = 0; ··· 1759 1761 static void xennet_end_access(int ref, void *page) 1760 1762 { 1761 1763 /* This frees the page as a side-effect */ 1762 - if (ref != GRANT_INVALID_REF) 1764 + if (ref != INVALID_GRANT_REF) 1763 1765 gnttab_end_foreign_access(ref, (unsigned long)page); 1764 1766 } 1765 1767 ··· 1796 1798 xennet_end_access(queue->tx_ring_ref, queue->tx.sring); 1797 1799 xennet_end_access(queue->rx_ring_ref, queue->rx.sring); 1798 1800 1799 - queue->tx_ring_ref = GRANT_INVALID_REF; 1800 - queue->rx_ring_ref = GRANT_INVALID_REF; 1801 + queue->tx_ring_ref = 
INVALID_GRANT_REF; 1802 + queue->rx_ring_ref = INVALID_GRANT_REF; 1801 1803 queue->tx.sring = NULL; 1802 1804 queue->rx.sring = NULL; 1803 1805 ··· 1921 1923 struct netfront_queue *queue, unsigned int feature_split_evtchn) 1922 1924 { 1923 1925 struct xen_netif_tx_sring *txs; 1924 - struct xen_netif_rx_sring *rxs = NULL; 1925 - grant_ref_t gref; 1926 + struct xen_netif_rx_sring *rxs; 1926 1927 int err; 1927 1928 1928 - queue->tx_ring_ref = GRANT_INVALID_REF; 1929 - queue->rx_ring_ref = GRANT_INVALID_REF; 1929 + queue->tx_ring_ref = INVALID_GRANT_REF; 1930 + queue->rx_ring_ref = INVALID_GRANT_REF; 1930 1931 queue->rx.sring = NULL; 1931 1932 queue->tx.sring = NULL; 1932 1933 1933 - txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); 1934 - if (!txs) { 1935 - err = -ENOMEM; 1936 - xenbus_dev_fatal(dev, err, "allocating tx ring page"); 1934 + err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&txs, 1935 + 1, &queue->tx_ring_ref); 1936 + if (err) 1937 1937 goto fail; 1938 - } 1939 - SHARED_RING_INIT(txs); 1940 - FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE); 1941 1938 1942 - err = xenbus_grant_ring(dev, txs, 1, &gref); 1943 - if (err < 0) 1944 - goto fail; 1945 - queue->tx_ring_ref = gref; 1939 + XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE); 1946 1940 1947 - rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); 1948 - if (!rxs) { 1949 - err = -ENOMEM; 1950 - xenbus_dev_fatal(dev, err, "allocating rx ring page"); 1941 + err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&rxs, 1942 + 1, &queue->rx_ring_ref); 1943 + if (err) 1951 1944 goto fail; 1952 - } 1953 - SHARED_RING_INIT(rxs); 1954 - FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); 1955 1945 1956 - err = xenbus_grant_ring(dev, rxs, 1, &gref); 1957 - if (err < 0) 1958 - goto fail; 1959 - queue->rx_ring_ref = gref; 1946 + XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); 1960 1947 1961 1948 if (feature_split_evtchn) 1962 1949 err = 
setup_netfront_split(queue); ··· 1957 1974 1958 1975 return 0; 1959 1976 1960 - /* If we fail to setup netfront, it is safe to just revoke access to 1961 - * granted pages because backend is not accessing it at this point. 1962 - */ 1963 1977 fail: 1964 - if (queue->rx_ring_ref != GRANT_INVALID_REF) { 1965 - gnttab_end_foreign_access(queue->rx_ring_ref, 1966 - (unsigned long)rxs); 1967 - queue->rx_ring_ref = GRANT_INVALID_REF; 1968 - } else { 1969 - free_page((unsigned long)rxs); 1970 - } 1971 - if (queue->tx_ring_ref != GRANT_INVALID_REF) { 1972 - gnttab_end_foreign_access(queue->tx_ring_ref, 1973 - (unsigned long)txs); 1974 - queue->tx_ring_ref = GRANT_INVALID_REF; 1975 - } else { 1976 - free_page((unsigned long)txs); 1977 - } 1978 + xenbus_teardown_ring((void **)&queue->rx.sring, 1, &queue->rx_ring_ref); 1979 + xenbus_teardown_ring((void **)&queue->tx.sring, 1, &queue->tx_ring_ref); 1980 + 1978 1981 return err; 1979 1982 } 1980 1983 ··· 1989 2020 queue->tx_pend_queue = TX_LINK_NONE; 1990 2021 for (i = 0; i < NET_TX_RING_SIZE; i++) { 1991 2022 queue->tx_link[i] = i + 1; 1992 - queue->grant_tx_ref[i] = GRANT_INVALID_REF; 2023 + queue->grant_tx_ref[i] = INVALID_GRANT_REF; 1993 2024 queue->grant_tx_page[i] = NULL; 1994 2025 } 1995 2026 queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE; ··· 1997 2028 /* Clear out rx_skbs */ 1998 2029 for (i = 0; i < NET_RX_RING_SIZE; i++) { 1999 2030 queue->rx_skbs[i] = NULL; 2000 - queue->grant_rx_ref[i] = GRANT_INVALID_REF; 2031 + queue->grant_rx_ref[i] = INVALID_GRANT_REF; 2001 2032 } 2002 2033 2003 2034 /* A grant for every tx ring slot */
+3 -16
drivers/pci/xen-pcifront.c
··· 709 709 if (pdev == NULL) 710 710 goto out; 711 711 712 - pdev->sh_info = 713 - (struct xen_pci_sharedinfo *)__get_free_page(GFP_KERNEL); 714 - if (pdev->sh_info == NULL) { 712 + if (xenbus_setup_ring(xdev, GFP_KERNEL, (void **)&pdev->sh_info, 1, 713 + &pdev->gnt_ref)) { 715 714 kfree(pdev); 716 715 pdev = NULL; 717 716 goto out; ··· 728 729 spin_lock_init(&pdev->sh_info_lock); 729 730 730 731 pdev->evtchn = INVALID_EVTCHN; 731 - pdev->gnt_ref = INVALID_GRANT_REF; 732 732 pdev->irq = -1; 733 733 734 734 INIT_WORK(&pdev->op_work, pcifront_do_aer); ··· 752 754 if (pdev->evtchn != INVALID_EVTCHN) 753 755 xenbus_free_evtchn(pdev->xdev, pdev->evtchn); 754 756 755 - if (pdev->gnt_ref != INVALID_GRANT_REF) 756 - gnttab_end_foreign_access(pdev->gnt_ref, 757 - (unsigned long)pdev->sh_info); 758 - else 759 - free_page((unsigned long)pdev->sh_info); 757 + xenbus_teardown_ring((void **)&pdev->sh_info, 1, &pdev->gnt_ref); 760 758 761 759 dev_set_drvdata(&pdev->xdev->dev, NULL); 762 760 ··· 763 769 { 764 770 int err = 0; 765 771 struct xenbus_transaction trans; 766 - grant_ref_t gref; 767 - 768 - err = xenbus_grant_ring(pdev->xdev, pdev->sh_info, 1, &gref); 769 - if (err < 0) 770 - goto out; 771 - 772 - pdev->gnt_ref = gref; 773 772 774 773 err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn); 775 774 if (err)
+140 -61
drivers/scsi/xen-scsifront.c
··· 58 58 59 59 #include <asm/xen/hypervisor.h> 60 60 61 - 62 - #define GRANT_INVALID_REF 0 63 - 64 61 #define VSCSIFRONT_OP_ADD_LUN 1 65 62 #define VSCSIFRONT_OP_DEL_LUN 2 66 63 #define VSCSIFRONT_OP_READD_LUN 3 ··· 80 83 uint16_t rqid; 81 84 uint16_t ref_rqid; 82 85 86 + bool inflight; 87 + 83 88 unsigned int nr_grants; /* number of grants in gref[] */ 84 89 struct scsiif_request_segment *sg; /* scatter/gather elements */ 85 90 struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE]; ··· 103 104 struct xenbus_device *dev; 104 105 105 106 struct Scsi_Host *host; 106 - int host_active; 107 + enum { 108 + STATE_INACTIVE, 109 + STATE_ACTIVE, 110 + STATE_ERROR 111 + } host_active; 107 112 108 113 unsigned int evtchn; 109 114 unsigned int irq; ··· 220 217 for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++) 221 218 ring_req->seg[i] = shadow->seg[i]; 222 219 220 + shadow->inflight = true; 221 + 223 222 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); 224 223 if (notify) 225 224 notify_remote_via_irq(info->irq); 226 225 227 226 return 0; 227 + } 228 + 229 + static void scsifront_set_error(struct vscsifrnt_info *info, const char *msg) 230 + { 231 + shost_printk(KERN_ERR, info->host, KBUILD_MODNAME "%s\n" 232 + "Disabling device for further use\n", msg); 233 + info->host_active = STATE_ERROR; 228 234 } 229 235 230 236 static void scsifront_gnttab_done(struct vscsifrnt_info *info, ··· 246 234 247 235 for (i = 0; i < shadow->nr_grants; i++) { 248 236 if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) { 249 - shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME 250 - "grant still in use by backend\n"); 251 - BUG(); 237 + scsifront_set_error(info, "grant still in use by backend"); 238 + return; 252 239 } 253 240 } 254 241 255 242 kfree(shadow->sg); 243 + } 244 + 245 + static unsigned int scsifront_host_byte(int32_t rslt) 246 + { 247 + switch (XEN_VSCSIIF_RSLT_HOST(rslt)) { 248 + case XEN_VSCSIIF_RSLT_HOST_OK: 249 + return DID_OK; 250 + case 
XEN_VSCSIIF_RSLT_HOST_NO_CONNECT: 251 + return DID_NO_CONNECT; 252 + case XEN_VSCSIIF_RSLT_HOST_BUS_BUSY: 253 + return DID_BUS_BUSY; 254 + case XEN_VSCSIIF_RSLT_HOST_TIME_OUT: 255 + return DID_TIME_OUT; 256 + case XEN_VSCSIIF_RSLT_HOST_BAD_TARGET: 257 + return DID_BAD_TARGET; 258 + case XEN_VSCSIIF_RSLT_HOST_ABORT: 259 + return DID_ABORT; 260 + case XEN_VSCSIIF_RSLT_HOST_PARITY: 261 + return DID_PARITY; 262 + case XEN_VSCSIIF_RSLT_HOST_ERROR: 263 + return DID_ERROR; 264 + case XEN_VSCSIIF_RSLT_HOST_RESET: 265 + return DID_RESET; 266 + case XEN_VSCSIIF_RSLT_HOST_BAD_INTR: 267 + return DID_BAD_INTR; 268 + case XEN_VSCSIIF_RSLT_HOST_PASSTHROUGH: 269 + return DID_PASSTHROUGH; 270 + case XEN_VSCSIIF_RSLT_HOST_SOFT_ERROR: 271 + return DID_SOFT_ERROR; 272 + case XEN_VSCSIIF_RSLT_HOST_IMM_RETRY: 273 + return DID_IMM_RETRY; 274 + case XEN_VSCSIIF_RSLT_HOST_REQUEUE: 275 + return DID_REQUEUE; 276 + case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_DISRUPTED: 277 + return DID_TRANSPORT_DISRUPTED; 278 + case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST: 279 + return DID_TRANSPORT_FAILFAST; 280 + case XEN_VSCSIIF_RSLT_HOST_TARGET_FAILURE: 281 + return DID_TARGET_FAILURE; 282 + case XEN_VSCSIIF_RSLT_HOST_NEXUS_FAILURE: 283 + return DID_NEXUS_FAILURE; 284 + case XEN_VSCSIIF_RSLT_HOST_ALLOC_FAILURE: 285 + return DID_ALLOC_FAILURE; 286 + case XEN_VSCSIIF_RSLT_HOST_MEDIUM_ERROR: 287 + return DID_MEDIUM_ERROR; 288 + case XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL: 289 + return DID_TRANSPORT_MARGINAL; 290 + default: 291 + return DID_ERROR; 292 + } 256 293 } 257 294 258 295 static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info, ··· 311 250 struct scsi_cmnd *sc; 312 251 uint32_t id; 313 252 uint8_t sense_len; 314 - int result; 315 253 316 254 id = ring_rsp->rqid; 317 255 shadow = info->shadow[id]; ··· 319 259 BUG_ON(sc == NULL); 320 260 321 261 scsifront_gnttab_done(info, shadow); 262 + if (info->host_active == STATE_ERROR) 263 + return; 322 264 scsifront_put_rqid(info, id); 323 265 324 - result = 
ring_rsp->rslt; 325 - if (result >> 24) 326 - set_host_byte(sc, DID_ERROR); 327 - else 328 - set_host_byte(sc, host_byte(result)); 329 - set_status_byte(sc, result & 0xff); 266 + set_host_byte(sc, scsifront_host_byte(ring_rsp->rslt)); 267 + set_status_byte(sc, XEN_VSCSIIF_RSLT_STATUS(ring_rsp->rslt)); 330 268 scsi_set_resid(sc, ring_rsp->residual_len); 331 269 332 270 sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE, ··· 348 290 shadow->wait_reset = 1; 349 291 switch (shadow->rslt_reset) { 350 292 case RSLT_RESET_WAITING: 351 - shadow->rslt_reset = ring_rsp->rslt; 293 + if (ring_rsp->rslt == XEN_VSCSIIF_RSLT_RESET_SUCCESS) 294 + shadow->rslt_reset = SUCCESS; 295 + else 296 + shadow->rslt_reset = FAILED; 352 297 break; 353 298 case RSLT_RESET_ERR: 354 299 kick = _scsifront_put_rqid(info, id); ··· 361 300 scsifront_wake_up(info); 362 301 return; 363 302 default: 364 - shost_printk(KERN_ERR, info->host, KBUILD_MODNAME 365 - "bad reset state %d, possibly leaking %u\n", 366 - shadow->rslt_reset, id); 303 + scsifront_set_error(info, "bad reset state"); 367 304 break; 368 305 } 369 306 spin_unlock_irqrestore(&info->shadow_lock, flags); ··· 372 313 static void scsifront_do_response(struct vscsifrnt_info *info, 373 314 struct vscsiif_response *ring_rsp) 374 315 { 375 - if (WARN(ring_rsp->rqid >= VSCSIIF_MAX_REQS || 376 - test_bit(ring_rsp->rqid, info->shadow_free_bitmap), 377 - "illegal rqid %u returned by backend!\n", ring_rsp->rqid)) 378 - return; 316 + struct vscsifrnt_shadow *shadow; 379 317 380 - if (info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB) 318 + if (ring_rsp->rqid >= VSCSIIF_MAX_REQS || 319 + !info->shadow[ring_rsp->rqid]->inflight) { 320 + scsifront_set_error(info, "illegal rqid returned by backend!"); 321 + return; 322 + } 323 + shadow = info->shadow[ring_rsp->rqid]; 324 + shadow->inflight = false; 325 + 326 + if (shadow->act == VSCSIIF_ACT_SCSI_CDB) 381 327 scsifront_cdb_cmd_done(info, ring_rsp); 382 328 else 383 329 
scsifront_sync_cmd_done(info, ring_rsp); 384 330 } 385 331 386 - static int scsifront_ring_drain(struct vscsifrnt_info *info) 332 + static int scsifront_ring_drain(struct vscsifrnt_info *info, 333 + unsigned int *eoiflag) 387 334 { 388 - struct vscsiif_response *ring_rsp; 335 + struct vscsiif_response ring_rsp; 389 336 RING_IDX i, rp; 390 337 int more_to_do = 0; 391 338 392 - rp = info->ring.sring->rsp_prod; 393 - rmb(); /* ordering required respective to dom0 */ 339 + rp = READ_ONCE(info->ring.sring->rsp_prod); 340 + virt_rmb(); /* ordering required respective to backend */ 341 + if (RING_RESPONSE_PROD_OVERFLOW(&info->ring, rp)) { 342 + scsifront_set_error(info, "illegal number of responses"); 343 + return 0; 344 + } 394 345 for (i = info->ring.rsp_cons; i != rp; i++) { 395 - ring_rsp = RING_GET_RESPONSE(&info->ring, i); 396 - scsifront_do_response(info, ring_rsp); 346 + RING_COPY_RESPONSE(&info->ring, i, &ring_rsp); 347 + scsifront_do_response(info, &ring_rsp); 348 + if (info->host_active == STATE_ERROR) 349 + return 0; 350 + *eoiflag &= ~XEN_EOI_FLAG_SPURIOUS; 397 351 } 398 352 399 353 info->ring.rsp_cons = i; ··· 419 347 return more_to_do; 420 348 } 421 349 422 - static int scsifront_cmd_done(struct vscsifrnt_info *info) 350 + static int scsifront_cmd_done(struct vscsifrnt_info *info, 351 + unsigned int *eoiflag) 423 352 { 424 353 int more_to_do; 425 354 unsigned long flags; 426 355 427 356 spin_lock_irqsave(info->host->host_lock, flags); 428 357 429 - more_to_do = scsifront_ring_drain(info); 358 + more_to_do = scsifront_ring_drain(info, eoiflag); 430 359 431 360 info->wait_ring_available = 0; 432 361 ··· 441 368 static irqreturn_t scsifront_irq_fn(int irq, void *dev_id) 442 369 { 443 370 struct vscsifrnt_info *info = dev_id; 371 + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS; 444 372 445 - while (scsifront_cmd_done(info)) 373 + if (info->host_active == STATE_ERROR) { 374 + xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); 375 + return IRQ_HANDLED; 376 + } 377 + 378 
+ while (scsifront_cmd_done(info, &eoiflag)) 446 379 /* Yield point for this unbounded loop. */ 447 380 cond_resched(); 381 + 382 + xen_irq_lateeoi(irq, eoiflag); 448 383 449 384 return IRQ_HANDLED; 450 385 } 451 386 452 387 static void scsifront_finish_all(struct vscsifrnt_info *info) 453 388 { 454 - unsigned i; 389 + unsigned int i, dummy; 455 390 struct vscsiif_response resp; 456 391 457 - scsifront_ring_drain(info); 392 + scsifront_ring_drain(info, &dummy); 458 393 459 394 for (i = 0; i < VSCSIIF_MAX_REQS; i++) { 460 395 if (test_bit(i, info->shadow_free_bitmap)) ··· 619 538 unsigned long flags; 620 539 int err; 621 540 541 + if (info->host_active == STATE_ERROR) 542 + return SCSI_MLQUEUE_HOST_BUSY; 543 + 622 544 sc->result = 0; 623 545 624 546 shadow->sc = sc; ··· 673 589 struct vscsifrnt_info *info = shost_priv(host); 674 590 struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc); 675 591 int err = 0; 592 + 593 + if (info->host_active == STATE_ERROR) 594 + return FAILED; 676 595 677 596 shadow = kzalloc(sizeof(*shadow), GFP_NOIO); 678 597 if (!shadow) ··· 748 661 struct vscsifrnt_info *info = shost_priv(sdev->host); 749 662 int err; 750 663 664 + if (info->host_active == STATE_ERROR) 665 + return -EIO; 666 + 751 667 if (info && current == info->curr) { 752 668 err = xenbus_printf(XBT_NIL, info->dev->nodename, 753 669 info->dev_state_path, "%d", XenbusStateConnected); ··· 798 708 { 799 709 struct xenbus_device *dev = info->dev; 800 710 struct vscsiif_sring *sring; 801 - grant_ref_t gref; 802 - int err = -ENOMEM; 711 + int err; 803 712 804 713 /***** Frontend to Backend ring start *****/ 805 - sring = (struct vscsiif_sring *)__get_free_page(GFP_KERNEL); 806 - if (!sring) { 807 - xenbus_dev_fatal(dev, err, 808 - "fail to allocate shared ring (Front to Back)"); 714 + err = xenbus_setup_ring(dev, GFP_KERNEL, (void **)&sring, 1, 715 + &info->ring_ref); 716 + if (err) 809 717 return err; 810 - } 811 - SHARED_RING_INIT(sring); 812 - FRONT_RING_INIT(&info->ring, 
sring, PAGE_SIZE); 813 718 814 - err = xenbus_grant_ring(dev, sring, 1, &gref); 815 - if (err < 0) { 816 - free_page((unsigned long)sring); 817 - xenbus_dev_fatal(dev, err, 818 - "fail to grant shared ring (Front to Back)"); 819 - return err; 820 - } 821 - info->ring_ref = gref; 719 + XEN_FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); 822 720 823 721 err = xenbus_alloc_evtchn(dev, &info->evtchn); 824 722 if (err) { ··· 814 736 goto free_gnttab; 815 737 } 816 738 817 - err = bind_evtchn_to_irq(info->evtchn); 739 + err = bind_evtchn_to_irq_lateeoi(info->evtchn); 818 740 if (err <= 0) { 819 741 xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq"); 820 742 goto free_gnttab; ··· 835 757 free_irq: 836 758 unbind_from_irqhandler(info->irq, info); 837 759 free_gnttab: 838 - gnttab_end_foreign_access(info->ring_ref, 839 - (unsigned long)info->ring.sring); 760 + xenbus_teardown_ring((void **)&sring, 1, &info->ring_ref); 840 761 841 762 return err; 842 763 } ··· 843 766 static void scsifront_free_ring(struct vscsifrnt_info *info) 844 767 { 845 768 unbind_from_irqhandler(info->irq, info); 846 - gnttab_end_foreign_access(info->ring_ref, 847 - (unsigned long)info->ring.sring); 769 + xenbus_teardown_ring((void **)&info->ring.sring, 1, &info->ring_ref); 848 770 } 849 771 850 772 static int scsifront_init_ring(struct vscsifrnt_info *info) ··· 942 866 goto free_sring; 943 867 } 944 868 info->host = host; 945 - info->host_active = 1; 869 + info->host_active = STATE_ACTIVE; 946 870 947 871 xenbus_switch_state(dev, XenbusStateInitialised); 948 872 ··· 1010 934 pr_debug("%s: %s removed\n", __func__, dev->nodename); 1011 935 1012 936 mutex_lock(&scsifront_mutex); 1013 - if (info->host_active) { 937 + if (info->host_active != STATE_INACTIVE) { 1014 938 /* Scsi_host not yet removed */ 1015 939 scsi_remove_host(info->host); 1016 - info->host_active = 0; 940 + info->host_active = STATE_INACTIVE; 1017 941 } 1018 942 mutex_unlock(&scsifront_mutex); 1019 943 ··· 1037 961 */ 1038 962 1039 963 
mutex_lock(&scsifront_mutex); 1040 - if (info->host_active) { 964 + if (info->host_active != STATE_INACTIVE) { 1041 965 scsi_remove_host(host); 1042 - info->host_active = 0; 966 + info->host_active = STATE_INACTIVE; 1043 967 } 1044 968 mutex_unlock(&scsifront_mutex); 1045 969 ··· 1056 980 unsigned int device_state; 1057 981 unsigned int hst, chn, tgt, lun; 1058 982 struct scsi_device *sdev; 983 + 984 + if (info->host_active == STATE_ERROR) 985 + return; 1059 986 1060 987 dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n); 1061 988 if (IS_ERR(dir))
+16 -49
drivers/usb/host/xen-hcd.c
··· 87 87 bool error; 88 88 }; 89 89 90 - #define GRANT_INVALID_REF 0 91 - 92 90 #define XENHCD_RING_JIFFIES (HZ/200) 93 91 #define XENHCD_SCAN_JIFFIES 1 94 92 ··· 1098 1100 unbind_from_irqhandler(info->irq, info); 1099 1101 info->irq = 0; 1100 1102 1101 - if (info->urb_ring_ref != GRANT_INVALID_REF) { 1102 - gnttab_end_foreign_access(info->urb_ring_ref, 1103 - (unsigned long)info->urb_ring.sring); 1104 - info->urb_ring_ref = GRANT_INVALID_REF; 1105 - } 1106 - info->urb_ring.sring = NULL; 1107 - 1108 - if (info->conn_ring_ref != GRANT_INVALID_REF) { 1109 - gnttab_end_foreign_access(info->conn_ring_ref, 1110 - (unsigned long)info->conn_ring.sring); 1111 - info->conn_ring_ref = GRANT_INVALID_REF; 1112 - } 1113 - info->conn_ring.sring = NULL; 1103 + xenbus_teardown_ring((void **)&info->urb_ring.sring, 1, 1104 + &info->urb_ring_ref); 1105 + xenbus_teardown_ring((void **)&info->conn_ring.sring, 1, 1106 + &info->conn_ring_ref); 1114 1107 } 1115 1108 1116 1109 static int xenhcd_setup_rings(struct xenbus_device *dev, ··· 1109 1120 { 1110 1121 struct xenusb_urb_sring *urb_sring; 1111 1122 struct xenusb_conn_sring *conn_sring; 1112 - grant_ref_t gref; 1113 1123 int err; 1114 1124 1115 - info->urb_ring_ref = GRANT_INVALID_REF; 1116 - info->conn_ring_ref = GRANT_INVALID_REF; 1117 - 1118 - urb_sring = (struct xenusb_urb_sring *)get_zeroed_page( 1119 - GFP_NOIO | __GFP_HIGH); 1120 - if (!urb_sring) { 1121 - xenbus_dev_fatal(dev, -ENOMEM, "allocating urb ring"); 1122 - return -ENOMEM; 1125 + info->conn_ring_ref = INVALID_GRANT_REF; 1126 + err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, 1127 + (void **)&urb_sring, 1, &info->urb_ring_ref); 1128 + if (err) { 1129 + xenbus_dev_fatal(dev, err, "allocating urb ring"); 1130 + return err; 1123 1131 } 1124 - SHARED_RING_INIT(urb_sring); 1125 - FRONT_RING_INIT(&info->urb_ring, urb_sring, PAGE_SIZE); 1132 + XEN_FRONT_RING_INIT(&info->urb_ring, urb_sring, PAGE_SIZE); 1126 1133 1127 - err = xenbus_grant_ring(dev, urb_sring, 1, &gref); 
1128 - if (err < 0) { 1129 - free_page((unsigned long)urb_sring); 1130 - info->urb_ring.sring = NULL; 1134 + err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, 1135 + (void **)&conn_sring, 1, &info->conn_ring_ref); 1136 + if (err) { 1137 + xenbus_dev_fatal(dev, err, "allocating conn ring"); 1131 1138 goto fail; 1132 1139 } 1133 - info->urb_ring_ref = gref; 1134 - 1135 - conn_sring = (struct xenusb_conn_sring *)get_zeroed_page( 1136 - GFP_NOIO | __GFP_HIGH); 1137 - if (!conn_sring) { 1138 - xenbus_dev_fatal(dev, -ENOMEM, "allocating conn ring"); 1139 - err = -ENOMEM; 1140 - goto fail; 1141 - } 1142 - SHARED_RING_INIT(conn_sring); 1143 - FRONT_RING_INIT(&info->conn_ring, conn_sring, PAGE_SIZE); 1144 - 1145 - err = xenbus_grant_ring(dev, conn_sring, 1, &gref); 1146 - if (err < 0) { 1147 - free_page((unsigned long)conn_sring); 1148 - info->conn_ring.sring = NULL; 1149 - goto fail; 1150 - } 1151 - info->conn_ring_ref = gref; 1140 + XEN_FRONT_RING_INIT(&info->conn_ring, conn_sring, PAGE_SIZE); 1152 1141 1153 1142 err = xenbus_alloc_evtchn(dev, &info->evtchn); 1154 1143 if (err) {
+2 -11
drivers/xen/gntdev-dmabuf.c
··· 24 24 25 25 MODULE_IMPORT_NS(DMA_BUF); 26 26 27 - #ifndef GRANT_INVALID_REF 28 - /* 29 - * Note on usage of grant reference 0 as invalid grant reference: 30 - * grant reference 0 is valid, but never exposed to a driver, 31 - * because of the fact it is already in use/reserved by the PV console. 32 - */ 33 - #define GRANT_INVALID_REF 0 34 - #endif 35 - 36 27 struct gntdev_dmabuf { 37 28 struct gntdev_dmabuf_priv *priv; 38 29 struct dma_buf *dmabuf; ··· 523 532 int i; 524 533 525 534 for (i = 0; i < count; i++) 526 - if (refs[i] != GRANT_INVALID_REF) 535 + if (refs[i] != INVALID_GRANT_REF) 527 536 gnttab_end_foreign_access(refs[i], 0UL); 528 537 } 529 538 ··· 558 567 gntdev_dmabuf->nr_pages = count; 559 568 560 569 for (i = 0; i < count; i++) 561 - gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF; 570 + gntdev_dmabuf->u.imp.refs[i] = INVALID_GRANT_REF; 562 571 563 572 return gntdev_dmabuf; 564 573
+7 -5
drivers/xen/grant-table.c
··· 66 66 67 67 #include <asm/sync_bitops.h> 68 68 69 - /* External tools reserve first few grant table entries. */ 70 - #define NR_RESERVED_ENTRIES 8 71 69 #define GNTTAB_LIST_END 0xffffffff 72 70 73 71 static grant_ref_t **gnttab_list; ··· 207 209 static void put_free_entry(grant_ref_t ref) 208 210 { 209 211 unsigned long flags; 212 + 213 + if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES)) 214 + return; 215 + 210 216 spin_lock_irqsave(&gnttab_list_lock, flags); 211 217 gnttab_entry(ref) = gnttab_free_head; 212 218 gnttab_free_head = ref; ··· 1467 1465 nr_init_grefs = nr_grant_frames * 1468 1466 gnttab_interface->grefs_per_grant_frame; 1469 1467 1470 - for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) 1468 + for (i = GNTTAB_NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) 1471 1469 gnttab_entry(i) = i + 1; 1472 1470 1473 1471 gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; 1474 - gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES; 1475 - gnttab_free_head = NR_RESERVED_ENTRIES; 1472 + gnttab_free_count = nr_init_grefs - GNTTAB_NR_RESERVED_ENTRIES; 1473 + gnttab_free_head = GNTTAB_NR_RESERVED_ENTRIES; 1476 1474 1477 1475 printk("Grant table initialized\n"); 1478 1476 return 0;
+5 -13
drivers/xen/xen-front-pgdir-shbuf.c
··· 21 21 22 22 #include <xen/xen-front-pgdir-shbuf.h> 23 23 24 - #ifndef GRANT_INVALID_REF 25 - /* 26 - * FIXME: usage of grant reference 0 as invalid grant reference: 27 - * grant reference 0 is valid, but never exposed to a PV driver, 28 - * because of the fact it is already in use/reserved by the PV console. 29 - */ 30 - #define GRANT_INVALID_REF 0 31 - #endif 32 - 33 24 /** 34 25 * This structure represents the structure of a shared page 35 26 * that contains grant references to the pages of the shared ··· 29 38 */ 30 39 struct xen_page_directory { 31 40 grant_ref_t gref_dir_next_page; 41 + #define XEN_GREF_LIST_END 0 32 42 grant_ref_t gref[1]; /* Variable length */ 33 43 }; 34 44 ··· 75 83 xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf) 76 84 { 77 85 if (!buf->grefs) 78 - return GRANT_INVALID_REF; 86 + return INVALID_GRANT_REF; 79 87 80 88 return buf->grefs[0]; 81 89 } ··· 134 142 int i; 135 143 136 144 for (i = 0; i < buf->num_grefs; i++) 137 - if (buf->grefs[i] != GRANT_INVALID_REF) 145 + if (buf->grefs[i] != INVALID_GRANT_REF) 138 146 gnttab_end_foreign_access(buf->grefs[i], 0UL); 139 147 } 140 148 kfree(buf->grefs); ··· 347 355 } 348 356 /* Last page must say there is no more pages. */ 349 357 page_dir = (struct xen_page_directory *)ptr; 350 - page_dir->gref_dir_next_page = GRANT_INVALID_REF; 358 + page_dir->gref_dir_next_page = XEN_GREF_LIST_END; 351 359 } 352 360 353 361 /** ··· 376 384 377 385 if (grefs_left <= XEN_NUM_GREFS_PER_PAGE) { 378 386 to_copy = grefs_left; 379 - page_dir->gref_dir_next_page = GRANT_INVALID_REF; 387 + page_dir->gref_dir_next_page = XEN_GREF_LIST_END; 380 388 } else { 381 389 to_copy = XEN_NUM_GREFS_PER_PAGE; 382 390 page_dir->gref_dir_next_page = buf->grefs[i + 1];
+79 -3
drivers/xen/xen-scsiback.c
··· 280 280 kfree(entry); 281 281 } 282 282 283 + static int32_t scsiback_result(int32_t result) 284 + { 285 + int32_t host_status; 286 + 287 + switch (XEN_VSCSIIF_RSLT_HOST(result)) { 288 + case DID_OK: 289 + host_status = XEN_VSCSIIF_RSLT_HOST_OK; 290 + break; 291 + case DID_NO_CONNECT: 292 + host_status = XEN_VSCSIIF_RSLT_HOST_NO_CONNECT; 293 + break; 294 + case DID_BUS_BUSY: 295 + host_status = XEN_VSCSIIF_RSLT_HOST_BUS_BUSY; 296 + break; 297 + case DID_TIME_OUT: 298 + host_status = XEN_VSCSIIF_RSLT_HOST_TIME_OUT; 299 + break; 300 + case DID_BAD_TARGET: 301 + host_status = XEN_VSCSIIF_RSLT_HOST_BAD_TARGET; 302 + break; 303 + case DID_ABORT: 304 + host_status = XEN_VSCSIIF_RSLT_HOST_ABORT; 305 + break; 306 + case DID_PARITY: 307 + host_status = XEN_VSCSIIF_RSLT_HOST_PARITY; 308 + break; 309 + case DID_ERROR: 310 + host_status = XEN_VSCSIIF_RSLT_HOST_ERROR; 311 + break; 312 + case DID_RESET: 313 + host_status = XEN_VSCSIIF_RSLT_HOST_RESET; 314 + break; 315 + case DID_BAD_INTR: 316 + host_status = XEN_VSCSIIF_RSLT_HOST_BAD_INTR; 317 + break; 318 + case DID_PASSTHROUGH: 319 + host_status = XEN_VSCSIIF_RSLT_HOST_PASSTHROUGH; 320 + break; 321 + case DID_SOFT_ERROR: 322 + host_status = XEN_VSCSIIF_RSLT_HOST_SOFT_ERROR; 323 + break; 324 + case DID_IMM_RETRY: 325 + host_status = XEN_VSCSIIF_RSLT_HOST_IMM_RETRY; 326 + break; 327 + case DID_REQUEUE: 328 + host_status = XEN_VSCSIIF_RSLT_HOST_REQUEUE; 329 + break; 330 + case DID_TRANSPORT_DISRUPTED: 331 + host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_DISRUPTED; 332 + break; 333 + case DID_TRANSPORT_FAILFAST: 334 + host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST; 335 + break; 336 + case DID_TARGET_FAILURE: 337 + host_status = XEN_VSCSIIF_RSLT_HOST_TARGET_FAILURE; 338 + break; 339 + case DID_NEXUS_FAILURE: 340 + host_status = XEN_VSCSIIF_RSLT_HOST_NEXUS_FAILURE; 341 + break; 342 + case DID_ALLOC_FAILURE: 343 + host_status = XEN_VSCSIIF_RSLT_HOST_ALLOC_FAILURE; 344 + break; 345 + case DID_MEDIUM_ERROR: 346 + host_status 
= XEN_VSCSIIF_RSLT_HOST_MEDIUM_ERROR; 347 + break; 348 + case DID_TRANSPORT_MARGINAL: 349 + host_status = XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL; 350 + break; 351 + default: 352 + host_status = XEN_VSCSIIF_RSLT_HOST_ERROR; 353 + break; 354 + } 355 + 356 + return (host_status << 16) | (result & 0x00ffff); 357 + } 358 + 283 359 static void scsiback_send_response(struct vscsibk_info *info, 284 360 char *sense_buffer, int32_t result, uint32_t resid, 285 361 uint16_t rqid) ··· 371 295 ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt); 372 296 info->ring.rsp_prod_pvt++; 373 297 374 - ring_res->rslt = result; 298 + ring_res->rslt = scsiback_result(result); 375 299 ring_res->rqid = rqid; 376 300 377 301 if (sense_buffer != NULL && ··· 631 555 struct scsiback_nexus *nexus = tpg->tpg_nexus; 632 556 struct se_cmd *se_cmd = &pending_req->se_cmd; 633 557 u64 unpacked_lun = pending_req->v2p->lun; 634 - int rc, err = FAILED; 558 + int rc, err = XEN_VSCSIIF_RSLT_RESET_FAILED; 635 559 636 560 init_completion(&pending_req->tmr_done); 637 561 ··· 645 569 wait_for_completion(&pending_req->tmr_done); 646 570 647 571 err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ? 648 - SUCCESS : FAILED; 572 + XEN_VSCSIIF_RSLT_RESET_SUCCESS : XEN_VSCSIIF_RSLT_RESET_FAILED; 649 573 650 574 scsiback_do_resp_with_sense(NULL, err, 0, pending_req); 651 575 transport_generic_free_cmd(&pending_req->se_cmd, 0);
+63 -21
drivers/xen/xenbus/xenbus_client.c
··· 363 363 __xenbus_switch_state(dev, XenbusStateClosing, 1); 364 364 } 365 365 366 - /** 367 - * xenbus_grant_ring 366 + /* 367 + * xenbus_setup_ring 368 368 * @dev: xenbus device 369 - * @vaddr: starting virtual address of the ring 369 + * @vaddr: pointer to starting virtual address of the ring 370 370 * @nr_pages: number of pages to be granted 371 371 * @grefs: grant reference array to be filled in 372 372 * 373 - * Grant access to the given @vaddr to the peer of the given device. 374 - * Then fill in @grefs with grant references. Return 0 on success, or 375 - * -errno on error. On error, the device will switch to 376 - * XenbusStateClosing, and the error will be saved in the store. 373 + * Allocate physically contiguous pages for a shared ring buffer and grant it 374 + * to the peer of the given device. The ring buffer is initially filled with 375 + * zeroes. The virtual address of the ring is stored at @vaddr and the 376 + * grant references are stored in the @grefs array. In case of error @vaddr 377 + * will be set to NULL and @grefs will be filled with INVALID_GRANT_REF. 
377 378 */ 378 - int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, 379 + int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr, 379 380 unsigned int nr_pages, grant_ref_t *grefs) 380 381 { 381 - int err; 382 - unsigned int i; 382 + unsigned long ring_size = nr_pages * XEN_PAGE_SIZE; 383 383 grant_ref_t gref_head; 384 + unsigned int i; 385 + int ret; 384 386 385 - err = gnttab_alloc_grant_references(nr_pages, &gref_head); 386 - if (err) { 387 - xenbus_dev_fatal(dev, err, "granting access to ring page"); 388 - return err; 387 + *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO); 388 + if (!*vaddr) { 389 + ret = -ENOMEM; 390 + goto err; 391 + } 392 + 393 + ret = gnttab_alloc_grant_references(nr_pages, &gref_head); 394 + if (ret) { 395 + xenbus_dev_fatal(dev, ret, "granting access to %u ring pages", 396 + nr_pages); 397 + goto err; 389 398 } 390 399 391 400 for (i = 0; i < nr_pages; i++) { 392 401 unsigned long gfn; 393 402 394 - if (is_vmalloc_addr(vaddr)) 395 - gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr)); 403 + if (is_vmalloc_addr(*vaddr)) 404 + gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr[i])); 396 405 else 397 - gfn = virt_to_gfn(vaddr); 406 + gfn = virt_to_gfn(vaddr[i]); 398 407 399 408 grefs[i] = gnttab_claim_grant_reference(&gref_head); 400 409 gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id, 401 410 gfn, 0); 402 - 403 - vaddr = vaddr + XEN_PAGE_SIZE; 404 411 } 405 412 406 413 return 0; 407 - } 408 - EXPORT_SYMBOL_GPL(xenbus_grant_ring); 409 414 415 + err: 416 + if (*vaddr) 417 + free_pages_exact(*vaddr, ring_size); 418 + for (i = 0; i < nr_pages; i++) 419 + grefs[i] = INVALID_GRANT_REF; 420 + *vaddr = NULL; 421 + 422 + return ret; 423 + } 424 + EXPORT_SYMBOL_GPL(xenbus_setup_ring); 425 + 426 + /* 427 + * xenbus_teardown_ring 428 + * @vaddr: starting virtual address of the ring 429 + * @nr_pages: number of pages 430 + * @grefs: grant reference array 431 + * 432 + * Remove grants for the shared ring buffer and free the 
associated memory. 433 + * On return the grant reference array is filled with INVALID_GRANT_REF. 434 + */ 435 + void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages, 436 + grant_ref_t *grefs) 437 + { 438 + unsigned int i; 439 + 440 + for (i = 0; i < nr_pages; i++) { 441 + if (grefs[i] != INVALID_GRANT_REF) { 442 + gnttab_end_foreign_access(grefs[i], 0); 443 + grefs[i] = INVALID_GRANT_REF; 444 + } 445 + } 446 + 447 + if (*vaddr) 448 + free_pages_exact(*vaddr, nr_pages * XEN_PAGE_SIZE); 449 + *vaddr = NULL; 450 + } 451 + EXPORT_SYMBOL_GPL(xenbus_teardown_ring); 410 452 411 453 /** 412 454 * Allocate an event channel for the given xenbus_device, assigning the newly
+71 -20
drivers/xen/xenbus/xenbus_probe.c
··· 65 65 #include "xenbus.h" 66 66 67 67 68 + static int xs_init_irq; 68 69 int xen_store_evtchn; 69 70 EXPORT_SYMBOL_GPL(xen_store_evtchn); 70 71 ··· 751 750 { 752 751 xenstored_ready = 1; 753 752 753 + if (!xen_store_interface) { 754 + xen_store_interface = xen_remap(xen_store_gfn << XEN_PAGE_SHIFT, 755 + XEN_PAGE_SIZE); 756 + /* 757 + * Now it is safe to free the IRQ used for xenstore late 758 + * initialization. No need to unbind: it is about to be 759 + * bound again from xb_init_comms. Note that calling 760 + * unbind_from_irqhandler now would result in xen_evtchn_close() 761 + * being called and the event channel not being enabled again 762 + * afterwards, resulting in missed event notifications. 763 + */ 764 + free_irq(xs_init_irq, &xb_waitq); 765 + } 766 + 754 767 /* 755 768 * In the HVM case, xenbus_init() deferred its call to 756 769 * xs_init() in case callbacks were not operational yet. ··· 813 798 { 814 799 /* 815 800 * Probe XenBus here in the XS_PV case, and also XS_HVM unless we 816 - * need to wait for the platform PCI device to come up. 801 + * need to wait for the platform PCI device to come up or 802 + * xen_store_interface is not ready. 817 803 */ 818 804 if (xen_store_domain_type == XS_PV || 819 805 (xen_store_domain_type == XS_HVM && 820 - !xs_hvm_defer_init_for_callback())) 806 + !xs_hvm_defer_init_for_callback() && 807 + xen_store_interface != NULL)) 821 808 xenbus_probe(); 822 809 823 810 /* 824 - * For XS_LOCAL, spawn a thread which will wait for xenstored 825 - * or a xenstore-stubdom to be started, then probe. It will be 826 - * triggered when communication starts happening, by waiting 827 - * on xb_waitq. 811 + * For XS_LOCAL or when xen_store_interface is not ready, spawn a 812 + * thread which will wait for xenstored or a xenstore-stubdom to be 813 + * started, then probe. It will be triggered when communication 814 + * starts happening, by waiting on xb_waitq. 
828 815 */ 829 - if (xen_store_domain_type == XS_LOCAL) { 816 + if (xen_store_domain_type == XS_LOCAL || xen_store_interface == NULL) { 830 817 struct task_struct *probe_task; 831 818 832 819 probe_task = kthread_run(xenbus_probe_thread, NULL, ··· 924 907 .notifier_call = xenbus_resume_cb, 925 908 }; 926 909 910 + static irqreturn_t xenbus_late_init(int irq, void *unused) 911 + { 912 + int err; 913 + uint64_t v = 0; 914 + 915 + err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); 916 + if (err || !v || !~v) 917 + return IRQ_HANDLED; 918 + xen_store_gfn = (unsigned long)v; 919 + 920 + wake_up(&xb_waitq); 921 + return IRQ_HANDLED; 922 + } 923 + 927 924 static int __init xenbus_init(void) 928 925 { 929 926 int err; 930 927 uint64_t v = 0; 928 + bool wait = false; 931 929 xen_store_domain_type = XS_UNKNOWN; 932 930 933 931 if (!xen_domain()) ··· 989 957 * been properly initialized. Instead of attempting to map a 990 958 * wrong guest physical address return error. 991 959 * 992 - * Also recognize all bits set as an invalid value. 960 + * Also recognize all bits set as an invalid/uninitialized value. 993 961 */ 994 - if (!v || !~v) { 962 + if (!v) { 995 963 err = -ENOENT; 996 964 goto out_error; 997 965 } 998 - /* Avoid truncation on 32-bit. */ 966 + if (v == ~0ULL) { 967 + wait = true; 968 + } else { 969 + /* Avoid truncation on 32-bit. 
*/ 999 970 #if BITS_PER_LONG == 32 1000 - if (v > ULONG_MAX) { 1001 - pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n", 1002 - __func__, v); 1003 - err = -EINVAL; 1004 - goto out_error; 1005 - } 971 + if (v > ULONG_MAX) { 972 + pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n", 973 + __func__, v); 974 + err = -EINVAL; 975 + goto out_error; 976 + } 1006 977 #endif 1007 - xen_store_gfn = (unsigned long)v; 1008 - xen_store_interface = 1009 - xen_remap(xen_store_gfn << XEN_PAGE_SHIFT, 1010 - XEN_PAGE_SIZE); 978 + xen_store_gfn = (unsigned long)v; 979 + xen_store_interface = 980 + xen_remap(xen_store_gfn << XEN_PAGE_SHIFT, 981 + XEN_PAGE_SIZE); 982 + if (xen_store_interface->connection != XENSTORE_CONNECTED) 983 + wait = true; 984 + } 985 + if (wait) { 986 + err = bind_evtchn_to_irqhandler(xen_store_evtchn, 987 + xenbus_late_init, 988 + 0, "xenstore_late_init", 989 + &xb_waitq); 990 + if (err < 0) { 991 + pr_err("xenstore_late_init couldn't bind irq err=%d\n", 992 + err); 993 + return err; 994 + } 995 + 996 + xs_init_irq = err; 997 + } 1011 998 break; 1012 999 default: 1013 1000 pr_warn("Xenstore state unknown\n");
-2
include/xen/grant_table.h
··· 57 57 #define INVALID_GRANT_REF ((grant_ref_t)-1) 58 58 #define INVALID_GRANT_HANDLE ((grant_handle_t)-1) 59 59 60 - #define GNTTAB_RESERVED_XENSTORE 1 61 - 62 60 /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */ 63 61 #define NR_GRANT_FRAMES 4 64 62
+100 -61
include/xen/interface/grant_table.h
··· 19 19 20 20 /* Some rough guidelines on accessing and updating grant-table entries 21 21 * in a concurrency-safe manner. For more information, Linux contains a 22 - * reference implementation for guest OSes (arch/xen/kernel/grant_table.c). 22 + * reference implementation for guest OSes (drivers/xen/grant_table.c, see 23 + * http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob;f=drivers/xen/grant-table.c;hb=HEAD 23 24 * 24 25 * NB. WMB is a no-op on current-generation x86 processors. However, a 25 26 * compiler barrier will still be required. ··· 81 80 */ 82 81 83 82 /* 84 - * Version 1 of the grant table entry structure is maintained purely 85 - * for backwards compatibility. New guests should use version 2. 83 + * Version 1 of the grant table entry structure is maintained largely for 84 + * backwards compatibility. New guests are recommended to support using 85 + * version 2 to overcome version 1 limitations, but to default to version 1. 86 86 */ 87 87 struct grant_entry_v1 { 88 88 /* GTF_xxx: various type and flag information. [XEN,GST] */ ··· 91 89 /* The domain being granted foreign privileges. [GST] */ 92 90 domid_t domid; 93 91 /* 94 - * GTF_permit_access: Frame that @domid is allowed to map and access. [GST] 95 - * GTF_accept_transfer: Frame whose ownership transferred by @domid. [XEN] 92 + * GTF_permit_access: GFN that @domid is allowed to map and access. [GST] 93 + * GTF_accept_transfer: GFN that @domid is allowed to transfer into. [GST] 94 + * GTF_transfer_completed: MFN whose ownership transferred by @domid 95 + * (non-translated guests only). [XEN] 96 96 */ 97 97 uint32_t frame; 98 98 }; 99 + 100 + /* The first few grant table entries will be preserved across grant table 101 + * version changes and may be pre-populated at domain creation by tools. 102 + */ 103 + #define GNTTAB_NR_RESERVED_ENTRIES 8 104 + #define GNTTAB_RESERVED_CONSOLE 0 105 + #define GNTTAB_RESERVED_XENSTORE 1 99 106 100 107 /* 101 108 * Type of grant entry. 
··· 122 111 #define GTF_type_mask (3U<<0) 123 112 124 113 /* 125 - * Subflags for GTF_permit_access. 114 + * Subflags for GTF_permit_access and GTF_transitive. 126 115 * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST] 127 116 * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN] 128 117 * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN] 118 + * Further subflags for GTF_permit_access only. 119 + * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags to be used for 120 + * mappings of the grant [GST] 129 121 * GTF_sub_page: Grant access to only a subrange of the page. @domid 130 122 * will only be allowed to copy from the grant, and not 131 123 * map it. [GST] ··· 139 125 #define GTF_reading (1U<<_GTF_reading) 140 126 #define _GTF_writing (4) 141 127 #define GTF_writing (1U<<_GTF_writing) 128 + #define _GTF_PWT (5) 129 + #define GTF_PWT (1U<<_GTF_PWT) 130 + #define _GTF_PCD (6) 131 + #define GTF_PCD (1U<<_GTF_PCD) 132 + #define _GTF_PAT (7) 133 + #define GTF_PAT (1U<<_GTF_PAT) 142 134 #define _GTF_sub_page (8) 143 135 #define GTF_sub_page (1U<<_GTF_sub_page) 144 136 ··· 184 164 }; 185 165 186 166 /* 187 - * Version 2 of the grant entry structure, here is a union because three 188 - * different types are suppotted: full_page, sub_page and transitive. 167 + * Version 2 of the grant entry structure. 189 168 */ 190 169 union grant_entry_v2 { 191 170 struct grant_entry_header hdr; ··· 199 180 * field of the same name in the V1 entry structure. 200 181 */ 201 182 struct { 202 - struct grant_entry_header hdr; 203 - uint32_t pad0; 204 - uint64_t frame; 183 + struct grant_entry_header hdr; 184 + uint32_t pad0; 185 + uint64_t frame; 205 186 } full_page; 206 187 207 188 /* ··· 210 191 * in frame @frame. 
211 192 */ 212 193 struct { 213 - struct grant_entry_header hdr; 214 - uint16_t page_off; 215 - uint16_t length; 216 - uint64_t frame; 194 + struct grant_entry_header hdr; 195 + uint16_t page_off; 196 + uint16_t length; 197 + uint64_t frame; 217 198 } sub_page; 218 199 219 200 /* ··· 221 202 * grant @gref in domain @trans_domid, as if it was the local 222 203 * domain. Obviously, the transitive access must be compatible 223 204 * with the original grant. 205 + * 206 + * The current version of Xen does not allow transitive grants 207 + * to be mapped. 224 208 */ 225 209 struct { 226 - struct grant_entry_header hdr; 227 - domid_t trans_domid; 228 - uint16_t pad0; 229 - grant_ref_t gref; 210 + struct grant_entry_header hdr; 211 + domid_t trans_domid; 212 + uint16_t pad0; 213 + grant_ref_t gref; 230 214 } transitive; 231 215 232 216 uint32_t __spacer[4]; /* Pad to a power of two */ ··· 241 219 * GRANT TABLE QUERIES AND USES 242 220 */ 243 221 222 + #define GNTTABOP_map_grant_ref 0 223 + #define GNTTABOP_unmap_grant_ref 1 224 + #define GNTTABOP_setup_table 2 225 + #define GNTTABOP_dump_table 3 226 + #define GNTTABOP_transfer 4 227 + #define GNTTABOP_copy 5 228 + #define GNTTABOP_query_size 6 229 + #define GNTTABOP_unmap_and_replace 7 230 + #define GNTTABOP_set_version 8 231 + #define GNTTABOP_get_status_frames 9 232 + #define GNTTABOP_get_version 10 233 + #define GNTTABOP_swap_grant_ref 11 234 + #define GNTTABOP_cache_flush 12 235 + /* ` } */ 236 + 244 237 /* 245 238 * Handle to track a mapping created via a grant reference. 246 239 */ ··· 264 227 /* 265 228 * GNTTABOP_map_grant_ref: Map the grant entry (<dom>,<ref>) for access 266 229 * by devices and/or host CPUs. If successful, <handle> is a tracking number 267 - * that must be presented later to destroy the mapping(s). On error, <handle> 230 + * that must be presented later to destroy the mapping(s). On error, <status> 268 231 * is a negative status code. 269 232 * NOTES: 270 233 * 1. 
If GNTMAP_device_map is specified then <dev_bus_addr> is the address ··· 278 241 * host mapping is destroyed by other means then it is *NOT* guaranteed 279 242 * to be accounted to the correct grant reference! 280 243 */ 281 - #define GNTTABOP_map_grant_ref 0 282 244 struct gnttab_map_grant_ref { 283 245 /* IN parameters. */ 284 246 uint64_t host_addr; ··· 302 266 * 3. After executing a batch of unmaps, it is guaranteed that no stale 303 267 * mappings will remain in the device or host TLBs. 304 268 */ 305 - #define GNTTABOP_unmap_grant_ref 1 306 269 struct gnttab_unmap_grant_ref { 307 270 /* IN parameters. */ 308 271 uint64_t host_addr; ··· 321 286 * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF. 322 287 * 3. Xen may not support more than a single grant-table page per domain. 323 288 */ 324 - #define GNTTABOP_setup_table 2 325 289 struct gnttab_setup_table { 326 290 /* IN parameters. */ 327 291 domid_t dom; ··· 335 301 * GNTTABOP_dump_table: Dump the contents of the grant table to the 336 302 * xen console. Debugging use only. 337 303 */ 338 - #define GNTTABOP_dump_table 3 339 304 struct gnttab_dump_table { 340 305 /* IN parameters. */ 341 306 domid_t dom; ··· 344 311 DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_table); 345 312 346 313 /* 347 - * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The 348 - * foreign domain has previously registered its interest in the transfer via 349 - * <domid, ref>. 314 + * GNTTABOP_transfer: Transfer <frame> to a foreign domain. The foreign domain 315 + * has previously registered its interest in the transfer via <domid, ref>. 350 316 * 351 317 * Note that, even if the transfer fails, the specified page no longer belongs 352 318 * to the calling domain *unless* the error is GNTST_bad_page. 319 + * 320 + * Note further that only PV guests can use this operation. 353 321 */ 354 - #define GNTTABOP_transfer 4 355 322 struct gnttab_transfer { 356 323 /* IN parameters. 
*/ 357 - xen_pfn_t mfn; 324 + xen_pfn_t mfn; 358 325 domid_t domid; 359 326 grant_ref_t ref; 360 327 /* OUT parameters. */ ··· 385 352 #define _GNTCOPY_dest_gref (1) 386 353 #define GNTCOPY_dest_gref (1<<_GNTCOPY_dest_gref) 387 354 388 - #define GNTTABOP_copy 5 389 355 struct gnttab_copy { 390 - /* IN parameters. */ 391 - struct { 392 - union { 393 - grant_ref_t ref; 394 - xen_pfn_t gmfn; 395 - } u; 396 - domid_t domid; 397 - uint16_t offset; 398 - } source, dest; 399 - uint16_t len; 400 - uint16_t flags; /* GNTCOPY_* */ 401 - /* OUT parameters. */ 402 - int16_t status; 356 + /* IN parameters. */ 357 + struct gnttab_copy_ptr { 358 + union { 359 + grant_ref_t ref; 360 + xen_pfn_t gmfn; 361 + } u; 362 + domid_t domid; 363 + uint16_t offset; 364 + } source, dest; 365 + uint16_t len; 366 + uint16_t flags; /* GNTCOPY_* */ 367 + /* OUT parameters. */ 368 + int16_t status; 403 369 }; 404 370 DEFINE_GUEST_HANDLE_STRUCT(gnttab_copy); 405 371 ··· 409 377 * 1. <dom> may be specified as DOMID_SELF. 410 378 * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF. 411 379 */ 412 - #define GNTTABOP_query_size 6 413 380 struct gnttab_query_size { 414 381 /* IN parameters. */ 415 382 domid_t dom; ··· 430 399 * 2. After executing a batch of unmaps, it is guaranteed that no stale 431 400 * mappings will remain in the device or host TLBs. 432 401 */ 433 - #define GNTTABOP_unmap_and_replace 7 434 402 struct gnttab_unmap_and_replace { 435 403 /* IN parameters. */ 436 404 uint64_t host_addr; ··· 442 412 443 413 /* 444 414 * GNTTABOP_set_version: Request a particular version of the grant 445 - * table shared table structure. This operation can only be performed 446 - * once in any given domain. It must be performed before any grants 447 - * are activated; otherwise, the domain will be stuck with version 1. 448 - * The only defined versions are 1 and 2. 415 + * table shared table structure. 
This operation may be used to toggle 416 + * between different versions, but must be performed while no grants 417 + * are active. The only defined versions are 1 and 2. 449 418 */ 450 - #define GNTTABOP_set_version 8 451 419 struct gnttab_set_version { 452 - /* IN parameters */ 420 + /* IN/OUT parameters */ 453 421 uint32_t version; 454 422 }; 455 423 DEFINE_GUEST_HANDLE_STRUCT(gnttab_set_version); ··· 464 436 * 1. <dom> may be specified as DOMID_SELF. 465 437 * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF. 466 438 */ 467 - #define GNTTABOP_get_status_frames 9 468 439 struct gnttab_get_status_frames { 469 440 /* IN parameters. */ 470 441 uint32_t nr_frames; ··· 478 451 * GNTTABOP_get_version: Get the grant table version which is in 479 452 * effect for domain <dom>. 480 453 */ 481 - #define GNTTABOP_get_version 10 482 454 struct gnttab_get_version { 483 455 /* IN parameters */ 484 456 domid_t dom; ··· 488 462 DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version); 489 463 490 464 /* 465 + * GNTTABOP_swap_grant_ref: Swap the contents of two grant entries. 466 + */ 467 + struct gnttab_swap_grant_ref { 468 + /* IN parameters */ 469 + grant_ref_t ref_a; 470 + grant_ref_t ref_b; 471 + /* OUT parameters */ 472 + int16_t status; /* GNTST_* */ 473 + }; 474 + DEFINE_GUEST_HANDLE_STRUCT(gnttab_swap_grant_ref); 475 + 476 + /* 491 477 * Issue one or more cache maintenance operations on a portion of a 492 478 * page granted to the calling domain by a foreign domain. 
493 479 */ 494 - #define GNTTABOP_cache_flush 12 495 480 struct gnttab_cache_flush { 496 481 union { 497 482 uint64_t dev_bus_addr; 498 483 grant_ref_t ref; 499 484 } a; 500 - uint16_t offset; /* offset from start of grant */ 501 - uint16_t length; /* size within the grant */ 502 - #define GNTTAB_CACHE_CLEAN (1<<0) 503 - #define GNTTAB_CACHE_INVAL (1<<1) 504 - #define GNTTAB_CACHE_SOURCE_GREF (1<<31) 485 + uint16_t offset; /* offset from start of grant */ 486 + uint16_t length; /* size within the grant */ 487 + #define GNTTAB_CACHE_CLEAN (1u<<0) 488 + #define GNTTAB_CACHE_INVAL (1u<<1) 489 + #define GNTTAB_CACHE_SOURCE_GREF (1u<<31) 505 490 uint32_t op; 506 491 }; 507 492 DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush); 508 493 509 494 /* 510 - * Bitfield values for update_pin_status.flags. 495 + * Bitfield values for gnttab_map_grant_ref.flags. 511 496 */ 512 497 /* Map the grant entry for access by I/O devices. */ 513 498 #define _GNTMAP_device_map (0) ··· 568 531 #define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */ 569 532 #define GNTST_address_too_big (-11) /* transfer page address too large. */ 570 533 #define GNTST_eagain (-12) /* Operation not done; try again. */ 534 + #define GNTST_no_space (-13) /* Out of space (handles etc). */ 571 535 572 536 #define GNTTABOP_error_msgs { \ 573 537 "okay", \ ··· 583 545 "bad page", \ 584 546 "copy arguments cross page boundary", \ 585 547 "page address size too large", \ 586 - "operation not done; try again" \ 548 + "operation not done; try again", \ 549 + "out of space", \ 587 550 } 588 551 589 552 #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */
+14 -5
include/xen/interface/io/ring.h
··· 72 72 * of the shared memory area (PAGE_SIZE, for instance). To initialise 73 73 * the front half: 74 74 * 75 - * mytag_front_ring_t front_ring; 76 - * SHARED_RING_INIT((mytag_sring_t *)shared_page); 77 - * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE); 75 + * mytag_front_ring_t ring; 76 + * XEN_FRONT_RING_INIT(&ring, (mytag_sring_t *)shared_page, PAGE_SIZE); 78 77 * 79 78 * Initializing the back follows similarly (note that only the front 80 79 * initializes the shared ring): ··· 145 146 146 147 #define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size) 147 148 149 + #define XEN_FRONT_RING_INIT(r, s, size) do { \ 150 + SHARED_RING_INIT(s); \ 151 + FRONT_RING_INIT(r, s, size); \ 152 + } while (0) 153 + 148 154 #define BACK_RING_ATTACH(_r, _s, _i, __size) do { \ 149 155 (_r)->rsp_prod_pvt = (_i); \ 150 156 (_r)->req_cons = (_i); \ ··· 174 170 (RING_FREE_REQUESTS(_r) == 0) 175 171 176 172 /* Test if there are outstanding messages to be processed on a ring. */ 177 - #define RING_HAS_UNCONSUMED_RESPONSES(_r) \ 173 + #define XEN_RING_NR_UNCONSUMED_RESPONSES(_r) \ 178 174 ((_r)->sring->rsp_prod - (_r)->rsp_cons) 179 175 180 - #define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \ 176 + #define XEN_RING_NR_UNCONSUMED_REQUESTS(_r) ({ \ 181 177 unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \ 182 178 unsigned int rsp = RING_SIZE(_r) - \ 183 179 ((_r)->req_cons - (_r)->rsp_prod_pvt); \ 184 180 req < rsp ? req : rsp; \ 185 181 }) 182 + 183 + #define RING_HAS_UNCONSUMED_RESPONSES(_r) \ 184 + (!!XEN_RING_NR_UNCONSUMED_RESPONSES(_r)) 185 + #define RING_HAS_UNCONSUMED_REQUESTS(_r) \ 186 + (!!XEN_RING_NR_UNCONSUMED_REQUESTS(_r)) 186 187 187 188 /* Direct access to individual ring elements, by index. */ 188 189 #define RING_GET_REQUEST(_r, _idx) \
+129 -4
include/xen/interface/io/vscsiif.h
··· 43 43 * 44 44 * A string specifying the backend device: either a 4-tuple "h:c:t:l" 45 45 * (host, controller, target, lun, all integers), or a WWN (e.g. 46 - * "naa.60014054ac780582"). 46 + * "naa.60014054ac780582:0"). 47 47 * 48 48 * v-dev 49 49 * Values: string ··· 87 87 * response structures. 88 88 */ 89 89 90 + /* 91 + * Xenstore format in practice 92 + * =========================== 93 + * 94 + * The backend driver uses a single_host:many_devices notation to manage domU 95 + * devices. Everything is stored in /local/domain/<backend_domid>/backend/vscsi/. 96 + * The xenstore layout looks like this (dom0 is assumed to be the backend_domid): 97 + * 98 + * <domid>/<vhost>/feature-host = "0" 99 + * <domid>/<vhost>/frontend = "/local/domain/<domid>/device/vscsi/0" 100 + * <domid>/<vhost>/frontend-id = "<domid>" 101 + * <domid>/<vhost>/online = "1" 102 + * <domid>/<vhost>/state = "4" 103 + * <domid>/<vhost>/vscsi-devs/dev-0/p-dev = "8:0:2:1" or "naa.wwn:lun" 104 + * <domid>/<vhost>/vscsi-devs/dev-0/state = "4" 105 + * <domid>/<vhost>/vscsi-devs/dev-0/v-dev = "0:0:0:0" 106 + * <domid>/<vhost>/vscsi-devs/dev-1/p-dev = "8:0:2:2" 107 + * <domid>/<vhost>/vscsi-devs/dev-1/state = "4" 108 + * <domid>/<vhost>/vscsi-devs/dev-1/v-dev = "0:0:1:0" 109 + * 110 + * The frontend driver maintains its state in 111 + * /local/domain/<domid>/device/vscsi/. 
112 + * 113 + * <vhost>/backend = "/local/domain/0/backend/vscsi/<domid>/<vhost>" 114 + * <vhost>/backend-id = "0" 115 + * <vhost>/event-channel = "20" 116 + * <vhost>/ring-ref = "43" 117 + * <vhost>/state = "4" 118 + * <vhost>/vscsi-devs/dev-0/state = "4" 119 + * <vhost>/vscsi-devs/dev-1/state = "4" 120 + * 121 + * In addition to the entries for backend and frontend these flags are stored 122 + * for the toolstack: 123 + * 124 + * <domid>/<vhost>/vscsi-devs/dev-1/p-devname = "/dev/$device" 125 + * <domid>/<vhost>/libxl_ctrl_index = "0" 126 + * 127 + * 128 + * Backend/frontend protocol 129 + * ========================= 130 + * 131 + * To create a vhost along with a device: 132 + * <domid>/<vhost>/feature-host = "0" 133 + * <domid>/<vhost>/frontend = "/local/domain/<domid>/device/vscsi/0" 134 + * <domid>/<vhost>/frontend-id = "<domid>" 135 + * <domid>/<vhost>/online = "1" 136 + * <domid>/<vhost>/state = "1" 137 + * <domid>/<vhost>/vscsi-devs/dev-0/p-dev = "8:0:2:1" 138 + * <domid>/<vhost>/vscsi-devs/dev-0/state = "1" 139 + * <domid>/<vhost>/vscsi-devs/dev-0/v-dev = "0:0:0:0" 140 + * Wait for <domid>/<vhost>/state + <domid>/<vhost>/vscsi-devs/dev-0/state become 4 141 + * 142 + * To add another device to a vhost: 143 + * <domid>/<vhost>/state = "7" 144 + * <domid>/<vhost>/vscsi-devs/dev-1/p-dev = "8:0:2:2" 145 + * <domid>/<vhost>/vscsi-devs/dev-1/state = "1" 146 + * <domid>/<vhost>/vscsi-devs/dev-1/v-dev = "0:0:1:0" 147 + * Wait for <domid>/<vhost>/state + <domid>/<vhost>/vscsi-devs/dev-1/state become 4 148 + * 149 + * To remove a device from a vhost: 150 + * <domid>/<vhost>/state = "7" 151 + * <domid>/<vhost>/vscsi-devs/dev-1/state = "5" 152 + * Wait for <domid>/<vhost>/state to become 4 153 + * Wait for <domid>/<vhost>/vscsi-devs/dev-1/state become 6 154 + * Remove <domid>/<vhost>/vscsi-devs/dev-1/{state,p-dev,v-dev,p-devname} 155 + * Remove <domid>/<vhost>/vscsi-devs/dev-1/ 156 + * 157 + */ 158 + 90 159 /* Requests from the frontend to the backend */ 91 160 92 161 
/* ··· 186 117 * (plus the set VSCSIIF_SG_GRANT bit), the number of scsiif_request_segment 187 118 * elements referencing the target data buffers is calculated from the lengths 188 119 * of the seg[] elements (the sum of all valid seg[].length divided by the 189 - * size of one scsiif_request_segment structure). 120 + * size of one scsiif_request_segment structure). The frontend may use a mix of 121 + * direct and indirect requests. 190 122 */ 191 123 #define VSCSIIF_ACT_SCSI_CDB 1 192 124 ··· 224 154 225 155 /* 226 156 * based on Linux kernel 2.6.18, still valid 157 + * 227 158 * Changing these values requires support of multiple protocols via the rings 228 159 * as "old clients" will blindly use these values and the resulting structure 229 160 * sizes. 230 161 */ 231 162 #define VSCSIIF_MAX_COMMAND_SIZE 16 232 163 #define VSCSIIF_SENSE_BUFFERSIZE 96 164 + #define VSCSIIF_PAGE_SIZE 4096 233 165 234 166 struct scsiif_request_segment { 235 167 grant_ref_t gref; ··· 239 167 uint16_t length; 240 168 }; 241 169 242 - #define VSCSIIF_SG_PER_PAGE (PAGE_SIZE / sizeof(struct scsiif_request_segment)) 170 + #define VSCSIIF_SG_PER_PAGE (VSCSIIF_PAGE_SIZE / \ 171 + sizeof(struct scsiif_request_segment)) 243 172 244 173 /* Size of one request is 252 bytes */ 245 174 struct vscsiif_request { ··· 280 207 uint32_t reserved[36]; 281 208 }; 282 209 210 + /* SCSI I/O status from vscsiif_response->rslt */ 211 + #define XEN_VSCSIIF_RSLT_STATUS(x) ((x) & 0x00ff) 212 + 213 + /* Host I/O status from vscsiif_response->rslt */ 214 + #define XEN_VSCSIIF_RSLT_HOST(x) (((x) & 0x00ff0000) >> 16) 215 + #define XEN_VSCSIIF_RSLT_HOST_OK 0 216 + /* Couldn't connect before timeout */ 217 + #define XEN_VSCSIIF_RSLT_HOST_NO_CONNECT 1 218 + /* Bus busy through timeout */ 219 + #define XEN_VSCSIIF_RSLT_HOST_BUS_BUSY 2 220 + /* Timed out for other reason */ 221 + #define XEN_VSCSIIF_RSLT_HOST_TIME_OUT 3 222 + /* Bad target */ 223 + #define XEN_VSCSIIF_RSLT_HOST_BAD_TARGET 4 224 + /* Abort for some other 
reason */ 225 + #define XEN_VSCSIIF_RSLT_HOST_ABORT 5 226 + /* Parity error */ 227 + #define XEN_VSCSIIF_RSLT_HOST_PARITY 6 228 + /* Internal error */ 229 + #define XEN_VSCSIIF_RSLT_HOST_ERROR 7 230 + /* Reset by somebody */ 231 + #define XEN_VSCSIIF_RSLT_HOST_RESET 8 232 + /* Unexpected interrupt */ 233 + #define XEN_VSCSIIF_RSLT_HOST_BAD_INTR 9 234 + /* Force command past mid-layer */ 235 + #define XEN_VSCSIIF_RSLT_HOST_PASSTHROUGH 10 236 + /* Retry requested */ 237 + #define XEN_VSCSIIF_RSLT_HOST_SOFT_ERROR 11 238 + /* Hidden retry requested */ 239 + #define XEN_VSCSIIF_RSLT_HOST_IMM_RETRY 12 240 + /* Requeue command requested */ 241 + #define XEN_VSCSIIF_RSLT_HOST_REQUEUE 13 242 + /* Transport error disrupted I/O */ 243 + #define XEN_VSCSIIF_RSLT_HOST_TRANSPORT_DISRUPTED 14 244 + /* Transport class fastfailed */ 245 + #define XEN_VSCSIIF_RSLT_HOST_TRANSPORT_FAILFAST 15 246 + /* Permanent target failure */ 247 + #define XEN_VSCSIIF_RSLT_HOST_TARGET_FAILURE 16 248 + /* Permanent nexus failure on path */ 249 + #define XEN_VSCSIIF_RSLT_HOST_NEXUS_FAILURE 17 250 + /* Space allocation on device failed */ 251 + #define XEN_VSCSIIF_RSLT_HOST_ALLOC_FAILURE 18 252 + /* Medium error */ 253 + #define XEN_VSCSIIF_RSLT_HOST_MEDIUM_ERROR 19 254 + /* Transport marginal errors */ 255 + #define XEN_VSCSIIF_RSLT_HOST_TRANSPORT_MARGINAL 20 256 + 257 + /* Result values of reset operations */ 258 + #define XEN_VSCSIIF_RSLT_RESET_SUCCESS 0x2002 259 + #define XEN_VSCSIIF_RSLT_RESET_FAILED 0x2003 260 + 283 261 DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response); 284 262 285 - #endif /*__XEN__PUBLIC_IO_SCSI_H__*/ 263 + 264 + #endif /*__XEN__PUBLIC_IO_SCSI_H__*/
+33 -4
include/xen/interface/io/xs_wire.h
··· 10 10 11 11 enum xsd_sockmsg_type 12 12 { 13 - XS_DEBUG, 13 + XS_CONTROL, 14 + #define XS_DEBUG XS_CONTROL 14 15 XS_DIRECTORY, 15 16 XS_READ, 16 17 XS_GET_PERMS, ··· 31 30 XS_IS_DOMAIN_INTRODUCED, 32 31 XS_RESUME, 33 32 XS_SET_TARGET, 34 - XS_RESTRICT, 35 - XS_RESET_WATCHES, 33 + /* XS_RESTRICT has been removed */ 34 + XS_RESET_WATCHES = XS_SET_TARGET + 2, 35 + XS_DIRECTORY_PART, 36 + 37 + XS_TYPE_COUNT, /* Number of valid types. */ 38 + 39 + XS_INVALID = 0xffff /* Guaranteed to remain an invalid type */ 36 40 }; 37 41 38 42 #define XS_WRITE_NONE "NONE" ··· 65 59 XSD_ERROR(EROFS), 66 60 XSD_ERROR(EBUSY), 67 61 XSD_ERROR(EAGAIN), 68 - XSD_ERROR(EISCONN) 62 + XSD_ERROR(EISCONN), 63 + XSD_ERROR(E2BIG) 69 64 }; 70 65 71 66 struct xsd_sockmsg ··· 94 87 char rsp[XENSTORE_RING_SIZE]; /* Replies and async watch events. */ 95 88 XENSTORE_RING_IDX req_cons, req_prod; 96 89 XENSTORE_RING_IDX rsp_cons, rsp_prod; 90 + uint32_t server_features; /* Bitmap of features supported by the server */ 91 + uint32_t connection; 92 + uint32_t error; 97 93 }; 98 94 99 95 /* Violating this is very bad. See docs/misc/xenstore.txt. 
*/ 100 96 #define XENSTORE_PAYLOAD_MAX 4096 97 + 98 + /* Violating these just gets you an error back */ 99 + #define XENSTORE_ABS_PATH_MAX 3072 100 + #define XENSTORE_REL_PATH_MAX 2048 101 + 102 + /* The ability to reconnect a ring */ 103 + #define XENSTORE_SERVER_FEATURE_RECONNECTION 1 104 + /* The presence of the "error" field in the ring page */ 105 + #define XENSTORE_SERVER_FEATURE_ERROR 2 106 + 107 + /* Valid values for the connection field */ 108 + #define XENSTORE_CONNECTED 0 /* the steady-state */ 109 + #define XENSTORE_RECONNECT 1 /* guest has initiated a reconnect */ 110 + 111 + /* Valid values for the error field */ 112 + #define XENSTORE_ERROR_NONE 0 /* No error */ 113 + #define XENSTORE_ERROR_COMM 1 /* Communication problem */ 114 + #define XENSTORE_ERROR_RINGIDX 2 /* Invalid ring index */ 115 + #define XENSTORE_ERROR_PROTO 3 /* Protocol violation (payload too long) */ 101 116 102 117 #endif /* _XS_WIRE_H */
+3 -1
include/xen/xenbus.h
··· 224 224 const char *pathfmt, ...); 225 225 226 226 int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state); 227 - int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, 227 + int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr, 228 228 unsigned int nr_pages, grant_ref_t *grefs); 229 + void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages, 230 + grant_ref_t *grefs); 229 231 int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs, 230 232 unsigned int nr_grefs, void **vaddr); 231 233
+10 -34
sound/xen/xen_snd_front_evtchnl.c
··· 143 143 static void evtchnl_free(struct xen_snd_front_info *front_info, 144 144 struct xen_snd_front_evtchnl *channel) 145 145 { 146 - unsigned long page = 0; 146 + void *page = NULL; 147 147 148 148 if (channel->type == EVTCHNL_TYPE_REQ) 149 - page = (unsigned long)channel->u.req.ring.sring; 149 + page = channel->u.req.ring.sring; 150 150 else if (channel->type == EVTCHNL_TYPE_EVT) 151 - page = (unsigned long)channel->u.evt.page; 151 + page = channel->u.evt.page; 152 152 153 153 if (!page) 154 154 return; ··· 167 167 xenbus_free_evtchn(front_info->xb_dev, channel->port); 168 168 169 169 /* End access and free the page. */ 170 - if (channel->gref != GRANT_INVALID_REF) 171 - gnttab_end_foreign_access(channel->gref, page); 172 - else 173 - free_page(page); 170 + xenbus_teardown_ring(&page, 1, &channel->gref); 174 171 175 172 memset(channel, 0, sizeof(*channel)); 176 173 } ··· 193 196 enum xen_snd_front_evtchnl_type type) 194 197 { 195 198 struct xenbus_device *xb_dev = front_info->xb_dev; 196 - unsigned long page; 197 - grant_ref_t gref; 199 + void *page; 198 200 irq_handler_t handler; 199 201 char *handler_name = NULL; 200 202 int ret; ··· 203 207 channel->index = index; 204 208 channel->front_info = front_info; 205 209 channel->state = EVTCHNL_STATE_DISCONNECTED; 206 - channel->gref = GRANT_INVALID_REF; 207 - page = get_zeroed_page(GFP_KERNEL); 208 - if (!page) { 209 - ret = -ENOMEM; 210 + ret = xenbus_setup_ring(xb_dev, GFP_KERNEL, &page, 1, &channel->gref); 211 + if (ret) 210 212 goto fail; 211 - } 212 213 213 214 handler_name = kasprintf(GFP_KERNEL, "%s-%s", XENSND_DRIVER_NAME, 214 215 type == EVTCHNL_TYPE_REQ ? 
··· 219 226 mutex_init(&channel->ring_io_lock); 220 227 221 228 if (type == EVTCHNL_TYPE_REQ) { 222 - struct xen_sndif_sring *sring = (struct xen_sndif_sring *)page; 229 + struct xen_sndif_sring *sring = page; 223 230 224 231 init_completion(&channel->u.req.completion); 225 232 mutex_init(&channel->u.req.req_io_lock); 226 - SHARED_RING_INIT(sring); 227 - FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE); 228 - 229 - ret = xenbus_grant_ring(xb_dev, sring, 1, &gref); 230 - if (ret < 0) { 231 - channel->u.req.ring.sring = NULL; 232 - goto fail; 233 - } 233 + XEN_FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE); 234 234 235 235 handler = evtchnl_interrupt_req; 236 236 } else { 237 - ret = gnttab_grant_foreign_access(xb_dev->otherend_id, 238 - virt_to_gfn((void *)page), 0); 239 - if (ret < 0) 240 - goto fail; 241 - 242 - channel->u.evt.page = (struct xensnd_event_page *)page; 243 - gref = ret; 237 + channel->u.evt.page = page; 244 238 handler = evtchnl_interrupt_evt; 245 239 } 246 - 247 - channel->gref = gref; 248 240 249 241 ret = xenbus_alloc_evtchn(xb_dev, &channel->port); 250 242 if (ret < 0) ··· 257 279 return 0; 258 280 259 281 fail: 260 - if (page) 261 - free_page(page); 262 282 kfree(handler_name); 263 283 dev_err(&xb_dev->dev, "Failed to allocate ring: %d\n", ret); 264 284 return ret;
-9
sound/xen/xen_snd_front_evtchnl.h
··· 15 15 16 16 struct xen_snd_front_info; 17 17 18 - #ifndef GRANT_INVALID_REF 19 - /* 20 - * FIXME: usage of grant reference 0 as invalid grant reference: 21 - * grant reference 0 is valid, but never exposed to a PV driver, 22 - * because of the fact it is already in use/reserved by the PV console. 23 - */ 24 - #define GRANT_INVALID_REF 0 25 - #endif 26 - 27 18 /* Timeout in ms to wait for backend to respond. */ 28 19 #define VSND_WAIT_BACK_MS 3000 29 20