
Merge branch 'for-3.18/drivers' of git://git.kernel.dk/linux-block

Pull block layer driver update from Jens Axboe:
"This is the block driver pull request for 3.18. Not a lot in there
this round, and nothing earth shattering.

- A round of drbd fixes from the linbit team, and an improvement in
asender performance.

- Removal of deprecated (and unused) IRQF_DISABLED flag in rsxx and
hd from Michael Opdenacker.

- Disable entropy collection from flash devices by default, from Mike
Snitzer.

- A small collection of xen blkfront/back fixes from Roger Pau Monné
and Vitaly Kuznetsov"

* 'for-3.18/drivers' of git://git.kernel.dk/linux-block:
block: disable entropy contributions for nonrot devices
xen, blkfront: factor out flush-related checks from do_blkif_request()
xen-blkback: fix leak on grant map error path
xen/blkback: unmap all persistent grants when frontend gets disconnected
rsxx: Remove deprecated IRQF_DISABLED
block: hd: remove deprecated IRQF_DISABLED
drbd: use RB_DECLARE_CALLBACKS() to define augment callbacks
drbd: compute the end before rb_insert_augmented()
drbd: Add missing newline in resync progress display in /proc/drbd
drbd: reduce lock contention in drbd_worker
drbd: Improve asender performance
drbd: Get rid of the WORK_PENDING macro
drbd: Get rid of the __no_warn and __cond_lock macros
drbd: Avoid inconsistent locking warning
drbd: Remove superfluous newline from "resync_extents" debugfs entry.
drbd: Use consistent names for all the bi_end_io callbacks
drbd: Use better variable names

+132 -147
+2 -2
drivers/block/drbd/drbd_actlog.c
···
 	if (bio_add_page(bio, device->md_io.page, size, 0) != size)
 		goto out;
 	bio->bi_private = device;
-	bio->bi_end_io = drbd_md_io_complete;
+	bio->bi_end_io = drbd_md_endio;
 	bio->bi_rw = rw;

 	if (!(rw & WRITE) && device->state.disk == D_DISKLESS && device->ldev == NULL)
 		/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
 		;
 	else if (!get_ldev_if_state(device, D_ATTACHING)) {
-		/* Corresponding put_ldev in drbd_md_io_complete() */
+		/* Corresponding put_ldev in drbd_md_endio() */
 		drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
 		err = -ENODEV;
 		goto out;
+3 -3
drivers/block/drbd/drbd_bitmap.c
···
 }

 /* bv_page may be a copy, or may be the original */
-static void bm_async_io_complete(struct bio *bio, int error)
+static void drbd_bm_endio(struct bio *bio, int error)
 {
 	struct drbd_bm_aio_ctx *ctx = bio->bi_private;
 	struct drbd_device *device = ctx->device;
···
 	 * according to api. Do we want to assert that? */
 	bio_add_page(bio, page, len, 0);
 	bio->bi_private = ctx;
-	bio->bi_end_io = bm_async_io_complete;
+	bio->bi_end_io = drbd_bm_endio;

 	if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
 		bio->bi_rw |= rw;
···
 	}

 	/*
-	 * We initialize ctx->in_flight to one to make sure bm_async_io_complete
+	 * We initialize ctx->in_flight to one to make sure drbd_bm_endio
 	 * will not set ctx->done early, and decrement / test it here. If there
 	 * are still some bios in flight, we need to wait for them here.
 	 * If all IO is done already (or nothing had been submitted), there is
+1 -1
drivers/block/drbd/drbd_debugfs.c
···
 {
 	struct bm_extent *bme = lc_entry(e, struct bm_extent, lce);

-	seq_printf(m, "%5d %s %s %s\n", bme->rs_left,
+	seq_printf(m, "%5d %s %s %s", bme->rs_left,
 		test_bit(BME_NO_WRITES, &bme->flags) ? "NO_WRITES" : "---------",
 		test_bit(BME_LOCKED, &bme->flags) ? "LOCKED" : "------",
 		test_bit(BME_PRIORITY, &bme->flags) ? "PRIORITY" : "--------"
+10 -9
drivers/block/drbd/drbd_int.h
···
 # define __must_hold(x)
 #endif

-#define __no_warn(lock, stmt) do { __acquire(lock); stmt; __release(lock); } while (0)
-
 /* module parameter, defined in drbd_main.c */
 extern unsigned int minor_count;
 extern bool disable_sendpage;
···

 /* drbd_worker.c */
 /* bi_end_io handlers */
-extern void drbd_md_io_complete(struct bio *bio, int error);
+extern void drbd_md_endio(struct bio *bio, int error);
 extern void drbd_peer_request_endio(struct bio *bio, int error);
 extern void drbd_request_endio(struct bio *bio, int error);
 extern int drbd_worker(struct drbd_thread *thi);
···

 /**
  * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
- * @M: DRBD device.
+ * @_device: DRBD device.
+ * @_min_state: Minimum device state required for success.
  *
  * You have to call put_ldev() when finished working with device->ldev.
  */
-#define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT))
-#define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS))
+#define get_ldev_if_state(_device, _min_state) \
+	(_get_ldev_if_state((_device), (_min_state)) ? \
+		({ __acquire(x); true; }) : false)
+#define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)

 static inline void put_ldev(struct drbd_device *device)
 {
-	enum drbd_disk_state ds = device->state.disk;
+	enum drbd_disk_state disk_state = device->state.disk;
 	/* We must check the state *before* the atomic_dec becomes visible,
 	 * or we have a theoretical race where someone hitting zero,
 	 * while state still D_FAILED, will then see D_DISKLESS in the
···
 	__release(local);
 	D_ASSERT(device, i >= 0);
 	if (i == 0) {
-		if (ds == D_DISKLESS)
+		if (disk_state == D_DISKLESS)
 			/* even internal references gone, safe to destroy */
 			drbd_device_post_work(device, DESTROY_DISK);
-		if (ds == D_FAILED)
+		if (disk_state == D_FAILED)
 			/* all application IO references gone. */
 			if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
 				drbd_device_post_work(device, GO_DISKLESS);
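
For reference, the calling convention is unchanged by the macro rewrite above: get_ldev() still returns a boolean and still pairs with put_ldev(). The statement-expression form simply lets sparse see the reference as acquired only on the success path, without the removed __cond_lock()/__no_warn() wrappers. A sketch of typical use (the surrounding function is hypothetical):

    /* Hypothetical caller, assuming the drbd APIs shown in the diff. */
    static void example_touch_ldev(struct drbd_device *device)
    {
    	if (!get_ldev(device))
    		return;		/* no local disk reference available */
    	/* ... safe to dereference device->ldev here ... */
    	put_ldev(device);	/* releases what get_ldev() acquired */
    }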
+6 -34
drivers/block/drbd/drbd_interval.c
···
 	return max;
 }

-static void augment_propagate(struct rb_node *rb, struct rb_node *stop)
-{
-	while (rb != stop) {
-		struct drbd_interval *node = rb_entry(rb, struct drbd_interval, rb);
-		sector_t subtree_last = compute_subtree_last(node);
-		if (node->end == subtree_last)
-			break;
-		node->end = subtree_last;
-		rb = rb_parent(&node->rb);
-	}
-}
-
-static void augment_copy(struct rb_node *rb_old, struct rb_node *rb_new)
-{
-	struct drbd_interval *old = rb_entry(rb_old, struct drbd_interval, rb);
-	struct drbd_interval *new = rb_entry(rb_new, struct drbd_interval, rb);
-
-	new->end = old->end;
-}
-
-static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
-{
-	struct drbd_interval *old = rb_entry(rb_old, struct drbd_interval, rb);
-	struct drbd_interval *new = rb_entry(rb_new, struct drbd_interval, rb);
-
-	new->end = old->end;
-	old->end = compute_subtree_last(old);
-}
-
-static const struct rb_augment_callbacks augment_callbacks = {
-	augment_propagate,
-	augment_copy,
-	augment_rotate,
-};
+RB_DECLARE_CALLBACKS(static, augment_callbacks, struct drbd_interval, rb,
+		     sector_t, end, compute_subtree_last);

 /**
  * drbd_insert_interval - insert a new interval into a tree
···
 drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
 {
 	struct rb_node **new = &root->rb_node, *parent = NULL;
+	sector_t this_end = this->sector + (this->size >> 9);

 	BUG_ON(!IS_ALIGNED(this->size, 512));

···
 			rb_entry(*new, struct drbd_interval, rb);

 		parent = *new;
+		if (here->end < this_end)
+			here->end = this_end;
 		if (this->sector < here->sector)
 			new = &(*new)->rb_left;
 		else if (this->sector > here->sector)
···
 			return false;
 	}

+	this->end = this_end;
 	rb_link_node(&this->rb, parent, new);
 	rb_insert_augmented(&this->rb, root, &augment_callbacks);
 	return true;
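
Two related changes above: RB_DECLARE_CALLBACKS() generates the propagate/copy/rotate trio that was previously hand-written, and drbd_insert_interval() now maintains the augmented 'end' field itself on the way down. An annotated restatement of the insert path, covering only the hunks shown (the equal-sector branch elided in the diff stays elided):

    /* end of the new interval, in sectors; size is in bytes */
    sector_t this_end = this->sector + (this->size >> 9);

    while (*new) {
    	struct drbd_interval *here =
    		rb_entry(*new, struct drbd_interval, rb);

    	parent = *new;
    	/* rb_insert_augmented() only maintains 'end' across its own
    	 * rebalancing rotations; the caller must widen every ancestor
    	 * visited on the way down before linking the new node. */
    	if (here->end < this_end)
    		here->end = this_end;
    	if (this->sector < here->sector)
    		new = &(*new)->rb_left;
    	else if (this->sector > here->sector)
    		new = &(*new)->rb_right;
    	/* ... equal-sector handling as elided in the diff ... */
    }

    this->end = this_end;	/* seed the node's own subtree maximum */
    rb_link_node(&this->rb, parent, new);
    rb_insert_augmented(&this->rb, root, &augment_callbacks);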
+14 -14
drivers/block/drbd/drbd_main.c
···
 	struct drbd_socket *sock;
 	struct p_data *p;
 	unsigned int dp_flags = 0;
-	int dgs;
+	int digest_size;
 	int err;

 	sock = &peer_device->connection->data;
 	p = drbd_prepare_command(peer_device, sock);
-	dgs = peer_device->connection->integrity_tfm ?
-		crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
+	digest_size = peer_device->connection->integrity_tfm ?
+		crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;

 	if (!p)
 		return -EIO;
···

 	/* our digest is still only over the payload.
 	 * TRIM does not carry any payload. */
-	if (dgs)
+	if (digest_size)
 		drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, p + 1);
-	err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
+	err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, sizeof(*p) + digest_size, NULL, req->i.size);
 	if (!err) {
 		/* For protocol A, we have to memcpy the payload into
 		 * socket buffers, as we may complete right away
···
 		 * out ok after sending on this side, but does not fit on the
 		 * receiving side, we sure have detected corruption elsewhere.
 		 */
-		if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
+		if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || digest_size)
 			err = _drbd_send_bio(peer_device, req->master_bio);
 		else
 			err = _drbd_send_zc_bio(peer_device, req->master_bio);

 		/* double check digest, sometimes buffers have been modified in flight. */
-		if (dgs > 0 && dgs <= 64) {
+		if (digest_size > 0 && digest_size <= 64) {
 			/* 64 byte, 512 bit, is the largest digest size
 			 * currently supported in kernel crypto. */
 			unsigned char digest[64];
 			drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
-			if (memcmp(p + 1, digest, dgs)) {
+			if (memcmp(p + 1, digest, digest_size)) {
 				drbd_warn(device,
 					"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
 					(unsigned long long)req->i.sector, req->i.size);
 			}
-		} /* else if (dgs > 64) {
+		} /* else if (digest_size > 64) {
 		     ... Be noisy about digest too large ...
 		} */
 	}
···
 	struct drbd_socket *sock;
 	struct p_data *p;
 	int err;
-	int dgs;
+	int digest_size;

 	sock = &peer_device->connection->data;
 	p = drbd_prepare_command(peer_device, sock);

-	dgs = peer_device->connection->integrity_tfm ?
-		crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
+	digest_size = peer_device->connection->integrity_tfm ?
+		crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;

 	if (!p)
 		return -EIO;
···
 	p->block_id = peer_req->block_id;
 	p->seq_num = 0;  /* unused */
 	p->dp_flags = 0;
-	if (dgs)
+	if (digest_size)
 		drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
-	err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
+	err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NULL, peer_req->i.size);
 	if (!err)
 		err = _drbd_send_zc_ee(peer_device, peer_req);
 	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */
+3 -1
drivers/block/drbd/drbd_proc.c
···
 			(unsigned long) Bit2KB(rs_left >> 10),
 			(unsigned long) Bit2KB(rs_total >> 10));
 	else
-		seq_printf(seq, "(%lu/%lu)K\n\t",
+		seq_printf(seq, "(%lu/%lu)K",
 			(unsigned long) Bit2KB(rs_left),
 			(unsigned long) Bit2KB(rs_total));
+
+	seq_printf(seq, "\n\t");

 	/* see drivers/md/md.c
 	 * We do not want to overflow, so the order of operands and
+29 -23
drivers/block/drbd/drbd_receiver.c
···
 	struct bio *bio;
 	struct page *page = peer_req->pages;
 	sector_t sector = peer_req->i.sector;
-	unsigned ds = peer_req->i.size;
+	unsigned data_size = peer_req->i.size;
 	unsigned n_bios = 0;
-	unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
+	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
 	int err = -ENOMEM;

 	if (peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) {
···
 		list_add_tail(&peer_req->w.list, &device->active_ee);
 		spin_unlock_irq(&device->resource->req_lock);
 		if (blkdev_issue_zeroout(device->ldev->backing_bdev,
-			sector, ds >> 9, GFP_NOIO))
+			sector, data_size >> 9, GFP_NOIO))
 			peer_req->flags |= EE_WAS_ERROR;
 		drbd_endio_write_sec_final(peer_req);
 		return 0;
···
 	++n_bios;

 	if (rw & REQ_DISCARD) {
-		bio->bi_iter.bi_size = ds;
+		bio->bi_iter.bi_size = data_size;
 		goto submit;
 	}

 	page_chain_for_each(page) {
-		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
+		unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
 		if (!bio_add_page(bio, page, len, 0)) {
 			/* A single page must always be possible!
 			 * But in case it fails anyways,
···
 			}
 			goto next_bio;
 		}
-		ds -= len;
+		data_size -= len;
 		sector += len >> 9;
 		--nr_pages;
 	}
-	D_ASSERT(device, ds == 0);
+	D_ASSERT(device, data_size == 0);
 submit:
 	D_ASSERT(device, page == NULL);
···
 	const sector_t capacity = drbd_get_capacity(device->this_bdev);
 	struct drbd_peer_request *peer_req;
 	struct page *page;
-	int dgs, ds, err;
-	unsigned int data_size = pi->size;
+	int digest_size, err;
+	unsigned int data_size = pi->size, ds;
 	void *dig_in = peer_device->connection->int_dig_in;
 	void *dig_vv = peer_device->connection->int_dig_vv;
 	unsigned long *data;
 	struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL;

-	dgs = 0;
+	digest_size = 0;
 	if (!trim && peer_device->connection->peer_integrity_tfm) {
-		dgs = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+		digest_size = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
 		/*
 		 * FIXME: Receive the incoming digest into the receive buffer
 		 * here, together with its struct p_data?
 		 */
-		err = drbd_recv_all_warn(peer_device->connection, dig_in, dgs);
+		err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
 		if (err)
 			return NULL;
-		data_size -= dgs;
+		data_size -= digest_size;
 	}

 	if (trim) {
···
 		ds -= len;
 	}

-	if (dgs) {
+	if (digest_size) {
 		drbd_csum_ee(peer_device->connection->peer_integrity_tfm, peer_req, dig_vv);
-		if (memcmp(dig_in, dig_vv, dgs)) {
+		if (memcmp(dig_in, dig_vv, digest_size)) {
 			drbd_err(device, "Digest integrity check FAILED: %llus +%u\n",
 				(unsigned long long)sector, data_size);
 			drbd_free_peer_req(device, peer_req);
 			return NULL;
 		}
 	}
-	device->recv_cnt += data_size>>9;
+	device->recv_cnt += data_size >> 9;
 	return peer_req;
 }
···
 	struct bio_vec bvec;
 	struct bvec_iter iter;
 	struct bio *bio;
-	int dgs, err, expect;
+	int digest_size, err, expect;
 	void *dig_in = peer_device->connection->int_dig_in;
 	void *dig_vv = peer_device->connection->int_dig_vv;

-	dgs = 0;
+	digest_size = 0;
 	if (peer_device->connection->peer_integrity_tfm) {
-		dgs = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
-		err = drbd_recv_all_warn(peer_device->connection, dig_in, dgs);
+		digest_size = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+		err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
 		if (err)
 			return err;
-		data_size -= dgs;
+		data_size -= digest_size;
 	}

 	/* optimistically update recv_cnt. if receiving fails below,
···
 		data_size -= expect;
 	}

-	if (dgs) {
+	if (digest_size) {
 		drbd_csum_bio(peer_device->connection->peer_integrity_tfm, bio, dig_vv);
-		if (memcmp(dig_in, dig_vv, dgs)) {
+		if (memcmp(dig_in, dig_vv, digest_size)) {
 			drbd_err(peer_device, "Digest integrity check FAILED. Broken NICs?\n");
 			return -EINVAL;
 		}
···
 	 * rv < expected: "woken" by signal during receive
 	 * rv == 0 : "connection shut down by peer"
 	 */
+received_more:
 	if (likely(rv > 0)) {
 		received += rv;
 		buf += rv;
···
 			expect = header_size;
 			cmd = NULL;
 		}
+		if (test_bit(SEND_PING, &connection->flags))
+			continue;
+		rv = drbd_recv_short(connection->meta.socket, buf, expect-received, MSG_DONTWAIT);
+		if (rv > 0)
+			goto received_more;
 	}

 	if (0) {
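
The final two hunks implement the asender improvement called out in the pull message: after a packet is handled, the meta-data socket is polled non-blocking, so a burst of ACKs is drained in a single wakeup instead of one wakeup per packet. A condensed restatement of the control flow (names from the diff; the packet-decoding body is elided):

    received_more:
    	if (likely(rv > 0)) {
    		received += rv;
    		buf += rv;
    	}
    	/* ... decode and dispatch one complete packet ... */

    	/* a pending ping outranks opportunistic draining */
    	if (test_bit(SEND_PING, &connection->flags))
    		continue;
    	/* non-blocking read for data that is already queued */
    	rv = drbd_recv_short(connection->meta.socket, buf,
    			     expect - received, MSG_DONTWAIT);
    	if (rv > 0)
    		goto received_more;	/* drain without sleeping */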
+1 -1
drivers/block/drbd/drbd_req.c
···
 		? oldest_submit_jif + dt : now + et;
 	nt = time_before(ent, dt) ? ent : dt;
 out:
-	spin_unlock_irq(&connection->resource->req_lock);
+	spin_unlock_irq(&device->resource->req_lock);
 	mod_timer(&device->request_timer, nt);
 }
+9 -9
drivers/block/drbd/drbd_state.c
···

 enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection)
 {
-	enum drbd_disk_state ds = D_DISKLESS;
+	enum drbd_disk_state disk_state = D_DISKLESS;
 	struct drbd_peer_device *peer_device;
 	int vnr;

 	rcu_read_lock();
 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
 		struct drbd_device *device = peer_device->device;
-		ds = max_t(enum drbd_disk_state, ds, device->state.disk);
+		disk_state = max_t(enum drbd_disk_state, disk_state, device->state.disk);
 	}
 	rcu_read_unlock();

-	return ds;
+	return disk_state;
 }

 enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection)
 {
-	enum drbd_disk_state ds = D_MASK;
+	enum drbd_disk_state disk_state = D_MASK;
 	struct drbd_peer_device *peer_device;
 	int vnr;

 	rcu_read_lock();
 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
 		struct drbd_device *device = peer_device->device;
-		ds = min_t(enum drbd_disk_state, ds, device->state.disk);
+		disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk);
 	}
 	rcu_read_unlock();

-	return ds;
+	return disk_state;
 }

 enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection)
 {
-	enum drbd_disk_state ds = D_DISKLESS;
+	enum drbd_disk_state disk_state = D_DISKLESS;
 	struct drbd_peer_device *peer_device;
 	int vnr;

 	rcu_read_lock();
 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
 		struct drbd_device *device = peer_device->device;
-		ds = max_t(enum drbd_disk_state, ds, device->state.pdsk);
+		disk_state = max_t(enum drbd_disk_state, disk_state, device->state.pdsk);
 	}
 	rcu_read_unlock();

-	return ds;
+	return disk_state;
 }

 enum drbd_conns conn_lowest_conn(struct drbd_connection *connection)
+21 -30
drivers/block/drbd/drbd_worker.c
···
 static int make_resync_request(struct drbd_device *, int);

 /* endio handlers:
- *   drbd_md_io_complete (defined here)
+ *   drbd_md_endio (defined here)
  *   drbd_request_endio (defined here)
  *   drbd_peer_request_endio (defined here)
- *   bm_async_io_complete (defined in drbd_bitmap.c)
+ *   drbd_bm_endio (defined in drbd_bitmap.c)
  *
  * For all these callbacks, note the following:
  * The callbacks will be called in irq context by the IDE drivers,
···
 /* used for synchronous meta data and bitmap IO
  * submitted by drbd_md_sync_page_io()
  */
-void drbd_md_io_complete(struct bio *bio, int error)
+void drbd_md_endio(struct bio *bio, int error)
 {
 	struct drbd_device *device;

···
 	device->resync = NULL;
 	lc_destroy(device->act_log);
 	device->act_log = NULL;
-	__no_warn(local,
-		drbd_free_ldev(device->ldev);
-		device->ldev = NULL;);
+
+	__acquire(local);
+	drbd_free_ldev(device->ldev);
+	device->ldev = NULL;
+	__release(local);
+
 	clear_bit(GOING_DISKLESS, &device->flags);
 	wake_up(&device->misc_wait);
 }
···
 	++(*cb_nr);
 }

-#define WORK_PENDING(work_bit, todo) (todo & (1UL << work_bit))
 static void do_device_work(struct drbd_device *device, const unsigned long todo)
 {
-	if (WORK_PENDING(MD_SYNC, todo))
+	if (test_bit(MD_SYNC, &todo))
 		do_md_sync(device);
-	if (WORK_PENDING(RS_DONE, todo) ||
-	    WORK_PENDING(RS_PROGRESS, todo))
-		update_on_disk_bitmap(device, WORK_PENDING(RS_DONE, todo));
-	if (WORK_PENDING(GO_DISKLESS, todo))
+	if (test_bit(RS_DONE, &todo) ||
+	    test_bit(RS_PROGRESS, &todo))
+		update_on_disk_bitmap(device, test_bit(RS_DONE, &todo));
+	if (test_bit(GO_DISKLESS, &todo))
 		go_diskless(device);
-	if (WORK_PENDING(DESTROY_DISK, todo))
+	if (test_bit(DESTROY_DISK, &todo))
 		drbd_ldev_destroy(device);
-	if (WORK_PENDING(RS_START, todo))
+	if (test_bit(RS_START, &todo))
 		do_start_resync(device);
 }
···
 	return !list_empty(work_list);
 }

-static bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *work_list)
-{
-	spin_lock_irq(&queue->q_lock);
-	if (!list_empty(&queue->q))
-		list_move(queue->q.next, work_list);
-	spin_unlock_irq(&queue->q_lock);
-	return !list_empty(work_list);
-}
-
 static void wait_for_work(struct drbd_connection *connection, struct list_head *work_list)
 {
 	DEFINE_WAIT(wait);
 	struct net_conf *nc;
 	int uncork, cork;

-	dequeue_work_item(&connection->sender_work, work_list);
+	dequeue_work_batch(&connection->sender_work, work_list);
 	if (!list_empty(work_list))
 		return;
···
 		prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE);
 		spin_lock_irq(&connection->resource->req_lock);
 		spin_lock(&connection->sender_work.q_lock);	/* FIXME get rid of this one? */
-		/* dequeue single item only,
-		 * we still use drbd_queue_work_front() in some places */
 		if (!list_empty(&connection->sender_work.q))
 			list_splice_tail_init(&connection->sender_work.q, work_list);
 		spin_unlock(&connection->sender_work.q_lock);	/* FIXME get rid of this one? */
···
 		if (get_t_state(thi) != RUNNING)
 			break;

-		while (!list_empty(&work_list)) {
+		if (!list_empty(&work_list)) {
 			w = list_first_entry(&work_list, struct drbd_work, list);
 			list_del_init(&w->list);
 			update_worker_timing_details(connection, w->cb);
···
 			update_worker_timing_details(connection, do_unqueued_work);
 			do_unqueued_work(connection);
 		}
-		while (!list_empty(&work_list)) {
+		if (!list_empty(&work_list)) {
 			w = list_first_entry(&work_list, struct drbd_work, list);
 			list_del_init(&w->list);
 			update_worker_timing_details(connection, w->cb);
 			w->cb(w, 1);
-		}
-		dequeue_work_batch(&connection->sender_work, &work_list);
+		} else
+			dequeue_work_batch(&connection->sender_work, &work_list);
 	} while (!list_empty(&work_list) || test_bit(DEVICE_WORK_PENDING, &connection->flags));

 	rcu_read_lock();
+1 -11
drivers/block/hd.c
···
 	.getgeo		= hd_getgeo,
 };

-/*
- * This is the hard disk IRQ description. The IRQF_DISABLED in sa_flags
- * means we run the IRQ-handler with interrupts disabled: this is bad for
- * interrupt latency, but anything else has led to problems on some
- * machines.
- *
- * We enable interrupts in some of the routines after making sure it's
- * safe.
- */
-
 static int __init hd_init(void)
 {
 	int drive;
···
 			p->cyl, p->head, p->sect);
 	}

-	if (request_irq(HD_IRQ, hd_interrupt, IRQF_DISABLED, "hd", NULL)) {
+	if (request_irq(HD_IRQ, hd_interrupt, 0, "hd", NULL)) {
 		printk("hd: unable to get IRQ%d for the hard disk driver\n",
 			HD_IRQ);
 		goto out1;
+1
drivers/block/mtip32xx/mtip32xx.c
···

 	/* Set device limits. */
 	set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
+	clear_bit(QUEUE_FLAG_ADD_RANDOM, &dd->queue->queue_flags);
 	blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
 	blk_queue_physical_block_size(dd->queue, 4096);
 	blk_queue_max_hw_sectors(dd->queue, 0xffff);
+1
drivers/block/nbd.c
···
 	 * Tell the block layer that we are not a rotational device
 	 */
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
 	disk->queue->limits.discard_granularity = 512;
 	disk->queue->limits.max_discard_sectors = UINT_MAX;
 	disk->queue->limits.discard_zeroes_data = 0;
+1
drivers/block/null_blk.c
···

 	nullb->q->queuedata = nullb;
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

 	disk = nullb->disk = alloc_disk_node(1, home_node);
 	if (!disk) {
+1
drivers/block/nvme-core.c
···
 	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
 	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, ns->queue);
 	blk_queue_make_request(ns->queue, nvme_make_request);
 	ns->dev = dev;
 	ns->queue->queuedata = ns;
+1 -1
drivers/block/rsxx/core.c
···
 			"Failed to enable MSI\n");
 	}

-	st = request_irq(dev->irq, rsxx_isr, IRQF_DISABLED | IRQF_SHARED,
+	st = request_irq(dev->irq, rsxx_isr, IRQF_SHARED,
 			 DRIVER_NAME, card);
 	if (st) {
 		dev_err(CARD_TO_DEV(card),
+1
drivers/block/rsxx/dev.c
···
 	blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);

 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, card->queue);
 	if (rsxx_discard_supported(card)) {
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue);
 		blk_queue_max_discard_sectors(card->queue,
+1
drivers/block/skd_main.c
···
 	q->limits.discard_zeroes_data = 1;
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);

 	spin_lock_irqsave(&skdev->lock, flags);
 	pr_debug("%s:%s:%d stopping %s queue\n",
+1
drivers/block/xen-blkback/blkback.c
···
 			BUG_ON(new_map_idx >= segs_to_map);
 			if (unlikely(map[new_map_idx].status != 0)) {
 				pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
+				put_free_pages(blkif, &pages[seg_idx]->page, 1);
 				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
 				ret |= 1;
 				goto next;
+3 -3
drivers/block/xen-blkback/xenbus.c
···
 		blkif->blk_rings.common.sring = NULL;
 	}

+	/* Remove all persistent grants and the cache of ballooned pages. */
+	xen_blkbk_free_caches(blkif);
+
 	return 0;
 }

···

 	xen_blkif_disconnect(blkif);
 	xen_vbd_free(&blkif->vbd);
-
-	/* Remove all persistent grants and the cache of ballooned pages. */
-	xen_blkbk_free_caches(blkif);

 	/* Make sure everything is drained before shutting down */
 	BUG_ON(blkif->persistent_gnt_c != 0);
+9 -3
drivers/block/xen-blkfront.c
···
 	notify_remote_via_irq(info->irq);
 }

+static inline bool blkif_request_flush_valid(struct request *req,
+					     struct blkfront_info *info)
+{
+	return ((req->cmd_type != REQ_TYPE_FS) ||
+		((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) &&
+		 !info->flush_op));
+}
+
 /*
  * do_blkif_request
  * read a block; request is in a request queue
···

 		blk_start_request(req);

-		if ((req->cmd_type != REQ_TYPE_FS) ||
-		    ((req->cmd_flags & (REQ_FLUSH | REQ_FUA)) &&
-		    !info->flush_op)) {
+		if (blkif_request_flush_valid(req, info)) {
 			__blk_end_request_all(req, -EIO);
 			continue;
 		}
+1
drivers/block/zram/zram_drv.c
···
 	set_capacity(zram->disk, 0);
 	/* zram devices sort of resembles non-rotational disks */
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
 	/*
 	 * To ensure that we always get PAGE_SIZE aligned
 	 * and n*PAGE_SIZED sized I/O requests.
+3 -1
drivers/ide/ide-disk.c
···
 	printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
 	       queue_max_sectors(q) / 2);

-	if (ata_id_is_ssd(id))
+	if (ata_id_is_ssd(id)) {
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+	}

 	/* calculate drive capacity, and select LBA if possible */
 	ide_disk_get_capacity(drive);
+1
drivers/md/bcache/super.c
···
 	q->limits.logical_block_size	= block_size;
 	q->limits.physical_block_size	= block_size;
 	set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
+	clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags);
 	set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);

 	blk_queue_flush(q, REQ_FLUSH|REQ_FUA);
+1
drivers/mmc/card/queue.c
···

 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);

+1
drivers/mtd/mtd_blkdevs.c
···
 	blk_queue_logical_block_size(new->rq, tr->blksize);

 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq);

 	if (tr->discard) {
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
+1
drivers/s390/block/scm_blk.c
···
 	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
 	blk_queue_max_segments(rq, nr_max_blk);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
 	scm_blk_dev_cluster_setup(bdev);

 	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
+1
drivers/s390/block/xpram.c
···
 		goto out;
 	}
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
 	blk_queue_make_request(xpram_queues[i], xpram_make_request);
 	blk_queue_logical_block_size(xpram_queues[i], 4096);
 }
+3 -1
drivers/scsi/sd.c
···

 	rot = get_unaligned_be16(&buffer[4]);

-	if (rot == 1)
+	if (rot == 1) {
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
+		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, sdkp->disk->queue);
+	}

 out:
 	kfree(buffer);