Merge tag 'ceph-for-4.15-rc8' of git://github.com/ceph/ceph-client

Pull ceph fixes from Ilya Dryomov:
"Two rbd fixes for 4.12 and 4.2 issues respectively, marked for
stable"

* tag 'ceph-for-4.15-rc8' of git://github.com/ceph/ceph-client:
rbd: set max_segments to USHRT_MAX
rbd: reacquire lock should update lock owner client id

+12 -6
drivers/block/rbd.c
··· 3047 mutex_unlock(&rbd_dev->watch_mutex); 3048 } 3049 3050 /* 3051 * lock_rwsem must be held for write 3052 */ 3053 static int rbd_lock(struct rbd_device *rbd_dev) 3054 { 3055 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3056 - struct rbd_client_id cid = rbd_get_cid(rbd_dev); 3057 char cookie[32]; 3058 int ret; 3059 ··· 3076 return ret; 3077 3078 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED; 3079 - strcpy(rbd_dev->lock_cookie, cookie); 3080 - rbd_set_owner_cid(rbd_dev, &cid); 3081 - queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work); 3082 return 0; 3083 } 3084 ··· 3862 queue_delayed_work(rbd_dev->task_wq, 3863 &rbd_dev->lock_dwork, 0); 3864 } else { 3865 - strcpy(rbd_dev->lock_cookie, cookie); 3866 } 3867 } 3868 ··· 4387 segment_size = rbd_obj_bytes(&rbd_dev->header); 4388 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); 4389 q->limits.max_sectors = queue_max_hw_sectors(q); 4390 - blk_queue_max_segments(q, segment_size / SECTOR_SIZE); 4391 blk_queue_max_segment_size(q, segment_size); 4392 blk_queue_io_min(q, segment_size); 4393 blk_queue_io_opt(q, segment_size);
··· 3047 mutex_unlock(&rbd_dev->watch_mutex); 3048 } 3049 3050 + static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie) 3051 + { 3052 + struct rbd_client_id cid = rbd_get_cid(rbd_dev); 3053 + 3054 + strcpy(rbd_dev->lock_cookie, cookie); 3055 + rbd_set_owner_cid(rbd_dev, &cid); 3056 + queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work); 3057 + } 3058 + 3059 /* 3060 * lock_rwsem must be held for write 3061 */ 3062 static int rbd_lock(struct rbd_device *rbd_dev) 3063 { 3064 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 3065 char cookie[32]; 3066 int ret; 3067 ··· 3068 return ret; 3069 3070 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED; 3071 + __rbd_lock(rbd_dev, cookie); 3072 return 0; 3073 } 3074 ··· 3856 queue_delayed_work(rbd_dev->task_wq, 3857 &rbd_dev->lock_dwork, 0); 3858 } else { 3859 + __rbd_lock(rbd_dev, cookie); 3860 } 3861 } 3862 ··· 4381 segment_size = rbd_obj_bytes(&rbd_dev->header); 4382 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); 4383 q->limits.max_sectors = queue_max_hw_sectors(q); 4384 + blk_queue_max_segments(q, USHRT_MAX); 4385 blk_queue_max_segment_size(q, segment_size); 4386 blk_queue_io_min(q, segment_size); 4387 blk_queue_io_opt(q, segment_size);