Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge patch series "Use block pr_ops in LIO"

Mike Christie <michael.christie@oracle.com> says:

The patches in this thread allow us to use the block pr_ops with LIO's
target_core_iblock module to support cluster applications in VMs. They
were built over Linus's tree. They also apply over linux-next and
Martin's tree and Jens's trees.

Currently, to use Windows clustering or Linux clustering (Pacemaker +
ClusterLabs SCSI fence agents) in VMs with LIO and vhost-scsi, you
have to use tcmu or pscsi or use a cluster-aware FS/framework for the
LIO PR file. Setting up a cluster FS/framework is a pain and a waste when
your real backend device is already a distributed device, and pscsi
and tcmu are nice for specific use cases, but iblock gives you the
best performance and allows you to use stacked devices like
dm-multipath. So these patches allow iblock to work like pscsi/tcmu
where they can pass a PR command to the backend module. And then
iblock will use the pr_ops to pass the PR command to the real devices
similar to what we do for unmap today.

The patches are separated in the following groups:

Patch 1 - 2:

- Add block layer callouts for reading reservations and rename reservation
error code.

Patch 3 - 5:

- SCSI support for new callouts.

Patch 6:

- DM support for new callouts.

Patch 7 - 13:

- NVMe support for new callouts.

Patch 14 - 18:

- LIO support for new callouts.

This patchset has been tested with the libiscsi PGR ops and with the
Windows failover cluster verification test. Note that for SCSI
backend devices we need this patchset:

https://lore.kernel.org/linux-scsi/20230123221046.125483-1-michael.christie@oracle.com/T/#m4834a643ffb5bac2529d65d40906d3cfbdd9b1b7

to handle UAs. To reduce the size of this patchset, that work is being
done separately, which also makes reviewing easier. And to make merging
easier, this patchset and the one above do not have any conflicts, so
they can be merged in different trees.

Link: https://lore.kernel.org/r/20230407200551.12660-1-michael.christie@oracle.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

+1073 -245
+1 -1
block/blk-core.c
··· 155 155 [BLK_STS_NOSPC] = { -ENOSPC, "critical space allocation" }, 156 156 [BLK_STS_TRANSPORT] = { -ENOLINK, "recoverable transport" }, 157 157 [BLK_STS_TARGET] = { -EREMOTEIO, "critical target" }, 158 - [BLK_STS_NEXUS] = { -EBADE, "critical nexus" }, 158 + [BLK_STS_RESV_CONFLICT] = { -EBADE, "reservation conflict" }, 159 159 [BLK_STS_MEDIUM] = { -ENODATA, "critical medium" }, 160 160 [BLK_STS_PROTECTION] = { -EILSEQ, "protection" }, 161 161 [BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" },
+69
drivers/md/dm.c
··· 3132 3132 bool fail_early; 3133 3133 int ret; 3134 3134 enum pr_type type; 3135 + struct pr_keys *read_keys; 3136 + struct pr_held_reservation *rsv; 3135 3137 }; 3136 3138 3137 3139 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, ··· 3366 3364 return r; 3367 3365 } 3368 3366 3367 + static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev, 3368 + sector_t start, sector_t len, void *data) 3369 + { 3370 + struct dm_pr *pr = data; 3371 + const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3372 + 3373 + if (!ops || !ops->pr_read_keys) { 3374 + pr->ret = -EOPNOTSUPP; 3375 + return -1; 3376 + } 3377 + 3378 + pr->ret = ops->pr_read_keys(dev->bdev, pr->read_keys); 3379 + if (!pr->ret) 3380 + return -1; 3381 + 3382 + return 0; 3383 + } 3384 + 3385 + static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys) 3386 + { 3387 + struct dm_pr pr = { 3388 + .read_keys = keys, 3389 + }; 3390 + int ret; 3391 + 3392 + ret = dm_call_pr(bdev, __dm_pr_read_keys, &pr); 3393 + if (ret) 3394 + return ret; 3395 + 3396 + return pr.ret; 3397 + } 3398 + 3399 + static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev, 3400 + sector_t start, sector_t len, void *data) 3401 + { 3402 + struct dm_pr *pr = data; 3403 + const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; 3404 + 3405 + if (!ops || !ops->pr_read_reservation) { 3406 + pr->ret = -EOPNOTSUPP; 3407 + return -1; 3408 + } 3409 + 3410 + pr->ret = ops->pr_read_reservation(dev->bdev, pr->rsv); 3411 + if (!pr->ret) 3412 + return -1; 3413 + 3414 + return 0; 3415 + } 3416 + 3417 + static int dm_pr_read_reservation(struct block_device *bdev, 3418 + struct pr_held_reservation *rsv) 3419 + { 3420 + struct dm_pr pr = { 3421 + .rsv = rsv, 3422 + }; 3423 + int ret; 3424 + 3425 + ret = dm_call_pr(bdev, __dm_pr_read_reservation, &pr); 3426 + if (ret) 3427 + return ret; 3428 + 3429 + return pr.ret; 3430 + } 3431 + 3369 3432 static const struct pr_ops 
dm_pr_ops = { 3370 3433 .pr_register = dm_pr_register, 3371 3434 .pr_reserve = dm_pr_reserve, 3372 3435 .pr_release = dm_pr_release, 3373 3436 .pr_preempt = dm_pr_preempt, 3374 3437 .pr_clear = dm_pr_clear, 3438 + .pr_read_keys = dm_pr_read_keys, 3439 + .pr_read_reservation = dm_pr_read_reservation, 3375 3440 }; 3376 3441 3377 3442 static const struct block_device_operations dm_blk_dops = {
+1 -1
drivers/nvme/host/Makefile
··· 10 10 obj-$(CONFIG_NVME_TCP) += nvme-tcp.o 11 11 obj-$(CONFIG_NVME_APPLE) += nvme-apple.o 12 12 13 - nvme-core-y += core.o ioctl.o 13 + nvme-core-y += core.o ioctl.o pr.o 14 14 nvme-core-$(CONFIG_NVME_VERBOSE_ERRORS) += constants.o 15 15 nvme-core-$(CONFIG_TRACING) += trace.o 16 16 nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
+1 -148
drivers/nvme/host/core.c
··· 279 279 case NVME_SC_INVALID_PI: 280 280 return BLK_STS_PROTECTION; 281 281 case NVME_SC_RESERVATION_CONFLICT: 282 - return BLK_STS_NEXUS; 282 + return BLK_STS_RESV_CONFLICT; 283 283 case NVME_SC_HOST_PATH_ERROR: 284 284 return BLK_STS_TRANSPORT; 285 285 case NVME_SC_ZONE_TOO_MANY_ACTIVE: ··· 2060 2060 return nvme_update_ns_info_generic(ns, info); 2061 2061 } 2062 2062 } 2063 - 2064 - static char nvme_pr_type(enum pr_type type) 2065 - { 2066 - switch (type) { 2067 - case PR_WRITE_EXCLUSIVE: 2068 - return 1; 2069 - case PR_EXCLUSIVE_ACCESS: 2070 - return 2; 2071 - case PR_WRITE_EXCLUSIVE_REG_ONLY: 2072 - return 3; 2073 - case PR_EXCLUSIVE_ACCESS_REG_ONLY: 2074 - return 4; 2075 - case PR_WRITE_EXCLUSIVE_ALL_REGS: 2076 - return 5; 2077 - case PR_EXCLUSIVE_ACCESS_ALL_REGS: 2078 - return 6; 2079 - default: 2080 - return 0; 2081 - } 2082 - } 2083 - 2084 - static int nvme_send_ns_head_pr_command(struct block_device *bdev, 2085 - struct nvme_command *c, u8 data[16]) 2086 - { 2087 - struct nvme_ns_head *head = bdev->bd_disk->private_data; 2088 - int srcu_idx = srcu_read_lock(&head->srcu); 2089 - struct nvme_ns *ns = nvme_find_path(head); 2090 - int ret = -EWOULDBLOCK; 2091 - 2092 - if (ns) { 2093 - c->common.nsid = cpu_to_le32(ns->head->ns_id); 2094 - ret = nvme_submit_sync_cmd(ns->queue, c, data, 16); 2095 - } 2096 - srcu_read_unlock(&head->srcu, srcu_idx); 2097 - return ret; 2098 - } 2099 - 2100 - static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c, 2101 - u8 data[16]) 2102 - { 2103 - c->common.nsid = cpu_to_le32(ns->head->ns_id); 2104 - return nvme_submit_sync_cmd(ns->queue, c, data, 16); 2105 - } 2106 - 2107 - static int nvme_sc_to_pr_err(int nvme_sc) 2108 - { 2109 - if (nvme_is_path_error(nvme_sc)) 2110 - return PR_STS_PATH_FAILED; 2111 - 2112 - switch (nvme_sc) { 2113 - case NVME_SC_SUCCESS: 2114 - return PR_STS_SUCCESS; 2115 - case NVME_SC_RESERVATION_CONFLICT: 2116 - return PR_STS_RESERVATION_CONFLICT; 2117 - case 
NVME_SC_ONCS_NOT_SUPPORTED: 2118 - return -EOPNOTSUPP; 2119 - case NVME_SC_BAD_ATTRIBUTES: 2120 - case NVME_SC_INVALID_OPCODE: 2121 - case NVME_SC_INVALID_FIELD: 2122 - case NVME_SC_INVALID_NS: 2123 - return -EINVAL; 2124 - default: 2125 - return PR_STS_IOERR; 2126 - } 2127 - } 2128 - 2129 - static int nvme_pr_command(struct block_device *bdev, u32 cdw10, 2130 - u64 key, u64 sa_key, u8 op) 2131 - { 2132 - struct nvme_command c = { }; 2133 - u8 data[16] = { 0, }; 2134 - int ret; 2135 - 2136 - put_unaligned_le64(key, &data[0]); 2137 - put_unaligned_le64(sa_key, &data[8]); 2138 - 2139 - c.common.opcode = op; 2140 - c.common.cdw10 = cpu_to_le32(cdw10); 2141 - 2142 - if (IS_ENABLED(CONFIG_NVME_MULTIPATH) && 2143 - bdev->bd_disk->fops == &nvme_ns_head_ops) 2144 - ret = nvme_send_ns_head_pr_command(bdev, &c, data); 2145 - else 2146 - ret = nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, 2147 - data); 2148 - if (ret < 0) 2149 - return ret; 2150 - 2151 - return nvme_sc_to_pr_err(ret); 2152 - } 2153 - 2154 - static int nvme_pr_register(struct block_device *bdev, u64 old, 2155 - u64 new, unsigned flags) 2156 - { 2157 - u32 cdw10; 2158 - 2159 - if (flags & ~PR_FL_IGNORE_KEY) 2160 - return -EOPNOTSUPP; 2161 - 2162 - cdw10 = old ? 2 : 0; 2163 - cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0; 2164 - cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ 2165 - return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); 2166 - } 2167 - 2168 - static int nvme_pr_reserve(struct block_device *bdev, u64 key, 2169 - enum pr_type type, unsigned flags) 2170 - { 2171 - u32 cdw10; 2172 - 2173 - if (flags & ~PR_FL_IGNORE_KEY) 2174 - return -EOPNOTSUPP; 2175 - 2176 - cdw10 = nvme_pr_type(type) << 8; 2177 - cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 
1 << 3 : 0); 2178 - return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire); 2179 - } 2180 - 2181 - static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, 2182 - enum pr_type type, bool abort) 2183 - { 2184 - u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1); 2185 - 2186 - return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); 2187 - } 2188 - 2189 - static int nvme_pr_clear(struct block_device *bdev, u64 key) 2190 - { 2191 - u32 cdw10 = 1 | (key ? 0 : 1 << 3); 2192 - 2193 - return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); 2194 - } 2195 - 2196 - static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 2197 - { 2198 - u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3); 2199 - 2200 - return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); 2201 - } 2202 - 2203 - const struct pr_ops nvme_pr_ops = { 2204 - .pr_register = nvme_pr_register, 2205 - .pr_reserve = nvme_pr_reserve, 2206 - .pr_release = nvme_pr_release, 2207 - .pr_preempt = nvme_pr_preempt, 2208 - .pr_clear = nvme_pr_clear, 2209 - }; 2210 2063 2211 2064 #ifdef CONFIG_BLK_SED_OPAL 2212 2065 static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
+2
drivers/nvme/host/nvme.h
··· 19 19 20 20 #include <trace/events/block.h> 21 21 22 + extern const struct pr_ops nvme_pr_ops; 23 + 22 24 extern unsigned int nvme_io_timeout; 23 25 #define NVME_IO_TIMEOUT (nvme_io_timeout * HZ) 24 26
+315
drivers/nvme/host/pr.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (c) 2015 Intel Corporation 4 + * Keith Busch <kbusch@kernel.org> 5 + */ 6 + #include <linux/blkdev.h> 7 + #include <linux/pr.h> 8 + #include <asm/unaligned.h> 9 + 10 + #include "nvme.h" 11 + 12 + static enum nvme_pr_type nvme_pr_type_from_blk(enum pr_type type) 13 + { 14 + switch (type) { 15 + case PR_WRITE_EXCLUSIVE: 16 + return NVME_PR_WRITE_EXCLUSIVE; 17 + case PR_EXCLUSIVE_ACCESS: 18 + return NVME_PR_EXCLUSIVE_ACCESS; 19 + case PR_WRITE_EXCLUSIVE_REG_ONLY: 20 + return NVME_PR_WRITE_EXCLUSIVE_REG_ONLY; 21 + case PR_EXCLUSIVE_ACCESS_REG_ONLY: 22 + return NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY; 23 + case PR_WRITE_EXCLUSIVE_ALL_REGS: 24 + return NVME_PR_WRITE_EXCLUSIVE_ALL_REGS; 25 + case PR_EXCLUSIVE_ACCESS_ALL_REGS: 26 + return NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS; 27 + } 28 + 29 + return 0; 30 + } 31 + 32 + static enum pr_type block_pr_type_from_nvme(enum nvme_pr_type type) 33 + { 34 + switch (type) { 35 + case NVME_PR_WRITE_EXCLUSIVE: 36 + return PR_WRITE_EXCLUSIVE; 37 + case NVME_PR_EXCLUSIVE_ACCESS: 38 + return PR_EXCLUSIVE_ACCESS; 39 + case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY: 40 + return PR_WRITE_EXCLUSIVE_REG_ONLY; 41 + case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY: 42 + return PR_EXCLUSIVE_ACCESS_REG_ONLY; 43 + case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS: 44 + return PR_WRITE_EXCLUSIVE_ALL_REGS; 45 + case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS: 46 + return PR_EXCLUSIVE_ACCESS_ALL_REGS; 47 + } 48 + 49 + return 0; 50 + } 51 + 52 + static int nvme_send_ns_head_pr_command(struct block_device *bdev, 53 + struct nvme_command *c, void *data, unsigned int data_len) 54 + { 55 + struct nvme_ns_head *head = bdev->bd_disk->private_data; 56 + int srcu_idx = srcu_read_lock(&head->srcu); 57 + struct nvme_ns *ns = nvme_find_path(head); 58 + int ret = -EWOULDBLOCK; 59 + 60 + if (ns) { 61 + c->common.nsid = cpu_to_le32(ns->head->ns_id); 62 + ret = nvme_submit_sync_cmd(ns->queue, c, data, data_len); 63 + } 64 + 
srcu_read_unlock(&head->srcu, srcu_idx); 65 + return ret; 66 + } 67 + 68 + static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c, 69 + void *data, unsigned int data_len) 70 + { 71 + c->common.nsid = cpu_to_le32(ns->head->ns_id); 72 + return nvme_submit_sync_cmd(ns->queue, c, data, data_len); 73 + } 74 + 75 + static int nvme_sc_to_pr_err(int nvme_sc) 76 + { 77 + if (nvme_is_path_error(nvme_sc)) 78 + return PR_STS_PATH_FAILED; 79 + 80 + switch (nvme_sc) { 81 + case NVME_SC_SUCCESS: 82 + return PR_STS_SUCCESS; 83 + case NVME_SC_RESERVATION_CONFLICT: 84 + return PR_STS_RESERVATION_CONFLICT; 85 + case NVME_SC_ONCS_NOT_SUPPORTED: 86 + return -EOPNOTSUPP; 87 + case NVME_SC_BAD_ATTRIBUTES: 88 + case NVME_SC_INVALID_OPCODE: 89 + case NVME_SC_INVALID_FIELD: 90 + case NVME_SC_INVALID_NS: 91 + return -EINVAL; 92 + default: 93 + return PR_STS_IOERR; 94 + } 95 + } 96 + 97 + static int nvme_send_pr_command(struct block_device *bdev, 98 + struct nvme_command *c, void *data, unsigned int data_len) 99 + { 100 + if (IS_ENABLED(CONFIG_NVME_MULTIPATH) && 101 + bdev->bd_disk->fops == &nvme_ns_head_ops) 102 + return nvme_send_ns_head_pr_command(bdev, c, data, data_len); 103 + 104 + return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data, 105 + data_len); 106 + } 107 + 108 + static int nvme_pr_command(struct block_device *bdev, u32 cdw10, 109 + u64 key, u64 sa_key, u8 op) 110 + { 111 + struct nvme_command c = { }; 112 + u8 data[16] = { 0, }; 113 + int ret; 114 + 115 + put_unaligned_le64(key, &data[0]); 116 + put_unaligned_le64(sa_key, &data[8]); 117 + 118 + c.common.opcode = op; 119 + c.common.cdw10 = cpu_to_le32(cdw10); 120 + 121 + ret = nvme_send_pr_command(bdev, &c, data, sizeof(data)); 122 + if (ret < 0) 123 + return ret; 124 + 125 + return nvme_sc_to_pr_err(ret); 126 + } 127 + 128 + static int nvme_pr_register(struct block_device *bdev, u64 old, 129 + u64 new, unsigned flags) 130 + { 131 + u32 cdw10; 132 + 133 + if (flags & ~PR_FL_IGNORE_KEY) 134 + 
return -EOPNOTSUPP; 135 + 136 + cdw10 = old ? 2 : 0; 137 + cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0; 138 + cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ 139 + return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); 140 + } 141 + 142 + static int nvme_pr_reserve(struct block_device *bdev, u64 key, 143 + enum pr_type type, unsigned flags) 144 + { 145 + u32 cdw10; 146 + 147 + if (flags & ~PR_FL_IGNORE_KEY) 148 + return -EOPNOTSUPP; 149 + 150 + cdw10 = nvme_pr_type_from_blk(type) << 8; 151 + cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0); 152 + return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire); 153 + } 154 + 155 + static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, 156 + enum pr_type type, bool abort) 157 + { 158 + u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (abort ? 2 : 1); 159 + 160 + return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); 161 + } 162 + 163 + static int nvme_pr_clear(struct block_device *bdev, u64 key) 164 + { 165 + u32 cdw10 = 1 | (key ? 0 : 1 << 3); 166 + 167 + return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); 168 + } 169 + 170 + static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 171 + { 172 + u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (key ? 
0 : 1 << 3); 173 + 174 + return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); 175 + } 176 + 177 + static int nvme_pr_resv_report(struct block_device *bdev, void *data, 178 + u32 data_len, bool *eds) 179 + { 180 + struct nvme_command c = { }; 181 + int ret; 182 + 183 + c.common.opcode = nvme_cmd_resv_report; 184 + c.common.cdw10 = cpu_to_le32(nvme_bytes_to_numd(data_len)); 185 + c.common.cdw11 = cpu_to_le32(NVME_EXTENDED_DATA_STRUCT); 186 + *eds = true; 187 + 188 + retry: 189 + ret = nvme_send_pr_command(bdev, &c, data, data_len); 190 + if (ret == NVME_SC_HOST_ID_INCONSIST && 191 + c.common.cdw11 == cpu_to_le32(NVME_EXTENDED_DATA_STRUCT)) { 192 + c.common.cdw11 = 0; 193 + *eds = false; 194 + goto retry; 195 + } 196 + 197 + if (ret < 0) 198 + return ret; 199 + 200 + return nvme_sc_to_pr_err(ret); 201 + } 202 + 203 + static int nvme_pr_read_keys(struct block_device *bdev, 204 + struct pr_keys *keys_info) 205 + { 206 + u32 rse_len, num_keys = keys_info->num_keys; 207 + struct nvme_reservation_status_ext *rse; 208 + int ret, i; 209 + bool eds; 210 + 211 + /* 212 + * Assume we are using 128-bit host IDs and allocate a buffer large 213 + * enough to get enough keys to fill the return keys buffer. 
214 + */ 215 + rse_len = struct_size(rse, regctl_eds, num_keys); 216 + rse = kzalloc(rse_len, GFP_KERNEL); 217 + if (!rse) 218 + return -ENOMEM; 219 + 220 + ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds); 221 + if (ret) 222 + goto free_rse; 223 + 224 + keys_info->generation = le32_to_cpu(rse->gen); 225 + keys_info->num_keys = get_unaligned_le16(&rse->regctl); 226 + 227 + num_keys = min(num_keys, keys_info->num_keys); 228 + for (i = 0; i < num_keys; i++) { 229 + if (eds) { 230 + keys_info->keys[i] = 231 + le64_to_cpu(rse->regctl_eds[i].rkey); 232 + } else { 233 + struct nvme_reservation_status *rs; 234 + 235 + rs = (struct nvme_reservation_status *)rse; 236 + keys_info->keys[i] = le64_to_cpu(rs->regctl_ds[i].rkey); 237 + } 238 + } 239 + 240 + free_rse: 241 + kfree(rse); 242 + return ret; 243 + } 244 + 245 + static int nvme_pr_read_reservation(struct block_device *bdev, 246 + struct pr_held_reservation *resv) 247 + { 248 + struct nvme_reservation_status_ext tmp_rse, *rse; 249 + int ret, i, num_regs; 250 + u32 rse_len; 251 + bool eds; 252 + 253 + get_num_regs: 254 + /* 255 + * Get the number of registrations so we know how big to allocate 256 + * the response buffer. 
257 + */ 258 + ret = nvme_pr_resv_report(bdev, &tmp_rse, sizeof(tmp_rse), &eds); 259 + if (ret) 260 + return ret; 261 + 262 + num_regs = get_unaligned_le16(&tmp_rse.regctl); 263 + if (!num_regs) { 264 + resv->generation = le32_to_cpu(tmp_rse.gen); 265 + return 0; 266 + } 267 + 268 + rse_len = struct_size(rse, regctl_eds, num_regs); 269 + rse = kzalloc(rse_len, GFP_KERNEL); 270 + if (!rse) 271 + return -ENOMEM; 272 + 273 + ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds); 274 + if (ret) 275 + goto free_rse; 276 + 277 + if (num_regs != get_unaligned_le16(&rse->regctl)) { 278 + kfree(rse); 279 + goto get_num_regs; 280 + } 281 + 282 + resv->generation = le32_to_cpu(rse->gen); 283 + resv->type = block_pr_type_from_nvme(rse->rtype); 284 + 285 + for (i = 0; i < num_regs; i++) { 286 + if (eds) { 287 + if (rse->regctl_eds[i].rcsts) { 288 + resv->key = le64_to_cpu(rse->regctl_eds[i].rkey); 289 + break; 290 + } 291 + } else { 292 + struct nvme_reservation_status *rs; 293 + 294 + rs = (struct nvme_reservation_status *)rse; 295 + if (rs->regctl_ds[i].rcsts) { 296 + resv->key = le64_to_cpu(rs->regctl_ds[i].rkey); 297 + break; 298 + } 299 + } 300 + } 301 + 302 + free_rse: 303 + kfree(rse); 304 + return ret; 305 + } 306 + 307 + const struct pr_ops nvme_pr_ops = { 308 + .pr_register = nvme_pr_register, 309 + .pr_reserve = nvme_pr_reserve, 310 + .pr_release = nvme_pr_release, 311 + .pr_preempt = nvme_pr_preempt, 312 + .pr_clear = nvme_pr_clear, 313 + .pr_read_keys = nvme_pr_read_keys, 314 + .pr_read_reservation = nvme_pr_read_reservation, 315 + };
+6 -1
drivers/s390/block/dasd.c
··· 2737 2737 else if (status == 0) { 2738 2738 switch (cqr->intrc) { 2739 2739 case -EPERM: 2740 - error = BLK_STS_NEXUS; 2740 + /* 2741 + * DASD doesn't implement SCSI/NVMe reservations, but it 2742 + * implements a locking scheme similar to them. We 2743 + * return this error when we no longer have the lock. 2744 + */ 2745 + error = BLK_STS_RESV_CONFLICT; 2741 2746 break; 2742 2747 case -ENOLINK: 2743 2748 error = BLK_STS_TRANSPORT;
+43
drivers/scsi/scsi_common.c
··· 8 8 #include <linux/string.h> 9 9 #include <linux/errno.h> 10 10 #include <linux/module.h> 11 + #include <uapi/linux/pr.h> 11 12 #include <asm/unaligned.h> 12 13 #include <scsi/scsi_common.h> 13 14 ··· 63 62 return scsi_device_types[type]; 64 63 } 65 64 EXPORT_SYMBOL(scsi_device_type); 65 + 66 + enum pr_type scsi_pr_type_to_block(enum scsi_pr_type type) 67 + { 68 + switch (type) { 69 + case SCSI_PR_WRITE_EXCLUSIVE: 70 + return PR_WRITE_EXCLUSIVE; 71 + case SCSI_PR_EXCLUSIVE_ACCESS: 72 + return PR_EXCLUSIVE_ACCESS; 73 + case SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY: 74 + return PR_WRITE_EXCLUSIVE_REG_ONLY; 75 + case SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY: 76 + return PR_EXCLUSIVE_ACCESS_REG_ONLY; 77 + case SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS: 78 + return PR_WRITE_EXCLUSIVE_ALL_REGS; 79 + case SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS: 80 + return PR_EXCLUSIVE_ACCESS_ALL_REGS; 81 + } 82 + 83 + return 0; 84 + } 85 + EXPORT_SYMBOL_GPL(scsi_pr_type_to_block); 86 + 87 + enum scsi_pr_type block_pr_type_to_scsi(enum pr_type type) 88 + { 89 + switch (type) { 90 + case PR_WRITE_EXCLUSIVE: 91 + return SCSI_PR_WRITE_EXCLUSIVE; 92 + case PR_EXCLUSIVE_ACCESS: 93 + return SCSI_PR_EXCLUSIVE_ACCESS; 94 + case PR_WRITE_EXCLUSIVE_REG_ONLY: 95 + return SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY; 96 + case PR_EXCLUSIVE_ACCESS_REG_ONLY: 97 + return SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY; 98 + case PR_WRITE_EXCLUSIVE_ALL_REGS: 99 + return SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS; 100 + case PR_EXCLUSIVE_ACCESS_ALL_REGS: 101 + return SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS; 102 + } 103 + 104 + return 0; 105 + } 106 + EXPORT_SYMBOL_GPL(block_pr_type_to_scsi); 66 107 67 108 /** 68 109 * scsilun_to_int - convert a scsi_lun to an int
+1 -1
drivers/scsi/scsi_lib.c
··· 599 599 case SCSIML_STAT_OK: 600 600 break; 601 601 case SCSIML_STAT_RESV_CONFLICT: 602 - return BLK_STS_NEXUS; 602 + return BLK_STS_RESV_CONFLICT; 603 603 case SCSIML_STAT_NOSPC: 604 604 return BLK_STS_NOSPC; 605 605 case SCSIML_STAT_MED_ERROR:
+102 -28
drivers/scsi/sd.c
··· 67 67 #include <scsi/scsi_host.h> 68 68 #include <scsi/scsi_ioctl.h> 69 69 #include <scsi/scsicam.h> 70 + #include <scsi/scsi_common.h> 70 71 71 72 #include "sd.h" 72 73 #include "scsi_priv.h" ··· 1692 1691 return ret; 1693 1692 } 1694 1693 1695 - static char sd_pr_type(enum pr_type type) 1696 - { 1697 - switch (type) { 1698 - case PR_WRITE_EXCLUSIVE: 1699 - return 0x01; 1700 - case PR_EXCLUSIVE_ACCESS: 1701 - return 0x03; 1702 - case PR_WRITE_EXCLUSIVE_REG_ONLY: 1703 - return 0x05; 1704 - case PR_EXCLUSIVE_ACCESS_REG_ONLY: 1705 - return 0x06; 1706 - case PR_WRITE_EXCLUSIVE_ALL_REGS: 1707 - return 0x07; 1708 - case PR_EXCLUSIVE_ACCESS_ALL_REGS: 1709 - return 0x08; 1710 - default: 1711 - return 0; 1712 - } 1713 - }; 1714 - 1715 1694 static int sd_scsi_to_pr_err(struct scsi_sense_hdr *sshdr, int result) 1716 1695 { 1717 1696 switch (host_byte(result)) { ··· 1722 1741 } 1723 1742 } 1724 1743 1725 - static int sd_pr_command(struct block_device *bdev, u8 sa, 1726 - u64 key, u64 sa_key, u8 type, u8 flags) 1744 + static int sd_pr_in_command(struct block_device *bdev, u8 sa, 1745 + unsigned char *data, int data_len) 1746 + { 1747 + struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); 1748 + struct scsi_device *sdev = sdkp->device; 1749 + struct scsi_sense_hdr sshdr; 1750 + u8 cmd[10] = { PERSISTENT_RESERVE_IN, sa }; 1751 + const struct scsi_exec_args exec_args = { 1752 + .sshdr = &sshdr, 1753 + }; 1754 + int result; 1755 + 1756 + put_unaligned_be16(data_len, &cmd[7]); 1757 + 1758 + result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, data, data_len, 1759 + SD_TIMEOUT, sdkp->max_retries, &exec_args); 1760 + if (scsi_status_is_check_condition(result) && 1761 + scsi_sense_valid(&sshdr)) { 1762 + sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result); 1763 + scsi_print_sense_hdr(sdev, NULL, &sshdr); 1764 + } 1765 + 1766 + if (result <= 0) 1767 + return result; 1768 + 1769 + return sd_scsi_to_pr_err(&sshdr, result); 1770 + } 1771 + 1772 + static int 
sd_pr_read_keys(struct block_device *bdev, struct pr_keys *keys_info) 1773 + { 1774 + int result, i, data_offset, num_copy_keys; 1775 + u32 num_keys = keys_info->num_keys; 1776 + int data_len = num_keys * 8 + 8; 1777 + u8 *data; 1778 + 1779 + data = kzalloc(data_len, GFP_KERNEL); 1780 + if (!data) 1781 + return -ENOMEM; 1782 + 1783 + result = sd_pr_in_command(bdev, READ_KEYS, data, data_len); 1784 + if (result) 1785 + goto free_data; 1786 + 1787 + keys_info->generation = get_unaligned_be32(&data[0]); 1788 + keys_info->num_keys = get_unaligned_be32(&data[4]) / 8; 1789 + 1790 + data_offset = 8; 1791 + num_copy_keys = min(num_keys, keys_info->num_keys); 1792 + 1793 + for (i = 0; i < num_copy_keys; i++) { 1794 + keys_info->keys[i] = get_unaligned_be64(&data[data_offset]); 1795 + data_offset += 8; 1796 + } 1797 + 1798 + free_data: 1799 + kfree(data); 1800 + return result; 1801 + } 1802 + 1803 + static int sd_pr_read_reservation(struct block_device *bdev, 1804 + struct pr_held_reservation *rsv) 1805 + { 1806 + struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); 1807 + struct scsi_device *sdev = sdkp->device; 1808 + u8 data[24] = { }; 1809 + int result, len; 1810 + 1811 + result = sd_pr_in_command(bdev, READ_RESERVATION, data, sizeof(data)); 1812 + if (result) 1813 + return result; 1814 + 1815 + len = get_unaligned_be32(&data[4]); 1816 + if (!len) 1817 + return 0; 1818 + 1819 + /* Make sure we have at least the key and type */ 1820 + if (len < 14) { 1821 + sdev_printk(KERN_INFO, sdev, 1822 + "READ RESERVATION failed due to short return buffer of %d bytes\n", 1823 + len); 1824 + return -EINVAL; 1825 + } 1826 + 1827 + rsv->generation = get_unaligned_be32(&data[0]); 1828 + rsv->key = get_unaligned_be64(&data[8]); 1829 + rsv->type = scsi_pr_type_to_block(data[21] & 0x0f); 1830 + return 0; 1831 + } 1832 + 1833 + static int sd_pr_out_command(struct block_device *bdev, u8 sa, u64 key, 1834 + u64 sa_key, enum scsi_pr_type type, u8 flags) 1727 1835 { 1728 1836 struct scsi_disk 
*sdkp = scsi_disk(bdev->bd_disk); 1729 1837 struct scsi_device *sdev = sdkp->device; ··· 1854 1784 { 1855 1785 if (flags & ~PR_FL_IGNORE_KEY) 1856 1786 return -EOPNOTSUPP; 1857 - return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00, 1787 + return sd_pr_out_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00, 1858 1788 old_key, new_key, 0, 1859 1789 (1 << 0) /* APTPL */); 1860 1790 } ··· 1864 1794 { 1865 1795 if (flags) 1866 1796 return -EOPNOTSUPP; 1867 - return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0); 1797 + return sd_pr_out_command(bdev, 0x01, key, 0, 1798 + block_pr_type_to_scsi(type), 0); 1868 1799 } 1869 1800 1870 1801 static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 1871 1802 { 1872 - return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0); 1803 + return sd_pr_out_command(bdev, 0x02, key, 0, 1804 + block_pr_type_to_scsi(type), 0); 1873 1805 } 1874 1806 1875 1807 static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, 1876 1808 enum pr_type type, bool abort) 1877 1809 { 1878 - return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key, 1879 - sd_pr_type(type), 0); 1810 + return sd_pr_out_command(bdev, abort ? 0x05 : 0x04, old_key, new_key, 1811 + block_pr_type_to_scsi(type), 0); 1880 1812 } 1881 1813 1882 1814 static int sd_pr_clear(struct block_device *bdev, u64 key) 1883 1815 { 1884 - return sd_pr_command(bdev, 0x03, key, 0, 0, 0); 1816 + return sd_pr_out_command(bdev, 0x03, key, 0, 0, 0); 1885 1817 } 1886 1818 1887 1819 static const struct pr_ops sd_pr_ops = { ··· 1892 1820 .pr_release = sd_pr_release, 1893 1821 .pr_preempt = sd_pr_preempt, 1894 1822 .pr_clear = sd_pr_clear, 1823 + .pr_read_keys = sd_pr_read_keys, 1824 + .pr_read_reservation = sd_pr_read_reservation, 1895 1825 }; 1896 1826 1897 1827 static void scsi_disk_free_disk(struct gendisk *disk)
+2 -2
drivers/target/target_core_file.c
··· 896 896 fd_dev->fd_prot_file = NULL; 897 897 } 898 898 899 - static struct sbc_ops fd_sbc_ops = { 899 + static struct exec_cmd_ops fd_exec_cmd_ops = { 900 900 .execute_rw = fd_execute_rw, 901 901 .execute_sync_cache = fd_execute_sync_cache, 902 902 .execute_write_same = fd_execute_write_same, ··· 906 906 static sense_reason_t 907 907 fd_parse_cdb(struct se_cmd *cmd) 908 908 { 909 - return sbc_parse_cdb(cmd, &fd_sbc_ops); 909 + return sbc_parse_cdb(cmd, &fd_exec_cmd_ops); 910 910 } 911 911 912 912 static const struct target_backend_ops fileio_ops = {
+268 -7
drivers/target/target_core_iblock.c
··· 23 23 #include <linux/file.h> 24 24 #include <linux/module.h> 25 25 #include <linux/scatterlist.h> 26 + #include <linux/pr.h> 26 27 #include <scsi/scsi_proto.h> 28 + #include <scsi/scsi_common.h> 27 29 #include <asm/unaligned.h> 28 30 29 31 #include <target/target_core_base.h> 30 32 #include <target/target_core_backend.h> 31 33 32 34 #include "target_core_iblock.h" 35 + #include "target_core_pr.h" 33 36 34 37 #define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */ 35 38 #define IBLOCK_BIO_POOL_SIZE 128 ··· 313 310 return blocks_long; 314 311 } 315 312 316 - static void iblock_complete_cmd(struct se_cmd *cmd) 313 + static void iblock_complete_cmd(struct se_cmd *cmd, blk_status_t blk_status) 317 314 { 318 315 struct iblock_req *ibr = cmd->priv; 319 316 u8 status; ··· 321 318 if (!refcount_dec_and_test(&ibr->pending)) 322 319 return; 323 320 324 - if (atomic_read(&ibr->ib_bio_err_cnt)) 321 + if (blk_status == BLK_STS_RESV_CONFLICT) 322 + status = SAM_STAT_RESERVATION_CONFLICT; 323 + else if (atomic_read(&ibr->ib_bio_err_cnt)) 325 324 status = SAM_STAT_CHECK_CONDITION; 326 325 else 327 326 status = SAM_STAT_GOOD; ··· 336 331 { 337 332 struct se_cmd *cmd = bio->bi_private; 338 333 struct iblock_req *ibr = cmd->priv; 334 + blk_status_t blk_status = bio->bi_status; 339 335 340 336 if (bio->bi_status) { 341 337 pr_err("bio error: %p, err: %d\n", bio, bio->bi_status); ··· 349 343 350 344 bio_put(bio); 351 345 352 - iblock_complete_cmd(cmd); 346 + iblock_complete_cmd(cmd, blk_status); 353 347 } 354 348 355 349 static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, ··· 765 759 766 760 if (!sgl_nents) { 767 761 refcount_set(&ibr->pending, 1); 768 - iblock_complete_cmd(cmd); 762 + iblock_complete_cmd(cmd, BLK_STS_OK); 769 763 return 0; 770 764 } 771 765 ··· 823 817 } 824 818 825 819 iblock_submit_bios(&list); 826 - iblock_complete_cmd(cmd); 820 + iblock_complete_cmd(cmd, BLK_STS_OK); 827 821 return 0; 828 822 829 823 
fail_put_bios: ··· 833 827 kfree(ibr); 834 828 fail: 835 829 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 830 + } 831 + 832 + static sense_reason_t iblock_execute_pr_out(struct se_cmd *cmd, u8 sa, u64 key, 833 + u64 sa_key, u8 type, bool aptpl) 834 + { 835 + struct se_device *dev = cmd->se_dev; 836 + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 837 + struct block_device *bdev = ib_dev->ibd_bd; 838 + const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; 839 + int ret; 840 + 841 + if (!ops) { 842 + pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n"); 843 + return TCM_UNSUPPORTED_SCSI_OPCODE; 844 + } 845 + 846 + switch (sa) { 847 + case PRO_REGISTER: 848 + case PRO_REGISTER_AND_IGNORE_EXISTING_KEY: 849 + if (!ops->pr_register) { 850 + pr_err("block device does not support pr_register.\n"); 851 + return TCM_UNSUPPORTED_SCSI_OPCODE; 852 + } 853 + 854 + /* The block layer pr ops always enables aptpl */ 855 + if (!aptpl) 856 + pr_info("APTPL not set by initiator, but will be used.\n"); 857 + 858 + ret = ops->pr_register(bdev, key, sa_key, 859 + sa == PRO_REGISTER ? 0 : PR_FL_IGNORE_KEY); 860 + break; 861 + case PRO_RESERVE: 862 + if (!ops->pr_reserve) { 863 + pr_err("block_device does not support pr_reserve.\n"); 864 + return TCM_UNSUPPORTED_SCSI_OPCODE; 865 + } 866 + 867 + ret = ops->pr_reserve(bdev, key, scsi_pr_type_to_block(type), 0); 868 + break; 869 + case PRO_CLEAR: 870 + if (!ops->pr_clear) { 871 + pr_err("block_device does not support pr_clear.\n"); 872 + return TCM_UNSUPPORTED_SCSI_OPCODE; 873 + } 874 + 875 + ret = ops->pr_clear(bdev, key); 876 + break; 877 + case PRO_PREEMPT: 878 + case PRO_PREEMPT_AND_ABORT: 879 + if (!ops->pr_clear) { 880 + pr_err("block_device does not support pr_preempt.\n"); 881 + return TCM_UNSUPPORTED_SCSI_OPCODE; 882 + } 883 + 884 + ret = ops->pr_preempt(bdev, key, sa_key, 885 + scsi_pr_type_to_block(type), 886 + sa == PRO_PREEMPT ? 
false : true); 887 + break; 888 + case PRO_RELEASE: 889 + if (!ops->pr_release) { 890 + pr_err("block_device does not support pr_release.\n"); 891 + return TCM_UNSUPPORTED_SCSI_OPCODE; 892 + } 893 + 894 + ret = ops->pr_release(bdev, key, scsi_pr_type_to_block(type)); 895 + break; 896 + default: 897 + pr_err("Unknown PERSISTENT_RESERVE_OUT SA: 0x%02x\n", sa); 898 + return TCM_UNSUPPORTED_SCSI_OPCODE; 899 + } 900 + 901 + if (!ret) 902 + return TCM_NO_SENSE; 903 + else if (ret == PR_STS_RESERVATION_CONFLICT) 904 + return TCM_RESERVATION_CONFLICT; 905 + else 906 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 907 + } 908 + 909 + static void iblock_pr_report_caps(unsigned char *param_data) 910 + { 911 + u16 len = 8; 912 + 913 + put_unaligned_be16(len, &param_data[0]); 914 + /* 915 + * When using the pr_ops passthrough method we only support exporting 916 + * the device through one target port because from the backend module 917 + * level we can't see the target port config. As a result we only 918 + * support registration directly from the I_T nexus the cmd is sent 919 + * through and do not set ATP_C here. 920 + * 921 + * The block layer pr_ops do not support passing in initiators so 922 + * we don't set SIP_C here. 923 + */ 924 + /* PTPL_C: Persistence across Target Power Loss bit */ 925 + param_data[2] |= 0x01; 926 + /* 927 + * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so 928 + * set the TMV: Task Mask Valid bit. 929 + */ 930 + param_data[3] |= 0x80; 931 + /* 932 + * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166 933 + */ 934 + param_data[3] |= 0x10; /* ALLOW COMMANDs field 001b */ 935 + /* 936 + * PTPL_A: Persistence across Target Power Loss Active bit. The block 937 + * layer pr ops always enables this so report it active. 938 + */ 939 + param_data[3] |= 0x01; 940 + /* 941 + * Setup the PERSISTENT RESERVATION TYPE MASK from Table 212 spc4r37. 
942 + */ 943 + param_data[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ 944 + param_data[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */ 945 + param_data[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */ 946 + param_data[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */ 947 + param_data[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */ 948 + param_data[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ 949 + } 950 + 951 + static sense_reason_t iblock_pr_read_keys(struct se_cmd *cmd, 952 + unsigned char *param_data) 953 + { 954 + struct se_device *dev = cmd->se_dev; 955 + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 956 + struct block_device *bdev = ib_dev->ibd_bd; 957 + const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; 958 + int i, len, paths, data_offset; 959 + struct pr_keys *keys; 960 + sense_reason_t ret; 961 + 962 + if (!ops) { 963 + pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n"); 964 + return TCM_UNSUPPORTED_SCSI_OPCODE; 965 + } 966 + 967 + if (!ops->pr_read_keys) { 968 + pr_err("Block device does not support read_keys.\n"); 969 + return TCM_UNSUPPORTED_SCSI_OPCODE; 970 + } 971 + 972 + /* 973 + * We don't know what's under us, but dm-multipath will register every 974 + * path with the same key, so start off with enough space for 16 paths. 975 + * which is not a lot of memory and should normally be enough. 
976 + */ 977 + paths = 16; 978 + retry: 979 + len = 8 * paths; 980 + keys = kzalloc(sizeof(*keys) + len, GFP_KERNEL); 981 + if (!keys) 982 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 983 + 984 + keys->num_keys = paths; 985 + if (!ops->pr_read_keys(bdev, keys)) { 986 + if (keys->num_keys > paths) { 987 + kfree(keys); 988 + paths *= 2; 989 + goto retry; 990 + } 991 + } else { 992 + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 993 + goto free_keys; 994 + } 995 + 996 + ret = TCM_NO_SENSE; 997 + 998 + put_unaligned_be32(keys->generation, &param_data[0]); 999 + if (!keys->num_keys) { 1000 + put_unaligned_be32(0, &param_data[4]); 1001 + goto free_keys; 1002 + } 1003 + 1004 + put_unaligned_be32(8 * keys->num_keys, &param_data[4]); 1005 + 1006 + data_offset = 8; 1007 + for (i = 0; i < keys->num_keys; i++) { 1008 + if (data_offset + 8 > cmd->data_length) 1009 + break; 1010 + 1011 + put_unaligned_be64(keys->keys[i], &param_data[data_offset]); 1012 + data_offset += 8; 1013 + } 1014 + 1015 + free_keys: 1016 + kfree(keys); 1017 + return ret; 1018 + } 1019 + 1020 + static sense_reason_t iblock_pr_read_reservation(struct se_cmd *cmd, 1021 + unsigned char *param_data) 1022 + { 1023 + struct se_device *dev = cmd->se_dev; 1024 + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 1025 + struct block_device *bdev = ib_dev->ibd_bd; 1026 + const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; 1027 + struct pr_held_reservation rsv = { }; 1028 + 1029 + if (!ops) { 1030 + pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n"); 1031 + return TCM_UNSUPPORTED_SCSI_OPCODE; 1032 + } 1033 + 1034 + if (!ops->pr_read_reservation) { 1035 + pr_err("Block device does not support read_reservation.\n"); 1036 + return TCM_UNSUPPORTED_SCSI_OPCODE; 1037 + } 1038 + 1039 + if (ops->pr_read_reservation(bdev, &rsv)) 1040 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1041 + 1042 + put_unaligned_be32(rsv.generation, &param_data[0]); 1043 + if 
(!block_pr_type_to_scsi(rsv.type)) { 1044 + put_unaligned_be32(0, &param_data[4]); 1045 + return TCM_NO_SENSE; 1046 + } 1047 + 1048 + put_unaligned_be32(16, &param_data[4]); 1049 + 1050 + if (cmd->data_length < 16) 1051 + return TCM_NO_SENSE; 1052 + put_unaligned_be64(rsv.key, &param_data[8]); 1053 + 1054 + if (cmd->data_length < 22) 1055 + return TCM_NO_SENSE; 1056 + param_data[21] = block_pr_type_to_scsi(rsv.type); 1057 + 1058 + return TCM_NO_SENSE; 1059 + } 1060 + 1061 + static sense_reason_t iblock_execute_pr_in(struct se_cmd *cmd, u8 sa, 1062 + unsigned char *param_data) 1063 + { 1064 + sense_reason_t ret = TCM_NO_SENSE; 1065 + 1066 + switch (sa) { 1067 + case PRI_REPORT_CAPABILITIES: 1068 + iblock_pr_report_caps(param_data); 1069 + break; 1070 + case PRI_READ_KEYS: 1071 + ret = iblock_pr_read_keys(cmd, param_data); 1072 + break; 1073 + case PRI_READ_RESERVATION: 1074 + ret = iblock_pr_read_reservation(cmd, param_data); 1075 + break; 1076 + default: 1077 + pr_err("Unknown PERSISTENT_RESERVE_IN SA: 0x%02x\n", sa); 1078 + return TCM_UNSUPPORTED_SCSI_OPCODE; 1079 + } 1080 + 1081 + return ret; 836 1082 } 837 1083 838 1084 static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev) ··· 1127 869 return bdev_io_opt(bd); 1128 870 } 1129 871 1130 - static struct sbc_ops iblock_sbc_ops = { 872 + static struct exec_cmd_ops iblock_exec_cmd_ops = { 1131 873 .execute_rw = iblock_execute_rw, 1132 874 .execute_sync_cache = iblock_execute_sync_cache, 1133 875 .execute_write_same = iblock_execute_write_same, 1134 876 .execute_unmap = iblock_execute_unmap, 877 + .execute_pr_out = iblock_execute_pr_out, 878 + .execute_pr_in = iblock_execute_pr_in, 1135 879 }; 1136 880 1137 881 static sense_reason_t 1138 882 iblock_parse_cdb(struct se_cmd *cmd) 1139 883 { 1140 - return sbc_parse_cdb(cmd, &iblock_sbc_ops); 884 + return sbc_parse_cdb(cmd, &iblock_exec_cmd_ops); 1141 885 } 1142 886 1143 887 static bool iblock_get_write_cache(struct se_device *dev) ··· 1150 890 static 
const struct target_backend_ops iblock_ops = { 1151 891 .name = "iblock", 1152 892 .inquiry_prod = "IBLOCK", 893 + .transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR, 1153 894 .inquiry_rev = IBLOCK_VERSION, 1154 895 .owner = THIS_MODULE, 1155 896 .attach_hba = iblock_attach_hba,
+78 -1
drivers/target/target_core_pr.c
··· 3538 3538 return ret; 3539 3539 } 3540 3540 3541 + static sense_reason_t 3542 + target_try_pr_out_pt(struct se_cmd *cmd, u8 sa, u64 res_key, u64 sa_res_key, 3543 + u8 type, bool aptpl, bool all_tg_pt, bool spec_i_pt) 3544 + { 3545 + struct exec_cmd_ops *ops = cmd->protocol_data; 3546 + 3547 + if (!cmd->se_sess || !cmd->se_lun) { 3548 + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); 3549 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3550 + } 3551 + 3552 + if (!ops->execute_pr_out) { 3553 + pr_err("SPC-3 PR: Device has been configured for PR passthrough but it's not supported by the backend.\n"); 3554 + return TCM_UNSUPPORTED_SCSI_OPCODE; 3555 + } 3556 + 3557 + switch (sa) { 3558 + case PRO_REGISTER_AND_MOVE: 3559 + case PRO_REPLACE_LOST_RESERVATION: 3560 + pr_err("SPC-3 PR: PRO_REGISTER_AND_MOVE and PRO_REPLACE_LOST_RESERVATION are not supported by PR passthrough.\n"); 3561 + return TCM_UNSUPPORTED_SCSI_OPCODE; 3562 + } 3563 + 3564 + if (spec_i_pt || all_tg_pt) { 3565 + pr_err("SPC-3 PR: SPEC_I_PT and ALL_TG_PT are not supported by PR passthrough.\n"); 3566 + return TCM_UNSUPPORTED_SCSI_OPCODE; 3567 + } 3568 + 3569 + return ops->execute_pr_out(cmd, sa, res_key, sa_res_key, type, aptpl); 3570 + } 3571 + 3541 3572 /* 3542 3573 * See spc4r17 section 6.14 Table 170 3543 3574 */ ··· 3672 3641 return TCM_PARAMETER_LIST_LENGTH_ERROR; 3673 3642 } 3674 3643 3644 + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) { 3645 + ret = target_try_pr_out_pt(cmd, sa, res_key, sa_res_key, type, 3646 + aptpl, all_tg_pt, spec_i_pt); 3647 + goto done; 3648 + } 3649 + 3675 3650 /* 3676 3651 * (core_scsi3_emulate_pro_* function parameters 3677 3652 * are defined by spc4r17 Table 174: ··· 3719 3682 return TCM_INVALID_CDB_FIELD; 3720 3683 } 3721 3684 3685 + done: 3722 3686 if (!ret) 3723 3687 target_complete_cmd(cmd, SAM_STAT_GOOD); 3724 3688 return ret; ··· 4077 4039 return 0; 4078 4040 } 4079 4041 4042 + static sense_reason_t target_try_pr_in_pt(struct se_cmd *cmd, 
u8 sa) 4043 + { 4044 + struct exec_cmd_ops *ops = cmd->protocol_data; 4045 + unsigned char *buf; 4046 + sense_reason_t ret; 4047 + 4048 + if (cmd->data_length < 8) { 4049 + pr_err("PRIN SA SCSI Data Length: %u too small\n", 4050 + cmd->data_length); 4051 + return TCM_INVALID_CDB_FIELD; 4052 + } 4053 + 4054 + if (!ops->execute_pr_in) { 4055 + pr_err("SPC-3 PR: Device has been configured for PR passthrough but it's not supported by the backend.\n"); 4056 + return TCM_UNSUPPORTED_SCSI_OPCODE; 4057 + } 4058 + 4059 + if (sa == PRI_READ_FULL_STATUS) { 4060 + pr_err("SPC-3 PR: PRI_READ_FULL_STATUS is not supported by PR passthrough.\n"); 4061 + return TCM_UNSUPPORTED_SCSI_OPCODE; 4062 + } 4063 + 4064 + buf = transport_kmap_data_sg(cmd); 4065 + if (!buf) 4066 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 4067 + 4068 + ret = ops->execute_pr_in(cmd, sa, buf); 4069 + 4070 + transport_kunmap_data_sg(cmd); 4071 + return ret; 4072 + } 4073 + 4080 4074 sense_reason_t 4081 4075 target_scsi3_emulate_pr_in(struct se_cmd *cmd) 4082 4076 { 4077 + u8 sa = cmd->t_task_cdb[1] & 0x1f; 4083 4078 sense_reason_t ret; 4084 4079 4085 4080 /* ··· 4131 4060 return TCM_RESERVATION_CONFLICT; 4132 4061 } 4133 4062 4134 - switch (cmd->t_task_cdb[1] & 0x1f) { 4063 + if (cmd->se_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) { 4064 + ret = target_try_pr_in_pt(cmd, sa); 4065 + goto done; 4066 + } 4067 + 4068 + switch (sa) { 4135 4069 case PRI_READ_KEYS: 4136 4070 ret = core_scsi3_pri_read_keys(cmd); 4137 4071 break; ··· 4155 4079 return TCM_INVALID_CDB_FIELD; 4156 4080 } 4157 4081 4082 + done: 4158 4083 if (!ret) 4159 4084 target_complete_cmd(cmd, SAM_STAT_GOOD); 4160 4085 return ret;
+2 -2
drivers/target/target_core_rd.c
··· 643 643 rd_release_prot_space(rd_dev); 644 644 } 645 645 646 - static struct sbc_ops rd_sbc_ops = { 646 + static struct exec_cmd_ops rd_exec_cmd_ops = { 647 647 .execute_rw = rd_execute_rw, 648 648 }; 649 649 650 650 static sense_reason_t 651 651 rd_parse_cdb(struct se_cmd *cmd) 652 652 { 653 - return sbc_parse_cdb(cmd, &rd_sbc_ops); 653 + return sbc_parse_cdb(cmd, &rd_exec_cmd_ops); 654 654 } 655 655 656 656 static const struct target_backend_ops rd_mcp_ops = {
+7 -6
drivers/target/target_core_sbc.c
··· 192 192 static sense_reason_t 193 193 sbc_execute_write_same_unmap(struct se_cmd *cmd) 194 194 { 195 - struct sbc_ops *ops = cmd->protocol_data; 195 + struct exec_cmd_ops *ops = cmd->protocol_data; 196 196 sector_t nolb = sbc_get_write_same_sectors(cmd); 197 197 sense_reason_t ret; 198 198 ··· 271 271 } 272 272 273 273 static sense_reason_t 274 - sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops) 274 + sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, 275 + struct exec_cmd_ops *ops) 275 276 { 276 277 struct se_device *dev = cmd->se_dev; 277 278 sector_t end_lba = dev->transport->get_blocks(dev) + 1; ··· 341 340 static sense_reason_t 342 341 sbc_execute_rw(struct se_cmd *cmd) 343 342 { 344 - struct sbc_ops *ops = cmd->protocol_data; 343 + struct exec_cmd_ops *ops = cmd->protocol_data; 345 344 346 345 return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents, 347 346 cmd->data_direction); ··· 567 566 static sense_reason_t 568 567 sbc_compare_and_write(struct se_cmd *cmd) 569 568 { 570 - struct sbc_ops *ops = cmd->protocol_data; 569 + struct exec_cmd_ops *ops = cmd->protocol_data; 571 570 struct se_device *dev = cmd->se_dev; 572 571 sense_reason_t ret; 573 572 int rc; ··· 765 764 } 766 765 767 766 sense_reason_t 768 - sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) 767 + sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops) 769 768 { 770 769 struct se_device *dev = cmd->se_dev; 771 770 unsigned char *cdb = cmd->t_task_cdb; ··· 1077 1076 static sense_reason_t 1078 1077 sbc_execute_unmap(struct se_cmd *cmd) 1079 1078 { 1080 - struct sbc_ops *ops = cmd->protocol_data; 1079 + struct exec_cmd_ops *ops = cmd->protocol_data; 1081 1080 struct se_device *dev = cmd->se_dev; 1082 1081 unsigned char *buf, *ptr = NULL; 1083 1082 sector_t lba;
+79 -34
drivers/target/target_core_spc.c
··· 1424 1424 .update_usage_bits = set_dpofua_usage_bits, 1425 1425 }; 1426 1426 1427 - static bool tcm_is_ws_enabled(struct se_cmd *cmd) 1427 + static bool tcm_is_ws_enabled(struct target_opcode_descriptor *descr, 1428 + struct se_cmd *cmd) 1428 1429 { 1429 - struct sbc_ops *ops = cmd->protocol_data; 1430 + struct exec_cmd_ops *ops = cmd->protocol_data; 1430 1431 struct se_device *dev = cmd->se_dev; 1431 1432 1432 1433 return (dev->dev_attrib.emulate_tpws && !!ops->execute_unmap) || ··· 1452 1451 .update_usage_bits = set_dpofua_usage_bits32, 1453 1452 }; 1454 1453 1455 - static bool tcm_is_caw_enabled(struct se_cmd *cmd) 1454 + static bool tcm_is_caw_enabled(struct target_opcode_descriptor *descr, 1455 + struct se_cmd *cmd) 1456 1456 { 1457 1457 struct se_device *dev = cmd->se_dev; 1458 1458 ··· 1493 1491 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, 1494 1492 }; 1495 1493 1496 - static bool tcm_is_rep_ref_enabled(struct se_cmd *cmd) 1494 + static bool tcm_is_rep_ref_enabled(struct target_opcode_descriptor *descr, 1495 + struct se_cmd *cmd) 1497 1496 { 1498 1497 struct se_device *dev = cmd->se_dev; 1499 1498 ··· 1505 1502 } 1506 1503 spin_unlock(&dev->t10_alua.lba_map_lock); 1507 1504 return true; 1508 - 1509 1505 } 1510 1506 1511 1507 static struct target_opcode_descriptor tcm_opcode_read_report_refferals = { ··· 1539 1537 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, 1540 1538 }; 1541 1539 1542 - static bool tcm_is_unmap_enabled(struct se_cmd *cmd) 1540 + static bool tcm_is_unmap_enabled(struct target_opcode_descriptor *descr, 1541 + struct se_cmd *cmd) 1543 1542 { 1544 - struct sbc_ops *ops = cmd->protocol_data; 1543 + struct exec_cmd_ops *ops = cmd->protocol_data; 1545 1544 struct se_device *dev = cmd->se_dev; 1546 1545 1547 1546 return ops->execute_unmap && dev->dev_attrib.emulate_tpu; ··· 1662 1659 0xff, SCSI_CONTROL_MASK}, 1663 1660 }; 1664 1661 1665 - static bool tcm_is_pr_enabled(struct se_cmd *cmd) 1662 + static bool tcm_is_pr_enabled(struct 
target_opcode_descriptor *descr, 1663 + struct se_cmd *cmd) 1666 1664 { 1667 1665 struct se_device *dev = cmd->se_dev; 1668 1666 1669 - return dev->dev_attrib.emulate_pr; 1667 + if (!dev->dev_attrib.emulate_pr) 1668 + return false; 1669 + 1670 + if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)) 1671 + return true; 1672 + 1673 + switch (descr->opcode) { 1674 + case RESERVE: 1675 + case RESERVE_10: 1676 + case RELEASE: 1677 + case RELEASE_10: 1678 + /* 1679 + * The pr_ops which are used by the backend modules don't 1680 + * support these commands. 1681 + */ 1682 + return false; 1683 + case PERSISTENT_RESERVE_OUT: 1684 + switch (descr->service_action) { 1685 + case PRO_REGISTER_AND_MOVE: 1686 + case PRO_REPLACE_LOST_RESERVATION: 1687 + /* 1688 + * The backend modules don't have access to ports and 1689 + * I_T nexuses so they can't handle these type of 1690 + * requests. 1691 + */ 1692 + return false; 1693 + } 1694 + break; 1695 + case PERSISTENT_RESERVE_IN: 1696 + if (descr->service_action == PRI_READ_FULL_STATUS) 1697 + return false; 1698 + break; 1699 + } 1700 + 1701 + return true; 1670 1702 } 1671 1703 1672 1704 static struct target_opcode_descriptor tcm_opcode_pri_read_caps = { ··· 1826 1788 .enabled = tcm_is_pr_enabled, 1827 1789 }; 1828 1790 1829 - static bool tcm_is_scsi2_reservations_enabled(struct se_cmd *cmd) 1830 - { 1831 - struct se_device *dev = cmd->se_dev; 1832 - 1833 - return dev->dev_attrib.emulate_pr; 1834 - } 1835 - 1836 1791 static struct target_opcode_descriptor tcm_opcode_release = { 1837 1792 .support = SCSI_SUPPORT_FULL, 1838 1793 .opcode = RELEASE, 1839 1794 .cdb_size = 6, 1840 1795 .usage_bits = {RELEASE, 0x00, 0x00, 0x00, 1841 1796 0x00, SCSI_CONTROL_MASK}, 1842 - .enabled = tcm_is_scsi2_reservations_enabled, 1797 + .enabled = tcm_is_pr_enabled, 1843 1798 }; 1844 1799 1845 1800 static struct target_opcode_descriptor tcm_opcode_release10 = { ··· 1842 1811 .usage_bits = {RELEASE_10, 0x00, 0x00, 0x00, 1843 1812 0x00, 0x00, 0x00, 
0xff, 1844 1813 0xff, SCSI_CONTROL_MASK}, 1845 - .enabled = tcm_is_scsi2_reservations_enabled, 1814 + .enabled = tcm_is_pr_enabled, 1846 1815 }; 1847 1816 1848 1817 static struct target_opcode_descriptor tcm_opcode_reserve = { ··· 1851 1820 .cdb_size = 6, 1852 1821 .usage_bits = {RESERVE, 0x00, 0x00, 0x00, 1853 1822 0x00, SCSI_CONTROL_MASK}, 1854 - .enabled = tcm_is_scsi2_reservations_enabled, 1823 + .enabled = tcm_is_pr_enabled, 1855 1824 }; 1856 1825 1857 1826 static struct target_opcode_descriptor tcm_opcode_reserve10 = { ··· 1861 1830 .usage_bits = {RESERVE_10, 0x00, 0x00, 0x00, 1862 1831 0x00, 0x00, 0x00, 0xff, 1863 1832 0xff, SCSI_CONTROL_MASK}, 1864 - .enabled = tcm_is_scsi2_reservations_enabled, 1833 + .enabled = tcm_is_pr_enabled, 1865 1834 }; 1866 1835 1867 1836 static struct target_opcode_descriptor tcm_opcode_request_sense = { ··· 1880 1849 0xff, SCSI_CONTROL_MASK}, 1881 1850 }; 1882 1851 1883 - static bool tcm_is_3pc_enabled(struct se_cmd *cmd) 1852 + static bool tcm_is_3pc_enabled(struct target_opcode_descriptor *descr, 1853 + struct se_cmd *cmd) 1884 1854 { 1885 1855 struct se_device *dev = cmd->se_dev; 1886 1856 ··· 1942 1910 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, 1943 1911 }; 1944 1912 1945 - 1946 - static bool spc_rsoc_enabled(struct se_cmd *cmd) 1913 + static bool spc_rsoc_enabled(struct target_opcode_descriptor *descr, 1914 + struct se_cmd *cmd) 1947 1915 { 1948 1916 struct se_device *dev = cmd->se_dev; 1949 1917 ··· 1963 1931 .enabled = spc_rsoc_enabled, 1964 1932 }; 1965 1933 1966 - static bool tcm_is_set_tpg_enabled(struct se_cmd *cmd) 1934 + static bool tcm_is_set_tpg_enabled(struct target_opcode_descriptor *descr, 1935 + struct se_cmd *cmd) 1967 1936 { 1968 1937 struct t10_alua_tg_pt_gp *l_tg_pt_gp; 1969 1938 struct se_lun *l_lun = cmd->se_lun; ··· 2151 2118 if (descr->serv_action_valid) 2152 2119 return TCM_INVALID_CDB_FIELD; 2153 2120 2154 - if (!descr->enabled || descr->enabled(cmd)) 2121 + if (!descr->enabled || descr->enabled(descr, 
cmd)) 2155 2122 *opcode = descr; 2156 2123 break; 2157 2124 case 0x2: ··· 2165 2132 */ 2166 2133 if (descr->serv_action_valid && 2167 2134 descr->service_action == requested_sa) { 2168 - if (!descr->enabled || descr->enabled(cmd)) 2135 + if (!descr->enabled || descr->enabled(descr, 2136 + cmd)) 2169 2137 *opcode = descr; 2170 2138 } else if (!descr->serv_action_valid) 2171 2139 return TCM_INVALID_CDB_FIELD; ··· 2179 2145 * be returned in the one_command parameter data format. 2180 2146 */ 2181 2147 if (descr->service_action == requested_sa) 2182 - if (!descr->enabled || descr->enabled(cmd)) 2148 + if (!descr->enabled || descr->enabled(descr, 2149 + cmd)) 2183 2150 *opcode = descr; 2184 2151 break; 2185 2152 } ··· 2237 2202 2238 2203 for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) { 2239 2204 descr = tcm_supported_opcodes[i]; 2240 - if (descr->enabled && !descr->enabled(cmd)) 2205 + if (descr->enabled && !descr->enabled(descr, cmd)) 2241 2206 continue; 2242 2207 2243 2208 response_length += spc_rsoc_encode_command_descriptor( ··· 2266 2231 struct se_device *dev = cmd->se_dev; 2267 2232 unsigned char *cdb = cmd->t_task_cdb; 2268 2233 2269 - if (!dev->dev_attrib.emulate_pr && 2270 - ((cdb[0] == PERSISTENT_RESERVE_IN) || 2271 - (cdb[0] == PERSISTENT_RESERVE_OUT) || 2272 - (cdb[0] == RELEASE || cdb[0] == RELEASE_10) || 2273 - (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) { 2274 - return TCM_UNSUPPORTED_SCSI_OPCODE; 2234 + switch (cdb[0]) { 2235 + case RESERVE: 2236 + case RESERVE_10: 2237 + case RELEASE: 2238 + case RELEASE_10: 2239 + if (!dev->dev_attrib.emulate_pr) 2240 + return TCM_UNSUPPORTED_SCSI_OPCODE; 2241 + 2242 + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) 2243 + return TCM_UNSUPPORTED_SCSI_OPCODE; 2244 + break; 2245 + case PERSISTENT_RESERVE_IN: 2246 + case PERSISTENT_RESERVE_OUT: 2247 + if (!dev->dev_attrib.emulate_pr) 2248 + return TCM_UNSUPPORTED_SCSI_OPCODE; 2249 + break; 2275 2250 } 2276 2251 2277 2252 switch (cdb[0]) {
+2 -2
include/linux/blk_types.h
··· 101 101 #define BLK_STS_NOSPC ((__force blk_status_t)3) 102 102 #define BLK_STS_TRANSPORT ((__force blk_status_t)4) 103 103 #define BLK_STS_TARGET ((__force blk_status_t)5) 104 - #define BLK_STS_NEXUS ((__force blk_status_t)6) 104 + #define BLK_STS_RESV_CONFLICT ((__force blk_status_t)6) 105 105 #define BLK_STS_MEDIUM ((__force blk_status_t)7) 106 106 #define BLK_STS_PROTECTION ((__force blk_status_t)8) 107 107 #define BLK_STS_RESOURCE ((__force blk_status_t)9) ··· 189 189 case BLK_STS_NOTSUPP: 190 190 case BLK_STS_NOSPC: 191 191 case BLK_STS_TARGET: 192 - case BLK_STS_NEXUS: 192 + case BLK_STS_RESV_CONFLICT: 193 193 case BLK_STS_MEDIUM: 194 194 case BLK_STS_PROTECTION: 195 195 return false;
+43 -8
include/linux/nvme.h
··· 759 759 NVME_LBART_ATTRIB_HIDE = 1 << 1, 760 760 }; 761 761 762 + enum nvme_pr_type { 763 + NVME_PR_WRITE_EXCLUSIVE = 1, 764 + NVME_PR_EXCLUSIVE_ACCESS = 2, 765 + NVME_PR_WRITE_EXCLUSIVE_REG_ONLY = 3, 766 + NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, 767 + NVME_PR_WRITE_EXCLUSIVE_ALL_REGS = 5, 768 + NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, 769 + }; 770 + 771 + enum nvme_eds { 772 + NVME_EXTENDED_DATA_STRUCT = 0x1, 773 + }; 774 + 775 + struct nvme_registered_ctrl { 776 + __le16 cntlid; 777 + __u8 rcsts; 778 + __u8 rsvd3[5]; 779 + __le64 hostid; 780 + __le64 rkey; 781 + }; 782 + 762 783 struct nvme_reservation_status { 763 784 __le32 gen; 764 785 __u8 rtype; 765 786 __u8 regctl[2]; 766 787 __u8 resv5[2]; 767 788 __u8 ptpls; 768 - __u8 resv10[13]; 769 - struct { 770 - __le16 cntlid; 771 - __u8 rcsts; 772 - __u8 resv3[5]; 773 - __le64 hostid; 774 - __le64 rkey; 775 - } regctl_ds[]; 789 + __u8 resv10[14]; 790 + struct nvme_registered_ctrl regctl_ds[]; 791 + }; 792 + 793 + struct nvme_registered_ctrl_ext { 794 + __le16 cntlid; 795 + __u8 rcsts; 796 + __u8 rsvd3[5]; 797 + __le64 rkey; 798 + __u8 hostid[16]; 799 + __u8 rsvd32[32]; 800 + }; 801 + 802 + struct nvme_reservation_status_ext { 803 + __le32 gen; 804 + __u8 rtype; 805 + __u8 regctl[2]; 806 + __u8 resv5[2]; 807 + __u8 ptpls; 808 + __u8 resv10[14]; 809 + __u8 rsvd24[40]; 810 + struct nvme_registered_ctrl_ext regctl_eds[]; 776 811 }; 777 812 778 813 enum nvme_async_event_type {
+25
include/linux/pr.h
··· 4 4 5 5 #include <uapi/linux/pr.h> 6 6 7 + struct pr_keys { 8 + u32 generation; 9 + u32 num_keys; 10 + u64 keys[]; 11 + }; 12 + 13 + struct pr_held_reservation { 14 + u64 key; 15 + u32 generation; 16 + enum pr_type type; 17 + }; 18 + 7 19 struct pr_ops { 8 20 int (*pr_register)(struct block_device *bdev, u64 old_key, u64 new_key, 9 21 u32 flags); ··· 26 14 int (*pr_preempt)(struct block_device *bdev, u64 old_key, u64 new_key, 27 15 enum pr_type type, bool abort); 28 16 int (*pr_clear)(struct block_device *bdev, u64 key); 17 + /* 18 + * pr_read_keys - Read the registered keys and return them in the 19 + * pr_keys->keys array. The keys array will have been allocated at the 20 + * end of the pr_keys struct, and pr_keys->num_keys must be set to the 21 + * number of keys the array can hold. If there are more than can fit 22 + * in the array, success will still be returned and pr_keys->num_keys 23 + * will reflect the total number of keys the device contains, so the 24 + * caller can retry with a larger array. 25 + */ 26 + int (*pr_read_keys)(struct block_device *bdev, 27 + struct pr_keys *keys_info); 28 + int (*pr_read_reservation)(struct block_device *bdev, 29 + struct pr_held_reservation *rsv); 29 30 }; 30 31 31 32 #endif /* LINUX_PR_H */
+13
include/scsi/scsi_common.h
··· 7 7 #define _SCSI_COMMON_H_ 8 8 9 9 #include <linux/types.h> 10 + #include <uapi/linux/pr.h> 10 11 #include <scsi/scsi_proto.h> 12 + 13 + enum scsi_pr_type { 14 + SCSI_PR_WRITE_EXCLUSIVE = 0x01, 15 + SCSI_PR_EXCLUSIVE_ACCESS = 0x03, 16 + SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY = 0x05, 17 + SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY = 0x06, 18 + SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS = 0x07, 19 + SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS = 0x08, 20 + }; 21 + 22 + enum scsi_pr_type block_pr_type_to_scsi(enum pr_type type); 23 + enum pr_type scsi_pr_type_to_block(enum scsi_pr_type type); 11 24 12 25 static inline unsigned 13 26 scsi_varlen_cdb_length(const void *hdr)
+5
include/scsi/scsi_proto.h
··· 151 151 #define ZO_FINISH_ZONE 0x02 152 152 #define ZO_OPEN_ZONE 0x03 153 153 #define ZO_RESET_WRITE_POINTER 0x04 154 + /* values for PR in service action */ 155 + #define READ_KEYS 0x00 156 + #define READ_RESERVATION 0x01 157 + #define REPORT_CAPABILITES 0x02 158 + #define READ_FULL_STATUS 0x03 154 159 /* values for variable length command */ 155 160 #define XDREAD_32 0x03 156 161 #define XDWRITE_32 0x04
+6 -2
include/target/target_core_backend.h
··· 62 62 struct configfs_attribute **tb_dev_action_attrs; 63 63 }; 64 64 65 - struct sbc_ops { 65 + struct exec_cmd_ops { 66 66 sense_reason_t (*execute_rw)(struct se_cmd *cmd, struct scatterlist *, 67 67 u32, enum dma_data_direction); 68 68 sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd); 69 69 sense_reason_t (*execute_write_same)(struct se_cmd *cmd); 70 70 sense_reason_t (*execute_unmap)(struct se_cmd *cmd, 71 71 sector_t lba, sector_t nolb); 72 + sense_reason_t (*execute_pr_out)(struct se_cmd *cmd, u8 sa, u64 key, 73 + u64 sa_key, u8 type, bool aptpl); 74 + sense_reason_t (*execute_pr_in)(struct se_cmd *cmd, u8 sa, 75 + unsigned char *param_data); 72 76 }; 73 77 74 78 int transport_backend_register(const struct target_backend_ops *); ··· 90 86 sense_reason_t spc_emulate_inquiry_std(struct se_cmd *, unsigned char *); 91 87 sense_reason_t spc_emulate_evpd_83(struct se_cmd *, unsigned char *); 92 88 93 - sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops); 89 + sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops); 94 90 u32 sbc_get_device_rev(struct se_device *dev); 95 91 u32 sbc_get_device_type(struct se_device *dev); 96 92 sector_t sbc_get_write_same_sectors(struct se_cmd *cmd);
+2 -1
include/target/target_core_base.h
··· 880 880 u8 specific_timeout; 881 881 u16 nominal_timeout; 882 882 u16 recommended_timeout; 883 - bool (*enabled)(struct se_cmd *cmd); 883 + bool (*enabled)(struct target_opcode_descriptor *descr, 884 + struct se_cmd *cmd); 884 885 void (*update_usage_bits)(u8 *usage_bits, 885 886 struct se_device *dev); 886 887 u8 usage_bits[];