// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}

static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

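/*
 * Allocate the iblock_dev backend structure along with one
 * iblock_dev_plug per possible CPU id for plugging bio submissions.
 */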
static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
				   GFP_KERNEL);
	if (!ib_dev->ibd_plug)
		goto free_dev;

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;

free_dev:
	kfree(ib_dev);
	return NULL;
}

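/*
 * Claim the backing block device named by udev_path and derive the
 * exported device attributes (block size, max sectors, discard and
 * write-zeroes limits, T10 PI support) from its request queue.
 */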
static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	struct blk_integrity *bi;
	fmode_t mode;
	unsigned int max_write_zeroes_sectors;
	int ret;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;
	else
		dev->dev_flags |= DF_READ_ONLY;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
		pr_debug("IBLOCK: BLOCK Discard support available,"
			 " disabled by default\n");

	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
	if (max_write_zeroes_sectors)
		dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
	else
		dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (bi) {
		struct bio_set *bs = &ib_dev->ibd_bio_set;

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
		    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
			pr_err("IBLOCK export of blk_integrity: %s not"
			       " supported\n", bi->profile->name);
			ret = -ENOSYS;
			goto out_blkdev_put;
		}

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		}

		if (dev->dev_attrib.pi_prot_type) {
			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
				pr_err("Unable to allocate bioset for PI\n");
				ret = -ENOMEM;
				goto out_blkdev_put;
			}
			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
				 &bs->bio_integrity_pool);
		}
		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	}

	return 0;

out_blkdev_put:
	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
	bioset_exit(&ib_dev->ibd_bio_set);
out:
	return ret;
}

static void iblock_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	kfree(ib_dev->ibd_plug);
	kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

static void iblock_destroy_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	bioset_exit(&ib_dev->ibd_bio_set);
}

static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
	struct iblock_dev_plug *ib_dev_plug;

	/*
	 * Each se_device has a per-cpu work item this can be run from.
	 * We shouldn't have multiple threads on the same CPU calling
	 * this at the same time.
	 */
	ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
	if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
		return NULL;

	blk_start_plug(&ib_dev_plug->blk_plug);
	return &ib_dev_plug->se_plug;
}

static void iblock_unplug_device(struct se_dev_plug *se_plug)
{
	struct iblock_dev_plug *ib_dev_plug = container_of(se_plug,
					struct iblock_dev_plug, se_plug);

	blk_finish_plug(&ib_dev_plug->blk_plug);
	clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
}

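/*
 * Return the last addressable LBA of the backing device, rescaled when
 * the exported block size differs from the device's logical block size.
 */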
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	u32 block_size = bdev_logical_block_size(bd);
	unsigned long long blocks_long =
		div_u64(bdev_nr_bytes(bd), block_size) - 1;

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
			break;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

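/*
 * Drop one reference to the command; the last completion reports
 * CHECK CONDITION if any bio failed, otherwise GOOD status.
 */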
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!refcount_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

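/* Per-bio completion callback for the normal read/write path. */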
static void iblock_bio_done(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	if (bio->bi_status) {
		pr_err("bio error: %p, err: %d\n", bio, bio->bi_status);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
				  unsigned int opf)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	bio = bio_alloc_bioset(GFP_NOIO, bio_max_segs(sg_num),
			       &ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio_set_dev(bio, ib_dev->ibd_bd);
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;
	bio->bi_opf = opf;

	return bio;
}

static void iblock_submit_bios(struct bio_list *list)
{
	struct blk_plug plug;
	struct bio *bio;
	/*
	 * The block layer handles nested plugs, so just plug/unplug to handle
	 * fabric drivers that didn't support batching and multi bio cmds.
	 */
	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(bio);
	blk_finish_plug(&plug);
}

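/*
 * Completion callback for cache-flush bios; only completes the command
 * here when the SYNCHRONIZE CACHE was not already acknowledged as
 * immediate in iblock_execute_sync_cache().
 */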
static void iblock_end_io_flush(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;

	if (bio->bi_status)
		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);

	if (cmd) {
		if (bio->bi_status)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio_set_dev(bio, ib_dev->ibd_bd);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(bio);
	return 0;
}

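/* Translate an UNMAP request into a discard on the backing device. */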
static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct se_device *dev = cmd->se_dev;
	int ret;

	ret = blkdev_issue_discard(bdev,
				   target_to_linux_sector(dev, lba),
				   target_to_linux_sector(dev, nolb),
				   GFP_KERNEL, 0);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}

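/*
 * Fast path for WRITE SAME with an all-zeroes payload: offload the
 * whole range to blkdev_issue_zeroout() instead of submitting bios.
 */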
static sense_reason_t
iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *sg = &cmd->t_data_sg[0];
	unsigned char *buf, *not_zero;
	int ret;

	buf = kmap(sg_page(sg)) + sg->offset;
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	/*
	 * Fall back to block_execute_write_same() slow-path if
	 * incoming WRITE_SAME payload does not contain zeros.
	 */
	not_zero = memchr_inv(buf, 0x00, cmd->data_length);
	kunmap(sg_page(sg));

	if (not_zero)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	ret = blkdev_issue_zeroout(bdev,
				target_to_linux_sector(dev, cmd->t_task_lba),
				target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd)),
				GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
	if (ret)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

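/*
 * Emulate WRITE SAME by resubmitting the single-block payload for each
 * LBA in the range, taking the zero-out fast path above when the device
 * supports write-zeroes and the payload is all zeroes.
 */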
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	sector_t sectors = target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd));

	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with IBLOCK"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	if (bdev_write_zeroes_sectors(bdev)) {
		if (!iblock_execute_zero_out(bdev, cmd))
			return 0;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sectors -= sg->length >> SECTOR_SHIFT;
	}

	iblock_submit_bios(&list);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

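/* Parse comma-separated configfs parameters: udev_path=, readonly=, force=. */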
static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %pg", bd);
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
			"CLAIMED: IBLOCK");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

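/*
 * Attach a bio_integrity_payload to @bio and fill it with protection
 * information pages from the command's t_prot_sg via @miter.
 */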
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
		 struct sg_mapping_iter *miter)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	int rc;
	size_t resid, len;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
	/* virtual start sector must be in integrity interval units */
	bip_set_seed(bip, bio->bi_iter.bi_sector >>
				  (bi->interval_exp - SECTOR_SHIFT));

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	resid = bip->bip_iter.bi_size;
	while (resid > 0 && sg_miter_next(miter)) {

		len = min_t(size_t, miter->length, resid);
		rc = bio_integrity_add_page(bio, miter->page, len,
					    offset_in_page(miter->addr));
		if (rc != len) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			sg_miter_stop(miter);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
			  miter->page, len, offset_in_page(miter->addr));

		resid -= len;
		if (len < miter->length)
			miter->consumed -= miter->length - len;
	}
	sg_miter_stop(miter);

	return 0;
}

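/*
 * Main I/O path: map the command's scatterlist into one or more bios,
 * attaching protection information when T10 PI is enabled, and submit
 * them in batches of up to IBLOCK_MAX_BIO_PER_TASK.
 */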
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	unsigned int opf;
	unsigned bio_cnt;
	int i, rc;
	struct sg_mapping_iter prot_miter;
	unsigned int miter_dir;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
		/*
		 * Force writethrough using REQ_FUA if a volatile write cache
		 * is not enabled, or if initiator set the Force Unit Access bit.
		 */
		opf = REQ_OP_WRITE;
		miter_dir = SG_MITER_TO_SG;
		if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
			if (cmd->se_cmd_flags & SCF_FUA)
				opf |= REQ_FUA;
			else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
				opf |= REQ_FUA;
		}
	} else {
		opf = REQ_OP_READ;
		miter_dir = SG_MITER_FROM_SG;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		refcount_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 2);
	bio_cnt = 1;

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
		sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
			       miter_dir);

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 * length of the S/G list entry this will cause an
		 * endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
				rc = iblock_alloc_bip(cmd, bio, &prot_miter);
				if (rc)
					goto fail_put_bios;
			}

			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sg_num--;
	}

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
		rc = iblock_alloc_bip(cmd, bio, &prot_miter);
		if (rc)
			goto fail_put_bios;
	}

	iblock_submit_bios(&list);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int ret;

	ret = bdev_alignment_offset(bd);
	if (ret == -1)
		return 0;

	/* convert offset-bytes to offset-lbas */
	return ret / bdev_logical_block_size(bd);
}

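/* Logical blocks per physical block exponent, derived from queue limits. */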
static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	unsigned int logs_per_phys =
		bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

	return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
}

static const struct target_backend_ops iblock_ops = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.destroy_device		= iblock_destroy_device,
	.free_device		= iblock_free_device,
	.plug_device		= iblock_plug_device,
	.unplug_device		= iblock_unplug_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe		= iblock_get_lbppbe,
	.get_io_min		= iblock_get_io_min,
	.get_io_opt		= iblock_get_io_opt,
	.get_write_cache	= iblock_get_write_cache,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
	return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
	target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);