1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
4 * Copyright (C) 2014 Red Hat, Inc.
5 * Copyright (C) 2015 Arrikto, Inc.
6 * Copyright (C) 2017 Chinamobile, Inc.
7 */
8
9#include <linux/spinlock.h>
10#include <linux/module.h>
11#include <linux/idr.h>
12#include <linux/kernel.h>
13#include <linux/timer.h>
14#include <linux/parser.h>
15#include <linux/vmalloc.h>
16#include <linux/uio_driver.h>
17#include <linux/radix-tree.h>
18#include <linux/stringify.h>
19#include <linux/bitops.h>
20#include <linux/highmem.h>
21#include <linux/configfs.h>
22#include <linux/mutex.h>
23#include <linux/workqueue.h>
24#include <net/genetlink.h>
25#include <scsi/scsi_common.h>
26#include <scsi/scsi_proto.h>
27#include <target/target_core_base.h>
28#include <target/target_core_fabric.h>
29#include <target/target_core_backend.h>
30
31#include <linux/target_core_user.h>
32
33/**
34 * DOC: Userspace I/O
35 * Userspace I/O
36 * -------------
37 *
38 * Define a shared-memory interface for LIO to pass SCSI commands and
39 * data to userspace for processing. This allows backends that are too
40 * complex for in-kernel support to be implemented in userspace.
41 *
42 * It uses the UIO framework to do a lot of the device-creation and
43 * introspection work for us.
44 *
45 * See the .h file for how the ring is laid out. Note that while the
46 * command ring is defined, the particulars of the data area are
47 * not. Offset values in the command entry point to other locations
48 * internal to the mmap-ed area. There is separate space outside the
49 * command ring for data buffers. This leaves maximum flexibility for
50 * moving buffer allocations, or even page flipping or other
51 * allocation techniques, without altering the command ring layout.
52 *
53 * SECURITY:
54 * The user process must be assumed to be malicious. There's no way to
55 * prevent it from breaking the command ring protocol if it wants, but in
56 * order to prevent other issues we must only ever read *data* from
57 * the shared memory area, not offsets or sizes. This applies to
58 * command ring entries as well as the mailbox. Extra code needed for
59 * this may have a 'UAM' comment.
60 */
61
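/*
 * For orientation only: a hypothetical, minimal sketch of the userspace
 * side (a tcmu-runner style daemon) driving the ring described above. It
 * is not part of this driver and only illustrates the mailbox/ring
 * handshake from include/uapi/linux/target_core_user.h; the uio device
 * path, the mapping length and all error handling are assumptions, and
 * servicing of the data area is not shown.
 *
 *     int fd = open("/dev/uio0", O_RDWR);
 *     void *map = mmap(NULL, map_len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                      fd, 0);
 *     struct tcmu_mailbox *mb = map;
 *     void *cmdr = map + mb->cmdr_off;
 *     uint32_t kick = 1;
 *
 *     for (;;) {
 *             read(fd, &kick, 4);             // wait for uio_event_notify()
 *             while (mb->cmd_tail != mb->cmd_head) {
 *                     struct tcmu_cmd_entry *ent = cmdr + mb->cmd_tail;
 *
 *                     if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
 *                             // CDB is at map + ent->req.cdb_off; data is at
 *                             // the mmap offsets listed in ent->req.iov[]
 *                             ent->rsp.scsi_status = SAM_STAT_GOOD;
 *                     }
 *                     mb->cmd_tail = (mb->cmd_tail +
 *                                     tcmu_hdr_get_len(ent->hdr.len_op)) %
 *                                    mb->cmdr_size;
 *             }
 *             write(fd, &kick, 4);            // kick tcmu_irqcontrol()
 *     }
 */
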
62#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
63
64/* For the cmd area, the size is fixed at 8MB */
65#define CMDR_SIZE (8 * 1024 * 1024)
66
67/*
68 * For the data area, the block size is PAGE_SIZE and
69 * the default total size is 256K * PAGE_SIZE.
70 */
71#define DATA_BLOCK_SIZE PAGE_SIZE
72#define DATA_BLOCK_SHIFT PAGE_SHIFT
73#define DATA_BLOCK_BITS_DEF (256 * 1024)
74
75#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
76#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))
77
78/*
79 * Default global data block limit (512K * PAGE_SIZE), above which
80 * the unmap thread will be started.
81 */
82#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)
83
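/*
 * Worked example, assuming 4K pages (DATA_BLOCK_SHIFT == 12):
 * TCMU_MBS_TO_BLOCKS(1) == 1 << 8 == 256 blocks per MB, so the per-device
 * default of DATA_BLOCK_BITS_DEF (256K blocks) corresponds to a 1 GB data
 * area and TCMU_GLOBAL_MAX_BLOCKS_DEF (512K blocks) caps the global pool
 * at 2 GB.
 */
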
84static u8 tcmu_kern_cmd_reply_supported;
85static u8 tcmu_netlink_blocked;
86
87static struct device *tcmu_root_device;
88
89struct tcmu_hba {
90 u32 host_id;
91};
92
93#define TCMU_CONFIG_LEN 256
94
95static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
96static LIST_HEAD(tcmu_nl_cmd_list);
97
98struct tcmu_dev;
99
100struct tcmu_nl_cmd {
101 /* wake up thread waiting for reply */
102 struct completion complete;
103 struct list_head nl_list;
104 struct tcmu_dev *udev;
105 int cmd;
106 int status;
107};
108
109struct tcmu_dev {
110 struct list_head node;
111 struct kref kref;
112
113 struct se_device se_dev;
114
115 char *name;
116 struct se_hba *hba;
117
118#define TCMU_DEV_BIT_OPEN 0
119#define TCMU_DEV_BIT_BROKEN 1
120#define TCMU_DEV_BIT_BLOCKED 2
121 unsigned long flags;
122
123 struct uio_info uio_info;
124
125 struct inode *inode;
126
127 struct tcmu_mailbox *mb_addr;
128 uint64_t dev_size;
129 u32 cmdr_size;
130 u32 cmdr_last_cleaned;
131 /* Offset of data area from start of mb */
132 /* Must add data_off and mb_addr to get the address */
133 size_t data_off;
134 size_t data_size;
135 uint32_t max_blocks;
136 size_t ring_size;
137
138 struct mutex cmdr_lock;
139 struct list_head qfull_queue;
140
141 uint32_t dbi_max;
142 uint32_t dbi_thresh;
143 unsigned long *data_bitmap;
144 struct radix_tree_root data_blocks;
145
146 struct idr commands;
147
148 struct timer_list cmd_timer;
149 unsigned int cmd_time_out;
150 struct list_head inflight_queue;
151
152 struct timer_list qfull_timer;
153 int qfull_time_out;
154
155 struct list_head timedout_entry;
156
157 struct tcmu_nl_cmd curr_nl_cmd;
158
159 char dev_config[TCMU_CONFIG_LEN];
160
161 int nl_reply_supported;
162};
163
164#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
165
166#define CMDR_OFF sizeof(struct tcmu_mailbox)
167
168struct tcmu_cmd {
169 struct se_cmd *se_cmd;
170 struct tcmu_dev *tcmu_dev;
171 struct list_head queue_entry;
172
173 uint16_t cmd_id;
174
175 /* Can't use se_cmd when cleaning up expired cmds, because if
176  * cmd has been completed then accessing se_cmd is off limits */
177 uint32_t dbi_cnt;
178 uint32_t dbi_cur;
179 uint32_t *dbi;
180
181 unsigned long deadline;
182
183#define TCMU_CMD_BIT_EXPIRED 0
184#define TCMU_CMD_BIT_INFLIGHT 1
185 unsigned long flags;
186};
187/*
188 * To avoid deadlock, the mutex lock order should always be:
189 *
190 * mutex_lock(&root_udev_mutex);
191 * ...
192 * mutex_lock(&tcmu_dev->cmdr_lock);
193 * mutex_unlock(&tcmu_dev->cmdr_lock);
194 * ...
195 * mutex_unlock(&root_udev_mutex);
196 */
197static DEFINE_MUTEX(root_udev_mutex);
198static LIST_HEAD(root_udev);
199
200static DEFINE_SPINLOCK(timed_out_udevs_lock);
201static LIST_HEAD(timed_out_udevs);
202
203static struct kmem_cache *tcmu_cmd_cache;
204
205static atomic_t global_db_count = ATOMIC_INIT(0);
206static struct delayed_work tcmu_unmap_work;
207static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF;
208
209static int tcmu_set_global_max_data_area(const char *str,
210 const struct kernel_param *kp)
211{
212 int ret, max_area_mb;
213
214 ret = kstrtoint(str, 10, &max_area_mb);
215 if (ret)
216 return -EINVAL;
217
218 if (max_area_mb <= 0) {
219 pr_err("global_max_data_area must be larger than 0.\n");
220 return -EINVAL;
221 }
222
223 tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb);
224 if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
225 schedule_delayed_work(&tcmu_unmap_work, 0);
226 else
227 cancel_delayed_work_sync(&tcmu_unmap_work);
228
229 return 0;
230}
231
232static int tcmu_get_global_max_data_area(char *buffer,
233 const struct kernel_param *kp)
234{
235 return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
236}
237
238static const struct kernel_param_ops tcmu_global_max_data_area_op = {
239 .set = tcmu_set_global_max_data_area,
240 .get = tcmu_get_global_max_data_area,
241};
242
243module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
244 S_IWUSR | S_IRUGO);
245MODULE_PARM_DESC(global_max_data_area_mb,
246 "Max MBs allowed to be allocated to all the tcmu devices' "
247 "data areas.");
248
249static int tcmu_get_block_netlink(char *buffer,
250 const struct kernel_param *kp)
251{
252 return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
253 "blocked" : "unblocked");
254}
255
256static int tcmu_set_block_netlink(const char *str,
257 const struct kernel_param *kp)
258{
259 int ret;
260 u8 val;
261
262 ret = kstrtou8(str, 0, &val);
263 if (ret < 0)
264 return ret;
265
266 if (val > 1) {
267 pr_err("Invalid block netlink value %u\n", val);
268 return -EINVAL;
269 }
270
271 tcmu_netlink_blocked = val;
272 return 0;
273}
274
275static const struct kernel_param_ops tcmu_block_netlink_op = {
276 .set = tcmu_set_block_netlink,
277 .get = tcmu_get_block_netlink,
278};
279
280module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
281MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");
282
283static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
284{
285 struct tcmu_dev *udev = nl_cmd->udev;
286
287 if (!tcmu_netlink_blocked) {
288 pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
289 return -EBUSY;
290 }
291
292 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
293 pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
294 nl_cmd->status = -EINTR;
295 list_del(&nl_cmd->nl_list);
296 complete(&nl_cmd->complete);
297 }
298 return 0;
299}
300
301static int tcmu_set_reset_netlink(const char *str,
302 const struct kernel_param *kp)
303{
304 struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
305 int ret;
306 u8 val;
307
308 ret = kstrtou8(str, 0, &val);
309 if (ret < 0)
310 return ret;
311
312 if (val != 1) {
313 pr_err("Invalid reset netlink value %u\n", val);
314 return -EINVAL;
315 }
316
317 mutex_lock(&tcmu_nl_cmd_mutex);
318 list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
319 ret = tcmu_fail_netlink_cmd(nl_cmd);
320 if (ret)
321 break;
322 }
323 mutex_unlock(&tcmu_nl_cmd_mutex);
324
325 return ret;
326}
327
328static const struct kernel_param_ops tcmu_reset_netlink_op = {
329 .set = tcmu_set_reset_netlink,
330};
331
332module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
333MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");
334
335/* multicast group */
336enum tcmu_multicast_groups {
337 TCMU_MCGRP_CONFIG,
338};
339
340static const struct genl_multicast_group tcmu_mcgrps[] = {
341 [TCMU_MCGRP_CONFIG] = { .name = "config", },
342};
343
344static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
345 [TCMU_ATTR_DEVICE] = { .type = NLA_STRING },
346 [TCMU_ATTR_MINOR] = { .type = NLA_U32 },
347 [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 },
348 [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 },
349 [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
350};
351
352static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
353{
354 struct tcmu_dev *udev = NULL;
355 struct tcmu_nl_cmd *nl_cmd;
356 int dev_id, rc, ret = 0;
357
358 if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
359 !info->attrs[TCMU_ATTR_DEVICE_ID]) {
360 printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
361 return -EINVAL;
362 }
363
364 dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
365 rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
366
367 mutex_lock(&tcmu_nl_cmd_mutex);
368 list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
369 if (nl_cmd->udev->se_dev.dev_index == dev_id) {
370 udev = nl_cmd->udev;
371 break;
372 }
373 }
374
375 if (!udev) {
376 pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
377 completed_cmd, rc, dev_id);
378 ret = -ENODEV;
379 goto unlock;
380 }
381 list_del(&nl_cmd->nl_list);
382
383 pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
384 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
385 nl_cmd->status);
386
387 if (nl_cmd->cmd != completed_cmd) {
388 pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
389 udev->name, completed_cmd, nl_cmd->cmd);
390 ret = -EINVAL;
391 goto unlock;
392 }
393
394 nl_cmd->status = rc;
395 complete(&nl_cmd->complete);
396unlock:
397 mutex_unlock(&tcmu_nl_cmd_mutex);
398 return ret;
399}
400
401static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
402{
403 return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
404}
405
406static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
407{
408 return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
409}
410
411static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
412 struct genl_info *info)
413{
414 return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
415}
416
417static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
418{
419 if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
420 tcmu_kern_cmd_reply_supported =
421 nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
422 printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
423 tcmu_kern_cmd_reply_supported);
424 }
425
426 return 0;
427}
428
429static const struct genl_ops tcmu_genl_ops[] = {
430 {
431 .cmd = TCMU_CMD_SET_FEATURES,
432 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
433 .flags = GENL_ADMIN_PERM,
434 .doit = tcmu_genl_set_features,
435 },
436 {
437 .cmd = TCMU_CMD_ADDED_DEVICE_DONE,
438 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
439 .flags = GENL_ADMIN_PERM,
440 .doit = tcmu_genl_add_dev_done,
441 },
442 {
443 .cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
444 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
445 .flags = GENL_ADMIN_PERM,
446 .doit = tcmu_genl_rm_dev_done,
447 },
448 {
449 .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
450 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
451 .flags = GENL_ADMIN_PERM,
452 .doit = tcmu_genl_reconfig_dev_done,
453 },
454};
455
456/* Our generic netlink family */
457static struct genl_family tcmu_genl_family __ro_after_init = {
458 .module = THIS_MODULE,
459 .hdrsize = 0,
460 .name = "TCM-USER",
461 .version = 2,
462 .maxattr = TCMU_ATTR_MAX,
463 .policy = tcmu_attr_policy,
464 .mcgrps = tcmu_mcgrps,
465 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
466 .netnsok = true,
467 .ops = tcmu_genl_ops,
468 .n_ops = ARRAY_SIZE(tcmu_genl_ops),
469};
470
471#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
472#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
473#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
474#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])
475
476static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
477{
478 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
479 uint32_t i;
480
481 for (i = 0; i < len; i++)
482 clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
483}
484
485static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
486 struct tcmu_cmd *tcmu_cmd)
487{
488 struct page *page;
489 int ret, dbi;
490
491 dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
492 if (dbi == udev->dbi_thresh)
493 return false;
494
495 page = radix_tree_lookup(&udev->data_blocks, dbi);
496 if (!page) {
497 if (atomic_add_return(1, &global_db_count) >
498 tcmu_global_max_blocks)
499 schedule_delayed_work(&tcmu_unmap_work, 0);
500
501 /* try to get a new page from the mm */
502 page = alloc_page(GFP_NOIO);
503 if (!page)
504 goto err_alloc;
505
506 ret = radix_tree_insert(&udev->data_blocks, dbi, page);
507 if (ret)
508 goto err_insert;
509 }
510
511 if (dbi > udev->dbi_max)
512 udev->dbi_max = dbi;
513
514 set_bit(dbi, udev->data_bitmap);
515 tcmu_cmd_set_dbi(tcmu_cmd, dbi);
516
517 return true;
518err_insert:
519 __free_page(page);
520err_alloc:
521 atomic_dec(&global_db_count);
522 return false;
523}
524
525static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
526 struct tcmu_cmd *tcmu_cmd)
527{
528 int i;
529
530 for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
531 if (!tcmu_get_empty_block(udev, tcmu_cmd))
532 return false;
533 }
534 return true;
535}
536
537static inline struct page *
538tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
539{
540 return radix_tree_lookup(&udev->data_blocks, dbi);
541}
542
543static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
544{
545 kfree(tcmu_cmd->dbi);
546 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
547}
548
549static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
550{
551 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
552 size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
553
554 if (se_cmd->se_cmd_flags & SCF_BIDI) {
555 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
556 data_length += round_up(se_cmd->t_bidi_data_sg->length,
557 DATA_BLOCK_SIZE);
558 }
559
560 return data_length;
561}
562
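/*
 * Worked example, assuming 4K blocks: a command with data_length == 3000
 * rounds up to one 4096-byte block; a bidi command whose Data-In buffer
 * adds another 512 bytes rounds up to a second block, so the total is
 * 8192 bytes and tcmu_cmd_get_block_cnt() below returns 2.
 */
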
563static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
564{
565 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
566
567 return data_length / DATA_BLOCK_SIZE;
568}
569
570static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
571{
572 struct se_device *se_dev = se_cmd->se_dev;
573 struct tcmu_dev *udev = TCMU_DEV(se_dev);
574 struct tcmu_cmd *tcmu_cmd;
575
576 tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO);
577 if (!tcmu_cmd)
578 return NULL;
579
580 INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
581 tcmu_cmd->se_cmd = se_cmd;
582 tcmu_cmd->tcmu_dev = udev;
583
584 tcmu_cmd_reset_dbi_cur(tcmu_cmd);
585 tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
586 tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
587 GFP_NOIO);
588 if (!tcmu_cmd->dbi) {
589 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
590 return NULL;
591 }
592
593 return tcmu_cmd;
594}
595
596static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
597{
598 unsigned long offset = offset_in_page(vaddr);
599 void *start = vaddr - offset;
600
601 size = round_up(size+offset, PAGE_SIZE);
602
603 while (size) {
604 flush_dcache_page(virt_to_page(start));
605 start += PAGE_SIZE;
606 size -= PAGE_SIZE;
607 }
608}
609
610/*
611 * Some ring helper functions. We don't assume size is a power of 2 so
612 * we can't use circ_buf.h.
613 */
614static inline size_t spc_used(size_t head, size_t tail, size_t size)
615{
616 int diff = head - tail;
617
618 if (diff >= 0)
619 return diff;
620 else
621 return size + diff;
622}
623
624static inline size_t spc_free(size_t head, size_t tail, size_t size)
625{
626 /* Keep 1 byte unused or we can't tell full from empty */
627 return (size - spc_used(head, tail, size) - 1);
628}
629
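/*
 * Worked example for the two helpers above: with size == 8, head == 2 and
 * tail == 6 the used region wraps, so spc_used() == 8 + (2 - 6) == 4 and
 * spc_free() == 8 - 4 - 1 == 3 (one byte stays unused to distinguish a
 * full ring from an empty one).
 */
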
630static inline size_t head_to_end(size_t head, size_t size)
631{
632 return size - head;
633}
634
635static inline void new_iov(struct iovec **iov, int *iov_cnt)
636{
637 struct iovec *iovec;
638
639 if (*iov_cnt != 0)
640 (*iov)++;
641 (*iov_cnt)++;
642
643 iovec = *iov;
644 memset(iovec, 0, sizeof(struct iovec));
645}
646
647#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
648
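/*
 * Example: UPDATE_HEAD(head, 4, 8) with head == 6 stores ((6 % 8) + 4) % 8
 * == 2, i.e. the head wraps around the ring; smp_store_release() ensures
 * the entry contents written before it become visible before the new head
 * value does.
 */
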
649/* offset is relative to mb_addr */
650static inline size_t get_block_offset_user(struct tcmu_dev *dev,
651 int dbi, int remaining)
652{
653 return dev->data_off + dbi * DATA_BLOCK_SIZE +
654 DATA_BLOCK_SIZE - remaining;
655}
656
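/*
 * Example, assuming 4K blocks: for dbi == 3 with 1024 bytes of the block
 * still remaining, the offset handed to userspace is
 * data_off + 3 * 4096 + (4096 - 1024) == data_off + 15360.
 */
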
657static inline size_t iov_tail(struct iovec *iov)
658{
659 return (size_t)iov->iov_base + iov->iov_len;
660}
661
662static void scatter_data_area(struct tcmu_dev *udev,
663 struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
664 unsigned int data_nents, struct iovec **iov,
665 int *iov_cnt, bool copy_data)
666{
667 int i, dbi;
668 int block_remaining = 0;
669 void *from, *to = NULL;
670 size_t copy_bytes, to_offset, offset;
671 struct scatterlist *sg;
672 struct page *page;
673
674 for_each_sg(data_sg, sg, data_nents, i) {
675 int sg_remaining = sg->length;
676 from = kmap_atomic(sg_page(sg)) + sg->offset;
677 while (sg_remaining > 0) {
678 if (block_remaining == 0) {
679 if (to)
680 kunmap_atomic(to);
681
682 block_remaining = DATA_BLOCK_SIZE;
683 dbi = tcmu_cmd_get_dbi(tcmu_cmd);
684 page = tcmu_get_block_page(udev, dbi);
685 to = kmap_atomic(page);
686 }
687
688 /*
689 * Convert to the virtual offset of the ring data area.
690 */
691 to_offset = get_block_offset_user(udev, dbi,
692 block_remaining);
693
694 /*
695 * The following code will gather and map the blocks
696 * to the same iovec when the blocks are all next to
697 * each other.
698 */
699 copy_bytes = min_t(size_t, sg_remaining,
700 block_remaining);
701 if (*iov_cnt != 0 &&
702 to_offset == iov_tail(*iov)) {
703 /*
704 * Will append to the current iovec, because
705 * the current block page is next to the
706 * previous one.
707 */
708 (*iov)->iov_len += copy_bytes;
709 } else {
710 /*
711 * Will allocate a new iovec because this is
712 * the first time here or the current block page
713 * is not next to the previous one.
714 */
715 new_iov(iov, iov_cnt);
716 (*iov)->iov_base = (void __user *)to_offset;
717 (*iov)->iov_len = copy_bytes;
718 }
719
720 if (copy_data) {
721 offset = DATA_BLOCK_SIZE - block_remaining;
722 memcpy(to + offset,
723 from + sg->length - sg_remaining,
724 copy_bytes);
725 tcmu_flush_dcache_range(to, copy_bytes);
726 }
727
728 sg_remaining -= copy_bytes;
729 block_remaining -= copy_bytes;
730 }
731 kunmap_atomic(from - sg->offset);
732 }
733
734 if (to)
735 kunmap_atomic(to);
736}
737
738static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
739 bool bidi, uint32_t read_len)
740{
741 struct se_cmd *se_cmd = cmd->se_cmd;
742 int i, dbi;
743 int block_remaining = 0;
744 void *from = NULL, *to;
745 size_t copy_bytes, offset;
746 struct scatterlist *sg, *data_sg;
747 struct page *page;
748 unsigned int data_nents;
749 uint32_t count = 0;
750
751 if (!bidi) {
752 data_sg = se_cmd->t_data_sg;
753 data_nents = se_cmd->t_data_nents;
754 } else {
755
756 /*
757 * For the bidi case, the first count blocks hold the Data-Out
758 * buffer, so they must be skipped before gathering the
759 * Data-In buffer.
760 */
761 count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
762
763 data_sg = se_cmd->t_bidi_data_sg;
764 data_nents = se_cmd->t_bidi_data_nents;
765 }
766
767 tcmu_cmd_set_dbi_cur(cmd, count);
768
769 for_each_sg(data_sg, sg, data_nents, i) {
770 int sg_remaining = sg->length;
771 to = kmap_atomic(sg_page(sg)) + sg->offset;
772 while (sg_remaining > 0 && read_len > 0) {
773 if (block_remaining == 0) {
774 if (from)
775 kunmap_atomic(from);
776
777 block_remaining = DATA_BLOCK_SIZE;
778 dbi = tcmu_cmd_get_dbi(cmd);
779 page = tcmu_get_block_page(udev, dbi);
780 from = kmap_atomic(page);
781 }
782 copy_bytes = min_t(size_t, sg_remaining,
783 block_remaining);
784 if (read_len < copy_bytes)
785 copy_bytes = read_len;
786 offset = DATA_BLOCK_SIZE - block_remaining;
787 tcmu_flush_dcache_range(from, copy_bytes);
788 memcpy(to + sg->length - sg_remaining, from + offset,
789 copy_bytes);
790
791 sg_remaining -= copy_bytes;
792 block_remaining -= copy_bytes;
793 read_len -= copy_bytes;
794 }
795 kunmap_atomic(to - sg->offset);
796 if (read_len == 0)
797 break;
798 }
799 if (from)
800 kunmap_atomic(from);
801}
802
803static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
804{
805 return thresh - bitmap_weight(bitmap, thresh);
806}
807
808/*
809 * We can't queue a command until we have space available on the cmd ring *and*
810 * space available on the data area.
811 *
812 * Called with ring lock held.
813 */
814static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
815 size_t cmd_size, size_t data_needed)
816{
817 struct tcmu_mailbox *mb = udev->mb_addr;
818 uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
819 / DATA_BLOCK_SIZE;
820 size_t space, cmd_needed;
821 u32 cmd_head;
822
823 tcmu_flush_dcache_range(mb, sizeof(*mb));
824
825 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
826
827 /*
828 * If cmd end-of-ring space is too small then we need space for a PAD plus
829 * original cmd - cmds are internally contiguous.
830 */
831 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
832 cmd_needed = cmd_size;
833 else
834 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);
835
836 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
837 if (space < cmd_needed) {
838 pr_debug("no cmd space: %u %u %u\n", cmd_head,
839 udev->cmdr_last_cleaned, udev->cmdr_size);
840 return false;
841 }
842
843 /* try to check for and reserve the data blocks as needed */
844 space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
845 if ((space * DATA_BLOCK_SIZE) < data_needed) {
846 unsigned long blocks_left =
847 (udev->max_blocks - udev->dbi_thresh) + space;
848
849 if (blocks_left < blocks_needed) {
850 pr_debug("no data space: only %lu available, but asked for %zu\n",
851 blocks_left * DATA_BLOCK_SIZE,
852 data_needed);
853 return false;
854 }
855
856 udev->dbi_thresh += blocks_needed;
857 if (udev->dbi_thresh > udev->max_blocks)
858 udev->dbi_thresh = udev->max_blocks;
859 }
860
861 return tcmu_get_empty_blocks(udev, cmd);
862}
863
864static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
865{
866 return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
867 sizeof(struct tcmu_cmd_entry));
868}
869
870static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
871 size_t base_command_size)
872{
873 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
874 size_t command_size;
875
876 command_size = base_command_size +
877 round_up(scsi_command_size(se_cmd->t_task_cdb),
878 TCMU_OP_ALIGN_SIZE);
879
880 WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
881
882 return command_size;
883}
884
885static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
886 struct timer_list *timer)
887{
888 if (!tmo)
889 return;
890
891 tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
892 if (!timer_pending(timer))
893 mod_timer(timer, tcmu_cmd->deadline);
894
895 pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
896 tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
897}
898
899static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
900{
901 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
902 unsigned int tmo;
903
904 /*
905 * For backwards compat, if qfull_time_out is not set, use
906 * cmd_time_out, and if that's not set, use the default timeout.
907 */
908 if (!udev->qfull_time_out)
909 return -ETIMEDOUT;
910 else if (udev->qfull_time_out > 0)
911 tmo = udev->qfull_time_out;
912 else if (udev->cmd_time_out)
913 tmo = udev->cmd_time_out;
914 else
915 tmo = TCMU_TIME_OUT;
916
917 tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
918
919 list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
920 pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
921 tcmu_cmd, udev->name);
922 return 0;
923}
924
925/**
926 * queue_cmd_ring - queue cmd to ring or internally
927 * @tcmu_cmd: cmd to queue
928 * @scsi_err: TCM error code, set when -1 is returned.
929 *
930 * Returns:
931 * -1 we cannot queue internally or to the ring.
932 * 0 success
933 * 1 internally queued to wait for ring memory to free.
934 */
935static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
936{
937 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
938 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
939 size_t base_command_size, command_size;
940 struct tcmu_mailbox *mb;
941 struct tcmu_cmd_entry *entry;
942 struct iovec *iov;
943 int iov_cnt, cmd_id;
944 uint32_t cmd_head;
945 uint64_t cdb_off;
946 bool copy_to_data_area;
947 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
948
949 *scsi_err = TCM_NO_SENSE;
950
951 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
952 *scsi_err = TCM_LUN_BUSY;
953 return -1;
954 }
955
956 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
957 *scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
958 return -1;
959 }
960
961 /*
962 * Must be a certain minimum size for response sense info, but
963 * also may be larger if the iov array is large.
964 *
965 * We prepare as many iovs as possible for potential uses here,
966 * because it's expensive to tell how many regions are freed in
967 * the bitmap & global data pool, as the size calculated here
968 * will only be used to do the checks.
969 *
970 * The size will be recalculated later, as actually needed, to save
971 * cmd area memory.
972 */
973 base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
974 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
975
976 if (!list_empty(&udev->qfull_queue))
977 goto queue;
978
979 mb = udev->mb_addr;
980 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
981 if ((command_size > (udev->cmdr_size / 2)) ||
982 data_length > udev->data_size) {
983 pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
984 "cmd ring/data area\n", command_size, data_length,
985 udev->cmdr_size, udev->data_size);
986 *scsi_err = TCM_INVALID_CDB_FIELD;
987 return -1;
988 }
989
990 if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
991 /*
992 * Don't leave commands partially set up because the unmap
993 * thread might need the blocks to make forward progress.
994 */
995 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
996 tcmu_cmd_reset_dbi_cur(tcmu_cmd);
997 goto queue;
998 }
999
1000 /* Insert a PAD if end-of-ring space is too small */
1001 if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
1002 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
1003
1004 entry = (void *) mb + CMDR_OFF + cmd_head;
1005 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
1006 tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
1007 entry->hdr.cmd_id = 0; /* not used for PAD */
1008 entry->hdr.kflags = 0;
1009 entry->hdr.uflags = 0;
1010 tcmu_flush_dcache_range(entry, sizeof(entry->hdr));
1011
1012 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
1013 tcmu_flush_dcache_range(mb, sizeof(*mb));
1014
1015 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
1016 WARN_ON(cmd_head != 0);
1017 }
1018
1019 entry = (void *) mb + CMDR_OFF + cmd_head;
1020 memset(entry, 0, command_size);
1021 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
1022
1023 /* Handle allocating space from the data area */
1024 tcmu_cmd_reset_dbi_cur(tcmu_cmd);
1025 iov = &entry->req.iov[0];
1026 iov_cnt = 0;
1027 copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
1028 || se_cmd->se_cmd_flags & SCF_BIDI);
1029 scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
1030 se_cmd->t_data_nents, &iov, &iov_cnt,
1031 copy_to_data_area);
1032 entry->req.iov_cnt = iov_cnt;
1033
1034 /* Handle BIDI commands */
1035 iov_cnt = 0;
1036 if (se_cmd->se_cmd_flags & SCF_BIDI) {
1037 iov++;
1038 scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
1039 se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
1040 false);
1041 }
1042 entry->req.iov_bidi_cnt = iov_cnt;
1043
1044 cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
1045 if (cmd_id < 0) {
1046 pr_err("tcmu: Could not allocate cmd id.\n");
1047
1048 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
1049 *scsi_err = TCM_OUT_OF_RESOURCES;
1050 return -1;
1051 }
1052 tcmu_cmd->cmd_id = cmd_id;
1053
1054 pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
1055 tcmu_cmd, udev->name);
1056
1057 tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
1058
1059 entry->hdr.cmd_id = tcmu_cmd->cmd_id;
1060
1061 /*
1062 * Recalculate the command's base size and total size according
1063 * to the actual needs.
1064 */
1065 base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
1066 entry->req.iov_bidi_cnt);
1067 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
1068
1069 tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
1070
1071 /* All offsets relative to mb_addr, not start of entry! */
1072 cdb_off = CMDR_OFF + cmd_head + base_command_size;
1073 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
1074 entry->req.cdb_off = cdb_off;
1075 tcmu_flush_dcache_range(entry, command_size);
1076
1077 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
1078 tcmu_flush_dcache_range(mb, sizeof(*mb));
1079
1080 list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
1081 set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
1082
1083 /* TODO: only if FLUSH and FUA? */
1084 uio_event_notify(&udev->uio_info);
1085
1086 return 0;
1087
1088queue:
1089 if (add_to_qfull_queue(tcmu_cmd)) {
1090 *scsi_err = TCM_OUT_OF_RESOURCES;
1091 return -1;
1092 }
1093
1094 return 1;
1095}
1096
1097static sense_reason_t
1098tcmu_queue_cmd(struct se_cmd *se_cmd)
1099{
1100 struct se_device *se_dev = se_cmd->se_dev;
1101 struct tcmu_dev *udev = TCMU_DEV(se_dev);
1102 struct tcmu_cmd *tcmu_cmd;
1103 sense_reason_t scsi_ret;
1104 int ret;
1105
1106 tcmu_cmd = tcmu_alloc_cmd(se_cmd);
1107 if (!tcmu_cmd)
1108 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1109
1110 mutex_lock(&udev->cmdr_lock);
1111 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
1112 mutex_unlock(&udev->cmdr_lock);
1113 if (ret < 0)
1114 tcmu_free_cmd(tcmu_cmd);
1115 return scsi_ret;
1116}
1117
1118static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
1119{
1120 struct se_cmd *se_cmd = cmd->se_cmd;
1121 struct tcmu_dev *udev = cmd->tcmu_dev;
1122 bool read_len_valid = false;
1123 uint32_t read_len;
1124
1125 /*
1126 * cmd has already been completed from the timeout path, so just
1127 * reclaim the data area space and free the cmd
1128 */
1129 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1130 WARN_ON_ONCE(se_cmd);
1131 goto out;
1132 }
1133
1134 list_del_init(&cmd->queue_entry);
1135
1136 tcmu_cmd_reset_dbi_cur(cmd);
1137
1138 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
1139 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
1140 cmd->se_cmd);
1141 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
1142 goto done;
1143 }
1144
1145 read_len = se_cmd->data_length;
1146 if (se_cmd->data_direction == DMA_FROM_DEVICE &&
1147 (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
1148 read_len_valid = true;
1149 if (entry->rsp.read_len < read_len)
1150 read_len = entry->rsp.read_len;
1151 }
1152
1153 if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
1154 transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
1155 if (!read_len_valid)
1156 goto done;
1157 else
1158 se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
1159 }
1160 if (se_cmd->se_cmd_flags & SCF_BIDI) {
1161 /* Get Data-In buffer before clean up */
1162 gather_data_area(udev, cmd, true, read_len);
1163 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
1164 gather_data_area(udev, cmd, false, read_len);
1165 } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
1166 /* TODO: */
1167 } else if (se_cmd->data_direction != DMA_NONE) {
1168 pr_warn("TCMU: data direction was %d!\n",
1169 se_cmd->data_direction);
1170 }
1171
1172done:
1173 if (read_len_valid) {
1174 pr_debug("read_len = %d\n", read_len);
1175 target_complete_cmd_with_length(cmd->se_cmd,
1176 entry->rsp.scsi_status, read_len);
1177 } else
1178 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
1179
1180out:
1181 cmd->se_cmd = NULL;
1182 tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
1183 tcmu_free_cmd(cmd);
1184}
1185
1186static void tcmu_set_next_deadline(struct list_head *queue,
1187 struct timer_list *timer)
1188{
1189 struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1190 unsigned long deadline = 0;
1191
1192 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
1193 if (!time_after(jiffies, tcmu_cmd->deadline)) {
1194 deadline = tcmu_cmd->deadline;
1195 break;
1196 }
1197 }
1198
1199 if (deadline)
1200 mod_timer(timer, deadline);
1201 else
1202 del_timer(timer);
1203}
1204
1205static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
1206{
1207 struct tcmu_mailbox *mb;
1208 struct tcmu_cmd *cmd;
1209 int handled = 0;
1210
1211 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
1212 pr_err("ring broken, not handling completions\n");
1213 return 0;
1214 }
1215
1216 mb = udev->mb_addr;
1217 tcmu_flush_dcache_range(mb, sizeof(*mb));
1218
1219 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
1220
1221 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
1222
1223 tcmu_flush_dcache_range(entry, sizeof(*entry));
1224
1225 if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
1226 UPDATE_HEAD(udev->cmdr_last_cleaned,
1227 tcmu_hdr_get_len(entry->hdr.len_op),
1228 udev->cmdr_size);
1229 continue;
1230 }
1231 WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
1232
1233 cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
1234 if (!cmd) {
1235 pr_err("cmd_id %u not found, ring is broken\n",
1236 entry->hdr.cmd_id);
1237 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
1238 break;
1239 }
1240
1241 tcmu_handle_completion(cmd, entry);
1242
1243 UPDATE_HEAD(udev->cmdr_last_cleaned,
1244 tcmu_hdr_get_len(entry->hdr.len_op),
1245 udev->cmdr_size);
1246
1247 handled++;
1248 }
1249
1250 if (mb->cmd_tail == mb->cmd_head) {
1251 /* no more pending commands */
1252 del_timer(&udev->cmd_timer);
1253
1254 if (list_empty(&udev->qfull_queue)) {
1255 /*
1256 * no more pending or waiting commands so try to
1257 * reclaim blocks if needed.
1258 */
1259 if (atomic_read(&global_db_count) >
1260 tcmu_global_max_blocks)
1261 schedule_delayed_work(&tcmu_unmap_work, 0);
1262 }
1263 } else if (udev->cmd_time_out) {
1264 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
1265 }
1266
1267 return handled;
1268}
1269
1270static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
1271{
1272 struct se_cmd *se_cmd;
1273
1274 if (!time_after(jiffies, cmd->deadline))
1275 return;
1276
1277 set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
1278 list_del_init(&cmd->queue_entry);
1279 se_cmd = cmd->se_cmd;
1280 cmd->se_cmd = NULL;
1281
1282 pr_debug("Timing out inflight cmd %u on dev %s.\n",
1283 cmd->cmd_id, cmd->tcmu_dev->name);
1284
1285 target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
1286}
1287
1288static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
1289{
1290 struct se_cmd *se_cmd;
1291
1292 if (!time_after(jiffies, cmd->deadline))
1293 return;
1294
1295 pr_debug("Timing out queued cmd %p on dev %s.\n",
1296 cmd, cmd->tcmu_dev->name);
1297
1298 list_del_init(&cmd->queue_entry);
1299 se_cmd = cmd->se_cmd;
1300 tcmu_free_cmd(cmd);
1301
1302 target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
1303}
1304
1305static void tcmu_device_timedout(struct tcmu_dev *udev)
1306{
1307 spin_lock(&timed_out_udevs_lock);
1308 if (list_empty(&udev->timedout_entry))
1309 list_add_tail(&udev->timedout_entry, &timed_out_udevs);
1310 spin_unlock(&timed_out_udevs_lock);
1311
1312 schedule_delayed_work(&tcmu_unmap_work, 0);
1313}
1314
1315static void tcmu_cmd_timedout(struct timer_list *t)
1316{
1317 struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);
1318
1319 pr_debug("%s cmd timeout has expired\n", udev->name);
1320 tcmu_device_timedout(udev);
1321}
1322
1323static void tcmu_qfull_timedout(struct timer_list *t)
1324{
1325 struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);
1326
1327 pr_debug("%s qfull timeout has expired\n", udev->name);
1328 tcmu_device_timedout(udev);
1329}
1330
1331static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
1332{
1333 struct tcmu_hba *tcmu_hba;
1334
1335 tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
1336 if (!tcmu_hba)
1337 return -ENOMEM;
1338
1339 tcmu_hba->host_id = host_id;
1340 hba->hba_ptr = tcmu_hba;
1341
1342 return 0;
1343}
1344
1345static void tcmu_detach_hba(struct se_hba *hba)
1346{
1347 kfree(hba->hba_ptr);
1348 hba->hba_ptr = NULL;
1349}
1350
1351static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
1352{
1353 struct tcmu_dev *udev;
1354
1355 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
1356 if (!udev)
1357 return NULL;
1358 kref_init(&udev->kref);
1359
1360 udev->name = kstrdup(name, GFP_KERNEL);
1361 if (!udev->name) {
1362 kfree(udev);
1363 return NULL;
1364 }
1365
1366 udev->hba = hba;
1367 udev->cmd_time_out = TCMU_TIME_OUT;
1368 udev->qfull_time_out = -1;
1369
1370 udev->max_blocks = DATA_BLOCK_BITS_DEF;
1371 mutex_init(&udev->cmdr_lock);
1372
1373 INIT_LIST_HEAD(&udev->node);
1374 INIT_LIST_HEAD(&udev->timedout_entry);
1375 INIT_LIST_HEAD(&udev->qfull_queue);
1376 INIT_LIST_HEAD(&udev->inflight_queue);
1377 idr_init(&udev->commands);
1378
1379 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
1380 timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
1381
1382 INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
1383
1384 return &udev->se_dev;
1385}
1386
1387static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
1388{
1389 struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1390 LIST_HEAD(cmds);
1391 sense_reason_t scsi_ret;
1392 int ret;
1393
1394 if (list_empty(&udev->qfull_queue))
1395 return;
1396
1397 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
1398
1399 list_splice_init(&udev->qfull_queue, &cmds);
1400
1401 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
1402 list_del_init(&tcmu_cmd->queue_entry);
1403
1404 pr_debug("removing cmd %p on dev %s from queue\n",
1405 tcmu_cmd, udev->name);
1406
1407 if (fail) {
1408 /*
1409 * We were not able to even start the command, so
1410 * fail with busy to allow a retry in case runner
1411 * was only temporarily down. If the device is being
1412 * removed then LIO core will do the right thing and
1413 * fail the retry.
1414 */
1415 target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
1416 tcmu_free_cmd(tcmu_cmd);
1417 continue;
1418 }
1419
1420 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
1421 if (ret < 0) {
1422 pr_debug("cmd %p on dev %s failed with %u\n",
1423 tcmu_cmd, udev->name, scsi_ret);
1424 /*
1425 * Ignore scsi_ret for now. target_complete_cmd
1426 * drops it.
1427 */
1428 target_complete_cmd(tcmu_cmd->se_cmd,
1429 SAM_STAT_CHECK_CONDITION);
1430 tcmu_free_cmd(tcmu_cmd);
1431 } else if (ret > 0) {
1432 pr_debug("ran out of space during cmdr queue run\n");
1433 /*
1434 * cmd was requeued, so just put all cmds back in
1435 * the queue
1436 */
1437 list_splice_tail(&cmds, &udev->qfull_queue);
1438 break;
1439 }
1440 }
1441
1442 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
1443}
1444
1445static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
1446{
1447 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1448
1449 mutex_lock(&udev->cmdr_lock);
1450 tcmu_handle_completions(udev);
1451 run_qfull_queue(udev, false);
1452 mutex_unlock(&udev->cmdr_lock);
1453
1454 return 0;
1455}
1456
1457/*
1458 * mmap code from uio.c. Copied here because we want to hook mmap()
1459 * and this stuff must come along.
1460 */
1461static int tcmu_find_mem_index(struct vm_area_struct *vma)
1462{
1463 struct tcmu_dev *udev = vma->vm_private_data;
1464 struct uio_info *info = &udev->uio_info;
1465
1466 if (vma->vm_pgoff < MAX_UIO_MAPS) {
1467 if (info->mem[vma->vm_pgoff].size == 0)
1468 return -1;
1469 return (int)vma->vm_pgoff;
1470 }
1471 return -1;
1472}
1473
1474static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
1475{
1476 struct page *page;
1477
1478 mutex_lock(&udev->cmdr_lock);
1479 page = tcmu_get_block_page(udev, dbi);
1480 if (likely(page)) {
1481 mutex_unlock(&udev->cmdr_lock);
1482 return page;
1483 }
1484
1485 /*
1486 * Userspace messed up and passed in an address not in the
1487 * data iov passed to it.
1488 */
1489 pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n",
1490 dbi, udev->name);
1491 page = NULL;
1492 mutex_unlock(&udev->cmdr_lock);
1493
1494 return page;
1495}
1496
1497static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
1498{
1499 struct tcmu_dev *udev = vmf->vma->vm_private_data;
1500 struct uio_info *info = &udev->uio_info;
1501 struct page *page;
1502 unsigned long offset;
1503 void *addr;
1504
1505 int mi = tcmu_find_mem_index(vmf->vma);
1506 if (mi < 0)
1507 return VM_FAULT_SIGBUS;
1508
1509 /*
1510 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
1511 * to use mem[N].
1512 */
1513 offset = (vmf->pgoff - mi) << PAGE_SHIFT;
1514
1515 if (offset < udev->data_off) {
1516 /* For the vmalloc()ed cmd area pages */
1517 addr = (void *)(unsigned long)info->mem[mi].addr + offset;
1518 page = vmalloc_to_page(addr);
1519 } else {
1520 uint32_t dbi;
1521
1522 /* For the dynamically growing data area pages */
1523 dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
1524 page = tcmu_try_get_block_page(udev, dbi);
1525 if (!page)
1526 return VM_FAULT_SIGBUS;
1527 }
1528
1529 get_page(page);
1530 vmf->page = page;
1531 return 0;
1532}
1533
1534static const struct vm_operations_struct tcmu_vm_ops = {
1535 .fault = tcmu_vma_fault,
1536};
1537
1538static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
1539{
1540 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1541
1542 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1543 vma->vm_ops = &tcmu_vm_ops;
1544
1545 vma->vm_private_data = udev;
1546
1547 /* Ensure the mmap is exactly the right size */
1548 if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT))
1549 return -EINVAL;
1550
1551 return 0;
1552}
1553
1554static int tcmu_open(struct uio_info *info, struct inode *inode)
1555{
1556 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1557
1558 /* O_EXCL not supported for char devs, so fake it? */
1559 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
1560 return -EBUSY;
1561
1562 udev->inode = inode;
1563 kref_get(&udev->kref);
1564
1565 pr_debug("open\n");
1566
1567 return 0;
1568}
1569
1570static void tcmu_dev_call_rcu(struct rcu_head *p)
1571{
1572 struct se_device *dev = container_of(p, struct se_device, rcu_head);
1573 struct tcmu_dev *udev = TCMU_DEV(dev);
1574
1575 kfree(udev->uio_info.name);
1576 kfree(udev->name);
1577 kfree(udev);
1578}
1579
1580static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
1581{
1582 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1583 kmem_cache_free(tcmu_cmd_cache, cmd);
1584 return 0;
1585 }
1586 return -EINVAL;
1587}
1588
1589static void tcmu_blocks_release(struct radix_tree_root *blocks,
1590 int start, int end)
1591{
1592 int i;
1593 struct page *page;
1594
1595 for (i = start; i < end; i++) {
1596 page = radix_tree_delete(blocks, i);
1597 if (page) {
1598 __free_page(page);
1599 atomic_dec(&global_db_count);
1600 }
1601 }
1602}
1603
1604static void tcmu_dev_kref_release(struct kref *kref)
1605{
1606 struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
1607 struct se_device *dev = &udev->se_dev;
1608 struct tcmu_cmd *cmd;
1609 bool all_expired = true;
1610 int i;
1611
1612 vfree(udev->mb_addr);
1613 udev->mb_addr = NULL;
1614
1615 spin_lock_bh(&timed_out_udevs_lock);
1616 if (!list_empty(&udev->timedout_entry))
1617 list_del(&udev->timedout_entry);
1618 spin_unlock_bh(&timed_out_udevs_lock);
1619
1620 /* Upper layer should drain all requests before calling this */
1621 mutex_lock(&udev->cmdr_lock);
1622 idr_for_each_entry(&udev->commands, cmd, i) {
1623 if (tcmu_check_and_free_pending_cmd(cmd) != 0)
1624 all_expired = false;
1625 }
1626 if (!list_empty(&udev->qfull_queue))
1627 all_expired = false;
1628 idr_destroy(&udev->commands);
1629 WARN_ON(!all_expired);
1630
1631 tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
1632 bitmap_free(udev->data_bitmap);
1633 mutex_unlock(&udev->cmdr_lock);
1634
1635 call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
1636}
1637
1638static int tcmu_release(struct uio_info *info, struct inode *inode)
1639{
1640 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1641
1642 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
1643
1644 pr_debug("close\n");
1645 /* release ref from open */
1646 kref_put(&udev->kref, tcmu_dev_kref_release);
1647 return 0;
1648}
1649
1650static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
1651{
1652 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1653
1654 if (!tcmu_kern_cmd_reply_supported)
1655 return 0;
1656
1657 if (udev->nl_reply_supported <= 0)
1658 return 0;
1659
1660 mutex_lock(&tcmu_nl_cmd_mutex);
1661
1662 if (tcmu_netlink_blocked) {
1663 mutex_unlock(&tcmu_nl_cmd_mutex);
1664 pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
1665 udev->name);
1666 return -EAGAIN;
1667 }
1668
1669 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
1670 mutex_unlock(&tcmu_nl_cmd_mutex);
1671 pr_warn("netlink cmd %d already executing on %s\n",
1672 nl_cmd->cmd, udev->name);
1673 return -EBUSY;
1674 }
1675
1676 memset(nl_cmd, 0, sizeof(*nl_cmd));
1677 nl_cmd->cmd = cmd;
1678 nl_cmd->udev = udev;
1679 init_completion(&nl_cmd->complete);
1680 INIT_LIST_HEAD(&nl_cmd->nl_list);
1681
1682 list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);
1683
1684 mutex_unlock(&tcmu_nl_cmd_mutex);
1685 return 0;
1686}
1687
1688static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
1689{
1690 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1691
1692 if (!tcmu_kern_cmd_reply_supported)
1693 return;
1694
1695 if (udev->nl_reply_supported <= 0)
1696 return;
1697
1698 mutex_lock(&tcmu_nl_cmd_mutex);
1699
1700 list_del(&nl_cmd->nl_list);
1701 memset(nl_cmd, 0, sizeof(*nl_cmd));
1702
1703 mutex_unlock(&tcmu_nl_cmd_mutex);
1704}
1705
1706static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
1707{
1708 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1709 int ret;
1710
1711 if (!tcmu_kern_cmd_reply_supported)
1712 return 0;
1713
1714 if (udev->nl_reply_supported <= 0)
1715 return 0;
1716
1717 pr_debug("sleeping for nl reply\n");
1718 wait_for_completion(&nl_cmd->complete);
1719
1720 mutex_lock(&tcmu_nl_cmd_mutex);
1721 nl_cmd->cmd = TCMU_CMD_UNSPEC;
1722 ret = nl_cmd->status;
1723 mutex_unlock(&tcmu_nl_cmd_mutex);
1724
1725 return ret;
1726}
1727
1728static int tcmu_netlink_event_init(struct tcmu_dev *udev,
1729 enum tcmu_genl_cmd cmd,
1730 struct sk_buff **buf, void **hdr)
1731{
1732 struct sk_buff *skb;
1733 void *msg_header;
1734 int ret = -ENOMEM;
1735
1736 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1737 if (!skb)
1738 return ret;
1739
1740 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
1741 if (!msg_header)
1742 goto free_skb;
1743
1744 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
1745 if (ret < 0)
1746 goto free_skb;
1747
1748 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
1749 if (ret < 0)
1750 goto free_skb;
1751
1752 ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
1753 if (ret < 0)
1754 goto free_skb;
1755
1756 *buf = skb;
1757 *hdr = msg_header;
1758 return ret;
1759
1760free_skb:
1761 nlmsg_free(skb);
1762 return ret;
1763}
1764
1765static int tcmu_netlink_event_send(struct tcmu_dev *udev,
1766 enum tcmu_genl_cmd cmd,
1767 struct sk_buff *skb, void *msg_header)
1768{
1769 int ret;
1770
1771 genlmsg_end(skb, msg_header);
1772
1773 ret = tcmu_init_genl_cmd_reply(udev, cmd);
1774 if (ret) {
1775 nlmsg_free(skb);
1776 return ret;
1777 }
1778
1779 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
1780 TCMU_MCGRP_CONFIG, GFP_KERNEL);
1781
1782 /* Wait during an add as the listener may not be up yet */
1783 if (ret == 0 ||
1784 (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
1785 return tcmu_wait_genl_cmd_reply(udev);
1786 else
1787 tcmu_destroy_genl_cmd_reply(udev);
1788
1789 return ret;
1790}
1791
1792static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
1793{
1794 struct sk_buff *skb = NULL;
1795 void *msg_header = NULL;
1796 int ret = 0;
1797
1798 ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
1799 &msg_header);
1800 if (ret < 0)
1801 return ret;
1802 return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
1803 msg_header);
1804}
1805
1806static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
1807{
1808 struct sk_buff *skb = NULL;
1809 void *msg_header = NULL;
1810 int ret = 0;
1811
1812 ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
1813 &skb, &msg_header);
1814 if (ret < 0)
1815 return ret;
1816 return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
1817 skb, msg_header);
1818}
1819
1820static int tcmu_update_uio_info(struct tcmu_dev *udev)
1821{
1822 struct tcmu_hba *hba = udev->hba->hba_ptr;
1823 struct uio_info *info;
1824 char *str;
1825
1826 info = &udev->uio_info;
1827
1828 if (udev->dev_config[0])
1829 str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
1830 udev->name, udev->dev_config);
1831 else
1832 str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
1833 udev->name);
1834 if (!str)
1835 return -ENOMEM;
1836
1837 /* If the old string exists, free it */
1838 kfree(info->name);
1839 info->name = str;
1840
1841 return 0;
1842}
1843
1844static int tcmu_configure_device(struct se_device *dev)
1845{
1846 struct tcmu_dev *udev = TCMU_DEV(dev);
1847 struct uio_info *info;
1848 struct tcmu_mailbox *mb;
1849 int ret = 0;
1850
1851 ret = tcmu_update_uio_info(udev);
1852 if (ret)
1853 return ret;
1854
1855 info = &udev->uio_info;
1856
1857 mutex_lock(&udev->cmdr_lock);
1858 udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
1859 mutex_unlock(&udev->cmdr_lock);
1860 if (!udev->data_bitmap) {
1861 ret = -ENOMEM;
1862 goto err_bitmap_alloc;
1863 }
1864
1865 udev->mb_addr = vzalloc(CMDR_SIZE);
1866 if (!udev->mb_addr) {
1867 ret = -ENOMEM;
1868 goto err_vzalloc;
1869 }
1870
1871 /* mailbox fits in first part of CMDR space */
1872 udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
1873 udev->data_off = CMDR_SIZE;
1874 udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE;
1875 udev->dbi_thresh = 0; /* Default in Idle state */
1876
1877 /* Initialise the mailbox of the ring buffer */
1878 mb = udev->mb_addr;
1879 mb->version = TCMU_MAILBOX_VERSION;
1880 mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
1881 mb->cmdr_off = CMDR_OFF;
1882 mb->cmdr_size = udev->cmdr_size;
1883
1884 WARN_ON(!PAGE_ALIGNED(udev->data_off));
1885 WARN_ON(udev->data_size % PAGE_SIZE);
1886 WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
1887
1888 info->version = __stringify(TCMU_MAILBOX_VERSION);
1889
1890 info->mem[0].name = "tcm-user command & data buffer";
1891 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
1892 info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE;
1893 info->mem[0].memtype = UIO_MEM_NONE;
1894
1895 info->irqcontrol = tcmu_irqcontrol;
1896 info->irq = UIO_IRQ_CUSTOM;
1897
1898 info->mmap = tcmu_mmap;
1899 info->open = tcmu_open;
1900 info->release = tcmu_release;
1901
1902 ret = uio_register_device(tcmu_root_device, info);
1903 if (ret)
1904 goto err_register;
1905
1906 /* User can set hw_block_size before enabling the device */
1907 if (dev->dev_attrib.hw_block_size == 0)
1908 dev->dev_attrib.hw_block_size = 512;
1909 /* Other attributes can be configured in userspace */
1910 if (!dev->dev_attrib.hw_max_sectors)
1911 dev->dev_attrib.hw_max_sectors = 128;
1912 if (!dev->dev_attrib.emulate_write_cache)
1913 dev->dev_attrib.emulate_write_cache = 0;
1914 dev->dev_attrib.hw_queue_depth = 128;
1915
1916 /* If user didn't explicitly disable netlink reply support, use
1917 * module scope setting.
1918 */
1919 if (udev->nl_reply_supported >= 0)
1920 udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
1921
1922 /*
1923 * Get a ref in case userspace does a close on the uio device before
1924 * LIO has initiated tcmu_free_device.
1925 */
1926 kref_get(&udev->kref);
1927
1928 ret = tcmu_send_dev_add_event(udev);
1929 if (ret)
1930 goto err_netlink;
1931
1932 mutex_lock(&root_udev_mutex);
1933 list_add(&udev->node, &root_udev);
1934 mutex_unlock(&root_udev_mutex);
1935
1936 return 0;
1937
1938err_netlink:
1939 kref_put(&udev->kref, tcmu_dev_kref_release);
1940 uio_unregister_device(&udev->uio_info);
1941err_register:
1942 vfree(udev->mb_addr);
1943 udev->mb_addr = NULL;
1944err_vzalloc:
1945 bitmap_free(udev->data_bitmap);
1946 udev->data_bitmap = NULL;
1947err_bitmap_alloc:
1948 kfree(info->name);
1949 info->name = NULL;
1950
1951 return ret;
1952}
1953
1954static void tcmu_free_device(struct se_device *dev)
1955{
1956 struct tcmu_dev *udev = TCMU_DEV(dev);
1957
1958 /* release ref from init */
1959 kref_put(&udev->kref, tcmu_dev_kref_release);
1960}
1961
1962static void tcmu_destroy_device(struct se_device *dev)
1963{
1964 struct tcmu_dev *udev = TCMU_DEV(dev);
1965
1966 del_timer_sync(&udev->cmd_timer);
1967 del_timer_sync(&udev->qfull_timer);
1968
1969 mutex_lock(&root_udev_mutex);
1970 list_del(&udev->node);
1971 mutex_unlock(&root_udev_mutex);
1972
1973 tcmu_send_dev_remove_event(udev);
1974
1975 uio_unregister_device(&udev->uio_info);
1976
1977 /* release ref from configure */
1978 kref_put(&udev->kref, tcmu_dev_kref_release);
1979}
1980
1981static void tcmu_unblock_dev(struct tcmu_dev *udev)
1982{
1983 mutex_lock(&udev->cmdr_lock);
1984 clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
1985 mutex_unlock(&udev->cmdr_lock);
1986}
1987
1988static void tcmu_block_dev(struct tcmu_dev *udev)
1989{
1990 mutex_lock(&udev->cmdr_lock);
1991
1992 if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
1993 goto unlock;
1994
1995 /* complete IO that has executed successfully */
1996 tcmu_handle_completions(udev);
1997 /* fail IO waiting to be queued */
1998 run_qfull_queue(udev, true);
1999
2000unlock:
2001 mutex_unlock(&udev->cmdr_lock);
2002}
2003
2004static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
2005{
2006 struct tcmu_mailbox *mb;
2007 struct tcmu_cmd *cmd;
2008 int i;
2009
2010 mutex_lock(&udev->cmdr_lock);
2011
2012 idr_for_each_entry(&udev->commands, cmd, i) {
2013 pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
2014 cmd->cmd_id, udev->name,
2015 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
2016
2017 idr_remove(&udev->commands, i);
2018 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
2019 WARN_ON(!cmd->se_cmd);
2020 list_del_init(&cmd->queue_entry);
2021 if (err_level == 1) {
2022 /*
2023 * Userspace was not able to start the
2024 * command or it is retryable.
2025 */
2026 target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
2027 } else {
2028 /* hard failure */
2029 target_complete_cmd(cmd->se_cmd,
2030 SAM_STAT_CHECK_CONDITION);
2031 }
2032 }
2033 tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
2034 tcmu_free_cmd(cmd);
2035 }
2036
2037 mb = udev->mb_addr;
2038 tcmu_flush_dcache_range(mb, sizeof(*mb));
2039 pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned,
2040 mb->cmd_tail, mb->cmd_head);
2041
2042 udev->cmdr_last_cleaned = 0;
2043 mb->cmd_tail = 0;
2044 mb->cmd_head = 0;
2045 tcmu_flush_dcache_range(mb, sizeof(*mb));
2046 clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
2047
2048 del_timer(&udev->cmd_timer);
2049
2050 run_qfull_queue(udev, false);
2051
2052 mutex_unlock(&udev->cmdr_lock);
2053}
2054
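/*
 * Options accepted through the device's configfs control file and parsed
 * by tcmu_set_configfs_dev_params() below. Options are separated by ','
 * or '\n'; unrecognized tokens are silently ignored.
 */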
2055enum {
2056 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
2057 Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err,
2058};
2059
2060static match_table_t tokens = {
2061 {Opt_dev_config, "dev_config=%s"},
2062 {Opt_dev_size, "dev_size=%s"},
2063 {Opt_hw_block_size, "hw_block_size=%d"},
2064 {Opt_hw_max_sectors, "hw_max_sectors=%d"},
2065 {Opt_nl_reply_supported, "nl_reply_supported=%d"},
2066 {Opt_max_data_area_mb, "max_data_area_mb=%d"},
2067 {Opt_err, NULL}
2068};
2069
2070static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
2071{
2072 int val, ret;
2073
2074 ret = match_int(arg, &val);
2075 if (ret < 0) {
2076 pr_err("match_int() failed for dev attrib. Error %d.\n",
2077 ret);
2078 return ret;
2079 }
2080
2081 if (val <= 0) {
2082 pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
2083 val);
2084 return -EINVAL;
2085 }
2086 *dev_attrib = val;
2087 return 0;
2088}
2089
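/*
 * max_data_area_mb is converted to data blocks with TCMU_MBS_TO_BLOCKS()
 * and capped (with a warning) at tcmu_global_max_blocks. It can only be
 * changed while data_bitmap is still unallocated, i.e. before the device
 * has been configured.
 */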
2090static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
2091{
2092 int val, ret;
2093
2094 ret = match_int(arg, &val);
2095 if (ret < 0) {
2096 pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
2097 ret);
2098 return ret;
2099 }
2100
2101 if (val <= 0) {
2102 pr_err("Invalid max_data_area %d.\n", val);
2103 return -EINVAL;
2104 }
2105
2106 mutex_lock(&udev->cmdr_lock);
2107 if (udev->data_bitmap) {
2108		pr_err("Cannot set max_data_area_mb after the device has been enabled.\n");
2109 ret = -EINVAL;
2110 goto unlock;
2111 }
2112
2113 udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
2114 if (udev->max_blocks > tcmu_global_max_blocks) {
2115 pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
2116 val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
2117 udev->max_blocks = tcmu_global_max_blocks;
2118 }
2119
2120unlock:
2121 mutex_unlock(&udev->cmdr_lock);
2122 return ret;
2123}
2124
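/*
 * Entry point for writes to the device's control file. Illustrative usage
 * from userspace (the configfs path and values are examples only):
 *
 *   echo "dev_config=foo/bar,dev_size=1073741824" > \
 *       /sys/kernel/config/target/core/user_0/mydev/control
 */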
2125static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
2126 const char *page, ssize_t count)
2127{
2128 struct tcmu_dev *udev = TCMU_DEV(dev);
2129 char *orig, *ptr, *opts;
2130 substring_t args[MAX_OPT_ARGS];
2131 int ret = 0, token;
2132
2133 opts = kstrdup(page, GFP_KERNEL);
2134 if (!opts)
2135 return -ENOMEM;
2136
2137 orig = opts;
2138
2139 while ((ptr = strsep(&opts, ",\n")) != NULL) {
2140 if (!*ptr)
2141 continue;
2142
2143 token = match_token(ptr, tokens, args);
2144 switch (token) {
2145 case Opt_dev_config:
2146 if (match_strlcpy(udev->dev_config, &args[0],
2147 TCMU_CONFIG_LEN) == 0) {
2148 ret = -EINVAL;
2149 break;
2150 }
2151 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
2152 break;
2153 case Opt_dev_size:
2154 ret = match_u64(&args[0], &udev->dev_size);
2155 if (ret < 0)
2156 pr_err("match_u64() failed for dev_size=. Error %d.\n",
2157 ret);
2158 break;
2159 case Opt_hw_block_size:
2160 ret = tcmu_set_dev_attrib(&args[0],
2161 &(dev->dev_attrib.hw_block_size));
2162 break;
2163 case Opt_hw_max_sectors:
2164 ret = tcmu_set_dev_attrib(&args[0],
2165 &(dev->dev_attrib.hw_max_sectors));
2166 break;
2167 case Opt_nl_reply_supported:
2168 ret = match_int(&args[0], &udev->nl_reply_supported);
2169 if (ret < 0)
2170 pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
2171 ret);
2172 break;
2173 case Opt_max_data_area_mb:
2174 ret = tcmu_set_max_blocks_param(udev, &args[0]);
2175 break;
2176 default:
2177 break;
2178 }
2179
2180 if (ret)
2181 break;
2182 }
2183
2184 kfree(orig);
2185 return (!ret) ? count : ret;
2186}
2187
2188static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
2189{
2190 struct tcmu_dev *udev = TCMU_DEV(dev);
2191 ssize_t bl = 0;
2192
2193 bl = sprintf(b + bl, "Config: %s ",
2194 udev->dev_config[0] ? udev->dev_config : "NULL");
2195 bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
2196 bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
2197 TCMU_BLOCKS_TO_MBS(udev->max_blocks));
2198
2199 return bl;
2200}
2201
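/*
 * As with other LIO backends, report the last addressable LBA rather than
 * the raw block count, hence dev_size is reduced by one block before
 * dividing.
 */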
2202static sector_t tcmu_get_blocks(struct se_device *dev)
2203{
2204 struct tcmu_dev *udev = TCMU_DEV(dev);
2205
2206 return div_u64(udev->dev_size - dev->dev_attrib.block_size,
2207 dev->dev_attrib.block_size);
2208}
2209
2210static sense_reason_t
2211tcmu_parse_cdb(struct se_cmd *cmd)
2212{
2213 return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
2214}
2215
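/*
 * cmd_time_out is exposed through configfs in seconds but stored
 * internally in milliseconds, hence the MSEC_PER_SEC conversions below.
 */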
2216static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
2217{
2218 struct se_dev_attrib *da = container_of(to_config_group(item),
2219 struct se_dev_attrib, da_group);
2220 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2221
2222 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
2223}
2224
2225static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
2226 size_t count)
2227{
2228 struct se_dev_attrib *da = container_of(to_config_group(item),
2229 struct se_dev_attrib, da_group);
2230 struct tcmu_dev *udev = container_of(da->da_dev,
2231 struct tcmu_dev, se_dev);
2232 u32 val;
2233 int ret;
2234
2235 if (da->da_dev->export_count) {
2236 pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
2237 return -EINVAL;
2238 }
2239
2240 ret = kstrtou32(page, 0, &val);
2241 if (ret < 0)
2242 return ret;
2243
2244 udev->cmd_time_out = val * MSEC_PER_SEC;
2245 return count;
2246}
2247CONFIGFS_ATTR(tcmu_, cmd_time_out);
2248
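/*
 * qfull_time_out follows the same seconds-in, milliseconds-stored
 * convention as cmd_time_out, except that the special value -1 is
 * accepted and stored verbatim; any other negative value is rejected.
 */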
2249static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
2250{
2251 struct se_dev_attrib *da = container_of(to_config_group(item),
2252 struct se_dev_attrib, da_group);
2253 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2254
2255 return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
2256 udev->qfull_time_out :
2257 udev->qfull_time_out / MSEC_PER_SEC);
2258}
2259
2260static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
2261 const char *page, size_t count)
2262{
2263 struct se_dev_attrib *da = container_of(to_config_group(item),
2264 struct se_dev_attrib, da_group);
2265 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2266 s32 val;
2267 int ret;
2268
2269 ret = kstrtos32(page, 0, &val);
2270 if (ret < 0)
2271 return ret;
2272
2273 if (val >= 0) {
2274 udev->qfull_time_out = val * MSEC_PER_SEC;
2275 } else if (val == -1) {
2276 udev->qfull_time_out = val;
2277 } else {
2278		pr_err("Invalid qfull timeout value %d\n", val);
2279 return -EINVAL;
2280 }
2281 return count;
2282}
2283CONFIGFS_ATTR(tcmu_, qfull_time_out);
2284
2285static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
2286{
2287 struct se_dev_attrib *da = container_of(to_config_group(item),
2288 struct se_dev_attrib, da_group);
2289 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2290
2291 return snprintf(page, PAGE_SIZE, "%u\n",
2292 TCMU_BLOCKS_TO_MBS(udev->max_blocks));
2293}
2294CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
2295
2296static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
2297{
2298 struct se_dev_attrib *da = container_of(to_config_group(item),
2299 struct se_dev_attrib, da_group);
2300 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2301
2302 return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
2303}
2304
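/*
 * Changing dev_config, dev_size or emulate_write_cache on an already
 * configured device sends a TCMU_CMD_RECONFIG_DEVICE netlink event so the
 * userspace handler can react; when netlink replies are supported,
 * tcmu_netlink_event_send() waits for userspace to acknowledge the event
 * before the store returns.
 */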
2305static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
2306 const char *reconfig_data)
2307{
2308 struct sk_buff *skb = NULL;
2309 void *msg_header = NULL;
2310 int ret = 0;
2311
2312 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2313 &skb, &msg_header);
2314 if (ret < 0)
2315 return ret;
2316 ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
2317 if (ret < 0) {
2318 nlmsg_free(skb);
2319 return ret;
2320 }
2321 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2322 skb, msg_header);
2323}
2324
2325
2326static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
2327 size_t count)
2328{
2329 struct se_dev_attrib *da = container_of(to_config_group(item),
2330 struct se_dev_attrib, da_group);
2331 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2332 int ret, len;
2333
2334 len = strlen(page);
2335 if (!len || len > TCMU_CONFIG_LEN - 1)
2336 return -EINVAL;
2337
2338	/* Check if the device has already been configured */
2339 if (target_dev_configured(&udev->se_dev)) {
2340 ret = tcmu_send_dev_config_event(udev, page);
2341 if (ret) {
2342 pr_err("Unable to reconfigure device\n");
2343 return ret;
2344 }
2345 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2346
2347 ret = tcmu_update_uio_info(udev);
2348 if (ret)
2349 return ret;
2350 return count;
2351 }
2352 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2353
2354 return count;
2355}
2356CONFIGFS_ATTR(tcmu_, dev_config);
2357
2358static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
2359{
2360 struct se_dev_attrib *da = container_of(to_config_group(item),
2361 struct se_dev_attrib, da_group);
2362 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2363
2364 return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
2365}
2366
2367static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
2368{
2369 struct sk_buff *skb = NULL;
2370 void *msg_header = NULL;
2371 int ret = 0;
2372
2373 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2374 &skb, &msg_header);
2375 if (ret < 0)
2376 return ret;
2377 ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
2378 size, TCMU_ATTR_PAD);
2379 if (ret < 0) {
2380 nlmsg_free(skb);
2381 return ret;
2382 }
2383 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2384 skb, msg_header);
2385}
2386
2387static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
2388 size_t count)
2389{
2390 struct se_dev_attrib *da = container_of(to_config_group(item),
2391 struct se_dev_attrib, da_group);
2392 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2393 u64 val;
2394 int ret;
2395
2396 ret = kstrtou64(page, 0, &val);
2397 if (ret < 0)
2398 return ret;
2399
2400	/* Check if the device has already been configured */
2401 if (target_dev_configured(&udev->se_dev)) {
2402 ret = tcmu_send_dev_size_event(udev, val);
2403 if (ret) {
2404 pr_err("Unable to reconfigure device\n");
2405 return ret;
2406 }
2407 }
2408 udev->dev_size = val;
2409 return count;
2410}
2411CONFIGFS_ATTR(tcmu_, dev_size);
2412
2413static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
2414 char *page)
2415{
2416 struct se_dev_attrib *da = container_of(to_config_group(item),
2417 struct se_dev_attrib, da_group);
2418 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2419
2420 return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
2421}
2422
2423static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
2424 const char *page, size_t count)
2425{
2426 struct se_dev_attrib *da = container_of(to_config_group(item),
2427 struct se_dev_attrib, da_group);
2428 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2429 s8 val;
2430 int ret;
2431
2432 ret = kstrtos8(page, 0, &val);
2433 if (ret < 0)
2434 return ret;
2435
2436 udev->nl_reply_supported = val;
2437 return count;
2438}
2439CONFIGFS_ATTR(tcmu_, nl_reply_supported);
2440
2441static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
2442 char *page)
2443{
2444 struct se_dev_attrib *da = container_of(to_config_group(item),
2445 struct se_dev_attrib, da_group);
2446
2447 return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
2448}
2449
2450static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
2451{
2452 struct sk_buff *skb = NULL;
2453 void *msg_header = NULL;
2454 int ret = 0;
2455
2456 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2457 &skb, &msg_header);
2458 if (ret < 0)
2459 return ret;
2460 ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
2461 if (ret < 0) {
2462 nlmsg_free(skb);
2463 return ret;
2464 }
2465 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2466 skb, msg_header);
2467}
2468
2469static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
2470 const char *page, size_t count)
2471{
2472 struct se_dev_attrib *da = container_of(to_config_group(item),
2473 struct se_dev_attrib, da_group);
2474 struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2475 u8 val;
2476 int ret;
2477
2478 ret = kstrtou8(page, 0, &val);
2479 if (ret < 0)
2480 return ret;
2481
2482	/* Check if the device has already been configured */
2483 if (target_dev_configured(&udev->se_dev)) {
2484 ret = tcmu_send_emulate_write_cache(udev, val);
2485 if (ret) {
2486 pr_err("Unable to reconfigure device\n");
2487 return ret;
2488 }
2489 }
2490
2491 da->emulate_write_cache = val;
2492 return count;
2493}
2494CONFIGFS_ATTR(tcmu_, emulate_write_cache);
2495
2496static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
2497{
2498 struct se_device *se_dev = container_of(to_config_group(item),
2499 struct se_device,
2500 dev_action_group);
2501 struct tcmu_dev *udev = TCMU_DEV(se_dev);
2502
2503 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
2504 return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
2505 else
2506 return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
2507}
2508
2509static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
2510 size_t count)
2511{
2512 struct se_device *se_dev = container_of(to_config_group(item),
2513 struct se_device,
2514 dev_action_group);
2515 struct tcmu_dev *udev = TCMU_DEV(se_dev);
2516 u8 val;
2517 int ret;
2518
2519 if (!target_dev_configured(&udev->se_dev)) {
2520 pr_err("Device is not configured.\n");
2521 return -EINVAL;
2522 }
2523
2524 ret = kstrtou8(page, 0, &val);
2525 if (ret < 0)
2526 return ret;
2527
2528 if (val > 1) {
2529 pr_err("Invalid block value %d\n", val);
2530 return -EINVAL;
2531 }
2532
2533 if (!val)
2534 tcmu_unblock_dev(udev);
2535 else
2536 tcmu_block_dev(udev);
2537 return count;
2538}
2539CONFIGFS_ATTR(tcmu_, block_dev);
2540
2541static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
2542 size_t count)
2543{
2544 struct se_device *se_dev = container_of(to_config_group(item),
2545 struct se_device,
2546 dev_action_group);
2547 struct tcmu_dev *udev = TCMU_DEV(se_dev);
2548 u8 val;
2549 int ret;
2550
2551 if (!target_dev_configured(&udev->se_dev)) {
2552 pr_err("Device is not configured.\n");
2553 return -EINVAL;
2554 }
2555
2556 ret = kstrtou8(page, 0, &val);
2557 if (ret < 0)
2558 return ret;
2559
2560 if (val != 1 && val != 2) {
2561 pr_err("Invalid reset ring value %d\n", val);
2562 return -EINVAL;
2563 }
2564
2565 tcmu_reset_ring(udev, val);
2566 return count;
2567}
2568CONFIGFS_ATTR_WO(tcmu_, reset_ring);
2569
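/*
 * Illustrative use of the two action attributes above (the configfs path
 * is an example only):
 *
 *   echo 1 > /sys/kernel/config/target/core/user_0/mydev/action/block_dev
 *   echo 2 > /sys/kernel/config/target/core/user_0/mydev/action/reset_ring
 */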
2570static struct configfs_attribute *tcmu_attrib_attrs[] = {
2571 &tcmu_attr_cmd_time_out,
2572 &tcmu_attr_qfull_time_out,
2573 &tcmu_attr_max_data_area_mb,
2574 &tcmu_attr_dev_config,
2575 &tcmu_attr_dev_size,
2576 &tcmu_attr_emulate_write_cache,
2577 &tcmu_attr_nl_reply_supported,
2578 NULL,
2579};
2580
2581static struct configfs_attribute **tcmu_attrs;
2582
2583static struct configfs_attribute *tcmu_action_attrs[] = {
2584 &tcmu_attr_block_dev,
2585 &tcmu_attr_reset_ring,
2586 NULL,
2587};
2588
2589static struct target_backend_ops tcmu_ops = {
2590 .name = "user",
2591 .owner = THIS_MODULE,
2592 .transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH,
2593 .transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR |
2594 TRANSPORT_FLAG_PASSTHROUGH_ALUA,
2595 .attach_hba = tcmu_attach_hba,
2596 .detach_hba = tcmu_detach_hba,
2597 .alloc_device = tcmu_alloc_device,
2598 .configure_device = tcmu_configure_device,
2599 .destroy_device = tcmu_destroy_device,
2600 .free_device = tcmu_free_device,
2601 .parse_cdb = tcmu_parse_cdb,
2602 .set_configfs_dev_params = tcmu_set_configfs_dev_params,
2603 .show_configfs_dev_params = tcmu_show_configfs_dev_params,
2604 .get_device_type = sbc_get_device_type,
2605 .get_blocks = tcmu_get_blocks,
2606 .tb_dev_action_attrs = tcmu_action_attrs,
2607};
2608
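/*
 * Worker half of the global data-block limit: once global_db_count has
 * grown past tcmu_global_max_blocks, walk all devices, lower each one's
 * dbi_thresh to just past its last used block, unmap the now-unused tail
 * of the data area from userspace and release the backing pages. If the
 * global count is still over the limit afterwards, retry in 5 seconds.
 */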
2609static void find_free_blocks(void)
2610{
2611 struct tcmu_dev *udev;
2612 loff_t off;
2613 u32 start, end, block, total_freed = 0;
2614
2615 if (atomic_read(&global_db_count) <= tcmu_global_max_blocks)
2616 return;
2617
2618 mutex_lock(&root_udev_mutex);
2619 list_for_each_entry(udev, &root_udev, node) {
2620 mutex_lock(&udev->cmdr_lock);
2621
2622 if (!target_dev_configured(&udev->se_dev)) {
2623 mutex_unlock(&udev->cmdr_lock);
2624 continue;
2625 }
2626
2627 /* Try to complete the finished commands first */
2628 tcmu_handle_completions(udev);
2629
2630		/* Skip udevs that are idle */
2631 if (!udev->dbi_thresh) {
2632 mutex_unlock(&udev->cmdr_lock);
2633 continue;
2634 }
2635
2636 end = udev->dbi_max + 1;
2637 block = find_last_bit(udev->data_bitmap, end);
2638 if (block == udev->dbi_max) {
2639			/*
2640			 * The last bit is dbi_max, so it is not possible to
2641			 * reclaim any blocks.
2642			 */
2643 mutex_unlock(&udev->cmdr_lock);
2644 continue;
2645 } else if (block == end) {
2646			/* The current udev will go to the idle state */
2647 udev->dbi_thresh = start = 0;
2648 udev->dbi_max = 0;
2649 } else {
2650 udev->dbi_thresh = start = block + 1;
2651 udev->dbi_max = block;
2652 }
2653
2654		/* Unmap the data area from offset off to the end */
2655 off = udev->data_off + start * DATA_BLOCK_SIZE;
2656 unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
2657
2658 /* Release the block pages */
2659 tcmu_blocks_release(&udev->data_blocks, start, end);
2660 mutex_unlock(&udev->cmdr_lock);
2661
2662 total_freed += end - start;
2663 pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
2664 total_freed, udev->name);
2665 }
2666 mutex_unlock(&root_udev_mutex);
2667
2668 if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
2669 schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
2670}
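/*
 * Handle devices whose cmd or qfull timer has fired: expire timed-out
 * commands on the ring and on the qfull queue, then re-arm the timers for
 * whatever is still pending.
 */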
2671
2672static void check_timedout_devices(void)
2673{
2674 struct tcmu_dev *udev, *tmp_dev;
2675 struct tcmu_cmd *cmd, *tmp_cmd;
2676 LIST_HEAD(devs);
2677
2678 spin_lock_bh(&timed_out_udevs_lock);
2679 list_splice_init(&timed_out_udevs, &devs);
2680
2681 list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
2682 list_del_init(&udev->timedout_entry);
2683 spin_unlock_bh(&timed_out_udevs_lock);
2684
2685 mutex_lock(&udev->cmdr_lock);
2686
2687		/*
2688		 * If cmd_time_out is disabled but qfull is set, the deadline
2689		 * will only reflect the qfull timeout. Ignore it.
2690		 */
2691 if (udev->cmd_time_out) {
2692 list_for_each_entry_safe(cmd, tmp_cmd,
2693 &udev->inflight_queue,
2694 queue_entry) {
2695 tcmu_check_expired_ring_cmd(cmd);
2696 }
2697 tcmu_set_next_deadline(&udev->inflight_queue,
2698 &udev->cmd_timer);
2699 }
2700 list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
2701 queue_entry) {
2702 tcmu_check_expired_queue_cmd(cmd);
2703 }
2704 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
2705
2706 mutex_unlock(&udev->cmdr_lock);
2707
2708 spin_lock_bh(&timed_out_udevs_lock);
2709 }
2710
2711 spin_unlock_bh(&timed_out_udevs_lock);
2712}
2713
2714static void tcmu_unmap_work_fn(struct work_struct *work)
2715{
2716 check_timedout_devices();
2717 find_free_blocks();
2718}
2719
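/*
 * Module init builds the backend attribute list (the generic passthrough
 * attributes plus the tcmu-specific ones above, with room for the
 * terminating NULL) and registers the tcm_user root device, the netlink
 * family and the "user" backend.
 */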
2720static int __init tcmu_module_init(void)
2721{
2722 int ret, i, k, len = 0;
2723
2724 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
2725
2726 INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);
2727
2728 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
2729 sizeof(struct tcmu_cmd),
2730 __alignof__(struct tcmu_cmd),
2731 0, NULL);
2732 if (!tcmu_cmd_cache)
2733 return -ENOMEM;
2734
2735 tcmu_root_device = root_device_register("tcm_user");
2736 if (IS_ERR(tcmu_root_device)) {
2737 ret = PTR_ERR(tcmu_root_device);
2738 goto out_free_cache;
2739 }
2740
2741 ret = genl_register_family(&tcmu_genl_family);
2742 if (ret < 0) {
2743 goto out_unreg_device;
2744 }
2745
2746 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
2747 len += sizeof(struct configfs_attribute *);
2748 for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++)
2749 len += sizeof(struct configfs_attribute *);
2750 for (i = 0; tcmu_attrib_attrs[i] != NULL; i++)
2751 len += sizeof(struct configfs_attribute *);
2752 len += sizeof(struct configfs_attribute *);
2753
2754 tcmu_attrs = kzalloc(len, GFP_KERNEL);
2755 if (!tcmu_attrs) {
2756 ret = -ENOMEM;
2757 goto out_unreg_genl;
2758 }
2759
2760 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
2761 tcmu_attrs[i] = passthrough_attrib_attrs[i];
2762 for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++)
2763 tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k];
2764 for (k = 0; tcmu_attrib_attrs[k] != NULL; k++)
2765 tcmu_attrs[i++] = tcmu_attrib_attrs[k];
2766 tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
2767
2768 ret = transport_backend_register(&tcmu_ops);
2769 if (ret)
2770 goto out_attrs;
2771
2772 return 0;
2773
2774out_attrs:
2775 kfree(tcmu_attrs);
2776out_unreg_genl:
2777 genl_unregister_family(&tcmu_genl_family);
2778out_unreg_device:
2779 root_device_unregister(tcmu_root_device);
2780out_free_cache:
2781 kmem_cache_destroy(tcmu_cmd_cache);
2782
2783 return ret;
2784}
2785
2786static void __exit tcmu_module_exit(void)
2787{
2788 cancel_delayed_work_sync(&tcmu_unmap_work);
2789 target_backend_unregister(&tcmu_ops);
2790 kfree(tcmu_attrs);
2791 genl_unregister_family(&tcmu_genl_family);
2792 root_device_unregister(tcmu_root_device);
2793 kmem_cache_destroy(tcmu_cmd_cache);
2794}
2795
2796MODULE_DESCRIPTION("TCM USER subsystem plugin");
2797MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
2798MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
2799MODULE_LICENSE("GPL");
2800
2801module_init(tcmu_module_init);
2802module_exit(tcmu_module_exit);