
s390/scm_block: force cluster writes

Force writes to Storage Class Memory (SCM) to be done in clusters.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Authored by Sebastian Ott, committed by Martin Schwidefsky
0d804b20 f30664e2
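
In short, the patch turns every write smaller than one cluster into a read-modify-write: the enclosing cluster (or two clusters, when the request straddles a cluster boundary) is first read into per-request bounce pages, then written back with the request's payload spliced in. The following is a minimal user-space sketch of the boundary arithmetic used by scm_prepare_cluster_request() in the diff below; the 64-page cluster and 4 KiB page size mirror the driver's defaults, round_down() is rewritten as a bitmask (valid because the cluster size is a power of two), and the sample sector and length are made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE       4096ULL
#define CLUSTER_SIZE    (64 * PAGE_SIZE)        /* write_cluster_size = 64 */

#define round_down(x, y)        ((x) & ~((y) - 1))

int main(void)
{
        uint64_t pos = 1000, bytes = 8192;      /* sample: sector 1000, 8 KiB write */
        uint64_t addr = pos << 9;               /* sectors are 512 bytes */
        uint64_t first = round_down(addr, CLUSTER_SIZE);
        uint64_t last = round_down(addr + bytes - 1, CLUSTER_SIZE);

        /* One cluster must be read back if the write stays inside a
         * single cluster, two if it straddles a cluster boundary. */
        printf("read %s cluster(s) starting at %#llx\n",
               first == last ? "one" : "two",
               (unsigned long long) first);
        return 0;
}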

5 files changed, +310 -3
drivers/s390/block/Kconfig (+7)

···
 
           To compile this driver as a module, choose M here: the
           module will be called scm_block.
+
+config SCM_BLOCK_CLUSTER_WRITE
+        def_bool y
+        prompt "SCM force cluster writes"
+        depends on SCM_BLOCK
+        help
+          Force writes to Storage Class Memory (SCM) to be done in clusters.
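
Because the new option is def_bool y and depends only on SCM_BLOCK, it is switched on automatically wherever the base driver is enabled. A sketch of the resulting .config fragment, assuming the base driver is built as a module:

CONFIG_SCM_BLOCK=m
CONFIG_SCM_BLOCK_CLUSTER_WRITE=y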
drivers/s390/block/Makefile (+3)

···
 obj-$(CONFIG_DCSSBLK) += dcssblk.o
 
 scm_block-objs := scm_drv.o scm_blk.o
+ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
+scm_block-objs += scm_blk_cluster.o
+endif
 obj-$(CONFIG_SCM_BLOCK) += scm_block.o
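
The ifdef block links scm_blk_cluster.o into the composite scm_block.o only when the config symbol is set. For comparison, a kbuild idiom sketch that expresses the same condition without ifdef, using the -y form of the object list throughout; this is an alternative spelling, not what the patch does:

scm_block-y := scm_drv.o scm_blk.o
scm_block-$(CONFIG_SCM_BLOCK_CLUSTER_WRITE) += scm_blk_cluster.o
obj-$(CONFIG_SCM_BLOCK) += scm_block.o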
drivers/s390/block/scm_blk.c (+34 -3)

···
 
         free_page((unsigned long) scmrq->aob);
         free_page((unsigned long) scmrq->aidaw);
+        __scm_free_rq_cluster(scmrq);
         kfree(aobrq);
 }
 
···
                 __scm_free_rq(scmrq);
                 return -ENOMEM;
         }
+
+        if (__scm_alloc_rq_cluster(scmrq)) {
+                __scm_free_rq(scmrq);
+                return -ENOMEM;
+        }
+
         INIT_LIST_HEAD(&scmrq->list);
         spin_lock_irq(&list_lock);
         list_add(&scmrq->list, &inactive_requests);
···
         scmrq->bdev = bdev;
         scmrq->retries = 4;
         scmrq->error = 0;
+        scm_request_cluster_init(scmrq);
 }
 
 static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
···
         blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
 }
 
-static void scm_request_requeue(struct scm_request *scmrq)
+void scm_request_requeue(struct scm_request *scmrq)
 {
         struct scm_blk_dev *bdev = scmrq->bdev;
 
+        scm_release_cluster(scmrq);
         blk_requeue_request(bdev->rq, scmrq->request);
         scm_request_done(scmrq);
         scm_ensure_queue_restart(bdev);
 }
 
-static void scm_request_finish(struct scm_request *scmrq)
+void scm_request_finish(struct scm_request *scmrq)
 {
+        scm_release_cluster(scmrq);
         blk_end_request_all(scmrq->request, scmrq->error);
         scm_request_done(scmrq);
 }
···
                 return;
         }
         scm_request_init(bdev, scmrq, req);
+        if (!scm_reserve_cluster(scmrq)) {
+                SCM_LOG(5, "cluster busy");
+                scm_request_done(scmrq);
+                return;
+        }
+        if (scm_need_cluster_request(scmrq)) {
+                blk_start_request(req);
+                scm_initiate_cluster_request(scmrq);
+                return;
+        }
         scm_request_prepare(scmrq);
         blk_start_request(req);
 
···
                         spin_lock_irqsave(&bdev->lock, flags);
                         continue;
                 }
+
+                if (scm_test_cluster_request(scmrq)) {
+                        scm_cluster_request_irq(scmrq);
+                        spin_lock_irqsave(&bdev->lock, flags);
+                        continue;
+                }
+
                 scm_request_finish(scmrq);
                 atomic_dec(&bdev->queued_reqs);
                 spin_lock_irqsave(&bdev->lock, flags);
···
         blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
         blk_queue_max_segments(rq, nr_max_blk);
         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
+        scm_blk_dev_cluster_setup(bdev);
 
         bdev->gendisk = alloc_disk(SCM_NR_PARTS);
         if (!bdev->gendisk)
···
 
 static int __init scm_blk_init(void)
 {
-        int ret;
+        int ret = -EINVAL;
+
+        if (!scm_cluster_size_valid())
+                goto out;
 
         ret = register_blkdev(0, "scm");
         if (ret < 0)
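
Taken together, the scm_blk.c hooks give the request function a three-way decision per struct request. A condensed, annotated view of that ladder; this is a sketch of the control flow above, not the driver's literal code:

        /* inside the request-queue handler, one struct request at a time */
        scm_request_init(bdev, scmrq, req);
        if (!scm_reserve_cluster(scmrq)) {
                /* overlaps a cluster an in-flight write is using: give the
                 * request slot back; the request stays on the queue and is
                 * picked up again on the next queue run */
                scm_request_done(scmrq);
                return;
        }
        if (scm_need_cluster_request(scmrq)) {
                /* sub-cluster write: start the read half of the
                 * read-modify-write instead of a plain transfer */
                blk_start_request(req);
                scm_initiate_cluster_request(scmrq);
                return;
        }
        /* full-cluster write, or any read: the normal path */
        scm_request_prepare(scmrq);
        blk_start_request(req);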
drivers/s390/block/scm_blk.h (+38)

···
         spinlock_t lock; /* guard the rest of the blockdev */
         atomic_t queued_reqs;
         struct list_head finished_requests;
+#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
+        struct list_head cluster_list;
+#endif
 };
 
 struct scm_request {
···
         struct list_head list;
         u8 retries;
         int error;
+#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
+        struct {
+                enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
+                struct list_head list;
+                void **buf;
+        } cluster;
+#endif
 };
 
 #define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
···
 void scm_blk_dev_cleanup(struct scm_blk_dev *);
 void scm_blk_irq(struct scm_device *, void *, int);
 
+void scm_request_finish(struct scm_request *);
+void scm_request_requeue(struct scm_request *);
+
 int scm_drv_init(void);
 void scm_drv_cleanup(void);
 
+#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
+void __scm_free_rq_cluster(struct scm_request *);
+int __scm_alloc_rq_cluster(struct scm_request *);
+void scm_request_cluster_init(struct scm_request *);
+bool scm_reserve_cluster(struct scm_request *);
+void scm_release_cluster(struct scm_request *);
+void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
+bool scm_need_cluster_request(struct scm_request *);
+void scm_initiate_cluster_request(struct scm_request *);
+void scm_cluster_request_irq(struct scm_request *);
+bool scm_test_cluster_request(struct scm_request *);
+bool scm_cluster_size_valid(void);
+#else
+#define __scm_free_rq_cluster(scmrq) {}
+#define __scm_alloc_rq_cluster(scmrq) 0
+#define scm_request_cluster_init(scmrq) {}
+#define scm_reserve_cluster(scmrq) true
+#define scm_release_cluster(scmrq) {}
+#define scm_blk_dev_cluster_setup(bdev) {}
+#define scm_need_cluster_request(scmrq) false
+#define scm_initiate_cluster_request(scmrq) {}
+#define scm_cluster_request_irq(scmrq) {}
+#define scm_test_cluster_request(scmrq) false
+#define scm_cluster_size_valid() true
+#endif
 
 extern debug_info_t *scm_debug;
 
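
The #else branch stubs every cluster hook out as a macro so scm_blk.c compiles unchanged when the option is off. A common alternative, kept here only as a hypothetical sketch rather than what the patch does, uses static inline stubs, which preserve argument type checking even in the disabled configuration:

#ifndef CONFIG_SCM_BLOCK_CLUSTER_WRITE
static inline void scm_request_cluster_init(struct scm_request *scmrq) {}
static inline bool scm_reserve_cluster(struct scm_request *scmrq)
{
        return true;
}
static inline bool scm_need_cluster_request(struct scm_request *scmrq)
{
        return false;
}
/* ...likewise for the remaining hooks */
#endif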
drivers/s390/block/scm_blk_cluster.c (new file, +228)

+/*
+ * Block driver for s390 storage class memory.
+ *
+ * Copyright IBM Corp. 2012
+ * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <asm/eadm.h>
+#include "scm_blk.h"
+
+static unsigned int write_cluster_size = 64;
+module_param(write_cluster_size, uint, S_IRUGO);
+MODULE_PARM_DESC(write_cluster_size,
+                 "Number of pages used for contiguous writes.");
+
+#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)
+
+void __scm_free_rq_cluster(struct scm_request *scmrq)
+{
+        int i;
+
+        if (!scmrq->cluster.buf)
+                return;
+
+        for (i = 0; i < 2 * write_cluster_size; i++)
+                free_page((unsigned long) scmrq->cluster.buf[i]);
+
+        kfree(scmrq->cluster.buf);
+}
+
+int __scm_alloc_rq_cluster(struct scm_request *scmrq)
+{
+        int i;
+
+        scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
+                                     GFP_KERNEL);
+        if (!scmrq->cluster.buf)
+                return -ENOMEM;
+
+        for (i = 0; i < 2 * write_cluster_size; i++) {
+                scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
+                if (!scmrq->cluster.buf[i])
+                        return -ENOMEM;
+        }
+        INIT_LIST_HEAD(&scmrq->cluster.list);
+        return 0;
+}
+
+void scm_request_cluster_init(struct scm_request *scmrq)
+{
+        scmrq->cluster.state = CLUSTER_NONE;
+}
+
+static bool clusters_intersect(struct scm_request *A, struct scm_request *B)
+{
+        unsigned long firstA, lastA, firstB, lastB;
+
+        firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE;
+        lastA = (((u64) blk_rq_pos(A->request) << 9) +
+                 blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE;
+
+        firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE;
+        lastB = (((u64) blk_rq_pos(B->request) << 9) +
+                 blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE;
+
+        return (firstB <= lastA && firstA <= lastB);
+}
+
+bool scm_reserve_cluster(struct scm_request *scmrq)
+{
+        struct scm_blk_dev *bdev = scmrq->bdev;
+        struct scm_request *iter;
+
+        if (write_cluster_size == 0)
+                return true;
+
+        spin_lock(&bdev->lock);
+        list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
+                if (clusters_intersect(scmrq, iter) &&
+                    (rq_data_dir(scmrq->request) == WRITE ||
+                     rq_data_dir(iter->request) == WRITE)) {
+                        spin_unlock(&bdev->lock);
+                        return false;
+                }
+        }
+        list_add(&scmrq->cluster.list, &bdev->cluster_list);
+        spin_unlock(&bdev->lock);
+
+        return true;
+}
+
+void scm_release_cluster(struct scm_request *scmrq)
+{
+        struct scm_blk_dev *bdev = scmrq->bdev;
+        unsigned long flags;
+
+        if (write_cluster_size == 0)
+                return;
+
+        spin_lock_irqsave(&bdev->lock, flags);
+        list_del(&scmrq->cluster.list);
+        spin_unlock_irqrestore(&bdev->lock, flags);
+}
+
+void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
+{
+        INIT_LIST_HEAD(&bdev->cluster_list);
+        blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
+}
+
+static void scm_prepare_cluster_request(struct scm_request *scmrq)
+{
+        struct scm_blk_dev *bdev = scmrq->bdev;
+        struct scm_device *scmdev = bdev->gendisk->private_data;
+        struct request *req = scmrq->request;
+        struct aidaw *aidaw = scmrq->aidaw;
+        struct msb *msb = &scmrq->aob->msb[0];
+        struct req_iterator iter;
+        struct bio_vec *bv;
+        int i = 0;
+        u64 addr;
+
+        switch (scmrq->cluster.state) {
+        case CLUSTER_NONE:
+                scmrq->cluster.state = CLUSTER_READ;
+                /* fall through */
+        case CLUSTER_READ:
+                scmrq->aob->request.msb_count = 1;
+                msb->bs = MSB_BS_4K;
+                msb->oc = MSB_OC_READ;
+                msb->flags = MSB_FLAG_IDA;
+                msb->data_addr = (u64) aidaw;
+                msb->blk_count = write_cluster_size;
+
+                addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
+                msb->scm_addr = round_down(addr, CLUSTER_SIZE);
+
+                if (msb->scm_addr !=
+                    round_down(addr + (u64) blk_rq_bytes(req) - 1,
+                               CLUSTER_SIZE))
+                        msb->blk_count = 2 * write_cluster_size;
+
+                for (i = 0; i < msb->blk_count; i++) {
+                        aidaw->data_addr = (u64) scmrq->cluster.buf[i];
+                        aidaw++;
+                }
+
+                break;
+        case CLUSTER_WRITE:
+                msb->oc = MSB_OC_WRITE;
+
+                for (addr = msb->scm_addr;
+                     addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
+                     addr += PAGE_SIZE) {
+                        aidaw->data_addr = (u64) scmrq->cluster.buf[i];
+                        aidaw++;
+                        i++;
+                }
+                rq_for_each_segment(bv, req, iter) {
+                        aidaw->data_addr = (u64) page_address(bv->bv_page);
+                        aidaw++;
+                        i++;
+                }
+                for (; i < msb->blk_count; i++) {
+                        aidaw->data_addr = (u64) scmrq->cluster.buf[i];
+                        aidaw++;
+                }
+                break;
+        }
+}
+
+bool scm_need_cluster_request(struct scm_request *scmrq)
+{
+        if (rq_data_dir(scmrq->request) == READ)
+                return false;
+
+        return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE;
+}
+
+/* Called with queue lock held. */
+void scm_initiate_cluster_request(struct scm_request *scmrq)
+{
+        scm_prepare_cluster_request(scmrq);
+        if (scm_start_aob(scmrq->aob))
+                scm_request_requeue(scmrq);
+}
+
+bool scm_test_cluster_request(struct scm_request *scmrq)
+{
+        return scmrq->cluster.state != CLUSTER_NONE;
+}
+
+void scm_cluster_request_irq(struct scm_request *scmrq)
+{
+        struct scm_blk_dev *bdev = scmrq->bdev;
+        unsigned long flags;
+
+        switch (scmrq->cluster.state) {
+        case CLUSTER_NONE:
+                BUG();
+                break;
+        case CLUSTER_READ:
+                if (scmrq->error) {
+                        scm_request_finish(scmrq);
+                        break;
+                }
+                scmrq->cluster.state = CLUSTER_WRITE;
+                spin_lock_irqsave(&bdev->rq_lock, flags);
+                scm_initiate_cluster_request(scmrq);
+                spin_unlock_irqrestore(&bdev->rq_lock, flags);
+                break;
+        case CLUSTER_WRITE:
+                scm_request_finish(scmrq);
+                break;
+        }
+}
+
+bool scm_cluster_size_valid(void)
+{
+        return write_cluster_size == 0 || write_cluster_size == 32 ||
+                write_cluster_size == 64 || write_cluster_size == 128;
+}
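
clusters_intersect() reduces each request to a closed interval of cluster indices and tests the two intervals for overlap. A small user-space check of that arithmetic; kernel types are swapped for stdint, and the 256 KiB cluster matches the default write_cluster_size of 64 pages:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PAGE_SIZE       4096ULL
#define CLUSTER_SIZE    (64 * PAGE_SIZE)

/* Mirror of the driver's test: requests intersect when their
 * [first, last] cluster-index ranges overlap. */
static bool intersect(uint64_t posA, uint64_t bytesA,
                      uint64_t posB, uint64_t bytesB)
{
        uint64_t firstA = (posA << 9) / CLUSTER_SIZE;
        uint64_t lastA = ((posA << 9) + bytesA - 1) / CLUSTER_SIZE;
        uint64_t firstB = (posB << 9) / CLUSTER_SIZE;
        uint64_t lastB = ((posB << 9) + bytesB - 1) / CLUSTER_SIZE;

        return firstB <= lastA && firstA <= lastB;
}

int main(void)
{
        /* 4 KiB writes at sectors 0 and 8: same 256 KiB cluster -> 1 */
        printf("%d\n", intersect(0, 4096, 8, 4096));
        /* sectors 0 and 512 (byte offset 256 KiB): adjacent clusters -> 0 */
        printf("%d\n", intersect(0, 4096, 512, 4096));
        return 0;
}

Note that in scm_reserve_cluster() an overlap only blocks a request when at least one of the two sides is a write; overlapping in-flight reads are allowed to proceed concurrently.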