Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mtd: mtdblock: Dynamically allocate cache info structures

Since we allocate struct mtd_blktrans_dev for each block device, we
can add our own structure members to the end. Therefore embed
struct mtd_blktrans_dev in struct mtdblk_dev and remove the static
array of struct mtdblk_dev. Also remove the redundant pointer to
struct mtd_info.

This is preparation for removing the static limit on the number of MTD
devices.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>

Authored by Ben Hutchings and committed by David Woodhouse
cbfe93e9 24c15496

+31 -43
drivers/mtd/mtdblock.c
··· 19 19 #include <linux/mutex.h> 20 20 21 21 22 - static struct mtdblk_dev { 23 - struct mtd_info *mtd; 22 + struct mtdblk_dev { 23 + struct mtd_blktrans_dev mbd; 24 24 int count; 25 25 struct mutex cache_mutex; 26 26 unsigned char *cache_data; 27 27 unsigned long cache_offset; 28 28 unsigned int cache_size; 29 29 enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; 30 - } *mtdblks[MAX_MTD_DEVICES]; 30 + }; 31 31 32 32 static struct mutex mtdblks_lock; 33 33 ··· 98 98 99 99 static int write_cached_data (struct mtdblk_dev *mtdblk) 100 100 { 101 - struct mtd_info *mtd = mtdblk->mtd; 101 + struct mtd_info *mtd = mtdblk->mbd.mtd; 102 102 int ret; 103 103 104 104 if (mtdblk->cache_state != STATE_DIRTY) ··· 128 128 static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos, 129 129 int len, const char *buf) 130 130 { 131 - struct mtd_info *mtd = mtdblk->mtd; 131 + struct mtd_info *mtd = mtdblk->mbd.mtd; 132 132 unsigned int sect_size = mtdblk->cache_size; 133 133 size_t retlen; 134 134 int ret; ··· 198 198 static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos, 199 199 int len, char *buf) 200 200 { 201 - struct mtd_info *mtd = mtdblk->mtd; 201 + struct mtd_info *mtd = mtdblk->mbd.mtd; 202 202 unsigned int sect_size = mtdblk->cache_size; 203 203 size_t retlen; 204 204 int ret; ··· 244 244 static int mtdblock_readsect(struct mtd_blktrans_dev *dev, 245 245 unsigned long block, char *buf) 246 246 { 247 - struct mtdblk_dev *mtdblk = mtdblks[dev->devnum]; 247 + struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); 248 248 return do_cached_read(mtdblk, block<<9, 512, buf); 249 249 } 250 250 251 251 static int mtdblock_writesect(struct mtd_blktrans_dev *dev, 252 252 unsigned long block, char *buf) 253 253 { 254 - struct mtdblk_dev *mtdblk = mtdblks[dev->devnum]; 254 + struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); 255 255 if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) { 256 - 
mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize); 256 + mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize); 257 257 if (!mtdblk->cache_data) 258 258 return -EINTR; 259 259 /* -EINTR is not really correct, but it is the best match ··· 266 266 267 267 static int mtdblock_open(struct mtd_blktrans_dev *mbd) 268 268 { 269 - struct mtdblk_dev *mtdblk; 270 - struct mtd_info *mtd = mbd->mtd; 271 - int dev = mbd->devnum; 269 + struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); 272 270 273 271 DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n"); 274 272 275 273 mutex_lock(&mtdblks_lock); 276 - if (mtdblks[dev]) { 277 - mtdblks[dev]->count++; 274 + if (mtdblk->count) { 275 + mtdblk->count++; 278 276 mutex_unlock(&mtdblks_lock); 279 277 return 0; 280 278 } 281 279 282 280 /* OK, it's not open. Create cache info for it */ 283 - mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL); 284 - if (!mtdblk) { 285 - mutex_unlock(&mtdblks_lock); 286 - return -ENOMEM; 287 - } 288 - 289 281 mtdblk->count = 1; 290 - mtdblk->mtd = mtd; 291 - 292 282 mutex_init(&mtdblk->cache_mutex); 293 283 mtdblk->cache_state = STATE_EMPTY; 294 - if ( !(mtdblk->mtd->flags & MTD_NO_ERASE) && mtdblk->mtd->erasesize) { 295 - mtdblk->cache_size = mtdblk->mtd->erasesize; 284 + if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) { 285 + mtdblk->cache_size = mbd->mtd->erasesize; 296 286 mtdblk->cache_data = NULL; 297 287 } 298 288 299 - mtdblks[dev] = mtdblk; 300 289 mutex_unlock(&mtdblks_lock); 301 290 302 291 DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); ··· 295 306 296 307 static int mtdblock_release(struct mtd_blktrans_dev *mbd) 297 308 { 298 - int dev = mbd->devnum; 299 - struct mtdblk_dev *mtdblk = mtdblks[dev]; 309 + struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); 300 310 301 311 DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n"); 302 312 ··· 306 318 mutex_unlock(&mtdblk->cache_mutex); 307 319 308 320 if (!--mtdblk->count) { 309 - /* It was the last usage. 
Free the device */ 310 - mtdblks[dev] = NULL; 311 - if (mtdblk->mtd->sync) 312 - mtdblk->mtd->sync(mtdblk->mtd); 321 + /* It was the last usage. Free the cache */ 322 + if (mbd->mtd->sync) 323 + mbd->mtd->sync(mbd->mtd); 313 324 vfree(mtdblk->cache_data); 314 - kfree(mtdblk); 315 325 } 316 326 317 327 mutex_unlock(&mtdblks_lock); ··· 321 335 322 336 static int mtdblock_flush(struct mtd_blktrans_dev *dev) 323 337 { 324 - struct mtdblk_dev *mtdblk = mtdblks[dev->devnum]; 338 + struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); 325 339 326 340 mutex_lock(&mtdblk->cache_mutex); 327 341 write_cached_data(mtdblk); 328 342 mutex_unlock(&mtdblk->cache_mutex); 329 343 330 - if (mtdblk->mtd->sync) 331 - mtdblk->mtd->sync(mtdblk->mtd); 344 + if (dev->mtd->sync) 345 + dev->mtd->sync(dev->mtd); 332 346 return 0; 333 347 } 334 348 335 349 static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) 336 350 { 337 - struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL); 351 + struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL); 338 352 339 353 if (!dev) 340 354 return; 341 355 342 - dev->mtd = mtd; 343 - dev->devnum = mtd->index; 356 + dev->mbd.mtd = mtd; 357 + dev->mbd.devnum = mtd->index; 344 358 345 - dev->size = mtd->size >> 9; 346 - dev->tr = tr; 359 + dev->mbd.size = mtd->size >> 9; 360 + dev->mbd.tr = tr; 347 361 348 362 if (!(mtd->flags & MTD_WRITEABLE)) 349 - dev->readonly = 1; 363 + dev->mbd.readonly = 1; 350 364 351 - add_mtd_blktrans_dev(dev); 365 + add_mtd_blktrans_dev(&dev->mbd); 352 366 } 353 367 354 368 static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev) 355 369 { 370 + struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); 371 + 356 372 del_mtd_blktrans_dev(dev); 357 - kfree(dev); 373 + kfree(mtdblk); 358 374 } 359 375 360 376 static struct mtd_blktrans_ops mtdblock_tr = {