fs/logfs/dev_mtd.c at v2.6.34
/*
 * fs/logfs/dev_mtd.c	- Device access methods for MTD
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/completion.h>
#include <linux/mount.h>
#include <linux/sched.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))

static int mtd_read(struct super_block *sb, loff_t ofs, size_t len, void *buf)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;
	size_t retlen;
	int ret;

	ret = mtd->read(mtd, ofs, len, &retlen, buf);
	BUG_ON(ret == -EINVAL);
	if (ret)
		return ret;

	/* Not sure if we should loop instead. */
	if (retlen != len)
		return -EIO;

	return 0;
}

static int mtd_write(struct super_block *sb, loff_t ofs, size_t len, void *buf)
{
	struct logfs_super *super = logfs_super(sb);
	struct mtd_info *mtd = super->s_mtd;
	size_t retlen;
	int ret;

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	/* Writes must stay on the device, be write-aligned and fit a page. */
	BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs));
	BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift);
	BUG_ON(len > PAGE_CACHE_SIZE);
	ret = mtd->write(mtd, ofs, len, &retlen, buf);
	if (ret || (retlen != len))
		return -EIO;

	return 0;
}
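/*
 * Illustrative sketch, not part of the original file: the "Not sure if
 * we should loop instead" comment above refers to retrying short reads.
 * A looping variant could look like this hypothetical helper; the name
 * mtd_read_loop and the assumption that a driver may return a short
 * read without also returning an error are both invented for
 * illustration.
 */
static int __maybe_unused mtd_read_loop(struct super_block *sb, loff_t ofs,
		size_t len, void *buf)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;
	char *p = buf;
	size_t retlen;
	int ret;

	while (len > 0) {
		ret = mtd->read(mtd, ofs, len, &retlen, p);
		if (ret)
			return ret;
		/* No forward progress would spin forever; fail hard instead. */
		if (retlen == 0 || retlen > len)
			return -EIO;
		ofs += retlen;
		p += retlen;
		len -= retlen;
	}
	return 0;
}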
/*
 * For as long as I can remember (since about 2001) mtd->erase has been an
 * asynchronous interface, still lacking a first driver that actually uses
 * the asynchronous properties.  So just to prevent the first implementor of
 * such a thing from breaking logfs in 2350, we do the usual pointless dance
 * to declare a completion variable and wait for completion before returning
 * from mtd_erase().  What an exercise in futility!
 */
static void logfs_erase_callback(struct erase_info *ei)
{
	complete((struct completion *)ei->priv);
}

static int mtd_erase_mapping(struct super_block *sb, loff_t ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct page *page;
	pgoff_t index;

	/* Cached copies of the erased range must read back as 0xFF. */
	for (index = ofs >> PAGE_SHIFT; index < (ofs + len) >> PAGE_SHIFT; index++) {
		page = find_get_page(mapping, index);
		if (!page)
			continue;
		memset(page_address(page), 0xFF, PAGE_SIZE);
		page_cache_release(page);
	}
	return 0;
}

static int mtd_erase(struct super_block *sb, loff_t ofs, size_t len,
		int ensure_write)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;
	struct erase_info ei;
	DECLARE_COMPLETION_ONSTACK(complete);
	int ret;

	BUG_ON(len % mtd->erasesize);
	if (logfs_super(sb)->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	memset(&ei, 0, sizeof(ei));
	ei.mtd = mtd;
	ei.addr = ofs;
	ei.len = len;
	ei.callback = logfs_erase_callback;
	ei.priv = (long)&complete;
	ret = mtd->erase(mtd, &ei);
	if (ret)
		return -EIO;

	wait_for_completion(&complete);
	if (ei.state != MTD_ERASE_DONE)
		return -EIO;
	return mtd_erase_mapping(sb, ofs, len);
}

static void mtd_sync(struct super_block *sb)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;

	if (mtd->sync)
		mtd->sync(mtd);
}

static int mtd_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	int err;

	err = mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
			page_address(page));
	if (err == -EUCLEAN) {
		/* A corrected bitflip is not fatal, but the data is decaying. */
		err = 0;
		/* FIXME: force GC this segment */
	}
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}

static struct page *mtd_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = mtd_readpage;
	struct mtd_info *mtd = super->s_mtd;

	if (!mtd->block_isbad)
		return NULL;

	/* Skip leading bad blocks; the superblock lives in the first good one. */
	*ofs = 0;
	while (mtd->block_isbad(mtd, *ofs)) {
		*ofs += mtd->erasesize;
		if (*ofs >= mtd->size)
			return NULL;
	}
	BUG_ON(*ofs & ~PAGE_MASK);
	return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}

static struct page *mtd_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = mtd_readpage;
	struct mtd_info *mtd = super->s_mtd;

	if (!mtd->block_isbad)
		return NULL;

	/* Skip trailing bad blocks, scanning down from the end of the device. */
	*ofs = mtd->size - mtd->erasesize;
	while (mtd->block_isbad(mtd, *ofs)) {
		/* Don't scan down into block 0 or wrap the u64 past zero. */
		if (*ofs <= mtd->erasesize)
			return NULL;
		*ofs -= mtd->erasesize;
	}
	/* The superblock occupies the last 0x1000 bytes of the block. */
	*ofs = *ofs + mtd->erasesize - 0x1000;
	BUG_ON(*ofs & ~PAGE_MASK);
	return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}
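/*
 * Worked example for the offset math above (illustrative values, not
 * from the original file): on a 4 MiB device with 128 KiB erase blocks
 * and no bad blocks, mtd_find_last_sb() starts at
 * *ofs = 0x400000 - 0x20000 = 0x3e0000 and then reads the superblock
 * page from *ofs + 0x20000 - 0x1000 = 0x3ff000, i.e. the last 4 KiB of
 * the last good erase block.
 */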
static int __mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct page *page;
	int i, err;

	for (i = 0; i < nr_pages; i++) {
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);

		err = mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
				page_address(page));
		unlock_page(page);
		page_cache_release(page);
		if (err)
			return err;
	}
	return 0;
}

static void mtd_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return;

	if (len == 0) {
		/* This can happen when the object fit perfectly into a
		 * segment, the segment gets written per sync and subsequently
		 * closed.
		 */
		return;
	}
	/* Round the range out to whole pages before writing. */
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}

static void mtd_put_device(struct super_block *sb)
{
	put_mtd_device(logfs_super(sb)->s_mtd);
}

static const struct logfs_device_ops mtd_devops = {
	.find_first_sb	= mtd_find_first_sb,
	.find_last_sb	= mtd_find_last_sb,
	.readpage	= mtd_readpage,
	.writeseg	= mtd_writeseg,
	.erase		= mtd_erase,
	.sync		= mtd_sync,
	.put_device	= mtd_put_device,
};

int logfs_get_sb_mtd(struct file_system_type *type, int flags,
		int mtdnr, struct vfsmount *mnt)
{
	struct mtd_info *mtd;
	const struct logfs_device_ops *devops = &mtd_devops;

	mtd = get_mtd_device(NULL, mtdnr);
	if (IS_ERR(mtd))	/* get_mtd_device() returns ERR_PTR() on failure */
		return PTR_ERR(mtd);
	return logfs_get_sb_device(type, flags, mtd, NULL, devops, mnt);
}
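/*
 * Illustrative sketch, not part of the original file: how generic logfs
 * code might drive the ops table above.  The helper name and the
 * erase-then-sync sequence are invented; only the ops signatures are
 * taken from this file.
 */
static void __maybe_unused example_erase_segment(struct super_block *sb,
		loff_t ofs, size_t len)
{
	const struct logfs_device_ops *ops = &mtd_devops;

	/* Erase the segment, then flush the device before it is reused. */
	if (ops->erase(sb, ofs, len, 0) == 0)
		ops->sync(sb);
}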