#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/parser.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/string.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

/*
 * Ceph superblock operations
 *
 * Handle the basics of mounting, unmounting.
 */

/*
 * super ops
 */
static void ceph_put_super(struct super_block *s)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(s);

	dout("put_super\n");
	ceph_mdsc_close_sessions(fsc->mdsc);
}

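/*
 * ceph_statfs: query the monitors for cluster-wide usage and report it
 * via kstatfs.  Raw counters arrive in KB and are converted to
 * CEPH_BLOCK-sized blocks (1 << CEPH_BLOCK_SHIFT bytes); if the root has
 * a max_bytes quota and 'noquotadf' is not set, the quota values are
 * reported instead (see ceph_quota_update_statfs()).
 */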
static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
	struct ceph_mon_client *monc = &fsc->client->monc;
	struct ceph_statfs st;
	u64 fsid;
	int err;
	u64 data_pool;

	if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
		data_pool = fsc->mdsc->mdsmap->m_data_pg_pools[0];
	} else {
		data_pool = CEPH_NOPOOL;
	}

	dout("statfs\n");
	err = ceph_monc_do_statfs(monc, data_pool, &st);
	if (err < 0)
		return err;

	/* fill in kstatfs */
	buf->f_type = CEPH_SUPER_MAGIC;  /* ?? */

	/*
	 * express utilization in terms of large blocks to avoid
	 * overflow on 32-bit machines.
	 *
	 * NOTE: for the time being, we make bsize == frsize to humor
	 * not-yet-ancient versions of glibc that are broken.
	 * Someday, we will probably want to report a real block
	 * size... whatever that may mean for a network file system!
	 */
	buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
	buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;

	/*
	 * By default use root quota for stats; fallback to overall filesystem
	 * usage if using 'noquotadf' mount option or if the root dir doesn't
	 * have max_bytes quota set.
	 */
	if (ceph_test_mount_opt(fsc, NOQUOTADF) ||
	    !ceph_quota_update_statfs(fsc, buf)) {
		buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
		buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
		buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
	}

	buf->f_files = le64_to_cpu(st.num_objects);
	buf->f_ffree = -1;
	buf->f_namelen = NAME_MAX;

	/* Must convert the fsid, for consistent values across arches */
	mutex_lock(&monc->mutex);
	fsid = le64_to_cpu(*(__le64 *)(&monc->monmap->fsid)) ^
	       le64_to_cpu(*((__le64 *)&monc->monmap->fsid + 1));
	mutex_unlock(&monc->mutex);

	buf->f_fsid.val[0] = fsid & 0xffffffff;
	buf->f_fsid.val[1] = fsid >> 32;

	return 0;
}


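/*
 * ceph_sync_fs: if @wait is zero, just start writeback of dirty caps;
 * otherwise block until the OSD and MDS clients have flushed all
 * outstanding data and metadata.
 */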
static int ceph_sync_fs(struct super_block *sb, int wait)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

	if (!wait) {
		dout("sync_fs (non-blocking)\n");
		ceph_flush_dirty_caps(fsc->mdsc);
		dout("sync_fs (non-blocking) done\n");
		return 0;
	}

	dout("sync_fs (blocking)\n");
	ceph_osdc_sync(&fsc->client->osdc);
	ceph_mdsc_sync(fsc->mdsc);
	dout("sync_fs (blocking) done\n");
	return 0;
}

/*
 * mount options
 */
enum {
	Opt_wsize,
	Opt_rsize,
	Opt_rasize,
	Opt_caps_wanted_delay_min,
	Opt_caps_wanted_delay_max,
	Opt_readdir_max_entries,
	Opt_readdir_max_bytes,
	Opt_congestion_kb,
	Opt_last_int,
	/* int args above */
	Opt_snapdirname,
	Opt_mds_namespace,
	Opt_fscache_uniq,
	Opt_last_string,
	/* string args above */
	Opt_dirstat,
	Opt_nodirstat,
	Opt_rbytes,
	Opt_norbytes,
	Opt_asyncreaddir,
	Opt_noasyncreaddir,
	Opt_dcache,
	Opt_nodcache,
	Opt_ino32,
	Opt_noino32,
	Opt_fscache,
	Opt_nofscache,
	Opt_poolperm,
	Opt_nopoolperm,
	Opt_require_active_mds,
	Opt_norequire_active_mds,
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	Opt_acl,
#endif
	Opt_noacl,
	Opt_quotadf,
	Opt_noquotadf,
};

static match_table_t fsopt_tokens = {
	{Opt_wsize, "wsize=%d"},
	{Opt_rsize, "rsize=%d"},
	{Opt_rasize, "rasize=%d"},
	{Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
	{Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
	{Opt_readdir_max_entries, "readdir_max_entries=%d"},
	{Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
	{Opt_congestion_kb, "write_congestion_kb=%d"},
	/* int args above */
	{Opt_snapdirname, "snapdirname=%s"},
	{Opt_mds_namespace, "mds_namespace=%s"},
	{Opt_fscache_uniq, "fsc=%s"},
	/* string args above */
	{Opt_dirstat, "dirstat"},
	{Opt_nodirstat, "nodirstat"},
	{Opt_rbytes, "rbytes"},
	{Opt_norbytes, "norbytes"},
	{Opt_asyncreaddir, "asyncreaddir"},
	{Opt_noasyncreaddir, "noasyncreaddir"},
	{Opt_dcache, "dcache"},
	{Opt_nodcache, "nodcache"},
	{Opt_ino32, "ino32"},
	{Opt_noino32, "noino32"},
	{Opt_fscache, "fsc"},
	{Opt_nofscache, "nofsc"},
	{Opt_poolperm, "poolperm"},
	{Opt_nopoolperm, "nopoolperm"},
	{Opt_require_active_mds, "require_active_mds"},
	{Opt_norequire_active_mds, "norequire_active_mds"},
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	{Opt_acl, "acl"},
#endif
	{Opt_noacl, "noacl"},
	{Opt_quotadf, "quotadf"},
	{Opt_noquotadf, "noquotadf"},
	{-1, NULL}
};

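/*
 * These tokens are matched against the comma-separated -o string passed
 * to mount(8); options that libceph recognizes itself (e.g. name=,
 * secret=) are consumed there, and only the rest are handed to
 * parse_fsopt_token().  A hypothetical invocation might look like:
 *
 *   mount -t ceph 192.168.0.1:6789:/ /mnt/cephfs \
 *         -o name=admin,rsize=16777216,noasyncreaddir
 */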
static int parse_fsopt_token(char *c, void *private)
{
	struct ceph_mount_options *fsopt = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token((char *)c, fsopt_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_snapdirname:
		kfree(fsopt->snapdir_name);
		fsopt->snapdir_name = kstrndup(argstr[0].from,
					       argstr[0].to-argstr[0].from,
					       GFP_KERNEL);
		if (!fsopt->snapdir_name)
			return -ENOMEM;
		break;
	case Opt_mds_namespace:
		kfree(fsopt->mds_namespace);
		fsopt->mds_namespace = kstrndup(argstr[0].from,
						argstr[0].to-argstr[0].from,
						GFP_KERNEL);
		if (!fsopt->mds_namespace)
			return -ENOMEM;
		break;
	case Opt_fscache_uniq:
		kfree(fsopt->fscache_uniq);
		fsopt->fscache_uniq = kstrndup(argstr[0].from,
					       argstr[0].to-argstr[0].from,
					       GFP_KERNEL);
		if (!fsopt->fscache_uniq)
			return -ENOMEM;
		fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
		break;
		/* misc */
	case Opt_wsize:
		if (intval < (int)PAGE_SIZE || intval > CEPH_MAX_WRITE_SIZE)
			return -EINVAL;
		fsopt->wsize = ALIGN(intval, PAGE_SIZE);
		break;
	case Opt_rsize:
		if (intval < (int)PAGE_SIZE || intval > CEPH_MAX_READ_SIZE)
			return -EINVAL;
		fsopt->rsize = ALIGN(intval, PAGE_SIZE);
		break;
	case Opt_rasize:
		if (intval < 0)
			return -EINVAL;
		fsopt->rasize = ALIGN(intval, PAGE_SIZE);
		break;
	case Opt_caps_wanted_delay_min:
		if (intval < 1)
			return -EINVAL;
		fsopt->caps_wanted_delay_min = intval;
		break;
	case Opt_caps_wanted_delay_max:
		if (intval < 1)
			return -EINVAL;
		fsopt->caps_wanted_delay_max = intval;
		break;
	case Opt_readdir_max_entries:
		if (intval < 1)
			return -EINVAL;
		fsopt->max_readdir = intval;
		break;
	case Opt_readdir_max_bytes:
		if (intval < (int)PAGE_SIZE && intval != 0)
			return -EINVAL;
		fsopt->max_readdir_bytes = intval;
		break;
	case Opt_congestion_kb:
		if (intval < 1024) /* at least 1M */
			return -EINVAL;
		fsopt->congestion_kb = intval;
		break;
	case Opt_dirstat:
		fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
		break;
	case Opt_nodirstat:
		fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
		break;
	case Opt_rbytes:
		fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
		break;
	case Opt_norbytes:
		fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
		break;
	case Opt_asyncreaddir:
		fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
		break;
	case Opt_noasyncreaddir:
		fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
		break;
	case Opt_dcache:
		fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
		break;
	case Opt_nodcache:
		fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
		break;
	case Opt_ino32:
		fsopt->flags |= CEPH_MOUNT_OPT_INO32;
		break;
	case Opt_noino32:
		fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
		break;
	case Opt_fscache:
		fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
		kfree(fsopt->fscache_uniq);
		fsopt->fscache_uniq = NULL;
		break;
	case Opt_nofscache:
		fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
		kfree(fsopt->fscache_uniq);
		fsopt->fscache_uniq = NULL;
		break;
	case Opt_poolperm:
		fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM;
		break;
	case Opt_nopoolperm:
		fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
		break;
	case Opt_require_active_mds:
		fsopt->flags &= ~CEPH_MOUNT_OPT_MOUNTWAIT;
		break;
	case Opt_norequire_active_mds:
		fsopt->flags |= CEPH_MOUNT_OPT_MOUNTWAIT;
		break;
	case Opt_quotadf:
		fsopt->flags &= ~CEPH_MOUNT_OPT_NOQUOTADF;
		break;
	case Opt_noquotadf:
		fsopt->flags |= CEPH_MOUNT_OPT_NOQUOTADF;
		break;
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	case Opt_acl:
		fsopt->sb_flags |= SB_POSIXACL;
		break;
#endif
	case Opt_noacl:
		fsopt->sb_flags &= ~SB_POSIXACL;
		break;
	default:
		BUG_ON(token);
	}
	return 0;
}

static void destroy_mount_options(struct ceph_mount_options *args)
{
	dout("destroy_mount_options %p\n", args);
	kfree(args->snapdir_name);
	kfree(args->mds_namespace);
	kfree(args->server_path);
	kfree(args->fscache_uniq);
	kfree(args);
}

static int strcmp_null(const char *s1, const char *s2)
{
	if (!s1 && !s2)
		return 0;
	if (s1 && !s2)
		return -1;
	if (!s1 && s2)
		return 1;
	return strcmp(s1, s2);
}

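/*
 * Compare a proposed set of mount options (and libceph options) against
 * those of an existing ceph_fs_client.  The integer fields that precede
 * snapdir_name in the struct are compared with memcmp(); the string
 * options are compared individually.  Returns 0 only if everything
 * matches, i.e. the existing superblock could be shared.
 */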
static int compare_mount_options(struct ceph_mount_options *new_fsopt,
				 struct ceph_options *new_opt,
				 struct ceph_fs_client *fsc)
{
	struct ceph_mount_options *fsopt1 = new_fsopt;
	struct ceph_mount_options *fsopt2 = fsc->mount_options;
	int ofs = offsetof(struct ceph_mount_options, snapdir_name);
	int ret;

	ret = memcmp(fsopt1, fsopt2, ofs);
	if (ret)
		return ret;

	ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
	if (ret)
		return ret;
	ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
	if (ret)
		return ret;
	ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
	if (ret)
		return ret;
	ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
	if (ret)
		return ret;

	return ceph_compare_options(new_opt, fsc->client);
}

static int parse_mount_options(struct ceph_mount_options **pfsopt,
			       struct ceph_options **popt,
			       int flags, char *options,
			       const char *dev_name)
{
	struct ceph_mount_options *fsopt;
	const char *dev_name_end;
	int err;

	if (!dev_name || !*dev_name)
		return -EINVAL;

	fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
	if (!fsopt)
		return -ENOMEM;

	dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);

	fsopt->sb_flags = flags;
	fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;

	fsopt->wsize = CEPH_MAX_WRITE_SIZE;
	fsopt->rsize = CEPH_MAX_READ_SIZE;
	fsopt->rasize = CEPH_RASIZE_DEFAULT;
	fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
	if (!fsopt->snapdir_name) {
		err = -ENOMEM;
		goto out;
	}

	fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
	fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
	fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
	fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
	fsopt->congestion_kb = default_congestion_kb();

	/*
	 * Distinguish the server list from the path in "dev_name".
	 * Internally we do not include the leading '/' in the path.
	 *
	 * "dev_name" will look like:
	 *     <server_spec>[,<server_spec>...]:[<path>]
	 * where
	 *     <server_spec> is <ip>[:<port>]
	 *     <path> is optional, but if present must begin with '/'
	 */
	dev_name_end = strchr(dev_name, '/');
	if (dev_name_end) {
		if (strlen(dev_name_end) > 1) {
			fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
			if (!fsopt->server_path) {
				err = -ENOMEM;
				goto out;
			}
		}
	} else {
		dev_name_end = dev_name + strlen(dev_name);
	}
	err = -EINVAL;
	dev_name_end--;		/* back up to ':' separator */
	if (dev_name_end < dev_name || *dev_name_end != ':') {
		pr_err("device name is missing path (no : separator in %s)\n",
		       dev_name);
		goto out;
	}
	dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
	if (fsopt->server_path)
		dout("server path '%s'\n", fsopt->server_path);

	*popt = ceph_parse_options(options, dev_name, dev_name_end,
				   parse_fsopt_token, (void *)fsopt);
	if (IS_ERR(*popt)) {
		err = PTR_ERR(*popt);
		goto out;
	}

	/* success */
	*pfsopt = fsopt;
	return 0;

out:
	destroy_mount_options(fsopt);
	return err;
}

/**
 * ceph_show_options - Show mount options in /proc/mounts
 * @m: seq_file to write to
 * @root: root of that (sub)tree
 */
static int ceph_show_options(struct seq_file *m, struct dentry *root)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
	struct ceph_mount_options *fsopt = fsc->mount_options;
	size_t pos;
	int ret;

	/* a comma between MNT/MS and client options */
	seq_putc(m, ',');
	pos = m->count;

	ret = ceph_print_client_options(m, fsc->client);
	if (ret)
		return ret;

	/* retract our comma if no client options */
	if (m->count == pos)
		m->count--;

	if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
		seq_puts(m, ",dirstat");
	if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES))
		seq_puts(m, ",rbytes");
	if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
		seq_puts(m, ",noasyncreaddir");
	if ((fsopt->flags & CEPH_MOUNT_OPT_DCACHE) == 0)
		seq_puts(m, ",nodcache");
	if (fsopt->flags & CEPH_MOUNT_OPT_INO32)
		seq_puts(m, ",ino32");
	if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) {
		seq_show_option(m, "fsc", fsopt->fscache_uniq);
	}
	if (fsopt->flags & CEPH_MOUNT_OPT_NOPOOLPERM)
		seq_puts(m, ",nopoolperm");
	if (fsopt->flags & CEPH_MOUNT_OPT_NOQUOTADF)
		seq_puts(m, ",noquotadf");

#ifdef CONFIG_CEPH_FS_POSIX_ACL
	if (fsopt->sb_flags & SB_POSIXACL)
		seq_puts(m, ",acl");
	else
		seq_puts(m, ",noacl");
#endif

	if (fsopt->mds_namespace)
		seq_show_option(m, "mds_namespace", fsopt->mds_namespace);
	if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
		seq_printf(m, ",wsize=%d", fsopt->wsize);
	if (fsopt->rsize != CEPH_MAX_READ_SIZE)
		seq_printf(m, ",rsize=%d", fsopt->rsize);
	if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
		seq_printf(m, ",rasize=%d", fsopt->rasize);
	if (fsopt->congestion_kb != default_congestion_kb())
		seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
	if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
		seq_printf(m, ",caps_wanted_delay_min=%d",
			   fsopt->caps_wanted_delay_min);
	if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
		seq_printf(m, ",caps_wanted_delay_max=%d",
			   fsopt->caps_wanted_delay_max);
	if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
		seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
	if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
		seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
	if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
		seq_show_option(m, "snapdirname", fsopt->snapdir_name);

	return 0;
}

/*
 * handle any mon messages the standard library doesn't understand.
 * return error if we don't either.
 */
static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
{
	struct ceph_fs_client *fsc = client->private;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_mdsmap(fsc->mdsc, msg);
		return 0;
	case CEPH_MSG_FS_MAP_USER:
		ceph_mdsc_handle_fsmap(fsc->mdsc, msg);
		return 0;
	default:
		return -1;
	}
}

/*
 * create a new fs client
 */
static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
					       struct ceph_options *opt)
{
	struct ceph_fs_client *fsc;
	int page_count;
	size_t size;
	int err = -ENOMEM;

	fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
	if (!fsc)
		return ERR_PTR(-ENOMEM);

	fsc->client = ceph_create_client(opt, fsc);
	if (IS_ERR(fsc->client)) {
		err = PTR_ERR(fsc->client);
		goto fail;
	}

	fsc->client->extra_mon_dispatch = extra_mon_dispatch;
	fsc->client->osdc.abort_on_full = true;

	if (!fsopt->mds_namespace) {
		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
				   0, true);
	} else {
		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_FSMAP,
				   0, false);
	}

	fsc->mount_options = fsopt;

	fsc->sb = NULL;
	fsc->mount_state = CEPH_MOUNT_MOUNTING;

	atomic_long_set(&fsc->writeback_count, 0);

	err = -ENOMEM;
	/*
	 * The number of concurrent works can be high but they don't need
	 * to be processed in parallel, limit concurrency.
	 */
	fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
	if (!fsc->wb_wq)
		goto fail_client;
	fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
	if (!fsc->pg_inv_wq)
		goto fail_wb_wq;
	fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
	if (!fsc->trunc_wq)
		goto fail_pg_inv_wq;

	/* set up mempools */
	err = -ENOMEM;
	page_count = fsc->mount_options->wsize >> PAGE_SHIFT;
	size = sizeof (struct page *) * (page_count ? page_count : 1);
	fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
	if (!fsc->wb_pagevec_pool)
		goto fail_trunc_wq;

	/* caps */
	fsc->min_caps = fsopt->max_readdir;

	return fsc;

fail_trunc_wq:
	destroy_workqueue(fsc->trunc_wq);
fail_pg_inv_wq:
	destroy_workqueue(fsc->pg_inv_wq);
fail_wb_wq:
	destroy_workqueue(fsc->wb_wq);
fail_client:
	ceph_destroy_client(fsc->client);
fail:
	kfree(fsc);
	return ERR_PTR(err);
}

static void flush_fs_workqueues(struct ceph_fs_client *fsc)
{
	flush_workqueue(fsc->wb_wq);
	flush_workqueue(fsc->pg_inv_wq);
	flush_workqueue(fsc->trunc_wq);
}

static void destroy_fs_client(struct ceph_fs_client *fsc)
{
	dout("destroy_fs_client %p\n", fsc);

	destroy_workqueue(fsc->wb_wq);
	destroy_workqueue(fsc->pg_inv_wq);
	destroy_workqueue(fsc->trunc_wq);

	mempool_destroy(fsc->wb_pagevec_pool);

	destroy_mount_options(fsc->mount_options);

	ceph_destroy_client(fsc->client);

	kfree(fsc);
	dout("destroy_fs_client %p done\n", fsc);
}

/*
 * caches
 */
struct kmem_cache *ceph_inode_cachep;
struct kmem_cache *ceph_cap_cachep;
struct kmem_cache *ceph_cap_flush_cachep;
struct kmem_cache *ceph_dentry_cachep;
struct kmem_cache *ceph_file_cachep;
struct kmem_cache *ceph_dir_file_cachep;

static void ceph_inode_init_once(void *foo)
{
	struct ceph_inode_info *ci = foo;
	inode_init_once(&ci->vfs_inode);
}

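/*
 * Create the slab caches for ceph inodes, caps, cap flushes, dentry and
 * file private data, then register the filesystem with fscache.  Called
 * once at module load; destroy_caches() undoes all of it on failure or
 * module unload.
 */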
static int __init init_caches(void)
{
	int error = -ENOMEM;

	ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
				sizeof(struct ceph_inode_info),
				__alignof__(struct ceph_inode_info),
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
				SLAB_ACCOUNT, ceph_inode_init_once);
	if (!ceph_inode_cachep)
		return -ENOMEM;

	ceph_cap_cachep = KMEM_CACHE(ceph_cap, SLAB_MEM_SPREAD);
	if (!ceph_cap_cachep)
		goto bad_cap;
	ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush,
					   SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (!ceph_cap_flush_cachep)
		goto bad_cap_flush;

	ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
					SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (!ceph_dentry_cachep)
		goto bad_dentry;

	ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD);
	if (!ceph_file_cachep)
		goto bad_file;

	ceph_dir_file_cachep = KMEM_CACHE(ceph_dir_file_info, SLAB_MEM_SPREAD);
	if (!ceph_dir_file_cachep)
		goto bad_dir_file;

	error = ceph_fscache_register();
	if (error)
		goto bad_fscache;

	return 0;

bad_fscache:
	kmem_cache_destroy(ceph_dir_file_cachep);
bad_dir_file:
	kmem_cache_destroy(ceph_file_cachep);
bad_file:
	kmem_cache_destroy(ceph_dentry_cachep);
bad_dentry:
	kmem_cache_destroy(ceph_cap_flush_cachep);
bad_cap_flush:
	kmem_cache_destroy(ceph_cap_cachep);
bad_cap:
	kmem_cache_destroy(ceph_inode_cachep);
	return error;
}

static void destroy_caches(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();

	kmem_cache_destroy(ceph_inode_cachep);
	kmem_cache_destroy(ceph_cap_cachep);
	kmem_cache_destroy(ceph_cap_flush_cachep);
	kmem_cache_destroy(ceph_dentry_cachep);
	kmem_cache_destroy(ceph_file_cachep);
	kmem_cache_destroy(ceph_dir_file_cachep);

	ceph_fscache_unregister();
}


/*
 * ceph_umount_begin - initiate forced umount.  Tear down the
 * mount, skipping steps that may hang while waiting for server(s).
 */
static void ceph_umount_begin(struct super_block *sb)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

	dout("ceph_umount_begin - starting forced umount\n");
	if (!fsc)
		return;
	fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
	ceph_osdc_abort_requests(&fsc->client->osdc, -EIO);
	ceph_mdsc_force_umount(fsc->mdsc);
	return;
}

static const struct super_operations ceph_super_ops = {
	.alloc_inode	= ceph_alloc_inode,
	.destroy_inode	= ceph_destroy_inode,
	.write_inode	= ceph_write_inode,
	.drop_inode	= ceph_drop_inode,
	.sync_fs	= ceph_sync_fs,
	.put_super	= ceph_put_super,
	.show_options	= ceph_show_options,
	.statfs		= ceph_statfs,
	.umount_begin	= ceph_umount_begin,
};

/*
 * Bootstrap mount by opening the root directory.  Note the mount
 * @started time from caller, and time out if this takes too long.
 */
static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
				       const char *path,
				       unsigned long started)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req = NULL;
	int err;
	struct dentry *root;

	/* open dir */
	dout("open_root_inode opening '%s'\n", path);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_path1 = kstrdup(path, GFP_NOFS);
	if (!req->r_path1) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}

	req->r_ino1.ino = CEPH_INO_ROOT;
	req->r_ino1.snap = CEPH_NOSNAP;
	req->r_started = started;
	req->r_timeout = fsc->client->options->mount_timeout;
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_num_caps = 2;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (err == 0) {
		struct inode *inode = req->r_target_inode;
		req->r_target_inode = NULL;
		dout("open_root_inode success\n");
		root = d_make_root(inode);
		if (!root) {
			root = ERR_PTR(-ENOMEM);
			goto out;
		}
		dout("open_root_inode success, root dentry is %p\n", root);
	} else {
		root = ERR_PTR(err);
	}
out:
	ceph_mdsc_put_request(req);
	return root;
}




/*
 * mount: join the ceph cluster, and open root directory.
 */
static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
{
	int err;
	unsigned long started = jiffies;  /* note the start time */
	struct dentry *root;

	dout("mount start %p\n", fsc);
	mutex_lock(&fsc->client->mount_mutex);

	if (!fsc->sb->s_root) {
		const char *path;
		err = __ceph_open_session(fsc->client, started);
		if (err < 0)
			goto out;

		/* setup fscache */
		if (fsc->mount_options->flags & CEPH_MOUNT_OPT_FSCACHE) {
			err = ceph_fscache_register_fs(fsc);
			if (err < 0)
				goto out;
		}

		if (!fsc->mount_options->server_path) {
			path = "";
			dout("mount opening path \\t\n");
		} else {
			path = fsc->mount_options->server_path + 1;
			dout("mount opening path %s\n", path);
		}

		err = ceph_fs_debugfs_init(fsc);
		if (err < 0)
			goto out;

		root = open_root_dentry(fsc, path, started);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}
		fsc->sb->s_root = dget(root);
	} else {
		root = dget(fsc->sb->s_root);
	}

	fsc->mount_state = CEPH_MOUNT_MOUNTED;
	dout("mount success\n");
	mutex_unlock(&fsc->client->mount_mutex);
	return root;

out:
	mutex_unlock(&fsc->client->mount_mutex);
	return ERR_PTR(err);
}

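/*
 * Initialize a freshly allocated superblock for sget(): wire up the
 * ceph super/dentry/export operations, attach the ceph_fs_client via
 * s_fs_info, and allocate an anonymous device number.
 */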
static int ceph_set_super(struct super_block *s, void *data)
{
	struct ceph_fs_client *fsc = data;
	int ret;

	dout("set_super %p data %p\n", s, data);

	s->s_flags = fsc->mount_options->sb_flags;
	s->s_maxbytes = MAX_LFS_FILESIZE;

	s->s_xattr = ceph_xattr_handlers;
	s->s_fs_info = fsc;
	fsc->sb = s;
	fsc->max_file_size = 1ULL << 40; /* temp value until we get mdsmap */

	s->s_op = &ceph_super_ops;
	s->s_d_op = &ceph_dentry_ops;
	s->s_export_op = &ceph_export_ops;

	s->s_time_gran = 1000;  /* 1000 ns == 1 us */

	ret = set_anon_super(s, NULL);  /* what is that second arg for? */
	if (ret != 0)
		goto fail;

	return ret;

fail:
	s->s_fs_info = NULL;
	fsc->sb = NULL;
	return ret;
}

/*
 * share superblock if same fs AND options
 */
static int ceph_compare_super(struct super_block *sb, void *data)
{
	struct ceph_fs_client *new = data;
	struct ceph_mount_options *fsopt = new->mount_options;
	struct ceph_options *opt = new->client->options;
	struct ceph_fs_client *other = ceph_sb_to_client(sb);

	dout("ceph_compare_super %p\n", sb);

	if (compare_mount_options(fsopt, opt, other)) {
		dout("monitor(s)/mount options don't match\n");
		return 0;
	}
	if ((opt->flags & CEPH_OPT_FSID) &&
	    ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
		dout("fsid doesn't match\n");
		return 0;
	}
	if (fsopt->sb_flags != other->mount_options->sb_flags) {
		dout("flags differ\n");
		return 0;
	}
	return 1;
}

/*
 * construct our own bdi so we can control readahead, etc.
 */
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

static int ceph_setup_bdi(struct super_block *sb, struct ceph_fs_client *fsc)
{
	int err;

	err = super_setup_bdi_name(sb, "ceph-%ld",
				   atomic_long_inc_return(&bdi_seq));
	if (err)
		return err;

	/* set ra_pages based on rasize mount option? */
	sb->s_bdi->ra_pages = fsc->mount_options->rasize >> PAGE_SHIFT;

	/* set io_pages based on max osd read size */
	sb->s_bdi->io_pages = fsc->mount_options->rsize >> PAGE_SHIFT;

	return 0;
}

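/*
 * ceph_mount: entry point for mount(2).  Parse the mount options,
 * create a ceph_fs_client (which may end up unused if sget() finds an
 * existing, compatible superblock), and then perform the actual mount
 * via ceph_real_mount().
 */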
static struct dentry *ceph_mount(struct file_system_type *fs_type,
				 int flags, const char *dev_name, void *data)
{
	struct super_block *sb;
	struct ceph_fs_client *fsc;
	struct dentry *res;
	int err;
	int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
	struct ceph_mount_options *fsopt = NULL;
	struct ceph_options *opt = NULL;

	dout("ceph_mount\n");

#ifdef CONFIG_CEPH_FS_POSIX_ACL
	flags |= SB_POSIXACL;
#endif
	err = parse_mount_options(&fsopt, &opt, flags, data, dev_name);
	if (err < 0) {
		res = ERR_PTR(err);
		goto out_final;
	}

	/* create client (which we may/may not use) */
	fsc = create_fs_client(fsopt, opt);
	if (IS_ERR(fsc)) {
		res = ERR_CAST(fsc);
		destroy_mount_options(fsopt);
		ceph_destroy_options(opt);
		goto out_final;
	}

	err = ceph_mdsc_init(fsc);
	if (err < 0) {
		res = ERR_PTR(err);
		goto out;
	}

	if (ceph_test_opt(fsc->client, NOSHARE))
		compare_super = NULL;
	sb = sget(fs_type, compare_super, ceph_set_super, flags, fsc);
	if (IS_ERR(sb)) {
		res = ERR_CAST(sb);
		goto out;
	}

	if (ceph_sb_to_client(sb) != fsc) {
		ceph_mdsc_destroy(fsc);
		destroy_fs_client(fsc);
		fsc = ceph_sb_to_client(sb);
		dout("get_sb got existing client %p\n", fsc);
	} else {
		dout("get_sb using new client %p\n", fsc);
		err = ceph_setup_bdi(sb, fsc);
		if (err < 0) {
			res = ERR_PTR(err);
			goto out_splat;
		}
	}

	res = ceph_real_mount(fsc);
	if (IS_ERR(res))
		goto out_splat;
	dout("root %p inode %p ino %llx.%llx\n", res,
	     d_inode(res), ceph_vinop(d_inode(res)));
	return res;

out_splat:
	ceph_mdsc_close_sessions(fsc->mdsc);
	deactivate_locked_super(sb);
	goto out_final;

out:
	ceph_mdsc_destroy(fsc);
	destroy_fs_client(fsc);
out_final:
	dout("ceph_mount fail %ld\n", PTR_ERR(res));
	return res;
}

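/*
 * Tear down a superblock: quiesce the MDS client and workqueues, run
 * the generic shutdown, then release the debugfs entries, fscache
 * registration, MDS client, fs client and the anonymous device number.
 */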
static void ceph_kill_sb(struct super_block *s)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(s);
	dev_t dev = s->s_dev;

	dout("kill_sb %p\n", s);

	ceph_mdsc_pre_umount(fsc->mdsc);
	flush_fs_workqueues(fsc);

	generic_shutdown_super(s);

	fsc->client->extra_mon_dispatch = NULL;
	ceph_fs_debugfs_cleanup(fsc);

	ceph_fscache_unregister_fs(fsc);

	ceph_mdsc_destroy(fsc);

	destroy_fs_client(fsc);
	free_anon_bdev(dev);
}

static struct file_system_type ceph_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ceph",
	.mount		= ceph_mount,
	.kill_sb	= ceph_kill_sb,
	.fs_flags	= FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("ceph");

static int __init init_ceph(void)
{
	int ret = init_caches();
	if (ret)
		goto out;

	ceph_flock_init();
	ceph_xattr_init();
	ret = register_filesystem(&ceph_fs_type);
	if (ret)
		goto out_xattr;

	pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);

	return 0;

out_xattr:
	ceph_xattr_exit();
	destroy_caches();
out:
	return ret;
}

static void __exit exit_ceph(void)
{
	dout("exit_ceph\n");
	unregister_filesystem(&ceph_fs_type);
	ceph_xattr_exit();
	destroy_caches();
}

module_init(init_ceph);
module_exit(exit_ceph);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
MODULE_DESCRIPTION("Ceph filesystem for Linux");
MODULE_LICENSE("GPL");