1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/ipc/shm.c
4 * Copyright (C) 1992, 1993 Krishna Balasubramanian
5 * Many improvements/fixes by Bruno Haible.
6 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
7 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
8 *
9 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
10 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
11 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
12 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
13 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
14 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
15 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
16 *
17 * support for audit of ipc object properties and permission changes
18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
19 *
20 * namespaces support
21 * OpenVZ, SWsoft Inc.
22 * Pavel Emelianov <xemul@openvz.org>
23 *
24 * Better ipc lock (kern_ipc_perm.lock) handling
25 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
26 */
27
28#include <linux/slab.h>
29#include <linux/mm.h>
30#include <linux/hugetlb.h>
31#include <linux/shm.h>
32#include <linux/init.h>
33#include <linux/file.h>
34#include <linux/mman.h>
35#include <linux/shmem_fs.h>
36#include <linux/security.h>
37#include <linux/syscalls.h>
38#include <linux/audit.h>
39#include <linux/capability.h>
40#include <linux/ptrace.h>
41#include <linux/seq_file.h>
42#include <linux/rwsem.h>
43#include <linux/nsproxy.h>
44#include <linux/mount.h>
45#include <linux/ipc_namespace.h>
46
47#include <linux/uaccess.h>
48
49#include "util.h"
50
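/*
 * Rough map from the SysV shm syscalls to the helpers below
 * (illustrative summary, not a complete call graph):
 *
 *   shmget()  ->  ipcget()    -> newseg() or shm_more_checks()
 *   shmat()   ->  do_shmat()  -> shm_mmap() -> __shm_open()
 *   shmdt()   ->  do_munmap() -> shm_close() (the vma ->close callback)
 *   shmctl()  ->  shmctl_stat(), shmctl_down(), shmctl_do_lock(), ...
 */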
51struct shm_file_data {
52 int id;
53 struct ipc_namespace *ns;
54 struct file *file;
55 const struct vm_operations_struct *vm_ops;
56};
57
58#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
59
60static const struct file_operations shm_file_operations;
61static const struct vm_operations_struct shm_vm_ops;
62
63#define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS])
64
65#define shm_unlock(shp) \
66 ipc_unlock(&(shp)->shm_perm)
67
68static int newseg(struct ipc_namespace *, struct ipc_params *);
69static void shm_open(struct vm_area_struct *vma);
70static void shm_close(struct vm_area_struct *vma);
71static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
72#ifdef CONFIG_PROC_FS
73static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
74#endif
75
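/*
 * Set up the per-namespace SysV shm state: the sysctl limits
 * (shmmax/shmall/shmmni), the page accounting counter and the IPC id
 * allocator. Called for init_ipc_ns at boot and for every new IPC
 * namespace.
 */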
76int shm_init_ns(struct ipc_namespace *ns)
77{
78 ns->shm_ctlmax = SHMMAX;
79 ns->shm_ctlall = SHMALL;
80 ns->shm_ctlmni = SHMMNI;
81 ns->shm_rmid_forced = 0;
82 ns->shm_tot = 0;
83 return ipc_init_ids(&shm_ids(ns));
84}
85
86/*
87 * Called with shm_ids.rwsem (writer) and the shp structure locked.
88 * Only shm_ids.rwsem remains locked on exit.
89 */
90static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
91{
92 struct shmid_kernel *shp;
93
94 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
95
96 if (shp->shm_nattch) {
97 shp->shm_perm.mode |= SHM_DEST;
98 /* Do not find it any more */
99 ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
100 shm_unlock(shp);
101 } else
102 shm_destroy(ns, shp);
103}
104
105#ifdef CONFIG_IPC_NS
106void shm_exit_ns(struct ipc_namespace *ns)
107{
108 free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
109 idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
110 rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
111}
112#endif
113
114static int __init ipc_ns_init(void)
115{
116 const int err = shm_init_ns(&init_ipc_ns);
117 WARN(err, "ipc: sysv shm_init_ns failed: %d\n", err);
118 return err;
119}
120
121pure_initcall(ipc_ns_init);
122
123void __init shm_init(void)
124{
125 ipc_init_proc_interface("sysvipc/shm",
126#if BITS_PER_LONG <= 32
127 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
128#else
129 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
130#endif
131 IPC_SHM_IDS, sysvipc_shm_proc_show);
132}
133
134static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
135{
136 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
137
138 if (IS_ERR(ipcp))
139 return ERR_CAST(ipcp);
140
141 return container_of(ipcp, struct shmid_kernel, shm_perm);
142}
143
144static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
145{
146 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
147
148 if (IS_ERR(ipcp))
149 return ERR_CAST(ipcp);
150
151 return container_of(ipcp, struct shmid_kernel, shm_perm);
152}
153
154/*
155 * shm_lock_(check_) routines are called in the paths where the rwsem
156 * is not necessarily held.
157 */
158static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
159{
160 struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
161
162 /*
163 * Callers of shm_lock() must validate the status of the returned ipc
164 * object pointer (as returned by ipc_lock()), and error out as
165 * appropriate.
166 */
167 if (IS_ERR(ipcp))
168 return (void *)ipcp;
169 return container_of(ipcp, struct shmid_kernel, shm_perm);
170}
171
172static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
173{
174 rcu_read_lock();
175 ipc_lock_object(&ipcp->shm_perm);
176}
177
178static void shm_rcu_free(struct rcu_head *head)
179{
180 struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
181 rcu);
182 struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
183 shm_perm);
184 security_shm_free(shp);
185 kvfree(shp);
186}
187
188static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
189{
190 list_del(&s->shm_clist);
191 ipc_rmid(&shm_ids(ns), &s->shm_perm);
192}
193
194
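/*
 * Account one more attach: bump shm_nattch and refresh shm_atim and
 * shm_lprid under the ipc object lock. Shared by shm_mmap() and the
 * vma ->open callback; returns the shm_lock() error if the id has
 * already gone away.
 */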
195static int __shm_open(struct vm_area_struct *vma)
196{
197 struct file *file = vma->vm_file;
198 struct shm_file_data *sfd = shm_file_data(file);
199 struct shmid_kernel *shp;
200
201 shp = shm_lock(sfd->ns, sfd->id);
202
203 if (IS_ERR(shp))
204 return PTR_ERR(shp);
205
206 shp->shm_atim = ktime_get_real_seconds();
207 shp->shm_lprid = task_tgid_vnr(current);
208 shp->shm_nattch++;
209 shm_unlock(shp);
210 return 0;
211}
212
213/* This is called by fork, once for every shm attach. */
214static void shm_open(struct vm_area_struct *vma)
215{
216 int err = __shm_open(vma);
217 /*
218 * We raced in the idr lookup or with shm_destroy().
219 * Either way, the ID is busted.
220 */
221 WARN_ON_ONCE(err);
222}
223
224/*
225 * shm_destroy - free the struct shmid_kernel
226 *
227 * @ns: namespace
228 * @shp: struct to free
229 *
230 * It has to be called with shp and shm_ids.rwsem (writer) locked,
231 * but returns with shp unlocked and freed.
232 */
233static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
234{
235 struct file *shm_file;
236
237 shm_file = shp->shm_file;
238 shp->shm_file = NULL;
239 ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
240 shm_rmid(ns, shp);
241 shm_unlock(shp);
242 if (!is_file_hugepages(shm_file))
243 shmem_lock(shm_file, 0, shp->mlock_user);
244 else if (shp->mlock_user)
245 user_shm_unlock(i_size_read(file_inode(shm_file)),
246 shp->mlock_user);
247 fput(shm_file);
248 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
249}
250
251/*
252 * shm_may_destroy - identifies whether shm segment should be destroyed now
253 *
254 * Returns true if and only if there are no active users of the segment and
255 * one of the following is true:
256 *
257 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
258 *
259 * 2) sysctl kernel.shm_rmid_forced is set to 1.
260 */
261static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
262{
263 return (shp->shm_nattch == 0) &&
264 (ns->shm_rmid_forced ||
265 (shp->shm_perm.mode & SHM_DEST));
266}
267
268/*
269 * remove the attach descriptor vma.
270 * free memory for segment if it is marked destroyed.
271 * The descriptor has already been removed from the current->mm->mmap list
272 * and will later be kfree()d.
273 */
274static void shm_close(struct vm_area_struct *vma)
275{
276 struct file *file = vma->vm_file;
277 struct shm_file_data *sfd = shm_file_data(file);
278 struct shmid_kernel *shp;
279 struct ipc_namespace *ns = sfd->ns;
280
281 down_write(&shm_ids(ns).rwsem);
282 /* remove from the list of attaches of the shm segment */
283 shp = shm_lock(ns, sfd->id);
284
285 /*
286 * We raced in the idr lookup or with shm_destroy().
287 * Either way, the ID is busted.
288 */
289 if (WARN_ON_ONCE(IS_ERR(shp)))
290 goto done; /* no-op */
291
292 shp->shm_lprid = task_tgid_vnr(current);
293 shp->shm_dtim = ktime_get_real_seconds();
294 shp->shm_nattch--;
295 if (shm_may_destroy(ns, shp))
296 shm_destroy(ns, shp);
297 else
298 shm_unlock(shp);
299done:
300 up_write(&shm_ids(ns).rwsem);
301}
302
303/* Called with shm_ids(ns).rwsem locked */
304static int shm_try_destroy_orphaned(int id, void *p, void *data)
305{
306 struct ipc_namespace *ns = data;
307 struct kern_ipc_perm *ipcp = p;
308 struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
309
310 /*
311 * We want to destroy segments without users and with already
312 * exit'ed originating process.
313 *
314 * As shp->* are changed under rwsem, it's safe to skip shp locking.
315 */
316 if (shp->shm_creator != NULL)
317 return 0;
318
319 if (shm_may_destroy(ns, shp)) {
320 shm_lock_by_ptr(shp);
321 shm_destroy(ns, shp);
322 }
323 return 0;
324}
325
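/*
 * Walk every segment in the namespace and destroy those whose creator
 * has exited and which have no remaining attaches. Called when the
 * kernel.shm_rmid_forced sysctl is switched on.
 */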
326void shm_destroy_orphaned(struct ipc_namespace *ns)
327{
328 down_write(&shm_ids(ns).rwsem);
329 if (shm_ids(ns).in_use)
330 idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
331 up_write(&shm_ids(ns).rwsem);
332}
333
334/* Locking assumes this will only be called with task == current */
335void exit_shm(struct task_struct *task)
336{
337 struct ipc_namespace *ns = task->nsproxy->ipc_ns;
338 struct shmid_kernel *shp, *n;
339
340 if (list_empty(&task->sysvshm.shm_clist))
341 return;
342
343 /*
344 * If kernel.shm_rmid_forced is not set then only keep track of
345 * which shmids are orphaned, so that a later set of the sysctl
346 * can clean them up.
347 */
348 if (!ns->shm_rmid_forced) {
349 down_read(&shm_ids(ns).rwsem);
350 list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
351 shp->shm_creator = NULL;
352 /*
353 * Only under read lock but we are only called on current
354 * so no entry on the list will be shared.
355 */
356 list_del(&task->sysvshm.shm_clist);
357 up_read(&shm_ids(ns).rwsem);
358 return;
359 }
360
361 /*
362 * Destroy all already created segments that were not yet mapped,
363 * and mark any mapped as orphan to cover the sysctl toggling.
364 * Destroy is skipped if shm_may_destroy() returns false.
365 */
366 down_write(&shm_ids(ns).rwsem);
367 list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
368 shp->shm_creator = NULL;
369
370 if (shm_may_destroy(ns, shp)) {
371 shm_lock_by_ptr(shp);
372 shm_destroy(ns, shp);
373 }
374 }
375
376 /* Remove the list head from any segments still attached. */
377 list_del(&task->sysvshm.shm_clist);
378 up_write(&shm_ids(ns).rwsem);
379}
380
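/*
 * The vm_operations and file_operations below are thin wrappers: the
 * per-attach file created in do_shmat() records the backing shmem (or
 * hugetlbfs) file in shm_file_data and forwards faults, mmap, fsync,
 * fallocate and NUMA policy calls to it.
 */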
381static int shm_fault(struct vm_fault *vmf)
382{
383 struct file *file = vmf->vma->vm_file;
384 struct shm_file_data *sfd = shm_file_data(file);
385
386 return sfd->vm_ops->fault(vmf);
387}
388
389static int shm_split(struct vm_area_struct *vma, unsigned long addr)
390{
391 struct file *file = vma->vm_file;
392 struct shm_file_data *sfd = shm_file_data(file);
393
394 if (sfd->vm_ops && sfd->vm_ops->split)
395 return sfd->vm_ops->split(vma, addr);
396
397 return 0;
398}
399
400#ifdef CONFIG_NUMA
401static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
402{
403 struct file *file = vma->vm_file;
404 struct shm_file_data *sfd = shm_file_data(file);
405 int err = 0;
406
407 if (sfd->vm_ops->set_policy)
408 err = sfd->vm_ops->set_policy(vma, new);
409 return err;
410}
411
412static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
413 unsigned long addr)
414{
415 struct file *file = vma->vm_file;
416 struct shm_file_data *sfd = shm_file_data(file);
417 struct mempolicy *pol = NULL;
418
419 if (sfd->vm_ops->get_policy)
420 pol = sfd->vm_ops->get_policy(vma, addr);
421 else if (vma->vm_policy)
422 pol = vma->vm_policy;
423
424 return pol;
425}
426#endif
427
428static int shm_mmap(struct file *file, struct vm_area_struct *vma)
429{
430 struct shm_file_data *sfd = shm_file_data(file);
431 int ret;
432
433 /*
434 * In case of remap_file_pages() emulation, the file can represent
435 * removed IPC ID: propagate shm_lock() error to caller.
436 */
437 ret = __shm_open(vma);
438 if (ret)
439 return ret;
440
441 ret = call_mmap(sfd->file, vma);
442 if (ret) {
443 shm_close(vma);
444 return ret;
445 }
446 sfd->vm_ops = vma->vm_ops;
447#ifdef CONFIG_MMU
448 WARN_ON(!sfd->vm_ops->fault);
449#endif
450 vma->vm_ops = &shm_vm_ops;
451 return 0;
452}
453
454static int shm_release(struct inode *ino, struct file *file)
455{
456 struct shm_file_data *sfd = shm_file_data(file);
457
458 put_ipc_ns(sfd->ns);
459 shm_file_data(file) = NULL;
460 kfree(sfd);
461 return 0;
462}
463
464static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
465{
466 struct shm_file_data *sfd = shm_file_data(file);
467
468 if (!sfd->file->f_op->fsync)
469 return -EINVAL;
470 return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
471}
472
473static long shm_fallocate(struct file *file, int mode, loff_t offset,
474 loff_t len)
475{
476 struct shm_file_data *sfd = shm_file_data(file);
477
478 if (!sfd->file->f_op->fallocate)
479 return -EOPNOTSUPP;
480 return sfd->file->f_op->fallocate(file, mode, offset, len);
481}
482
483static unsigned long shm_get_unmapped_area(struct file *file,
484 unsigned long addr, unsigned long len, unsigned long pgoff,
485 unsigned long flags)
486{
487 struct shm_file_data *sfd = shm_file_data(file);
488
489 return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
490 pgoff, flags);
491}
492
493static const struct file_operations shm_file_operations = {
494 .mmap = shm_mmap,
495 .fsync = shm_fsync,
496 .release = shm_release,
497 .get_unmapped_area = shm_get_unmapped_area,
498 .llseek = noop_llseek,
499 .fallocate = shm_fallocate,
500};
501
502/*
503 * shm_file_operations_huge is now identical to shm_file_operations,
504 * but we keep it distinct for the sake of is_file_shm_hugepages().
505 */
506static const struct file_operations shm_file_operations_huge = {
507 .mmap = shm_mmap,
508 .fsync = shm_fsync,
509 .release = shm_release,
510 .get_unmapped_area = shm_get_unmapped_area,
511 .llseek = noop_llseek,
512 .fallocate = shm_fallocate,
513};
514
515bool is_file_shm_hugepages(struct file *file)
516{
517 return file->f_op == &shm_file_operations_huge;
518}
519
520static const struct vm_operations_struct shm_vm_ops = {
521 .open = shm_open, /* callback for a new vm-area open */
522 .close = shm_close, /* callback for when the vm-area is released */
523 .fault = shm_fault,
524 .split = shm_split,
525#if defined(CONFIG_NUMA)
526 .set_policy = shm_set_policy,
527 .get_policy = shm_get_policy,
528#endif
529};
530
531/**
532 * newseg - Create a new shared memory segment
533 * @ns: namespace
534 * @params: ptr to the structure that contains key, size and shmflg
535 *
536 * Called with shm_ids.rwsem held as a writer.
537 */
538static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
539{
540 key_t key = params->key;
541 int shmflg = params->flg;
542 size_t size = params->u.size;
543 int error;
544 struct shmid_kernel *shp;
545 size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
546 struct file *file;
547 char name[13];
548 vm_flags_t acctflag = 0;
549
550 if (size < SHMMIN || size > ns->shm_ctlmax)
551 return -EINVAL;
552
553 if (numpages << PAGE_SHIFT < size)
554 return -ENOSPC;
555
556 if (ns->shm_tot + numpages < ns->shm_tot ||
557 ns->shm_tot + numpages > ns->shm_ctlall)
558 return -ENOSPC;
559
560 shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
561 if (unlikely(!shp))
562 return -ENOMEM;
563
564 shp->shm_perm.key = key;
565 shp->shm_perm.mode = (shmflg & S_IRWXUGO);
566 shp->mlock_user = NULL;
567
568 shp->shm_perm.security = NULL;
569 error = security_shm_alloc(shp);
570 if (error) {
571 kvfree(shp);
572 return error;
573 }
574
575 sprintf(name, "SYSV%08x", key);
576 if (shmflg & SHM_HUGETLB) {
577 struct hstate *hs;
578 size_t hugesize;
579
580 hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
581 if (!hs) {
582 error = -EINVAL;
583 goto no_file;
584 }
585 hugesize = ALIGN(size, huge_page_size(hs));
586
587 /* hugetlb_file_setup applies strict accounting */
588 if (shmflg & SHM_NORESERVE)
589 acctflag = VM_NORESERVE;
590 file = hugetlb_file_setup(name, hugesize, acctflag,
591 &shp->mlock_user, HUGETLB_SHMFS_INODE,
592 (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
593 } else {
594 /*
595 * Do not allow unaccounted (SHM_NORESERVE) mappings when the
596 * overcommit policy is OVERCOMMIT_NEVER, even if asked for.
597 */
598 if ((shmflg & SHM_NORESERVE) &&
599 sysctl_overcommit_memory != OVERCOMMIT_NEVER)
600 acctflag = VM_NORESERVE;
601 file = shmem_kernel_file_setup(name, size, acctflag);
602 }
603 error = PTR_ERR(file);
604 if (IS_ERR(file))
605 goto no_file;
606
607 shp->shm_cprid = task_tgid_vnr(current);
608 shp->shm_lprid = 0;
609 shp->shm_atim = shp->shm_dtim = 0;
610 shp->shm_ctim = ktime_get_real_seconds();
611 shp->shm_segsz = size;
612 shp->shm_nattch = 0;
613 shp->shm_file = file;
614 shp->shm_creator = current;
615
616 /* ipc_addid() locks shp upon success. */
617 error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
618 if (error < 0)
619 goto no_id;
620
621 list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
622
623 /*
624 * shmid gets reported as "inode#" in /proc/pid/maps.
625 * proc-ps tools use this. Changing this will break them.
626 */
627 file_inode(file)->i_ino = shp->shm_perm.id;
628
629 ns->shm_tot += numpages;
630 error = shp->shm_perm.id;
631
632 ipc_unlock_object(&shp->shm_perm);
633 rcu_read_unlock();
634 return error;
635
636no_id:
637 if (is_file_hugepages(file) && shp->mlock_user)
638 user_shm_unlock(size, shp->mlock_user);
639 fput(file);
640no_file:
641 call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
642 return error;
643}
644
645/*
646 * Called with shm_ids.rwsem and ipcp locked.
647 */
648static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
649{
650 struct shmid_kernel *shp;
651
652 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
653 return security_shm_associate(shp, shmflg);
654}
655
656/*
657 * Called with shm_ids.rwsem and ipcp locked.
658 */
659static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
660 struct ipc_params *params)
661{
662 struct shmid_kernel *shp;
663
664 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
665 if (shp->shm_segsz < params->u.size)
666 return -EINVAL;
667
668 return 0;
669}
670
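/*
 * shmget(2) entry point. A minimal userspace sketch of the two paths
 * (illustrative only):
 *
 *   shmget(IPC_PRIVATE, size, IPC_CREAT | 0600)  always creates via newseg();
 *   shmget(key, size, 0600)                      looks the key up and, if
 *                                                found, runs shm_more_checks().
 */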
671SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
672{
673 struct ipc_namespace *ns;
674 static const struct ipc_ops shm_ops = {
675 .getnew = newseg,
676 .associate = shm_security,
677 .more_checks = shm_more_checks,
678 };
679 struct ipc_params shm_params;
680
681 ns = current->nsproxy->ipc_ns;
682
683 shm_params.key = key;
684 shm_params.flg = shmflg;
685 shm_params.u.size = size;
686
687 return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
688}
689
690static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
691{
692 switch (version) {
693 case IPC_64:
694 return copy_to_user(buf, in, sizeof(*in));
695 case IPC_OLD:
696 {
697 struct shmid_ds out;
698
699 memset(&out, 0, sizeof(out));
700 ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
701 out.shm_segsz = in->shm_segsz;
702 out.shm_atime = in->shm_atime;
703 out.shm_dtime = in->shm_dtime;
704 out.shm_ctime = in->shm_ctime;
705 out.shm_cpid = in->shm_cpid;
706 out.shm_lpid = in->shm_lpid;
707 out.shm_nattch = in->shm_nattch;
708
709 return copy_to_user(buf, &out, sizeof(out));
710 }
711 default:
712 return -EINVAL;
713 }
714}
715
716static inline unsigned long
717copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
718{
719 switch (version) {
720 case IPC_64:
721 if (copy_from_user(out, buf, sizeof(*out)))
722 return -EFAULT;
723 return 0;
724 case IPC_OLD:
725 {
726 struct shmid_ds tbuf_old;
727
728 if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
729 return -EFAULT;
730
731 out->shm_perm.uid = tbuf_old.shm_perm.uid;
732 out->shm_perm.gid = tbuf_old.shm_perm.gid;
733 out->shm_perm.mode = tbuf_old.shm_perm.mode;
734
735 return 0;
736 }
737 default:
738 return -EINVAL;
739 }
740}
741
742static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
743{
744 switch (version) {
745 case IPC_64:
746 return copy_to_user(buf, in, sizeof(*in));
747 case IPC_OLD:
748 {
749 struct shminfo out;
750
751 if (in->shmmax > INT_MAX)
752 out.shmmax = INT_MAX;
753 else
754 out.shmmax = (int)in->shmmax;
755
756 out.shmmin = in->shmmin;
757 out.shmmni = in->shmmni;
758 out.shmseg = in->shmseg;
759 out.shmall = in->shmall;
760
761 return copy_to_user(buf, &out, sizeof(out));
762 }
763 default:
764 return -EINVAL;
765 }
766}
767
768/*
769 * Calculate and add used RSS and swap pages of a shm.
770 * Called with shm_ids.rwsem held as a reader
771 */
772static void shm_add_rss_swap(struct shmid_kernel *shp,
773 unsigned long *rss_add, unsigned long *swp_add)
774{
775 struct inode *inode;
776
777 inode = file_inode(shp->shm_file);
778
779 if (is_file_hugepages(shp->shm_file)) {
780 struct address_space *mapping = inode->i_mapping;
781 struct hstate *h = hstate_file(shp->shm_file);
782 *rss_add += pages_per_huge_page(h) * mapping->nrpages;
783 } else {
784#ifdef CONFIG_SHMEM
785 struct shmem_inode_info *info = SHMEM_I(inode);
786
787 spin_lock_irq(&info->lock);
788 *rss_add += inode->i_mapping->nrpages;
789 *swp_add += info->swapped;
790 spin_unlock_irq(&info->lock);
791#else
792 *rss_add += inode->i_mapping->nrpages;
793#endif
794 }
795}
796
797/*
798 * Called with shm_ids.rwsem held as a reader
799 */
800static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
801 unsigned long *swp)
802{
803 int next_id;
804 int total, in_use;
805
806 *rss = 0;
807 *swp = 0;
808
809 in_use = shm_ids(ns).in_use;
810
811 for (total = 0, next_id = 0; total < in_use; next_id++) {
812 struct kern_ipc_perm *ipc;
813 struct shmid_kernel *shp;
814
815 ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
816 if (ipc == NULL)
817 continue;
818 shp = container_of(ipc, struct shmid_kernel, shm_perm);
819
820 shm_add_rss_swap(shp, rss, swp);
821
822 total++;
823 }
824}
825
826/*
827 * This function handles some shmctl commands which require the rwsem
828 * to be held in write mode.
829 * NOTE: no locks must be held, the rwsem is taken inside this function.
830 */
831static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
832 struct shmid64_ds *shmid64)
833{
834 struct kern_ipc_perm *ipcp;
835 struct shmid_kernel *shp;
836 int err;
837
838 down_write(&shm_ids(ns).rwsem);
839 rcu_read_lock();
840
841 ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
842 &shmid64->shm_perm, 0);
843 if (IS_ERR(ipcp)) {
844 err = PTR_ERR(ipcp);
845 goto out_unlock1;
846 }
847
848 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
849
850 err = security_shm_shmctl(shp, cmd);
851 if (err)
852 goto out_unlock1;
853
854 switch (cmd) {
855 case IPC_RMID:
856 ipc_lock_object(&shp->shm_perm);
857 /* do_shm_rmid unlocks the ipc object and rcu */
858 do_shm_rmid(ns, ipcp);
859 goto out_up;
860 case IPC_SET:
861 ipc_lock_object(&shp->shm_perm);
862 err = ipc_update_perm(&shmid64->shm_perm, ipcp);
863 if (err)
864 goto out_unlock0;
865 shp->shm_ctim = ktime_get_real_seconds();
866 break;
867 default:
868 err = -EINVAL;
869 goto out_unlock1;
870 }
871
872out_unlock0:
873 ipc_unlock_object(&shp->shm_perm);
874out_unlock1:
875 rcu_read_unlock();
876out_up:
877 up_write(&shm_ids(ns).rwsem);
878 return err;
879}
880
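/*
 * IPC_INFO: report the namespace-wide limits in a shminfo64. The return
 * value is the highest in-use index (0 if none), which tools such as
 * ipcs(1) use as an upper bound when iterating with SHM_STAT.
 */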
881static int shmctl_ipc_info(struct ipc_namespace *ns,
882 struct shminfo64 *shminfo)
883{
884 int err = security_shm_shmctl(NULL, IPC_INFO);
885 if (!err) {
886 memset(shminfo, 0, sizeof(*shminfo));
887 shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
888 shminfo->shmmax = ns->shm_ctlmax;
889 shminfo->shmall = ns->shm_ctlall;
890 shminfo->shmmin = SHMMIN;
891 down_read(&shm_ids(ns).rwsem);
892 err = ipc_get_maxid(&shm_ids(ns));
893 up_read(&shm_ids(ns).rwsem);
894 if (err < 0)
895 err = 0;
896 }
897 return err;
898}
899
900static int shmctl_shm_info(struct ipc_namespace *ns,
901 struct shm_info *shm_info)
902{
903 int err = security_shm_shmctl(NULL, SHM_INFO);
904 if (!err) {
905 memset(shm_info, 0, sizeof(*shm_info));
906 down_read(&shm_ids(ns).rwsem);
907 shm_info->used_ids = shm_ids(ns).in_use;
908 shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
909 shm_info->shm_tot = ns->shm_tot;
910 shm_info->swap_attempts = 0;
911 shm_info->swap_successes = 0;
912 err = ipc_get_maxid(&shm_ids(ns));
913 up_read(&shm_ids(ns).rwsem);
914 if (err < 0)
915 err = 0;
916 }
917 return err;
918}
919
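/*
 * IPC_STAT/SHM_STAT: copy the segment's permissions, size, pids, times
 * and attach count into a shmid64_ds. For SHM_STAT the argument is an
 * index and the real shmid is returned; for IPC_STAT it is the id
 * itself and the sequence number is validated first.
 */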
920static int shmctl_stat(struct ipc_namespace *ns, int shmid,
921 int cmd, struct shmid64_ds *tbuf)
922{
923 struct shmid_kernel *shp;
924 int id = 0;
925 int err;
926
927 memset(tbuf, 0, sizeof(*tbuf));
928
929 rcu_read_lock();
930 if (cmd == SHM_STAT) {
931 shp = shm_obtain_object(ns, shmid);
932 if (IS_ERR(shp)) {
933 err = PTR_ERR(shp);
934 goto out_unlock;
935 }
936 id = shp->shm_perm.id;
937 } else {
938 shp = shm_obtain_object_check(ns, shmid);
939 if (IS_ERR(shp)) {
940 err = PTR_ERR(shp);
941 goto out_unlock;
942 }
943 }
944
945 err = -EACCES;
946 if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
947 goto out_unlock;
948
949 err = security_shm_shmctl(shp, cmd);
950 if (err)
951 goto out_unlock;
952
953 ipc_lock_object(&shp->shm_perm);
954
955 if (!ipc_valid_object(&shp->shm_perm)) {
956 ipc_unlock_object(&shp->shm_perm);
957 err = -EIDRM;
958 goto out_unlock;
959 }
960
961 kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
962 tbuf->shm_segsz = shp->shm_segsz;
963 tbuf->shm_atime = shp->shm_atim;
964 tbuf->shm_dtime = shp->shm_dtim;
965 tbuf->shm_ctime = shp->shm_ctim;
966 tbuf->shm_cpid = shp->shm_cprid;
967 tbuf->shm_lpid = shp->shm_lprid;
968 tbuf->shm_nattch = shp->shm_nattch;
969
970 ipc_unlock_object(&shp->shm_perm);
971 rcu_read_unlock();
972 return id;
973
974out_unlock:
975 rcu_read_unlock();
976 return err;
977}
978
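/*
 * SHM_LOCK/SHM_UNLOCK: pin the backing shmem pages in memory, or release
 * them, via shmem_lock(). Requires CAP_IPC_LOCK, or ownership plus a
 * non-zero RLIMIT_MEMLOCK for SHM_LOCK; hugetlb-backed segments are
 * never swapped and are left untouched.
 */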
979static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
980{
981 struct shmid_kernel *shp;
982 struct file *shm_file;
983 int err;
984
985 rcu_read_lock();
986 shp = shm_obtain_object_check(ns, shmid);
987 if (IS_ERR(shp)) {
988 err = PTR_ERR(shp);
989 goto out_unlock1;
990 }
991
992 audit_ipc_obj(&(shp->shm_perm));
993 err = security_shm_shmctl(shp, cmd);
994 if (err)
995 goto out_unlock1;
996
997 ipc_lock_object(&shp->shm_perm);
998
999 /* check if shm_destroy() is tearing down shp */
1000 if (!ipc_valid_object(&shp->shm_perm)) {
1001 err = -EIDRM;
1002 goto out_unlock0;
1003 }
1004
1005 if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
1006 kuid_t euid = current_euid();
1007
1008 if (!uid_eq(euid, shp->shm_perm.uid) &&
1009 !uid_eq(euid, shp->shm_perm.cuid)) {
1010 err = -EPERM;
1011 goto out_unlock0;
1012 }
1013 if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
1014 err = -EPERM;
1015 goto out_unlock0;
1016 }
1017 }
1018
1019 shm_file = shp->shm_file;
1020 if (is_file_hugepages(shm_file))
1021 goto out_unlock0;
1022
1023 if (cmd == SHM_LOCK) {
1024 struct user_struct *user = current_user();
1025
1026 err = shmem_lock(shm_file, 1, user);
1027 if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1028 shp->shm_perm.mode |= SHM_LOCKED;
1029 shp->mlock_user = user;
1030 }
1031 goto out_unlock0;
1032 }
1033
1034 /* SHM_UNLOCK */
1035 if (!(shp->shm_perm.mode & SHM_LOCKED))
1036 goto out_unlock0;
1037 shmem_lock(shm_file, 0, shp->mlock_user);
1038 shp->shm_perm.mode &= ~SHM_LOCKED;
1039 shp->mlock_user = NULL;
1040 get_file(shm_file);
1041 ipc_unlock_object(&shp->shm_perm);
1042 rcu_read_unlock();
1043 shmem_unlock_mapping(shm_file->f_mapping);
1044
1045 fput(shm_file);
1046 return err;
1047
1048out_unlock0:
1049 ipc_unlock_object(&shp->shm_perm);
1050out_unlock1:
1051 rcu_read_unlock();
1052 return err;
1053}
1054
1055SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
1056{
1057 int err, version;
1058 struct ipc_namespace *ns;
1059 struct shmid64_ds sem64;
1060
1061 if (cmd < 0 || shmid < 0)
1062 return -EINVAL;
1063
1064 version = ipc_parse_version(&cmd);
1065 ns = current->nsproxy->ipc_ns;
1066
1067 switch (cmd) {
1068 case IPC_INFO: {
1069 struct shminfo64 shminfo;
1070 err = shmctl_ipc_info(ns, &shminfo);
1071 if (err < 0)
1072 return err;
1073 if (copy_shminfo_to_user(buf, &shminfo, version))
1074 err = -EFAULT;
1075 return err;
1076 }
1077 case SHM_INFO: {
1078 struct shm_info shm_info;
1079 err = shmctl_shm_info(ns, &shm_info);
1080 if (err < 0)
1081 return err;
1082 if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
1083 err = -EFAULT;
1084 return err;
1085 }
1086 case SHM_STAT:
1087 case IPC_STAT: {
1088 err = shmctl_stat(ns, shmid, cmd, &sem64);
1089 if (err < 0)
1090 return err;
1091 if (copy_shmid_to_user(buf, &sem64, version))
1092 err = -EFAULT;
1093 return err;
1094 }
1095 case IPC_SET:
1096 if (copy_shmid_from_user(&sem64, buf, version))
1097 return -EFAULT;
1098 /* fallthru */
1099 case IPC_RMID:
1100 return shmctl_down(ns, shmid, cmd, &sem64);
1101 case SHM_LOCK:
1102 case SHM_UNLOCK:
1103 return shmctl_do_lock(ns, shmid, cmd);
1104 default:
1105 return -EINVAL;
1106 }
1107}
1108
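/*
 * Compat layer: 32-bit userspace on a 64-bit kernel passes the compat_*
 * layouts below, so each command is translated to and from the native
 * shmid64_ds/shminfo64 structures before reusing the helpers above.
 */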
1109#ifdef CONFIG_COMPAT
1110
1111struct compat_shmid_ds {
1112 struct compat_ipc_perm shm_perm;
1113 int shm_segsz;
1114 compat_time_t shm_atime;
1115 compat_time_t shm_dtime;
1116 compat_time_t shm_ctime;
1117 compat_ipc_pid_t shm_cpid;
1118 compat_ipc_pid_t shm_lpid;
1119 unsigned short shm_nattch;
1120 unsigned short shm_unused;
1121 compat_uptr_t shm_unused2;
1122 compat_uptr_t shm_unused3;
1123};
1124
1125struct compat_shminfo64 {
1126 compat_ulong_t shmmax;
1127 compat_ulong_t shmmin;
1128 compat_ulong_t shmmni;
1129 compat_ulong_t shmseg;
1130 compat_ulong_t shmall;
1131 compat_ulong_t __unused1;
1132 compat_ulong_t __unused2;
1133 compat_ulong_t __unused3;
1134 compat_ulong_t __unused4;
1135};
1136
1137struct compat_shm_info {
1138 compat_int_t used_ids;
1139 compat_ulong_t shm_tot, shm_rss, shm_swp;
1140 compat_ulong_t swap_attempts, swap_successes;
1141};
1142
1143static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
1144 int version)
1145{
1146 if (in->shmmax > INT_MAX)
1147 in->shmmax = INT_MAX;
1148 if (version == IPC_64) {
1149 struct compat_shminfo64 info;
1150 memset(&info, 0, sizeof(info));
1151 info.shmmax = in->shmmax;
1152 info.shmmin = in->shmmin;
1153 info.shmmni = in->shmmni;
1154 info.shmseg = in->shmseg;
1155 info.shmall = in->shmall;
1156 return copy_to_user(buf, &info, sizeof(info));
1157 } else {
1158 struct shminfo info;
1159 memset(&info, 0, sizeof(info));
1160 info.shmmax = in->shmmax;
1161 info.shmmin = in->shmmin;
1162 info.shmmni = in->shmmni;
1163 info.shmseg = in->shmseg;
1164 info.shmall = in->shmall;
1165 return copy_to_user(buf, &info, sizeof(info));
1166 }
1167}
1168
1169static int put_compat_shm_info(struct shm_info *ip,
1170 struct compat_shm_info __user *uip)
1171{
1172 struct compat_shm_info info;
1173
1174 memset(&info, 0, sizeof(info));
1175 info.used_ids = ip->used_ids;
1176 info.shm_tot = ip->shm_tot;
1177 info.shm_rss = ip->shm_rss;
1178 info.shm_swp = ip->shm_swp;
1179 info.swap_attempts = ip->swap_attempts;
1180 info.swap_successes = ip->swap_successes;
1181 return copy_to_user(uip, &info, sizeof(info));
1182}
1183
1184static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
1185 int version)
1186{
1187 if (version == IPC_64) {
1188 struct compat_shmid64_ds v;
1189 memset(&v, 0, sizeof(v));
1190 to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
1191 v.shm_atime = in->shm_atime;
1192 v.shm_dtime = in->shm_dtime;
1193 v.shm_ctime = in->shm_ctime;
1194 v.shm_segsz = in->shm_segsz;
1195 v.shm_nattch = in->shm_nattch;
1196 v.shm_cpid = in->shm_cpid;
1197 v.shm_lpid = in->shm_lpid;
1198 return copy_to_user(buf, &v, sizeof(v));
1199 } else {
1200 struct compat_shmid_ds v;
1201 memset(&v, 0, sizeof(v));
1202 to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
1203 v.shm_perm.key = in->shm_perm.key;
1204 v.shm_atime = in->shm_atime;
1205 v.shm_dtime = in->shm_dtime;
1206 v.shm_ctime = in->shm_ctime;
1207 v.shm_segsz = in->shm_segsz;
1208 v.shm_nattch = in->shm_nattch;
1209 v.shm_cpid = in->shm_cpid;
1210 v.shm_lpid = in->shm_lpid;
1211 return copy_to_user(buf, &v, sizeof(v));
1212 }
1213}
1214
1215static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
1216 int version)
1217{
1218 memset(out, 0, sizeof(*out));
1219 if (version == IPC_64) {
1220 struct compat_shmid64_ds __user *p = buf;
1221 return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
1222 } else {
1223 struct compat_shmid_ds __user *p = buf;
1224 return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
1225 }
1226}
1227
1228COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
1229{
1230 struct ipc_namespace *ns;
1231 struct shmid64_ds sem64;
1232 int version = compat_ipc_parse_version(&cmd);
1233 int err;
1234
1235 ns = current->nsproxy->ipc_ns;
1236
1237 if (cmd < 0 || shmid < 0)
1238 return -EINVAL;
1239
1240 switch (cmd) {
1241 case IPC_INFO: {
1242 struct shminfo64 shminfo;
1243 err = shmctl_ipc_info(ns, &shminfo);
1244 if (err < 0)
1245 return err;
1246 if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
1247 err = -EFAULT;
1248 return err;
1249 }
1250 case SHM_INFO: {
1251 struct shm_info shm_info;
1252 err = shmctl_shm_info(ns, &shm_info);
1253 if (err < 0)
1254 return err;
1255 if (put_compat_shm_info(&shm_info, uptr))
1256 err = -EFAULT;
1257 return err;
1258 }
1259 case IPC_STAT:
1260 case SHM_STAT:
1261 err = shmctl_stat(ns, shmid, cmd, &sem64);
1262 if (err < 0)
1263 return err;
1264 if (copy_compat_shmid_to_user(uptr, &sem64, version))
1265 err = -EFAULT;
1266 return err;
1267
1268 case IPC_SET:
1269 if (copy_compat_shmid_from_user(&sem64, uptr, version))
1270 return -EFAULT;
1271 /* fallthru */
1272 case IPC_RMID:
1273 return shmctl_down(ns, shmid, cmd, &sem64);
1274 case SHM_LOCK:
1275 case SHM_UNLOCK:
1276 return shmctl_do_lock(ns, shmid, cmd);
1277 break;
1278 default:
1279 return -EINVAL;
1280 }
1281 return err;
1282}
1283#endif
1284
1285/*
1286 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
1287 *
1288 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
1289 * "raddr" thing points to kernel space, and there has to be a wrapper around
1290 * this.
1291 */
1292long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1293 ulong *raddr, unsigned long shmlba)
1294{
1295 struct shmid_kernel *shp;
1296 unsigned long addr = (unsigned long)shmaddr;
1297 unsigned long size;
1298 struct file *file;
1299 int err;
1300 unsigned long flags = MAP_SHARED;
1301 unsigned long prot;
1302 int acc_mode;
1303 struct ipc_namespace *ns;
1304 struct shm_file_data *sfd;
1305 struct path path;
1306 fmode_t f_mode;
1307 unsigned long populate = 0;
1308
1309 err = -EINVAL;
1310 if (shmid < 0)
1311 goto out;
1312
1313 if (addr) {
1314 if (addr & (shmlba - 1)) {
1315 /*
1316 * Round down to the nearest multiple of shmlba.
1317 * For sane do_mmap_pgoff() parameters, avoid
1318 * round downs that trigger nil-page and MAP_FIXED.
1319 */
1320 if ((shmflg & SHM_RND) && addr >= shmlba)
1321 addr &= ~(shmlba - 1);
1322 else
1323#ifndef __ARCH_FORCE_SHMLBA
1324 if (addr & ~PAGE_MASK)
1325#endif
1326 goto out;
1327 }
1328
1329 flags |= MAP_FIXED;
1330 } else if ((shmflg & SHM_REMAP))
1331 goto out;
1332
1333 if (shmflg & SHM_RDONLY) {
1334 prot = PROT_READ;
1335 acc_mode = S_IRUGO;
1336 f_mode = FMODE_READ;
1337 } else {
1338 prot = PROT_READ | PROT_WRITE;
1339 acc_mode = S_IRUGO | S_IWUGO;
1340 f_mode = FMODE_READ | FMODE_WRITE;
1341 }
1342 if (shmflg & SHM_EXEC) {
1343 prot |= PROT_EXEC;
1344 acc_mode |= S_IXUGO;
1345 }
1346
1347 /*
1348 * We cannot rely on the fs check since SYSV IPC does have an
1349 * additional creator id...
1350 */
1351 ns = current->nsproxy->ipc_ns;
1352 rcu_read_lock();
1353 shp = shm_obtain_object_check(ns, shmid);
1354 if (IS_ERR(shp)) {
1355 err = PTR_ERR(shp);
1356 goto out_unlock;
1357 }
1358
1359 err = -EACCES;
1360 if (ipcperms(ns, &shp->shm_perm, acc_mode))
1361 goto out_unlock;
1362
1363 err = security_shm_shmat(shp, shmaddr, shmflg);
1364 if (err)
1365 goto out_unlock;
1366
1367 ipc_lock_object(&shp->shm_perm);
1368
1369 /* check if shm_destroy() is tearing down shp */
1370 if (!ipc_valid_object(&shp->shm_perm)) {
1371 ipc_unlock_object(&shp->shm_perm);
1372 err = -EIDRM;
1373 goto out_unlock;
1374 }
1375
1376 path = shp->shm_file->f_path;
1377 path_get(&path);
1378 shp->shm_nattch++;
1379 size = i_size_read(d_inode(path.dentry));
1380 ipc_unlock_object(&shp->shm_perm);
1381 rcu_read_unlock();
1382
1383 err = -ENOMEM;
1384 sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
1385 if (!sfd) {
1386 path_put(&path);
1387 goto out_nattch;
1388 }
1389
1390 file = alloc_file(&path, f_mode,
1391 is_file_hugepages(shp->shm_file) ?
1392 &shm_file_operations_huge :
1393 &shm_file_operations);
1394 err = PTR_ERR(file);
1395 if (IS_ERR(file)) {
1396 kfree(sfd);
1397 path_put(&path);
1398 goto out_nattch;
1399 }
1400
1401 file->private_data = sfd;
1402 file->f_mapping = shp->shm_file->f_mapping;
1403 sfd->id = shp->shm_perm.id;
1404 sfd->ns = get_ipc_ns(ns);
1405 sfd->file = shp->shm_file;
1406 sfd->vm_ops = NULL;
1407
1408 err = security_mmap_file(file, prot, flags);
1409 if (err)
1410 goto out_fput;
1411
1412 if (down_write_killable(&current->mm->mmap_sem)) {
1413 err = -EINTR;
1414 goto out_fput;
1415 }
1416
1417 if (addr && !(shmflg & SHM_REMAP)) {
1418 err = -EINVAL;
1419 if (addr + size < addr)
1420 goto invalid;
1421
1422 if (find_vma_intersection(current->mm, addr, addr + size))
1423 goto invalid;
1424 }
1425
1426 addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL);
1427 *raddr = addr;
1428 err = 0;
1429 if (IS_ERR_VALUE(addr))
1430 err = (long)addr;
1431invalid:
1432 up_write(&current->mm->mmap_sem);
1433 if (populate)
1434 mm_populate(addr, populate);
1435
1436out_fput:
1437 fput(file);
1438
1439out_nattch:
1440 down_write(&shm_ids(ns).rwsem);
1441 shp = shm_lock(ns, shmid);
1442 shp->shm_nattch--;
1443 if (shm_may_destroy(ns, shp))
1444 shm_destroy(ns, shp);
1445 else
1446 shm_unlock(shp);
1447 up_write(&shm_ids(ns).rwsem);
1448 return err;
1449
1450out_unlock:
1451 rcu_read_unlock();
1452out:
1453 return err;
1454}
1455
1456SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
1457{
1458 unsigned long ret;
1459 long err;
1460
1461 err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
1462 if (err)
1463 return err;
1464 force_successful_syscall_return();
1465 return (long)ret;
1466}
1467
1468#ifdef CONFIG_COMPAT
1469
1470#ifndef COMPAT_SHMLBA
1471#define COMPAT_SHMLBA SHMLBA
1472#endif
1473
1474COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
1475{
1476 unsigned long ret;
1477 long err;
1478
1479 err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
1480 if (err)
1481 return err;
1482 force_successful_syscall_return();
1483 return (long)ret;
1484}
1485#endif
1486
1487/*
1488 * detach and kill segment if marked destroyed.
1489 * The work is done in shm_close.
1490 */
1491SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
1492{
1493 struct mm_struct *mm = current->mm;
1494 struct vm_area_struct *vma;
1495 unsigned long addr = (unsigned long)shmaddr;
1496 int retval = -EINVAL;
1497#ifdef CONFIG_MMU
1498 loff_t size = 0;
1499 struct file *file;
1500 struct vm_area_struct *next;
1501#endif
1502
1503 if (addr & ~PAGE_MASK)
1504 return retval;
1505
1506 if (down_write_killable(&mm->mmap_sem))
1507 return -EINTR;
1508
1509 /*
1510 * This function tries to be smart and unmap shm segments that
1511 * were modified by partial mlock or munmap calls:
1512 * - It first determines the size of the shm segment that should be
1513 * unmapped: It searches for a vma that is backed by shm and that
1514 * started at address shmaddr. It records its size and then unmaps
1515 * it.
1516 * - Then it unmaps all shm vmas that started at shmaddr and that
1517 * are within the initially determined size and that are from the
1518 * same shm segment from which we determined the size.
1519 * Errors from do_munmap are ignored: the function only fails if
1520 * it's called with invalid parameters or if it's called to unmap
1521 * a part of a vma. Both calls in this function are for full vmas,
1522 * the parameters are directly copied from the vma itself and always
1523 * valid - therefore do_munmap cannot fail. (famous last words?)
1524 */
1525 /*
1526 * If it had been mremap()'d, the starting address would not
1527 * match the usual checks anyway. So assume all vma's are
1528 * above the starting address given.
1529 */
1530 vma = find_vma(mm, addr);
1531
1532#ifdef CONFIG_MMU
1533 while (vma) {
1534 next = vma->vm_next;
1535
1536 /*
1537 * Check if the starting address would match, i.e. it's
1538 * a fragment created by mprotect() and/or munmap(), or it
1539 * otherwise starts at this address with no hassles.
1540 */
1541 if ((vma->vm_ops == &shm_vm_ops) &&
1542 (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {
1543
1544 /*
1545 * Record the file of the shm segment being
1546 * unmapped. With mremap(), someone could place
1547 * page from another segment but with equal offsets
1548 * in the range we are unmapping.
1549 */
1550 file = vma->vm_file;
1551 size = i_size_read(file_inode(vma->vm_file));
1552 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1553 /*
1554 * We discovered the size of the shm segment, so
1555 * break out of here and fall through to the next
1556 * loop that uses the size information to stop
1557 * searching for matching vma's.
1558 */
1559 retval = 0;
1560 vma = next;
1561 break;
1562 }
1563 vma = next;
1564 }
1565
1566 /*
1567 * We need look no further than the maximum address a fragment
1568 * could possibly have landed at. Also cast things to loff_t to
1569 * prevent overflows and make comparisons vs. equal-width types.
1570 */
1571 size = PAGE_ALIGN(size);
1572 while (vma && (loff_t)(vma->vm_end - addr) <= size) {
1573 next = vma->vm_next;
1574
1575 /* finding a matching vma now does not alter retval */
1576 if ((vma->vm_ops == &shm_vm_ops) &&
1577 ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
1578 (vma->vm_file == file))
1579 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1580 vma = next;
1581 }
1582
1583#else /* CONFIG_MMU */
1584 /* under NOMMU conditions, the exact address to be destroyed must be
1585 * given
1586 */
1587 if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
1588 do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
1589 retval = 0;
1590 }
1591
1592#endif
1593
1594 up_write(&mm->mmap_sem);
1595 return retval;
1596}
1597
1598#ifdef CONFIG_PROC_FS
1599static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
1600{
1601 struct user_namespace *user_ns = seq_user_ns(s);
1602 struct kern_ipc_perm *ipcp = it;
1603 struct shmid_kernel *shp;
1604 unsigned long rss = 0, swp = 0;
1605
1606 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1607 shm_add_rss_swap(shp, &rss, &swp);
1608
1609#if BITS_PER_LONG <= 32
1610#define SIZE_SPEC "%10lu"
1611#else
1612#define SIZE_SPEC "%21lu"
1613#endif
1614
1615 seq_printf(s,
1616 "%10d %10d %4o " SIZE_SPEC " %5u %5u "
1617 "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
1618 SIZE_SPEC " " SIZE_SPEC "\n",
1619 shp->shm_perm.key,
1620 shp->shm_perm.id,
1621 shp->shm_perm.mode,
1622 shp->shm_segsz,
1623 shp->shm_cprid,
1624 shp->shm_lprid,
1625 shp->shm_nattch,
1626 from_kuid_munged(user_ns, shp->shm_perm.uid),
1627 from_kgid_munged(user_ns, shp->shm_perm.gid),
1628 from_kuid_munged(user_ns, shp->shm_perm.cuid),
1629 from_kgid_munged(user_ns, shp->shm_perm.cgid),
1630 shp->shm_atim,
1631 shp->shm_dtim,
1632 shp->shm_ctim,
1633 rss * PAGE_SIZE,
1634 swp * PAGE_SIZE);
1635
1636 return 0;
1637}
1638#endif