fs/aio.c (+15, -7)

--- a/fs/aio.c
+++ b/fs/aio.c
@@ -278,11 +278,11 @@
 	return 0;
 }
 
-static void aio_ring_remap(struct file *file, struct vm_area_struct *vma)
+static int aio_ring_remap(struct file *file, struct vm_area_struct *vma)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct kioctx_table *table;
-	int i;
+	int i, res = -EINVAL;
 
 	spin_lock(&mm->ioctx_lock);
 	rcu_read_lock();
@@ -292,13 +292,17 @@
 
 		ctx = table->table[i];
 		if (ctx && ctx->aio_ring_file == file) {
-			ctx->user_id = ctx->mmap_base = vma->vm_start;
+			if (!atomic_read(&ctx->dead)) {
+				ctx->user_id = ctx->mmap_base = vma->vm_start;
+				res = 0;
+			}
 			break;
 		}
 	}
 
 	rcu_read_unlock();
 	spin_unlock(&mm->ioctx_lock);
+	return res;
 }
 
 static const struct file_operations aio_ring_fops = {
@@ -731,6 +727,9 @@
 err_cleanup:
 	aio_nr_sub(ctx->max_reqs);
 err_ctx:
+	atomic_set(&ctx->dead, 1);
+	if (ctx->mmap_size)
+		vm_munmap(ctx->mmap_base, ctx->mmap_size);
 	aio_free_ring(ctx);
 err:
 	mutex_unlock(&ctx->ring_lock);
@@ -755,11 +748,12 @@
 {
 	struct kioctx_table *table;
 
-	if (atomic_xchg(&ctx->dead, 1))
-		return -EINVAL;
-
-
 	spin_lock(&mm->ioctx_lock);
+	if (atomic_xchg(&ctx->dead, 1)) {
+		spin_unlock(&mm->ioctx_lock);
+		return -EINVAL;
+	}
+
 	table = rcu_dereference_raw(mm->ioctx_table);
 	WARN_ON(ctx != table->table[ctx->id]);
 	table->table[ctx->id] = NULL;
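Taken together, the fs/aio.c hunks make the remap hook report whether the rebase actually happened. As a reading aid, here is the post-patch aio_ring_remap() assembled from the hunks; the loop header hidden in the collapsed context is filled in from the surrounding code, so treat it as a close paraphrase rather than a verbatim quote:

static int aio_ring_remap(struct file *file, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct kioctx_table *table;
	int i, res = -EINVAL;

	spin_lock(&mm->ioctx_lock);
	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);
	for (i = 0; i < table->nr; i++) {
		struct kioctx *ctx;

		ctx = table->table[i];
		if (ctx && ctx->aio_ring_file == file) {
			/* Only rebase a context that kill_ioctx() has not
			 * already marked dead; otherwise keep -EINVAL. */
			if (!atomic_read(&ctx->dead)) {
				ctx->user_id = ctx->mmap_base = vma->vm_start;
				res = 0;
			}
			break;
		}
	}

	rcu_read_unlock();
	spin_unlock(&mm->ioctx_lock);
	return res;
}

With kill_ioctx() now setting ctx->dead while holding mm->ioctx_lock (the last hunk), the two paths serialize on that lock: a racing remap either sees the context alive and updates it, or sees it dead and fails.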
fs/ocfs2/file.c (+11, -6)

--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2394,7 +2394,6 @@
 	/*
 	 * for completing the rest of the request.
 	 */
-	*ppos += written;
 	count -= written;
 	written_buffered = generic_perform_write(file, from, *ppos);
 	/*
@@ -2408,7 +2409,6 @@
 		goto out_dio;
 	}
 
-	iocb->ki_pos = *ppos + written_buffered;
 	/* We need to ensure that the page cache pages are written to
 	 * disk and invalidated to preserve the expected O_DIRECT
 	 * semantics.
@@ -2416,6 +2418,7 @@
 	ret = filemap_write_and_wait_range(file->f_mapping, *ppos,
 			endbyte);
 	if (ret == 0) {
+		iocb->ki_pos = *ppos + written_buffered;
 		written += written_buffered;
 		invalidate_mapping_pages(mapping,
 				*ppos >> PAGE_CACHE_SHIFT,
@@ -2439,10 +2440,14 @@
 	/* buffered aio wouldn't have proper lock coverage today */
 	BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
 
+	if (unlikely(written <= 0))
+		goto no_sync;
+
 	if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
 	    ((file->f_flags & O_DIRECT) && !direct_io)) {
-		ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
-					       *ppos + count - 1);
+		ret = filemap_fdatawrite_range(file->f_mapping,
+					       iocb->ki_pos - written,
+					       iocb->ki_pos - 1);
 		if (ret < 0)
 			written = ret;
 
@@ -2457,10 +2454,12 @@
 	}
 
 	if (!ret)
-		ret = filemap_fdatawait_range(file->f_mapping, *ppos,
-					      *ppos + count - 1);
+		ret = filemap_fdatawait_range(file->f_mapping,
+					      iocb->ki_pos - written,
+					      iocb->ki_pos - 1);
 	}
 
+no_sync:
 	/*
 	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
 	 * function pointer which is called when o_direct io completes so that
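The fs/ocfs2/file.c hunks stop advancing the file position speculatively: *ppos and iocb->ki_pos now move only after the buffered fallback has been flushed successfully, the sync range is derived from what was actually written rather than from the requested count, and a write that produced nothing (written <= 0) skips the sync entirely via the new no_sync label. A quick check of the new range arithmetic, with hypothetical numbers:

	/* Hypothetical values, only to sanity-check the range bounds. */
	loff_t pos_on_entry = 3072;			/* position before the write */
	ssize_t written     = 1024;			/* bytes actually written    */
	loff_t ki_pos       = pos_on_entry + written;	/* 4096 after the write      */

	loff_t start = ki_pos - written;	/* 3072, first byte written */
	loff_t end   = ki_pos - 1;		/* 4095, last byte written  */

The old bounds [*ppos, *ppos + count - 1] mixed the current position with a count that could still reflect the original request, so they could miss or overshoot when a direct write fell back to buffered I/O; the new bounds [iocb->ki_pos - written, iocb->ki_pos - 1] cover exactly the bytes this call wrote.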
include/linux/fs.h (+1, -1)

--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1549,7 +1549,7 @@
 	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
 	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
 	int (*mmap) (struct file *, struct vm_area_struct *);
-	void (*mremap)(struct file *, struct vm_area_struct *);
+	int (*mremap)(struct file *, struct vm_area_struct *);
 	int (*open) (struct inode *, struct file *);
 	int (*flush) (struct file *, fl_owner_t id);
 	int (*release) (struct inode *, struct file *);
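Since ->mremap() can now fail, an implementation returns 0 to accept the relocated vma or a negative errno to veto the move. A minimal sketch of the new contract, using hypothetical my_* names that are not part of this patch:

#include <linux/fs.h>
#include <linux/mm.h>

struct my_private {
	unsigned long	base;	/* current userspace address of the mapping */
	bool		dead;	/* set once the backing object is torn down */
};

static int my_mremap(struct file *file, struct vm_area_struct *vma)
{
	struct my_private *priv = file->private_data;

	if (priv->dead)			/* mirror aio's dead-context check */
		return -EINVAL;		/* move_vma() will undo the move   */

	priv->base = vma->vm_start;	/* track the mapping's new location */
	return 0;
}

static const struct file_operations my_fops = {
	.mremap	= my_mremap,
};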
mm/mremap.c (+8, -2)

--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -286,8 +286,14 @@
 		old_len = new_len;
 		old_addr = new_addr;
 		new_addr = -ENOMEM;
-	} else if (vma->vm_file && vma->vm_file->f_op->mremap)
-		vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
+	} else if (vma->vm_file && vma->vm_file->f_op->mremap) {
+		err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
+		if (err < 0) {
+			move_page_tables(new_vma, new_addr, vma, old_addr,
+					 moved_len, true);
+			return err;
+		}
+	}
 
 	/* Conceal VM_ACCOUNT so old reservation is not undone */
 	if (vm_flags & VM_ACCOUNT) {
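On a vetoed move this new mm/mremap.c path calls move_page_tables() in the reverse direction, so the page tables migrate back to old_addr and the caller's original mapping stays usable; mremap() simply reports the hook's errno. From userspace the failure mode looks roughly like this (a sketch with hypothetical addresses, not taken from the patch):

#define _GNU_SOURCE			/* for MREMAP_MAYMOVE/MREMAP_FIXED */
#include <sys/mman.h>
#include <stdio.h>

static void *try_move(void *old_addr, size_t len, void *new_addr)
{
	void *p = mremap(old_addr, len, len,
			 MREMAP_MAYMOVE | MREMAP_FIXED, new_addr);
	if (p == MAP_FAILED)		/* e.g. EINVAL from aio_ring_remap()
					 * on a dying ioctx; old_addr is
					 * still mapped at this point */
		perror("mremap");
	return p;
}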