Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'stable/for-linus-3.8-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

Pull Xen fixes from Konrad Rzeszutek Wilk:
- CVE-2013-0190/XSA-40 (or stack corruption for 32-bit PV kernels)
- Fix racy vma access spotted by Al Viro
- Fix mmap batch ioctl potentially resulting in large O(n) page allocations.
- Fix vcpu online/offline "BUG: scheduling while atomic".
- Fix unbound buffer scanning for more than 32 vCPUs.
- Fix grant table being incorrectly initialized
- Fix incorrect check in pciback
- Allow privcmd in backend domains.

Fix up whitespace conflict due to ugly merge resolution in Xen tree in
arch/arm/xen/enlighten.c

* tag 'stable/for-linus-3.8-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
xen: Fix stack corruption in xen_failsafe_callback for 32bit PVOPS guests.
Revert "xen/smp: Fix CPU online/offline bug triggering a BUG: scheduling while atomic."
xen/gntdev: remove erroneous use of copy_to_user
xen/gntdev: correctly unmap unlinked maps in mmu notifier
xen/gntdev: fix unsafe vma access
xen/privcmd: Fix mmap batch ioctl.
Xen: properly bound buffer access when parsing cpu/*/availability
xen/grant-table: correctly initialize grant table version 1
x86/xen : Fix the wrong check in pciback
xen/privcmd: Relax access control in privcmd_ioctl_mmap

+168 -115
-1
arch/x86/kernel/entry_32.S
···
 	lea 16(%esp),%esp
 	CFI_ADJUST_CFA_OFFSET -16
 	jz 5f
-	addl $16,%esp
 	jmp iret_exc
 5:	pushl_cfi $-1	/* orig_ax = -1 => not a system call */
 	SAVE_ALL
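The removed addl $16,%esp advanced the stack pointer a second time: the lea 16(%esp),%esp above it had already dropped the 16 bytes of segment-register slots, so the extra adjustment made the iret path read 16 bytes past the live frame. A minimal userspace analogue of that double adjustment (frame layout and values are invented for illustration, not the real failsafe frame):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define STACK_WORDS 16

int main(void)
{
	uint32_t stack[STACK_WORDS];
	int esp = STACK_WORDS;		/* empty descending stack, in words */

	/* iret frame is pushed first, then four segment-register words */
	stack[--esp] = 0xc0de;		/* "return EIP" the iret path reads */
	for (int i = 0; i < 4; i++)
		stack[--esp] = 0xdead0000u + i;

	esp += 4;			/* lea 16(%esp),%esp: drop the 4 words once */

	/* buggy path: the removed "addl $16,%esp" dropped them a second time */
	int buggy_esp = esp + 4;

	printf("correct iret frame: %#x\n", (unsigned)stack[esp]);  /* 0xc0de */
	assert(esp == STACK_WORDS - 1);
	assert(buggy_esp > STACK_WORDS);	/* overshoots the live frame */
	printf("buggy esp overshoots by %d words\n", buggy_esp - esp);
	return 0;
}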
-7
arch/x86/xen/smp.c
···
 	play_dead_common();
 	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
 	cpu_bringup();
-	/*
-	 * Balance out the preempt calls - as we are running in cpu_idle
-	 * loop which has been called at bootup from cpu_bringup_and_idle.
-	 * The cpucpu_bringup_and_idle called cpu_bringup which made a
-	 * preempt_disable() So this preempt_enable will balance it out.
-	 */
-	preempt_enable();
 }
 
 #else /* !CONFIG_HOTPLUG_CPU */
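The reverted hunk had added a preempt_enable() claiming to balance the preempt_disable() done during CPU bring-up; the revert removes it again. As a toy illustration only (the full rationale is in the reverted commit, not reproduced here), an enable without a genuinely matching disable underflows the preemption count, which is the bookkeeping behind "scheduling while atomic" splats:

#include <assert.h>
#include <stdio.h>

/* Toy preempt counter: disable increments, enable decrements. */
static int preempt_count;

static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }

int main(void)
{
	preempt_disable();	/* taken once at bring-up */

	preempt_enable();	/* the legitimate balance, done elsewhere */
	preempt_enable();	/* the extra one the revert removes */

	printf("preempt_count = %d\n", preempt_count);
	assert(preempt_count < 0);	/* underflow: count is now wrong */
	return 0;
}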
+2 -2
drivers/xen/cpu_hotplug.c
···
 static int vcpu_online(unsigned int cpu)
 {
 	int err;
-	char dir[32], state[32];
+	char dir[16], state[16];
 
 	sprintf(dir, "cpu/%u", cpu);
-	err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
+	err = xenbus_scanf(XBT_NIL, dir, "availability", "%15s", state);
 	if (err != 1) {
 		if (!xen_initial_domain())
 			printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
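The fix bounds both the buffer and the scan: a plain %s writes as many bytes as the xenstore value holds, while %15s caps the write at 15 characters plus the terminating NUL, which fits the 16-byte buffer. A userspace sketch of the same pattern using sscanf (xenbus_scanf's formats are scanf-like; the sample string is invented):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *xenstore_value = "online-but-much-longer-than-expected";
	char state[16];

	/* Unsafe: sscanf(xenstore_value, "%s", state) could write 38 bytes. */

	if (sscanf(xenstore_value, "%15s", state) == 1)
		printf("state = \"%s\" (%zu bytes, bounded)\n",
		       state, strlen(state));
	return 0;
}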
+88 -42
drivers/xen/gntdev.c
···
 static atomic_t pages_mapped = ATOMIC_INIT(0);
 
 static int use_ptemod;
+#define populate_freeable_maps use_ptemod
 
 struct gntdev_priv {
+	/* maps with visible offsets in the file descriptor */
 	struct list_head maps;
-	/* lock protects maps from concurrent changes */
+	/* maps that are not visible; will be freed on munmap.
+	 * Only populated if populate_freeable_maps == 1 */
+	struct list_head freeable_maps;
+	/* lock protects maps and freeable_maps */
 	spinlock_t lock;
 	struct mm_struct *mm;
 	struct mmu_notifier mn;
···
 	return NULL;
 }
 
-static void gntdev_put_map(struct grant_map *map)
+static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
 {
 	if (!map)
 		return;
···
 	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
 		notify_remote_via_evtchn(map->notify.event);
 		evtchn_put(map->notify.event);
+	}
+
+	if (populate_freeable_maps && priv) {
+		spin_lock(&priv->lock);
+		list_del(&map->next);
+		spin_unlock(&priv->lock);
 	}
 
 	if (map->pages && !use_ptemod)
···
 
 	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
 		int pgno = (map->notify.addr >> PAGE_SHIFT);
-		if (pgno >= offset && pgno < offset + pages && use_ptemod) {
-			void __user *tmp = (void __user *)
-				map->vma->vm_start + map->notify.addr;
-			err = copy_to_user(tmp, &err, 1);
-			if (err)
-				return -EFAULT;
-			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
-		} else if (pgno >= offset && pgno < offset + pages) {
-			uint8_t *tmp = kmap(map->pages[pgno]);
+		if (pgno >= offset && pgno < offset + pages) {
+			/* No need for kmap, pages are in lowmem */
+			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
 			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
-			kunmap(map->pages[pgno]);
 			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
 		}
 	}
···
 static void gntdev_vma_close(struct vm_area_struct *vma)
 {
 	struct grant_map *map = vma->vm_private_data;
+	struct file *file = vma->vm_file;
+	struct gntdev_priv *priv = file->private_data;
 
 	pr_debug("gntdev_vma_close %p\n", vma);
-	map->vma = NULL;
+	if (use_ptemod) {
+		/* It is possible that an mmu notifier could be running
+		 * concurrently, so take priv->lock to ensure that the vma won't
+		 * vanishing during the unmap_grant_pages call, since we will
+		 * spin here until that completes. Such a concurrent call will
+		 * not do any unmapping, since that has been done prior to
+		 * closing the vma, but it may still iterate the unmap_ops list.
+		 */
+		spin_lock(&priv->lock);
+		map->vma = NULL;
+		spin_unlock(&priv->lock);
+	}
 	vma->vm_private_data = NULL;
-	gntdev_put_map(map);
+	gntdev_put_map(priv, map);
 }
 
 static struct vm_operations_struct gntdev_vmops = {
···
 
 /* ------------------------------------------------------------------ */
 
+static void unmap_if_in_range(struct grant_map *map,
+			      unsigned long start, unsigned long end)
+{
+	unsigned long mstart, mend;
+	int err;
+
+	if (!map->vma)
+		return;
+	if (map->vma->vm_start >= end)
+		return;
+	if (map->vma->vm_end <= start)
+		return;
+	mstart = max(start, map->vma->vm_start);
+	mend = min(end, map->vma->vm_end);
+	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
+			map->index, map->count,
+			map->vma->vm_start, map->vma->vm_end,
+			start, end, mstart, mend);
+	err = unmap_grant_pages(map,
+				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
+				(mend - mstart) >> PAGE_SHIFT);
+	WARN_ON(err);
+}
+
 static void mn_invl_range_start(struct mmu_notifier *mn,
 				struct mm_struct *mm,
 				unsigned long start, unsigned long end)
 {
 	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
 	struct grant_map *map;
-	unsigned long mstart, mend;
-	int err;
 
 	spin_lock(&priv->lock);
 	list_for_each_entry(map, &priv->maps, next) {
-		if (!map->vma)
-			continue;
-		if (map->vma->vm_start >= end)
-			continue;
-		if (map->vma->vm_end <= start)
-			continue;
-		mstart = max(start, map->vma->vm_start);
-		mend = min(end, map->vma->vm_end);
-		pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
-				map->index, map->count,
-				map->vma->vm_start, map->vma->vm_end,
-				start, end, mstart, mend);
-		err = unmap_grant_pages(map,
-					(mstart - map->vma->vm_start) >> PAGE_SHIFT,
-					(mend - mstart) >> PAGE_SHIFT);
-		WARN_ON(err);
+		unmap_if_in_range(map, start, end);
+	}
+	list_for_each_entry(map, &priv->freeable_maps, next) {
+		unmap_if_in_range(map, start, end);
 	}
 	spin_unlock(&priv->lock);
 }
···
 
 	spin_lock(&priv->lock);
 	list_for_each_entry(map, &priv->maps, next) {
+		if (!map->vma)
+			continue;
+		pr_debug("map %d+%d (%lx %lx)\n",
+				map->index, map->count,
+				map->vma->vm_start, map->vma->vm_end);
+		err = unmap_grant_pages(map, /* offset */ 0, map->count);
+		WARN_ON(err);
+	}
+	list_for_each_entry(map, &priv->freeable_maps, next) {
 		if (!map->vma)
 			continue;
 		pr_debug("map %d+%d (%lx %lx)\n",
···
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&priv->maps);
+	INIT_LIST_HEAD(&priv->freeable_maps);
 	spin_lock_init(&priv->lock);
 
 	if (use_ptemod) {
···
 	while (!list_empty(&priv->maps)) {
 		map = list_entry(priv->maps.next, struct grant_map, next);
 		list_del(&map->next);
-		gntdev_put_map(map);
+		gntdev_put_map(NULL /* already removed */, map);
 	}
+	WARN_ON(!list_empty(&priv->freeable_maps));
 
 	if (use_ptemod)
 		mmu_notifier_unregister(&priv->mn, priv->mm);
···
 
 	if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
 		pr_debug("can't map: over limit\n");
-		gntdev_put_map(map);
+		gntdev_put_map(NULL, map);
 		return err;
 	}
 
 	if (copy_from_user(map->grants, &u->refs,
 			   sizeof(map->grants[0]) * op.count) != 0) {
-		gntdev_put_map(map);
-		return err;
+		gntdev_put_map(NULL, map);
+		return -EFAULT;
 	}
 
 	spin_lock(&priv->lock);
···
 	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
 	if (map) {
 		list_del(&map->next);
+		if (populate_freeable_maps)
+			list_add_tail(&map->next, &priv->freeable_maps);
 		err = 0;
 	}
 	spin_unlock(&priv->lock);
 	if (map)
-		gntdev_put_map(map);
+		gntdev_put_map(priv, map);
 	return err;
 }
···
 	struct ioctl_gntdev_get_offset_for_vaddr op;
 	struct vm_area_struct *vma;
 	struct grant_map *map;
+	int rv = -EINVAL;
 
 	if (copy_from_user(&op, u, sizeof(op)) != 0)
 		return -EFAULT;
 	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
 
+	down_read(&current->mm->mmap_sem);
 	vma = find_vma(current->mm, op.vaddr);
 	if (!vma || vma->vm_ops != &gntdev_vmops)
-		return -EINVAL;
+		goto out_unlock;
 
 	map = vma->vm_private_data;
 	if (!map)
-		return -EINVAL;
+		goto out_unlock;
 
 	op.offset = map->index << PAGE_SHIFT;
 	op.count = map->count;
+	rv = 0;
 
-	if (copy_to_user(u, &op, sizeof(op)) != 0)
+ out_unlock:
+	up_read(&current->mm->mmap_sem);
+
+	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
 		return -EFAULT;
-	return 0;
+	return rv;
 }
 
 static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
···
 out_put_map:
 	if (use_ptemod)
 		map->vma = NULL;
-	gntdev_put_map(map);
+	gntdev_put_map(priv, map);
 	return err;
 }
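The gntdev_vma_close() change above clears map->vma under priv->lock, the same lock the mmu-notifier walkers take, so a concurrent walker sees either the intact vma or NULL, never a half-torn-down one. A userspace pthread analogue of that discipline (types, names, and iteration counts are illustrative, not the kernel's):

#include <pthread.h>
#include <stdio.h>

struct vma { unsigned long start, end; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct vma *map_vma;		/* plays the role of map->vma */

/* The "mmu notifier": walks the map and touches the vma if present. */
static void *walker(void *arg)
{
	unsigned long sum = 0;
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&lock);
		if (map_vma)		/* NULL check and use under one hold */
			sum += map_vma->end - map_vma->start;
		pthread_mutex_unlock(&lock);
	}
	return (void *)sum;
}

int main(void)
{
	struct vma v = { 0x1000, 0x2000 };
	pthread_t t;

	map_vma = &v;
	pthread_create(&t, NULL, walker, NULL);

	/* vma_close: detach under the lock; only then is freeing safe */
	pthread_mutex_lock(&lock);
	map_vma = NULL;
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	puts("walker never dereferenced a detached vma");
	return 0;
}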
+30 -20
drivers/xen/grant-table.c
···
 /* External tools reserve first few grant table entries. */
 #define NR_RESERVED_ENTRIES 8
 #define GNTTAB_LIST_END 0xffffffff
-#define GREFS_PER_GRANT_FRAME \
-	(grant_table_version == 1 ?                      \
-	 (PAGE_SIZE / sizeof(struct grant_entry_v1)) :   \
-	 (PAGE_SIZE / sizeof(union grant_entry_v2)))
 
 static grant_ref_t **gnttab_list;
 static unsigned int nr_grant_frames;
···
 static grant_status_t *grstatus;
 
 static int grant_table_version;
+static int grefs_per_grant_frame;
 
 static struct gnttab_free_callback *gnttab_free_callback_list;
···
 	unsigned int new_nr_grant_frames, extra_entries, i;
 	unsigned int nr_glist_frames, new_nr_glist_frames;
 
-	new_nr_grant_frames = nr_grant_frames + more_frames;
-	extra_entries = more_frames * GREFS_PER_GRANT_FRAME;
+	BUG_ON(grefs_per_grant_frame == 0);
 
-	nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
+	new_nr_grant_frames = nr_grant_frames + more_frames;
+	extra_entries = more_frames * grefs_per_grant_frame;
+
+	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
 	new_nr_glist_frames =
-		(new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
+		(new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
 	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
 		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
 		if (!gnttab_list[i])
···
 	}
 
-	for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
-	     i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
+	for (i = grefs_per_grant_frame * nr_grant_frames;
+	     i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
 		gnttab_entry(i) = i + 1;
 
 	gnttab_entry(i) = gnttab_free_head;
-	gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
+	gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
 	gnttab_free_count += extra_entries;
 
 	nr_grant_frames = new_nr_grant_frames;
···
 
 static unsigned nr_status_frames(unsigned nr_grant_frames)
 {
-	return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP;
+	BUG_ON(grefs_per_grant_frame == 0);
+	return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP;
 }
 
 static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
···
 	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
 	if (rc == 0 && gsv.version == 2) {
 		grant_table_version = 2;
+		grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2);
 		gnttab_interface = &gnttab_v2_ops;
 	} else if (grant_table_version == 2) {
 		/*
···
 		panic("we need grant tables version 2, but only version 1 is available");
 	} else {
 		grant_table_version = 1;
+		grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
 		gnttab_interface = &gnttab_v1_ops;
 	}
 	printk(KERN_INFO "Grant tables using version %d layout.\n",
 		grant_table_version);
 }
 
-int gnttab_resume(void)
+static int gnttab_setup(void)
 {
 	unsigned int max_nr_gframes;
 
-	gnttab_request_version();
 	max_nr_gframes = gnttab_max_grant_frames();
 	if (max_nr_gframes < nr_grant_frames)
 		return -ENOSYS;
···
 	return 0;
 }
 
+int gnttab_resume(void)
+{
+	gnttab_request_version();
+	return gnttab_setup();
+}
+
 int gnttab_suspend(void)
 {
 	gnttab_interface->unmap_frames();
···
 	int rc;
 	unsigned int cur, extra;
 
+	BUG_ON(grefs_per_grant_frame == 0);
 	cur = nr_grant_frames;
-	extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
-		 GREFS_PER_GRANT_FRAME);
+	extra = ((req_entries + (grefs_per_grant_frame-1)) /
+		 grefs_per_grant_frame);
 	if (cur + extra > gnttab_max_grant_frames())
 		return -ENOSPC;
···
 	unsigned int nr_init_grefs;
 	int ret;
 
+	gnttab_request_version();
 	nr_grant_frames = 1;
 	boot_max_nr_grant_frames = __max_nr_grant_frames();
 
 	/* Determine the maximum number of frames required for the
 	 * grant reference free list on the current hypervisor.
 	 */
+	BUG_ON(grefs_per_grant_frame == 0);
 	max_nr_glist_frames = (boot_max_nr_grant_frames *
-			       GREFS_PER_GRANT_FRAME / RPP);
+			       grefs_per_grant_frame / RPP);
 
 	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
 			      GFP_KERNEL);
 	if (gnttab_list == NULL)
 		return -ENOMEM;
 
-	nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
+	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
 	for (i = 0; i < nr_glist_frames; i++) {
 		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
 		if (gnttab_list[i] == NULL) {
···
 	}
 
-	if (gnttab_resume() < 0) {
+	if (gnttab_setup() < 0) {
 		ret = -ENODEV;
 		goto ini_nomem;
 	}
 
-	nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;
+	nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;
 
 	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
 		gnttab_entry(i) = i + 1;
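The removed GREFS_PER_GRANT_FRAME macro tested grant_table_version at every use; before gnttab_request_version() had run, the version was still 0, so the "== 1" check fell through to the v2 entry size even on v1 hypervisors. The fix computes grefs_per_grant_frame once at negotiation time and BUG_ON()s any earlier use. A standalone sketch of the two sizings (local stand-in structs sized like the Xen ABI's 8-byte v1 and 16-byte v2 entries; 4 KiB x86 pages assumed):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

struct grant_entry_v1 {
	uint16_t flags;
	uint16_t domid;
	uint32_t frame;
};				/* 8 bytes */

union grant_entry_v2 {
	struct { uint16_t flags; uint16_t domid; } hdr;
	uint64_t pad[2];
};				/* 16 bytes */

int main(void)
{
	int grefs_v1 = (int)(PAGE_SIZE / sizeof(struct grant_entry_v1));
	int grefs_v2 = (int)(PAGE_SIZE / sizeof(union grant_entry_v2));

	/* v1: 512 grefs/frame, v2: 256 grefs/frame -- picking the v2 value
	 * for a v1 table undersizes the free list, hence the bug. */
	printf("v1: %d grefs/frame, v2: %d grefs/frame\n", grefs_v1, grefs_v2);
	return 0;
}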
+47 -42
drivers/xen/privcmd.c
···
 	LIST_HEAD(pagelist);
 	struct mmap_mfn_state state;
 
-	if (!xen_initial_domain())
-		return -EPERM;
-
 	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return -ENOSYS;
···
 	 * -ENOENT if at least 1 -ENOENT has happened.
 	 */
 	int global_error;
-	/* An array for individual errors */
-	int *err;
+	int version;
 
 	/* User-space mfn array to store errors in the second pass for V1. */
 	xen_pfn_t __user *user_mfn;
+	/* User-space int array to store errors in the second pass for V2. */
+	int __user *user_err;
 };
 
 /* auto translated dom0 note: if domU being created is PV, then mfn is
···
 					 &cur_page);
 
 	/* Store error code for second pass. */
-	*(st->err++) = ret;
+	if (st->version == 1) {
+		if (ret < 0) {
+			/*
+			 * V1 encodes the error codes in the 32bit top nibble of the
+			 * mfn (with its known limitations vis-a-vis 64 bit callers).
+			 */
+			*mfnp |= (ret == -ENOENT) ?
+				PRIVCMD_MMAPBATCH_PAGED_ERROR :
+				PRIVCMD_MMAPBATCH_MFN_ERROR;
+		}
+	} else { /* st->version == 2 */
+		*((int *) mfnp) = ret;
+	}
 
 	/* And see if it affects the global_error. */
 	if (ret < 0) {
···
 	return 0;
 }
 
-static int mmap_return_errors_v1(void *data, void *state)
+static int mmap_return_errors(void *data, void *state)
 {
-	xen_pfn_t *mfnp = data;
 	struct mmap_batch_state *st = state;
-	int err = *(st->err++);
 
-	/*
-	 * V1 encodes the error codes in the 32bit top nibble of the
-	 * mfn (with its known limitations vis-a-vis 64 bit callers).
-	 */
-	*mfnp |= (err == -ENOENT) ?
-		PRIVCMD_MMAPBATCH_PAGED_ERROR :
-		PRIVCMD_MMAPBATCH_MFN_ERROR;
-	return __put_user(*mfnp, st->user_mfn++);
+	if (st->version == 1) {
+		xen_pfn_t mfnp = *((xen_pfn_t *) data);
+		if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR)
+			return __put_user(mfnp, st->user_mfn++);
+		else
+			st->user_mfn++;
+	} else { /* st->version == 2 */
+		int err = *((int *) data);
+		if (err)
+			return __put_user(err, st->user_err++);
+		else
+			st->user_err++;
+	}
+
+	return 0;
 }
 
 /* Allocate pfns that are then mapped with gmfns from foreign domid. Update
···
 	struct vm_area_struct *vma;
 	unsigned long nr_pages;
 	LIST_HEAD(pagelist);
-	int *err_array = NULL;
 	struct mmap_batch_state state;
-
-	if (!xen_initial_domain())
-		return -EPERM;
 
 	switch (version) {
 	case 1:
···
 		goto out;
 	}
 
-	err_array = kcalloc(m.num, sizeof(int), GFP_KERNEL);
-	if (err_array == NULL) {
-		ret = -ENOMEM;
-		goto out;
+	if (version == 2) {
+		/* Zero error array now to only copy back actual errors. */
+		if (clear_user(m.err, sizeof(int) * m.num)) {
+			ret = -EFAULT;
+			goto out;
+		}
 	}
 
 	down_write(&mm->mmap_sem);
···
 	state.va = m.addr;
 	state.index = 0;
 	state.global_error = 0;
-	state.err = err_array;
+	state.version = version;
 
 	/* mmap_batch_fn guarantees ret == 0 */
 	BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
···
 
 	up_write(&mm->mmap_sem);
 
-	if (version == 1) {
-		if (state.global_error) {
-			/* Write back errors in second pass. */
-			state.user_mfn = (xen_pfn_t *)m.arr;
-			state.err = err_array;
-			ret = traverse_pages(m.num, sizeof(xen_pfn_t),
-					     &pagelist, mmap_return_errors_v1, &state);
-		} else
-			ret = 0;
-
-	} else if (version == 2) {
-		ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
-		if (ret)
-			ret = -EFAULT;
-	}
+	if (state.global_error) {
+		/* Write back errors in second pass. */
+		state.user_mfn = (xen_pfn_t *)m.arr;
+		state.user_err = m.err;
+		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
+				     &pagelist, mmap_return_errors, &state);
+	} else
+		ret = 0;
 
 	/* If we have not had any EFAULT-like global errors then set the global
 	 * error to -ENOENT if necessary. */
···
 		ret = -ENOENT;
 
 out:
-	kfree(err_array);
 	free_page_list(&pagelist);
 
 	return ret;
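For V1 callers, errors are now folded into the top nibble of each 32-bit mfn during the first pass, so the second pass only copies back entries whose error bits are set and no O(n) kernel-side error array is needed; V2 callers get plain errno values in a separate user array. A sketch of the V1 encoding (constants mirror privcmd.h of that era; the sample mfn values are invented):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t xen_pfn_t;	/* 32-bit frame numbers; hence the "known
				   limitations vis-a-vis 64 bit callers" */

#define PRIVCMD_MMAPBATCH_MFN_ERROR	0xf0000000U
#define PRIVCMD_MMAPBATCH_PAGED_ERROR	0x80000000U

/* First pass: fold the mapping result into the mfn itself. */
static void store_error(xen_pfn_t *mfnp, int ret)
{
	if (ret < 0)
		*mfnp |= (ret == -ENOENT) ?
			PRIVCMD_MMAPBATCH_PAGED_ERROR :
			PRIVCMD_MMAPBATCH_MFN_ERROR;
}

int main(void)
{
	xen_pfn_t ok = 0x1234, paged = 0x5678, bad = 0x9abc;

	store_error(&ok, 0);
	store_error(&paged, -ENOENT);	/* page is paged out */
	store_error(&bad, -EINVAL);	/* mapping failed outright */

	printf("ok    0x%08x\npaged 0x%08x\nbad   0x%08x\n",
	       (unsigned)ok, (unsigned)paged, (unsigned)bad);

	/* Second pass: only entries with error bits set are written back. */
	printf("paged needs write-back: %s\n",
	       (paged & PRIVCMD_MMAPBATCH_MFN_ERROR) ? "yes" : "no");
	return 0;
}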
+1 -1
drivers/xen/xen-pciback/pciback.h
···
 static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
 					     struct pci_dev *dev)
 {
-	if (xen_pcibk_backend && xen_pcibk_backend->free)
+	if (xen_pcibk_backend && xen_pcibk_backend->release)
 		return xen_pcibk_backend->release(pdev, dev);
 }
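The pciback fix in one line: the NULL guard tested one function pointer (->free) while the call went through another (->release), so a backend providing one but not the other could be called through a NULL pointer. A minimal reproduction of the mismatched-guard pattern, with made-up backend structs:

#include <stdio.h>

struct backend {
	void (*free)(void);
	void (*release)(void);
};

static void do_free(void) { puts("freed"); }

int main(void)
{
	/* A backend that implements free but not release: the mismatched
	 * guard would pass and the call would dereference NULL. */
	struct backend b = { .free = do_free, .release = NULL };

	if (b.free) {
		/* guard checks the wrong member: calling b.release() here
		 * would crash */
	}

	if (b.release)		/* fixed: the guard matches the call */
		b.release();
	else
		puts("release not implemented; safely skipped");
	return 0;
}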