Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'ebt_config_compat_v4' of git://git.breakpoint.cc/fw/nf-next-2.6

+1124 -119
+16
net/bridge/netfilter/ebt_limit.c
··· 84 84 return true; 85 85 } 86 86 87 + 88 + #ifdef CONFIG_COMPAT 89 + /* 90 + * no conversion function needed -- 91 + * only avg/burst have meaningful values in userspace. 92 + */ 93 + struct ebt_compat_limit_info { 94 + compat_uint_t avg, burst; 95 + compat_ulong_t prev; 96 + compat_uint_t credit, credit_cap, cost; 97 + }; 98 + #endif 99 + 87 100 static struct xt_match ebt_limit_mt_reg __read_mostly = { 88 101 .name = "limit", 89 102 .revision = 0, ··· 104 91 .match = ebt_limit_mt, 105 92 .checkentry = ebt_limit_mt_check, 106 93 .matchsize = sizeof(struct ebt_limit_info), 94 + #ifdef CONFIG_COMPAT 95 + .compatsize = sizeof(struct ebt_compat_limit_info), 96 + #endif 107 97 .me = THIS_MODULE, 108 98 }; 109 99
+31
net/bridge/netfilter/ebt_mark.c
··· 52 52 return false; 53 53 return true; 54 54 } 55 + #ifdef CONFIG_COMPAT 56 + struct compat_ebt_mark_t_info { 57 + compat_ulong_t mark; 58 + compat_uint_t target; 59 + }; 60 + 61 + static void mark_tg_compat_from_user(void *dst, const void *src) 62 + { 63 + const struct compat_ebt_mark_t_info *user = src; 64 + struct ebt_mark_t_info *kern = dst; 65 + 66 + kern->mark = user->mark; 67 + kern->target = user->target; 68 + } 69 + 70 + static int mark_tg_compat_to_user(void __user *dst, const void *src) 71 + { 72 + struct compat_ebt_mark_t_info __user *user = dst; 73 + const struct ebt_mark_t_info *kern = src; 74 + 75 + if (put_user(kern->mark, &user->mark) || 76 + put_user(kern->target, &user->target)) 77 + return -EFAULT; 78 + return 0; 79 + } 80 + #endif 55 81 56 82 static struct xt_target ebt_mark_tg_reg __read_mostly = { 57 83 .name = "mark", ··· 86 60 .target = ebt_mark_tg, 87 61 .checkentry = ebt_mark_tg_check, 88 62 .targetsize = sizeof(struct ebt_mark_t_info), 63 + #ifdef CONFIG_COMPAT 64 + .compatsize = sizeof(struct compat_ebt_mark_t_info), 65 + .compat_from_user = mark_tg_compat_from_user, 66 + .compat_to_user = mark_tg_compat_to_user, 67 + #endif 89 68 .me = THIS_MODULE, 90 69 }; 91 70
+37
net/bridge/netfilter/ebt_mark_m.c
··· 35 35 return true; 36 36 } 37 37 38 + 39 + #ifdef CONFIG_COMPAT 40 + struct compat_ebt_mark_m_info { 41 + compat_ulong_t mark, mask; 42 + uint8_t invert, bitmask; 43 + }; 44 + 45 + static void mark_mt_compat_from_user(void *dst, const void *src) 46 + { 47 + const struct compat_ebt_mark_m_info *user = src; 48 + struct ebt_mark_m_info *kern = dst; 49 + 50 + kern->mark = user->mark; 51 + kern->mask = user->mask; 52 + kern->invert = user->invert; 53 + kern->bitmask = user->bitmask; 54 + } 55 + 56 + static int mark_mt_compat_to_user(void __user *dst, const void *src) 57 + { 58 + struct compat_ebt_mark_m_info __user *user = dst; 59 + const struct ebt_mark_m_info *kern = src; 60 + 61 + if (put_user(kern->mark, &user->mark) || 62 + put_user(kern->mask, &user->mask) || 63 + put_user(kern->invert, &user->invert) || 64 + put_user(kern->bitmask, &user->bitmask)) 65 + return -EFAULT; 66 + return 0; 67 + } 68 + #endif 69 + 38 70 static struct xt_match ebt_mark_mt_reg __read_mostly = { 39 71 .name = "mark_m", 40 72 .revision = 0, ··· 74 42 .match = ebt_mark_mt, 75 43 .checkentry = ebt_mark_mt_check, 76 44 .matchsize = sizeof(struct ebt_mark_m_info), 45 + #ifdef CONFIG_COMPAT 46 + .compatsize = sizeof(struct compat_ebt_mark_m_info), 47 + .compat_from_user = mark_mt_compat_from_user, 48 + .compat_to_user = mark_mt_compat_to_user, 49 + #endif 77 50 .me = THIS_MODULE, 78 51 }; 79 52
+1040 -119
net/bridge/netfilter/ebtables.c
··· 33 33 #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\ 34 34 "report to author: "format, ## args) 35 35 /* #define BUGPRINT(format, args...) */ 36 - #define MEMPRINT(format, args...) printk("kernel msg: ebtables "\ 37 - ": out of memory: "format, ## args) 38 - /* #define MEMPRINT(format, args...) */ 39 - 40 - 41 36 42 37 /* 43 38 * Each cpu has its own set of counters, so there is no need for write_lock in ··· 51 56 52 57 static DEFINE_MUTEX(ebt_mutex); 53 58 59 + #ifdef CONFIG_COMPAT 60 + static void ebt_standard_compat_from_user(void *dst, const void *src) 61 + { 62 + int v = *(compat_int_t *)src; 63 + 64 + if (v >= 0) 65 + v += xt_compat_calc_jump(NFPROTO_BRIDGE, v); 66 + memcpy(dst, &v, sizeof(v)); 67 + } 68 + 69 + static int ebt_standard_compat_to_user(void __user *dst, const void *src) 70 + { 71 + compat_int_t cv = *(int *)src; 72 + 73 + if (cv >= 0) 74 + cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv); 75 + return copy_to_user(dst, &cv, sizeof(cv)) ? 
-EFAULT : 0; 76 + } 77 + #endif 78 + 79 + 54 80 static struct xt_target ebt_standard_target = { 55 81 .name = "standard", 56 82 .revision = 0, 57 83 .family = NFPROTO_BRIDGE, 58 84 .targetsize = sizeof(int), 85 + #ifdef CONFIG_COMPAT 86 + .compatsize = sizeof(compat_int_t), 87 + .compat_from_user = ebt_standard_compat_from_user, 88 + .compat_to_user = ebt_standard_compat_to_user, 89 + #endif 59 90 }; 60 91 61 92 static inline int ··· 980 959 } 981 960 } 982 961 983 - /* replace the table */ 984 - static int do_replace(struct net *net, const void __user *user, 985 - unsigned int len) 962 + static int do_replace_finish(struct net *net, struct ebt_replace *repl, 963 + struct ebt_table_info *newinfo) 986 964 { 987 - int ret, i, countersize; 988 - struct ebt_table_info *newinfo; 989 - struct ebt_replace tmp; 990 - struct ebt_table *t; 965 + int ret, i; 991 966 struct ebt_counter *counterstmp = NULL; 992 967 /* used to be able to unlock earlier */ 993 968 struct ebt_table_info *table; 994 - 995 - if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 996 - return -EFAULT; 997 - 998 - if (len != sizeof(tmp) + tmp.entries_size) { 999 - BUGPRINT("Wrong len argument\n"); 1000 - return -EINVAL; 1001 - } 1002 - 1003 - if (tmp.entries_size == 0) { 1004 - BUGPRINT("Entries_size never zero\n"); 1005 - return -EINVAL; 1006 - } 1007 - /* overflow check */ 1008 - if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS - 1009 - SMP_CACHE_BYTES) / sizeof(struct ebt_counter)) 1010 - return -ENOMEM; 1011 - if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) 1012 - return -ENOMEM; 1013 - 1014 - countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; 1015 - newinfo = vmalloc(sizeof(*newinfo) + countersize); 1016 - if (!newinfo) 1017 - return -ENOMEM; 1018 - 1019 - if (countersize) 1020 - memset(newinfo->counters, 0, countersize); 1021 - 1022 - newinfo->entries = vmalloc(tmp.entries_size); 1023 - if (!newinfo->entries) { 1024 - ret = -ENOMEM; 1025 - goto 
free_newinfo; 1026 - } 1027 - if (copy_from_user( 1028 - newinfo->entries, tmp.entries, tmp.entries_size) != 0) { 1029 - BUGPRINT("Couldn't copy entries from userspace\n"); 1030 - ret = -EFAULT; 1031 - goto free_entries; 1032 - } 969 + struct ebt_table *t; 1033 970 1034 971 /* the user wants counters back 1035 972 the check on the size is done later, when we have the lock */ 1036 - if (tmp.num_counters) { 1037 - counterstmp = vmalloc(tmp.num_counters * sizeof(*counterstmp)); 1038 - if (!counterstmp) { 1039 - ret = -ENOMEM; 1040 - goto free_entries; 1041 - } 973 + if (repl->num_counters) { 974 + unsigned long size = repl->num_counters * sizeof(*counterstmp); 975 + counterstmp = vmalloc(size); 976 + if (!counterstmp) 977 + return -ENOMEM; 1042 978 } 1043 - else 1044 - counterstmp = NULL; 1045 979 1046 - /* this can get initialized by translate_table() */ 1047 980 newinfo->chainstack = NULL; 1048 - ret = ebt_verify_pointers(&tmp, newinfo); 981 + ret = ebt_verify_pointers(repl, newinfo); 1049 982 if (ret != 0) 1050 983 goto free_counterstmp; 1051 984 1052 - ret = translate_table(net, tmp.name, newinfo); 985 + ret = translate_table(net, repl->name, newinfo); 1053 986 1054 987 if (ret != 0) 1055 988 goto free_counterstmp; 1056 989 1057 - t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); 990 + t = find_table_lock(net, repl->name, &ret, &ebt_mutex); 1058 991 if (!t) { 1059 992 ret = -ENOENT; 1060 993 goto free_iterate; 1061 994 } 1062 995 1063 996 /* the table doesn't like it */ 1064 - if (t->check && (ret = t->check(newinfo, tmp.valid_hooks))) 997 + if (t->check && (ret = t->check(newinfo, repl->valid_hooks))) 1065 998 goto free_unlock; 1066 999 1067 - if (tmp.num_counters && tmp.num_counters != t->private->nentries) { 1000 + if (repl->num_counters && repl->num_counters != t->private->nentries) { 1068 1001 BUGPRINT("Wrong nr. 
of counters requested\n"); 1069 1002 ret = -EINVAL; 1070 1003 goto free_unlock; ··· 1034 1059 module_put(t->me); 1035 1060 /* we need an atomic snapshot of the counters */ 1036 1061 write_lock_bh(&t->lock); 1037 - if (tmp.num_counters) 1062 + if (repl->num_counters) 1038 1063 get_counters(t->private->counters, counterstmp, 1039 1064 t->private->nentries); 1040 1065 ··· 1045 1070 allocation. Only reason why this is done is because this way the lock 1046 1071 is held only once, while this doesn't bring the kernel into a 1047 1072 dangerous state. */ 1048 - if (tmp.num_counters && 1049 - copy_to_user(tmp.counters, counterstmp, 1050 - tmp.num_counters * sizeof(struct ebt_counter))) { 1051 - BUGPRINT("Couldn't copy counters to userspace\n"); 1073 + if (repl->num_counters && 1074 + copy_to_user(repl->counters, counterstmp, 1075 + repl->num_counters * sizeof(struct ebt_counter))) { 1052 1076 ret = -EFAULT; 1053 1077 } 1054 1078 else ··· 1081 1107 vfree(newinfo->chainstack[i]); 1082 1108 vfree(newinfo->chainstack); 1083 1109 } 1110 + return ret; 1111 + } 1112 + 1113 + /* replace the table */ 1114 + static int do_replace(struct net *net, const void __user *user, 1115 + unsigned int len) 1116 + { 1117 + int ret, countersize; 1118 + struct ebt_table_info *newinfo; 1119 + struct ebt_replace tmp; 1120 + 1121 + if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) 1122 + return -EFAULT; 1123 + 1124 + if (len != sizeof(tmp) + tmp.entries_size) { 1125 + BUGPRINT("Wrong len argument\n"); 1126 + return -EINVAL; 1127 + } 1128 + 1129 + if (tmp.entries_size == 0) { 1130 + BUGPRINT("Entries_size never zero\n"); 1131 + return -EINVAL; 1132 + } 1133 + /* overflow check */ 1134 + if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / 1135 + NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter)) 1136 + return -ENOMEM; 1137 + if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) 1138 + return -ENOMEM; 1139 + 1140 + countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; 1141 
+ newinfo = vmalloc(sizeof(*newinfo) + countersize); 1142 + if (!newinfo) 1143 + return -ENOMEM; 1144 + 1145 + if (countersize) 1146 + memset(newinfo->counters, 0, countersize); 1147 + 1148 + newinfo->entries = vmalloc(tmp.entries_size); 1149 + if (!newinfo->entries) { 1150 + ret = -ENOMEM; 1151 + goto free_newinfo; 1152 + } 1153 + if (copy_from_user( 1154 + newinfo->entries, tmp.entries, tmp.entries_size) != 0) { 1155 + BUGPRINT("Couldn't copy entries from userspace\n"); 1156 + ret = -EFAULT; 1157 + goto free_entries; 1158 + } 1159 + 1160 + ret = do_replace_finish(net, &tmp, newinfo); 1161 + if (ret == 0) 1162 + return ret; 1084 1163 free_entries: 1085 1164 vfree(newinfo->entries); 1086 1165 free_newinfo: ··· 1268 1241 } 1269 1242 1270 1243 /* userspace just supplied us with counters */ 1271 - static int update_counters(struct net *net, const void __user *user, 1272 - unsigned int len) 1244 + static int do_update_counters(struct net *net, const char *name, 1245 + struct ebt_counter __user *counters, 1246 + unsigned int num_counters, 1247 + const void __user *user, unsigned int len) 1273 1248 { 1274 1249 int i, ret; 1275 1250 struct ebt_counter *tmp; 1276 - struct ebt_replace hlp; 1277 1251 struct ebt_table *t; 1278 1252 1279 - if (copy_from_user(&hlp, user, sizeof(hlp))) 1280 - return -EFAULT; 1281 - 1282 - if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter)) 1283 - return -EINVAL; 1284 - if (hlp.num_counters == 0) 1253 + if (num_counters == 0) 1285 1254 return -EINVAL; 1286 1255 1287 - if (!(tmp = vmalloc(hlp.num_counters * sizeof(*tmp)))) { 1288 - MEMPRINT("Update_counters && nomemory\n"); 1256 + tmp = vmalloc(num_counters * sizeof(*tmp)); 1257 + if (!tmp) 1289 1258 return -ENOMEM; 1290 - } 1291 1259 1292 - t = find_table_lock(net, hlp.name, &ret, &ebt_mutex); 1260 + t = find_table_lock(net, name, &ret, &ebt_mutex); 1293 1261 if (!t) 1294 1262 goto free_tmp; 1295 1263 1296 - if (hlp.num_counters != t->private->nentries) { 1264 + if 
(num_counters != t->private->nentries) { 1297 1265 BUGPRINT("Wrong nr of counters\n"); 1298 1266 ret = -EINVAL; 1299 1267 goto unlock_mutex; 1300 1268 } 1301 1269 1302 - if ( copy_from_user(tmp, hlp.counters, 1303 - hlp.num_counters * sizeof(struct ebt_counter)) ) { 1304 - BUGPRINT("Updata_counters && !cfu\n"); 1270 + if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) { 1305 1271 ret = -EFAULT; 1306 1272 goto unlock_mutex; 1307 1273 } ··· 1303 1283 write_lock_bh(&t->lock); 1304 1284 1305 1285 /* we add to the counters of the first cpu */ 1306 - for (i = 0; i < hlp.num_counters; i++) { 1286 + for (i = 0; i < num_counters; i++) { 1307 1287 t->private->counters[i].pcnt += tmp[i].pcnt; 1308 1288 t->private->counters[i].bcnt += tmp[i].bcnt; 1309 1289 } ··· 1315 1295 free_tmp: 1316 1296 vfree(tmp); 1317 1297 return ret; 1298 + } 1299 + 1300 + static int update_counters(struct net *net, const void __user *user, 1301 + unsigned int len) 1302 + { 1303 + struct ebt_replace hlp; 1304 + 1305 + if (copy_from_user(&hlp, user, sizeof(hlp))) 1306 + return -EFAULT; 1307 + 1308 + if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter)) 1309 + return -EINVAL; 1310 + 1311 + return do_update_counters(net, hlp.name, hlp.counters, 1312 + hlp.num_counters, user, len); 1318 1313 } 1319 1314 1320 1315 static inline int ebt_make_matchname(const struct ebt_entry_match *m, ··· 1374 1339 return 0; 1375 1340 } 1376 1341 1342 + static int copy_counters_to_user(struct ebt_table *t, 1343 + const struct ebt_counter *oldcounters, 1344 + void __user *user, unsigned int num_counters, 1345 + unsigned int nentries) 1346 + { 1347 + struct ebt_counter *counterstmp; 1348 + int ret = 0; 1349 + 1350 + /* userspace might not need the counters */ 1351 + if (num_counters == 0) 1352 + return 0; 1353 + 1354 + if (num_counters != nentries) { 1355 + BUGPRINT("Num_counters wrong\n"); 1356 + return -EINVAL; 1357 + } 1358 + 1359 + counterstmp = vmalloc(nentries * sizeof(*counterstmp)); 
1360 + if (!counterstmp) 1361 + return -ENOMEM; 1362 + 1363 + write_lock_bh(&t->lock); 1364 + get_counters(oldcounters, counterstmp, nentries); 1365 + write_unlock_bh(&t->lock); 1366 + 1367 + if (copy_to_user(user, counterstmp, 1368 + nentries * sizeof(struct ebt_counter))) 1369 + ret = -EFAULT; 1370 + vfree(counterstmp); 1371 + return ret; 1372 + } 1373 + 1377 1374 /* called with ebt_mutex locked */ 1378 1375 static int copy_everything_to_user(struct ebt_table *t, void __user *user, 1379 1376 const int *len, int cmd) 1380 1377 { 1381 1378 struct ebt_replace tmp; 1382 - struct ebt_counter *counterstmp; 1383 1379 const struct ebt_counter *oldcounters; 1384 1380 unsigned int entries_size, nentries; 1381 + int ret; 1385 1382 char *entries; 1386 1383 1387 1384 if (cmd == EBT_SO_GET_ENTRIES) { ··· 1428 1361 oldcounters = t->table->counters; 1429 1362 } 1430 1363 1431 - if (copy_from_user(&tmp, user, sizeof(tmp))) { 1432 - BUGPRINT("Cfu didn't work\n"); 1364 + if (copy_from_user(&tmp, user, sizeof(tmp))) 1433 1365 return -EFAULT; 1434 - } 1435 1366 1436 1367 if (*len != sizeof(struct ebt_replace) + entries_size + 1437 - (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0)) { 1438 - BUGPRINT("Wrong size\n"); 1368 + (tmp.num_counters? 
nentries * sizeof(struct ebt_counter): 0)) 1439 1369 return -EINVAL; 1440 - } 1441 1370 1442 1371 if (tmp.nentries != nentries) { 1443 1372 BUGPRINT("Nentries wrong\n"); ··· 1445 1382 return -EINVAL; 1446 1383 } 1447 1384 1448 - /* userspace might not need the counters */ 1449 - if (tmp.num_counters) { 1450 - if (tmp.num_counters != nentries) { 1451 - BUGPRINT("Num_counters wrong\n"); 1452 - return -EINVAL; 1453 - } 1454 - counterstmp = vmalloc(nentries * sizeof(*counterstmp)); 1455 - if (!counterstmp) { 1456 - MEMPRINT("Couldn't copy counters, out of memory\n"); 1457 - return -ENOMEM; 1458 - } 1459 - write_lock_bh(&t->lock); 1460 - get_counters(oldcounters, counterstmp, nentries); 1461 - write_unlock_bh(&t->lock); 1462 - 1463 - if (copy_to_user(tmp.counters, counterstmp, 1464 - nentries * sizeof(struct ebt_counter))) { 1465 - BUGPRINT("Couldn't copy counters to userspace\n"); 1466 - vfree(counterstmp); 1467 - return -EFAULT; 1468 - } 1469 - vfree(counterstmp); 1470 - } 1385 + ret = copy_counters_to_user(t, oldcounters, tmp.counters, 1386 + tmp.num_counters, nentries); 1387 + if (ret) 1388 + return ret; 1471 1389 1472 1390 if (copy_to_user(tmp.entries, entries, entries_size)) { 1473 1391 BUGPRINT("Couldn't copy entries to userspace\n"); ··· 1476 1432 break; 1477 1433 default: 1478 1434 ret = -EINVAL; 1479 - } 1435 + } 1480 1436 return ret; 1481 1437 } 1482 1438 ··· 1536 1492 return ret; 1537 1493 } 1538 1494 1495 + #ifdef CONFIG_COMPAT 1496 + /* 32 bit-userspace compatibility definitions. */ 1497 + struct compat_ebt_replace { 1498 + char name[EBT_TABLE_MAXNAMELEN]; 1499 + compat_uint_t valid_hooks; 1500 + compat_uint_t nentries; 1501 + compat_uint_t entries_size; 1502 + /* start of the chains */ 1503 + compat_uptr_t hook_entry[NF_BR_NUMHOOKS]; 1504 + /* nr of counters userspace expects back */ 1505 + compat_uint_t num_counters; 1506 + /* where the kernel will put the old counters. 
*/ 1507 + compat_uptr_t counters; 1508 + compat_uptr_t entries; 1509 + }; 1510 + 1511 + /* struct ebt_entry_match, _target and _watcher have same layout */ 1512 + struct compat_ebt_entry_mwt { 1513 + union { 1514 + char name[EBT_FUNCTION_MAXNAMELEN]; 1515 + compat_uptr_t ptr; 1516 + } u; 1517 + compat_uint_t match_size; 1518 + compat_uint_t data[0]; 1519 + }; 1520 + 1521 + /* account for possible padding between match_size and ->data */ 1522 + static int ebt_compat_entry_padsize(void) 1523 + { 1524 + BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) < 1525 + COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt))); 1526 + return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) - 1527 + COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)); 1528 + } 1529 + 1530 + static int ebt_compat_match_offset(const struct xt_match *match, 1531 + unsigned int userlen) 1532 + { 1533 + /* 1534 + * ebt_among needs special handling. The kernel .matchsize is 1535 + * set to -1 at registration time; at runtime an EBT_ALIGN()ed 1536 + * value is expected. 1537 + * Example: userspace sends 4500, ebt_among.c wants 4504. 
1538 + */ 1539 + if (unlikely(match->matchsize == -1)) 1540 + return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen); 1541 + return xt_compat_match_offset(match); 1542 + } 1543 + 1544 + static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr, 1545 + unsigned int *size) 1546 + { 1547 + const struct xt_match *match = m->u.match; 1548 + struct compat_ebt_entry_mwt __user *cm = *dstptr; 1549 + int off = ebt_compat_match_offset(match, m->match_size); 1550 + compat_uint_t msize = m->match_size - off; 1551 + 1552 + BUG_ON(off >= m->match_size); 1553 + 1554 + if (copy_to_user(cm->u.name, match->name, 1555 + strlen(match->name) + 1) || put_user(msize, &cm->match_size)) 1556 + return -EFAULT; 1557 + 1558 + if (match->compat_to_user) { 1559 + if (match->compat_to_user(cm->data, m->data)) 1560 + return -EFAULT; 1561 + } else if (copy_to_user(cm->data, m->data, msize)) 1562 + return -EFAULT; 1563 + 1564 + *size -= ebt_compat_entry_padsize() + off; 1565 + *dstptr = cm->data; 1566 + *dstptr += msize; 1567 + return 0; 1568 + } 1569 + 1570 + static int compat_target_to_user(struct ebt_entry_target *t, 1571 + void __user **dstptr, 1572 + unsigned int *size) 1573 + { 1574 + const struct xt_target *target = t->u.target; 1575 + struct compat_ebt_entry_mwt __user *cm = *dstptr; 1576 + int off = xt_compat_target_offset(target); 1577 + compat_uint_t tsize = t->target_size - off; 1578 + 1579 + BUG_ON(off >= t->target_size); 1580 + 1581 + if (copy_to_user(cm->u.name, target->name, 1582 + strlen(target->name) + 1) || put_user(tsize, &cm->match_size)) 1583 + return -EFAULT; 1584 + 1585 + if (target->compat_to_user) { 1586 + if (target->compat_to_user(cm->data, t->data)) 1587 + return -EFAULT; 1588 + } else if (copy_to_user(cm->data, t->data, tsize)) 1589 + return -EFAULT; 1590 + 1591 + *size -= ebt_compat_entry_padsize() + off; 1592 + *dstptr = cm->data; 1593 + *dstptr += tsize; 1594 + return 0; 1595 + } 1596 + 1597 + static int compat_watcher_to_user(struct 
ebt_entry_watcher *w, 1598 + void __user **dstptr, 1599 + unsigned int *size) 1600 + { 1601 + return compat_target_to_user((struct ebt_entry_target *)w, 1602 + dstptr, size); 1603 + } 1604 + 1605 + static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr, 1606 + unsigned int *size) 1607 + { 1608 + struct ebt_entry_target *t; 1609 + struct ebt_entry __user *ce; 1610 + u32 watchers_offset, target_offset, next_offset; 1611 + compat_uint_t origsize; 1612 + int ret; 1613 + 1614 + if (e->bitmask == 0) { 1615 + if (*size < sizeof(struct ebt_entries)) 1616 + return -EINVAL; 1617 + if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries))) 1618 + return -EFAULT; 1619 + 1620 + *dstptr += sizeof(struct ebt_entries); 1621 + *size -= sizeof(struct ebt_entries); 1622 + return 0; 1623 + } 1624 + 1625 + if (*size < sizeof(*ce)) 1626 + return -EINVAL; 1627 + 1628 + ce = (struct ebt_entry __user *)*dstptr; 1629 + if (copy_to_user(ce, e, sizeof(*ce))) 1630 + return -EFAULT; 1631 + 1632 + origsize = *size; 1633 + *dstptr += sizeof(*ce); 1634 + 1635 + ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size); 1636 + if (ret) 1637 + return ret; 1638 + watchers_offset = e->watchers_offset - (origsize - *size); 1639 + 1640 + ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size); 1641 + if (ret) 1642 + return ret; 1643 + target_offset = e->target_offset - (origsize - *size); 1644 + 1645 + t = (struct ebt_entry_target *) ((char *) e + e->target_offset); 1646 + 1647 + ret = compat_target_to_user(t, dstptr, size); 1648 + if (ret) 1649 + return ret; 1650 + next_offset = e->next_offset - (origsize - *size); 1651 + 1652 + if (put_user(watchers_offset, &ce->watchers_offset) || 1653 + put_user(target_offset, &ce->target_offset) || 1654 + put_user(next_offset, &ce->next_offset)) 1655 + return -EFAULT; 1656 + 1657 + *size -= sizeof(*ce); 1658 + return 0; 1659 + } 1660 + 1661 + static int compat_calc_match(struct ebt_entry_match *m, int *off) 1662 + { 1663 + 
*off += ebt_compat_match_offset(m->u.match, m->match_size); 1664 + *off += ebt_compat_entry_padsize(); 1665 + return 0; 1666 + } 1667 + 1668 + static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off) 1669 + { 1670 + *off += xt_compat_target_offset(w->u.watcher); 1671 + *off += ebt_compat_entry_padsize(); 1672 + return 0; 1673 + } 1674 + 1675 + static int compat_calc_entry(const struct ebt_entry *e, 1676 + const struct ebt_table_info *info, 1677 + const void *base, 1678 + struct compat_ebt_replace *newinfo) 1679 + { 1680 + const struct ebt_entry_target *t; 1681 + unsigned int entry_offset; 1682 + int off, ret, i; 1683 + 1684 + if (e->bitmask == 0) 1685 + return 0; 1686 + 1687 + off = 0; 1688 + entry_offset = (void *)e - base; 1689 + 1690 + EBT_MATCH_ITERATE(e, compat_calc_match, &off); 1691 + EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off); 1692 + 1693 + t = (const struct ebt_entry_target *) ((char *) e + e->target_offset); 1694 + 1695 + off += xt_compat_target_offset(t->u.target); 1696 + off += ebt_compat_entry_padsize(); 1697 + 1698 + newinfo->entries_size -= off; 1699 + 1700 + ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off); 1701 + if (ret) 1702 + return ret; 1703 + 1704 + for (i = 0; i < NF_BR_NUMHOOKS; i++) { 1705 + const void *hookptr = info->hook_entry[i]; 1706 + if (info->hook_entry[i] && 1707 + (e < (struct ebt_entry *)(base - hookptr))) { 1708 + newinfo->hook_entry[i] -= off; 1709 + pr_debug("0x%08X -> 0x%08X\n", 1710 + newinfo->hook_entry[i] + off, 1711 + newinfo->hook_entry[i]); 1712 + } 1713 + } 1714 + 1715 + return 0; 1716 + } 1717 + 1718 + 1719 + static int compat_table_info(const struct ebt_table_info *info, 1720 + struct compat_ebt_replace *newinfo) 1721 + { 1722 + unsigned int size = info->entries_size; 1723 + const void *entries = info->entries; 1724 + 1725 + newinfo->entries_size = size; 1726 + 1727 + return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, 1728 + entries, newinfo); 1729 + } 1730 + 1731 + 
/*
 * Dump the whole table (entries and, if requested, counters) to a
 * 32-bit userspace buffer.  cmd selects the live ruleset
 * (EBT_SO_GET_ENTRIES -> t->private) or the initial one (t->table).
 * *len must exactly match the compat-size of the dump.
 */
static int compat_copy_everything_to_user(struct ebt_table *t,
					  void __user *user, int *len, int cmd)
{
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;
	int ret;
	void __user *pos;

	memset(&tinfo, 0, sizeof(tinfo));

	if (cmd == EBT_SO_GET_ENTRIES) {
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (tmp.nentries != tinfo.nentries ||
	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
		return -EINVAL;

	memcpy(&repl, &tmp, sizeof(repl));
	/* repl.entries_size is reduced to the compat size here: */
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
	else
		ret = compat_table_info(&tinfo, &repl);
	if (ret)
		return ret;

	if (*len != sizeof(tmp) + repl.entries_size +
	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
				*len, tinfo.entries_size, repl.entries_size);
		return -EINVAL;
	}

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
					tmp.num_counters, tinfo.nentries);
	if (ret)
		return ret;

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
			compat_copy_entry_to_user, &pos, &tmp.entries_size);
}

/* state shared by the two compat_copy_entries() passes: pass one runs
 * with buf_kern_start == NULL and only sizes things up, pass two
 * actually writes the translated data */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
};

/* advance the kernel write position; -EINVAL on u32 wraparound */
static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
{
	state->buf_kern_offset += sz;
	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
}

/* append sz bytes of data; in the sizing pass only the offsets move */
static int ebt_buf_add(struct ebt_entries_buf_state *state,
		       void *data, unsigned int sz)
{
	if (state->buf_kern_start == NULL)
		goto count_only;

	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);

	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);

count_only:
	state->buf_user_offset += sz;
	return ebt_buf_count(state, sz);
}

/* append sz zero bytes of kernel-only padding */
static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
{
	char *b = state->buf_kern_start;

	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);

	if (b != NULL && sz > 0)
		memset(b + state->buf_kern_offset, 0, sz);
	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
	return ebt_buf_count(state, sz);
}

enum compat_mwt {
	EBT_COMPAT_MATCH,
	EBT_COMPAT_WATCHER,
	EBT_COMPAT_TARGET,
};

/*
 * Translate one 32-bit match/watcher/target payload into native layout
 * at the current kernel-buffer position, or -- during the sizing pass
 * (state->buf_kern_start == NULL) -- just record the size delta in the
 * xt compat offset table.  Returns the number of bytes the payload
 * occupies in the kernel representation, or a negative errno.
 */
static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
				enum compat_mwt compat_mwt,
				struct ebt_entries_buf_state *state,
				const unsigned char *base)
{
	char name[EBT_FUNCTION_MAXNAMELEN];
	struct xt_match *match;
	struct xt_target *wt;
	void *dst = NULL;
	int off, pad = 0, ret = 0;
	unsigned int size_kern, entry_offset, match_size = mwt->match_size;

	strlcpy(name, mwt->u.name, sizeof(name));

	if (state->buf_kern_start)
		dst = state->buf_kern_start + state->buf_kern_offset;

	entry_offset = (unsigned char *) mwt - base;
	switch (compat_mwt) {
	case EBT_COMPAT_MATCH:
		match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
						name, 0), "ebt_%s", name);
		if (match == NULL)
			return -ENOENT;
		if (IS_ERR(match))
			return PTR_ERR(match);

		off = ebt_compat_match_offset(match, match_size);
		if (dst) {
			if (match->compat_from_user)
				match->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = match->matchsize;
		/* NOTE(review): -1 presumably marks an unspecified
		 * matchsize; the userspace size is kept then -- confirm */
		if (unlikely(size_kern == -1))
			size_kern = match_size;
		module_put(match->me);
		break;
	case EBT_COMPAT_WATCHER: /* fallthrough */
	case EBT_COMPAT_TARGET:
		wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
						name, 0), "ebt_%s", name);
		if (wt == NULL)
			return -ENOENT;
		if (IS_ERR(wt))
			return PTR_ERR(wt);
		off = xt_compat_target_offset(wt);

		if (dst) {
			if (wt->compat_from_user)
				wt->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = wt->targetsize;
		module_put(wt->me);
		break;
	}

	if (!dst) {
		/* sizing pass: remember by how much this payload grows */
		ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
					off + ebt_compat_entry_padsize());
		if (ret < 0)
			return ret;
	}

	state->buf_kern_offset += match_size + off;
	state->buf_user_offset += match_size;
	pad = XT_ALIGN(size_kern) - size_kern;

	if (pad > 0 && dst) {
		BUG_ON(state->buf_kern_len <= pad);
		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
		memset(dst + size_kern, 0, pad);
	}
	return off + match_size;
}

/*
 * return size of all matches, watchers or target, including necessary
 * alignment and padding.
1917 + */ 1918 + static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, 1919 + unsigned int size_left, enum compat_mwt type, 1920 + struct ebt_entries_buf_state *state, const void *base) 1921 + { 1922 + int growth = 0; 1923 + char *buf; 1924 + 1925 + if (size_left == 0) 1926 + return 0; 1927 + 1928 + buf = (char *) match32; 1929 + 1930 + while (size_left >= sizeof(*match32)) { 1931 + struct ebt_entry_match *match_kern; 1932 + int ret; 1933 + 1934 + match_kern = (struct ebt_entry_match *) state->buf_kern_start; 1935 + if (match_kern) { 1936 + char *tmp; 1937 + tmp = state->buf_kern_start + state->buf_kern_offset; 1938 + match_kern = (struct ebt_entry_match *) tmp; 1939 + } 1940 + ret = ebt_buf_add(state, buf, sizeof(*match32)); 1941 + if (ret < 0) 1942 + return ret; 1943 + size_left -= sizeof(*match32); 1944 + 1945 + /* add padding before match->data (if any) */ 1946 + ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize()); 1947 + if (ret < 0) 1948 + return ret; 1949 + 1950 + if (match32->match_size > size_left) 1951 + return -EINVAL; 1952 + 1953 + size_left -= match32->match_size; 1954 + 1955 + ret = compat_mtw_from_user(match32, type, state, base); 1956 + if (ret < 0) 1957 + return ret; 1958 + 1959 + BUG_ON(ret < match32->match_size); 1960 + growth += ret - match32->match_size; 1961 + growth += ebt_compat_entry_padsize(); 1962 + 1963 + buf += sizeof(*match32); 1964 + buf += match32->match_size; 1965 + 1966 + if (match_kern) 1967 + match_kern->match_size = ret; 1968 + 1969 + WARN_ON(type == EBT_COMPAT_TARGET && size_left); 1970 + match32 = (struct compat_ebt_entry_mwt *) buf; 1971 + } 1972 + 1973 + return growth; 1974 + } 1975 + 1976 + #define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) 
\ 1977 + ({ \ 1978 + unsigned int __i; \ 1979 + int __ret = 0; \ 1980 + struct compat_ebt_entry_mwt *__watcher; \ 1981 + \ 1982 + for (__i = e->watchers_offset; \ 1983 + __i < (e)->target_offset; \ 1984 + __i += __watcher->watcher_size + \ 1985 + sizeof(struct compat_ebt_entry_mwt)) { \ 1986 + __watcher = (void *)(e) + __i; \ 1987 + __ret = fn(__watcher , ## args); \ 1988 + if (__ret != 0) \ 1989 + break; \ 1990 + } \ 1991 + if (__ret == 0) { \ 1992 + if (__i != (e)->target_offset) \ 1993 + __ret = -EINVAL; \ 1994 + } \ 1995 + __ret; \ 1996 + }) 1997 + 1998 + #define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \ 1999 + ({ \ 2000 + unsigned int __i; \ 2001 + int __ret = 0; \ 2002 + struct compat_ebt_entry_mwt *__match; \ 2003 + \ 2004 + for (__i = sizeof(struct ebt_entry); \ 2005 + __i < (e)->watchers_offset; \ 2006 + __i += __match->match_size + \ 2007 + sizeof(struct compat_ebt_entry_mwt)) { \ 2008 + __match = (void *)(e) + __i; \ 2009 + __ret = fn(__match , ## args); \ 2010 + if (__ret != 0) \ 2011 + break; \ 2012 + } \ 2013 + if (__ret == 0) { \ 2014 + if (__i != (e)->watchers_offset) \ 2015 + __ret = -EINVAL; \ 2016 + } \ 2017 + __ret; \ 2018 + }) 2019 + 2020 + /* called for all ebt_entry structures. 
 */
static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
			  unsigned int *total,
			  struct ebt_entries_buf_state *state)
{
	unsigned int i, j, startoff, new_offset = 0;
	/* stores match/watchers/targets & offset of next struct ebt_entry: */
	unsigned int offsets[4];
	unsigned int *offsets_update = NULL;
	int ret;
	char *buf_start;

	if (*total < sizeof(struct ebt_entries))
		return -EINVAL;

	if (!entry->bitmask) {
		/* chain-head pseudo entry, copied as-is */
		*total -= sizeof(struct ebt_entries);
		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
	}
	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
		return -EINVAL;

	startoff = state->buf_user_offset;
	/* pull in most part of ebt_entry, it does not need to be changed. */
	ret = ebt_buf_add(state, entry,
			offsetof(struct ebt_entry, watchers_offset));
	if (ret < 0)
		return ret;

	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
	memcpy(&offsets[1], &entry->watchers_offset,
			sizeof(offsets) - sizeof(offsets[0]));

	if (state->buf_kern_start) {
		/* the three offset fields just appended will be rewritten
		 * in place after the payloads have been translated */
		buf_start = state->buf_kern_start + state->buf_kern_offset;
		offsets_update = (unsigned int *) buf_start;
	}
	ret = ebt_buf_add(state, &offsets[1],
			sizeof(offsets) - sizeof(offsets[0]));
	if (ret < 0)
		return ret;
	buf_start = (char *) entry;
	/*
	 * 0: matches offset, always follows ebt_entry.
	 * 1: watchers offset, from ebt_entry structure
	 * 2: target offset, from ebt_entry structure
	 * 3: next ebt_entry offset, from ebt_entry structure
	 *
	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
	 */
	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
		struct compat_ebt_entry_mwt *match32;
		unsigned int size;
		char *buf = buf_start;

		buf = buf_start + offsets[i];
		if (offsets[i] > offsets[j])
			return -EINVAL;

		match32 = (struct compat_ebt_entry_mwt *) buf;
		size = offsets[j] - offsets[i];
		/* i doubles as the enum compat_mwt value:
		 * 0 = match, 1 = watcher, 2 = target */
		ret = ebt_size_mwt(match32, size, i, state, base);
		if (ret < 0)
			return ret;
		new_offset += ret;
		if (offsets_update && new_offset) {
			pr_debug("ebtables: change offset %d to %d\n",
				offsets_update[i], offsets[j] + new_offset);
			offsets_update[i] = offsets[j] + new_offset;
		}
	}

	/* bytes of userspace input consumed for this entry */
	startoff = state->buf_user_offset - startoff;

	BUG_ON(*total < startoff);
	*total -= startoff;
	return 0;
}

/*
 * repl->entries_size is the size of the ebt_entry blob in userspace.
 * It might need more memory when copied to a 64 bit kernel in case
 * userspace is 32-bit. So, first task: find out how much memory is needed.
 *
 * Called before validation is performed.
 */
static int compat_copy_entries(unsigned char *data, unsigned int size_user,
			       struct ebt_entries_buf_state *state)
{
	unsigned int size_remaining = size_user;
	int ret;

	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
					&size_remaining, state);
	if (ret < 0)
		return ret;

	WARN_ON(size_remaining);
	/* total size needed for the native (64-bit) representation */
	return state->buf_kern_offset;
}


/* read a compat_ebt_replace from userspace and widen it into the
 * native struct ebt_replace (pointers are materialized via compat_ptr) */
static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
					    void __user *user, unsigned int len)
{
	struct compat_ebt_replace tmp;
	int i;

	if (len < sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (len != sizeof(tmp) + tmp.entries_size)
		return -EINVAL;

	if (tmp.entries_size == 0)
		return -EINVAL;

	/* overflow checks mirroring the native do_replace() limits */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));

	/* starting with hook_entry, 32 vs. 64 bit structures are different */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);

	repl->num_counters = tmp.num_counters;
	repl->counters = compat_ptr(tmp.counters);
	repl->entries = compat_ptr(tmp.entries);
	return 0;
}

/*
 * Compat counterpart of do_replace(): widen the replace header, then
 * translate the entry blob in two passes (size, then copy) before
 * handing the native representation to do_replace_finish().
 */
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	int ret, i, countersize, size64;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;
	struct ebt_entries_buf_state state;
	void *entries_tmp;

	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
	if (ret) {
		/* try real handler in case userland supplied needed padding */
		if (ret == -EINVAL && do_replace(net, user, len) == 0)
			ret = 0;
		return ret;
	}

	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	memset(&state, 0, sizeof(state));

	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		ret = -EFAULT;
		goto free_entries;
	}

	entries_tmp = newinfo->entries;

	xt_compat_lock(NFPROTO_BRIDGE);

	/* pass 1: buf_kern_start is NULL, only compute required size */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	if (ret < 0)
		goto out_unlock;

	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));

	size64 = ret;
	newinfo->entries = vmalloc(size64);
	if
	   (!newinfo->entries) {
		vfree(entries_tmp);
		ret = -ENOMEM;
		goto out_unlock;
	}

	memset(&state, 0, sizeof(state));
	state.buf_kern_start = newinfo->entries;
	state.buf_kern_len = size64;

	/* pass 2: same input, this time actually translating the data */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	BUG_ON(ret < 0);	/* parses same data again */

	vfree(entries_tmp);
	tmp.entries_size = size64;

	/* relocate the user-supplied hook entry points by the cumulative
	 * size delta recorded in the xt compat offset table */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		char __user *usrptr;
		if (tmp.hook_entry[i]) {
			unsigned int delta;
			usrptr = (char __user *) tmp.hook_entry[i];
			delta = usrptr - tmp.entries;
			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
		}
	}

	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);

	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	goto free_entries;
}

/* compat counterpart of update_counters(); defers to the native handler
 * when the length already matches the 64-bit layout */
static int compat_update_counters(struct net *net, void __user *user,
				  unsigned int len)
{
	struct compat_ebt_replace hlp;

	if (copy_from_user(&hlp, user, sizeof(hlp)))
		return -EFAULT;

	/* try real handler in case userland supplied needed padding */
	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
		return update_counters(net, user, len);

	return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
					hlp.num_counters, user, len);
}

/* setsockopt entry point for 32-bit callers */
static int compat_do_ebt_set_ctl(struct sock *sk,
		int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case EBT_SO_SET_ENTRIES:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;
	case EBT_SO_SET_COUNTERS:
		ret = compat_update_counters(sock_net(sk), user, len);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}

/* getsockopt entry point for 32-bit callers */
static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
		void __user *user, int *len)
{
	int ret;
	struct compat_ebt_replace tmp;
	struct ebt_table *t;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* try real handler in case userland supplied needed padding */
	if ((cmd == EBT_SO_GET_INFO ||
	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
			return do_ebt_get_ctl(sk, cmd, user, len);

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	xt_compat_lock(NFPROTO_BRIDGE);
	switch (cmd) {
	case EBT_SO_GET_INFO:
		tmp.nentries = t->private->nentries;
		ret = compat_table_info(t->private, &tmp);
		if (ret)
			goto out;
		tmp.valid_hooks = t->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_INIT_INFO:
		tmp.nentries = t->table->nentries;
		tmp.entries_size = t->table->entries_size;
		tmp.valid_hooks = t->table->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		/*
		 * try real handler first in case of userland-side padding.
		 * in case we are dealing with an 'ordinary' 32 bit binary
		 * without 64bit compatibility padding, this will fail right
		 * after copy_from_user when the *len argument is validated.
		 *
		 * the compat_ variant needs to do one pass over the kernel
		 * data set to adjust for size differences before the check.
		 */
		if (copy_everything_to_user(t, user, len, cmd) == 0)
			ret = 0;
		else
			ret = compat_copy_everything_to_user(t, user, len, cmd);
		break;
	default:
		ret = -EINVAL;
	}
 out:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	mutex_unlock(&ebt_mutex);
	return ret;
}
#endif

static struct nf_sockopt_ops ebt_sockopts =
{
	.pf		= PF_INET,
	.set_optmin	= EBT_BASE_CTL,
	.set_optmax	= EBT_SO_SET_MAX + 1,
	.set		= do_ebt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ebt_set_ctl,
#endif
	.get_optmin	= EBT_BASE_CTL,
	.get_optmax	= EBT_SO_GET_MAX + 1,
	.get		= do_ebt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ebt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};