···11+22+Options for the ipv6 module are supplied as parameters at load time.33+44+Module options may be given as command line arguments to the insmod55+or modprobe command, but are usually specified in either the66+/etc/modules.conf or /etc/modprobe.conf configuration file, or in a77+distro-specific configuration file.88+99+The available ipv6 module parameters are listed below. If a parameter1010+is not specified the default value is used.1111+1212+The parameters are as follows:1313+1414+disable1515+1616+ Specifies whether to load the IPv6 module, but disable all1717+ its functionality. This might be used when another module1818+ has a dependency on the IPv6 module being loaded, but no1919+ IPv6 addresses or operations are desired.2020+2121+ The possible values and their effects are:2222+2323+ 02424+ IPv6 is enabled.2525+2626+ This is the default value.2727+2828+ 12929+ IPv6 is disabled.3030+3131+ No IPv6 addresses will be added to interfaces, and3232+ it will not be possible to open an IPv6 socket.3333+3434+ A reboot is required to enable IPv6.3535+
+5-6
Documentation/scsi/cxgb3i.txt
···44============5566The Chelsio T3 ASIC based Adapters (S310, S320, S302, S304, Mezz cards, etc.77-series of products) supports iSCSI acceleration and iSCSI Direct Data Placement77+series of products) support iSCSI acceleration and iSCSI Direct Data Placement88(DDP) where the hardware handles the expensive byte touching operations, such99as CRC computation and verification, and direct DMA to the final host memory1010destination:···3131 the TCP segments onto the wire. It handles TCP retransmission if3232 needed.33333434- On receving, S3 h/w recovers the iSCSI PDU by reassembling TCP3434+ On receiving, S3 h/w recovers the iSCSI PDU by reassembling TCP3535 segments, separating the header and data, calculating and verifying3636- the digests, then forwards the header to the host. The payload data,3636+ the digests, then forwarding the header to the host. The payload data,3737 if possible, will be directly placed into the pre-posted host DDP3838 buffer. Otherwise, the payload data will be sent to the host too.3939···6868 sure the ip address is unique in the network.696970703. edit /etc/iscsi/iscsid.conf7171- The default setting for MaxRecvDataSegmentLength (131072) is too big,7272- replace "node.conn[0].iscsi.MaxRecvDataSegmentLength" to be a value no7373- bigger than 15360 (for example 8192):7171+ The default setting for MaxRecvDataSegmentLength (131072) is too big;7272+ replace with a value no bigger than 15360 (for example 8192):74737574 node.conn[0].iscsi.MaxRecvDataSegmentLength = 81927675
···142142{143143 unsigned int val;144144145145+ /* Do not do the fixup on other platforms! */146146+ if (!machine_is(gef_sbc610))147147+ return;148148+145149 printk(KERN_INFO "Running NEC uPD720101 Fixup\n");146150147151 /* Ensure ports 1, 2, 3, 4 & 5 are enabled */
···2020#include <asm/pat.h>2121#include <linux/module.h>22222323-#ifdef CONFIG_X86_PAE2424-int2525-is_io_mapping_possible(resource_size_t base, unsigned long size)2323+int is_io_mapping_possible(resource_size_t base, unsigned long size)2624{2727- return 1;2828-}2929-#else3030-int3131-is_io_mapping_possible(resource_size_t base, unsigned long size)3232-{2525+#ifndef CONFIG_X86_PAE3326 /* There is no way to map greater than 1 << 32 address without PAE */3427 if (base + size > 0x100000000ULL)3528 return 0;3636-2929+#endif3730 return 1;3831}3939-#endif3232+EXPORT_SYMBOL_GPL(is_io_mapping_possible);40334134/* Map 'pfn' using fixed map 'type' and protections 'prot'4235 */
+96-53
arch/x86/mm/kmmio.c
···3232 struct list_head list;3333 struct kmmio_fault_page *release_next;3434 unsigned long page; /* location of the fault page */3535+ bool old_presence; /* page presence prior to arming */3636+ bool armed;35373638 /*3739 * Number of times this page has been registered as a part3840 * of a probe. If zero, page is disarmed and this may be freed.3939- * Used only by writers (RCU).4141+ * Used only by writers (RCU) and post_kmmio_handler().4242+ * Protected by kmmio_lock, when linked into kmmio_page_table.4043 */4144 int count;4245};···108105 return NULL;109106}110107111111-static void set_page_present(unsigned long addr, bool present,112112- unsigned int *pglevel)108108+static void set_pmd_presence(pmd_t *pmd, bool present, bool *old)113109{114114- pteval_t pteval;115115- pmdval_t pmdval;110110+ pmdval_t v = pmd_val(*pmd);111111+ *old = !!(v & _PAGE_PRESENT);112112+ v &= ~_PAGE_PRESENT;113113+ if (present)114114+ v |= _PAGE_PRESENT;115115+ set_pmd(pmd, __pmd(v));116116+}117117+118118+static void set_pte_presence(pte_t *pte, bool present, bool *old)119119+{120120+ pteval_t v = pte_val(*pte);121121+ *old = !!(v & _PAGE_PRESENT);122122+ v &= ~_PAGE_PRESENT;123123+ if (present)124124+ v |= _PAGE_PRESENT;125125+ set_pte_atomic(pte, __pte(v));126126+}127127+128128+static int set_page_presence(unsigned long addr, bool present, bool *old)129129+{116130 unsigned int level;117117- pmd_t *pmd;118131 pte_t *pte = lookup_address(addr, &level);119132120133 if (!pte) {121134 pr_err("kmmio: no pte for page 0x%08lx\n", addr);122122- return;135135+ return -1;123136 }124124-125125- if (pglevel)126126- *pglevel = level;127137128138 switch (level) {129139 case PG_LEVEL_2M:130130- pmd = (pmd_t *)pte;131131- pmdval = pmd_val(*pmd) & ~_PAGE_PRESENT;132132- if (present)133133- pmdval |= _PAGE_PRESENT;134134- set_pmd(pmd, __pmd(pmdval));140140+ set_pmd_presence((pmd_t *)pte, present, old);135141 break;136136-137142 case PG_LEVEL_4K:138138- pteval = pte_val(*pte) & ~_PAGE_PRESENT;139139- if 
(present)140140- pteval |= _PAGE_PRESENT;141141- set_pte_atomic(pte, __pte(pteval));143143+ set_pte_presence(pte, present, old);142144 break;143143-144145 default:145146 pr_err("kmmio: unexpected page level 0x%x.\n", level);146146- return;147147+ return -1;147148 }148149149150 __flush_tlb_one(addr);151151+ return 0;150152}151153152152-/** Mark the given page as not present. Access to it will trigger a fault. */153153-static void arm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)154154+/*155155+ * Mark the given page as not present. Access to it will trigger a fault.156156+ *157157+ * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the158158+ * protection is ignored here. RCU read lock is assumed held, so the struct159159+ * will not disappear unexpectedly. Furthermore, the caller must guarantee,160160+ * that double arming the same virtual address (page) cannot occur.161161+ *162162+ * Double disarming on the other hand is allowed, and may occur when a fault163163+ * and mmiotrace shutdown happen simultaneously.164164+ */165165+static int arm_kmmio_fault_page(struct kmmio_fault_page *f)154166{155155- set_page_present(page & PAGE_MASK, false, pglevel);167167+ int ret;168168+ WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");169169+ if (f->armed) {170170+ pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",171171+ f->page, f->count, f->old_presence);172172+ }173173+ ret = set_page_presence(f->page, false, &f->old_presence);174174+ WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);175175+ f->armed = true;176176+ return ret;156177}157178158158-/** Mark the given page as present. */159159-static void disarm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)179179+/** Restore the given page to saved presence state. 
*/180180+static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)160181{161161- set_page_present(page & PAGE_MASK, true, pglevel);182182+ bool tmp;183183+ int ret = set_page_presence(f->page, f->old_presence, &tmp);184184+ WARN_ONCE(ret < 0,185185+ KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);186186+ f->armed = false;162187}163188164189/*···233202234203 ctx = &get_cpu_var(kmmio_ctx);235204 if (ctx->active) {236236- disarm_kmmio_fault_page(faultpage->page, NULL);237205 if (addr == ctx->addr) {238206 /*239239- * On SMP we sometimes get recursive probe hits on the240240- * same address. Context is already saved, fall out.207207+ * A second fault on the same page means some other208208+ * condition needs handling by do_page_fault(), the209209+ * page really not being present is the most common.241210 */242242- pr_debug("kmmio: duplicate probe hit on CPU %d, for "243243- "address 0x%08lx.\n",244244- smp_processor_id(), addr);245245- ret = 1;246246- goto no_kmmio_ctx;247247- }248248- /*249249- * Prevent overwriting already in-flight context.250250- * This should not happen, let's hope disarming at least251251- * prevents a panic.252252- */253253- pr_emerg("kmmio: recursive probe hit on CPU %d, "211211+ pr_debug("kmmio: secondary hit for 0x%08lx CPU %d.\n",212212+ addr, smp_processor_id());213213+214214+ if (!faultpage->old_presence)215215+ pr_info("kmmio: unexpected secondary hit for "216216+ "address 0x%08lx on CPU %d.\n", addr,217217+ smp_processor_id());218218+ } else {219219+ /*220220+ * Prevent overwriting already in-flight context.221221+ * This should not happen, let's hope disarming at222222+ * least prevents a panic.223223+ */224224+ pr_emerg("kmmio: recursive probe hit on CPU %d, "254225 "for address 0x%08lx. 
Ignoring.\n",255226 smp_processor_id(), addr);256256- pr_emerg("kmmio: previous hit was at 0x%08lx.\n",257257- ctx->addr);227227+ pr_emerg("kmmio: previous hit was at 0x%08lx.\n",228228+ ctx->addr);229229+ disarm_kmmio_fault_page(faultpage);230230+ }258231 goto no_kmmio_ctx;259232 }260233 ctx->active++;···279244 regs->flags &= ~X86_EFLAGS_IF;280245281246 /* Now we set present bit in PTE and single step. */282282- disarm_kmmio_fault_page(ctx->fpage->page, NULL);247247+ disarm_kmmio_fault_page(ctx->fpage);283248284249 /*285250 * If another cpu accesses the same page while we are stepping,···310275 struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);311276312277 if (!ctx->active) {313313- pr_debug("kmmio: spurious debug trap on CPU %d.\n",278278+ pr_warning("kmmio: spurious debug trap on CPU %d.\n",314279 smp_processor_id());315280 goto out;316281 }···318283 if (ctx->probe && ctx->probe->post_handler)319284 ctx->probe->post_handler(ctx->probe, condition, regs);320285321321- arm_kmmio_fault_page(ctx->fpage->page, NULL);286286+ /* Prevent racing against release_kmmio_fault_page(). 
*/287287+ spin_lock(&kmmio_lock);288288+ if (ctx->fpage->count)289289+ arm_kmmio_fault_page(ctx->fpage);290290+ spin_unlock(&kmmio_lock);322291323292 regs->flags &= ~X86_EFLAGS_TF;324293 regs->flags |= ctx->saved_flags;···354315 f = get_kmmio_fault_page(page);355316 if (f) {356317 if (!f->count)357357- arm_kmmio_fault_page(f->page, NULL);318318+ arm_kmmio_fault_page(f);358319 f->count++;359320 return 0;360321 }361322362362- f = kmalloc(sizeof(*f), GFP_ATOMIC);323323+ f = kzalloc(sizeof(*f), GFP_ATOMIC);363324 if (!f)364325 return -1;365326366327 f->count = 1;367328 f->page = page;368368- list_add_rcu(&f->list, kmmio_page_list(f->page));369329370370- arm_kmmio_fault_page(f->page, NULL);330330+ if (arm_kmmio_fault_page(f)) {331331+ kfree(f);332332+ return -1;333333+ }334334+335335+ list_add_rcu(&f->list, kmmio_page_list(f->page));371336372337 return 0;373338}···390347 f->count--;391348 BUG_ON(f->count < 0);392349 if (!f->count) {393393- disarm_kmmio_fault_page(f->page, NULL);350350+ disarm_kmmio_fault_page(f);394351 f->release_next = *release_list;395352 *release_list = f;396353 }
···484484 mutex_lock(&dev->struct_mutex);485485486486 if (file_priv->is_master) {487487+ struct drm_master *master = file_priv->master;487488 struct drm_file *temp;488489 list_for_each_entry(temp, &dev->filelist, lhead) {489490 if ((temp->master == file_priv->master) &&490491 (temp != file_priv))491492 temp->authenticated = 0;493493+ }494494+495495+ /**496496+ * Since the master is disappearing, so is the497497+ * possibility to lock.498498+ */499499+500500+ if (master->lock.hw_lock) {501501+ if (dev->sigdata.lock == master->lock.hw_lock)502502+ dev->sigdata.lock = NULL;503503+ master->lock.hw_lock = NULL;504504+ master->lock.file_priv = NULL;505505+ wake_up_interruptible_all(&master->lock.lock_queue);492506 }493507494508 if (file_priv->minor->master == file_priv->master) {
+2-1
drivers/gpu/drm/drm_lock.c
···8080 __set_current_state(TASK_INTERRUPTIBLE);8181 if (!master->lock.hw_lock) {8282 /* Device has been unregistered */8383+ send_sig(SIGTERM, current, 0);8384 ret = -EINTR;8485 break;8586 }···9493 /* Contention */9594 schedule();9695 if (signal_pending(current)) {9797- ret = -ERESTARTSYS;9696+ ret = -EINTR;9897 break;9998 }10099 }
···343343#define spitzkbd_resume NULL344344#endif345345346346-static int __init spitzkbd_probe(struct platform_device *dev)346346+static int __devinit spitzkbd_probe(struct platform_device *dev)347347{348348 struct spitzkbd *spitzkbd;349349 struct input_dev *input_dev;···444444 return err;445445}446446447447-static int spitzkbd_remove(struct platform_device *dev)447447+static int __devexit spitzkbd_remove(struct platform_device *dev)448448{449449 int i;450450 struct spitzkbd *spitzkbd = platform_get_drvdata(dev);···470470471471static struct platform_driver spitzkbd_driver = {472472 .probe = spitzkbd_probe,473473- .remove = spitzkbd_remove,473473+ .remove = __devexit_p(spitzkbd_remove),474474 .suspend = spitzkbd_suspend,475475 .resume = spitzkbd_resume,476476 .driver = {···479479 },480480};481481482482-static int __devinit spitzkbd_init(void)482482+static int __init spitzkbd_init(void)483483{484484 return platform_driver_register(&spitzkbd_driver);485485}
+1-1
drivers/input/mouse/Kconfig
···7070config MOUSE_PS2_LIFEBOOK7171 bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EMBEDDED7272 default y7373- depends on MOUSE_PS27373+ depends on MOUSE_PS2 && X867474 help7575 Say Y here if you have a Fujitsu B-series Lifebook PS/27676 TouchScreen connected to your system.
+24-8
drivers/input/mouse/elantech.c
···542542 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||543543 ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||544544 ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {545545- pr_err("elantech.c: sending Elantech magic knock failed.\n");545545+ pr_debug("elantech.c: sending Elantech magic knock failed.\n");546546 return -1;547547 }548548···551551 * set of magic numbers552552 */553553 if (param[0] != 0x3c || param[1] != 0x03 || param[2] != 0xc8) {554554- pr_info("elantech.c: unexpected magic knock result 0x%02x, 0x%02x, 0x%02x.\n",555555- param[0], param[1], param[2]);554554+ pr_debug("elantech.c: "555555+ "unexpected magic knock result 0x%02x, 0x%02x, 0x%02x.\n",556556+ param[0], param[1], param[2]);557557+ return -1;558558+ }559559+560560+ /*561561+ * Query touchpad's firmware version and see if it reports known562562+ * value to avoid mis-detection. Logitech mice are known to respond563563+ * to Elantech magic knock and there might be more.564564+ */565565+ if (synaptics_send_cmd(psmouse, ETP_FW_VERSION_QUERY, param)) {566566+ pr_debug("elantech.c: failed to query firmware version.\n");567567+ return -1;568568+ }569569+570570+ pr_debug("elantech.c: Elantech version query result 0x%02x, 0x%02x, 0x%02x.\n",571571+ param[0], param[1], param[2]);572572+573573+ if (param[0] == 0 || param[1] != 0) {574574+ pr_debug("elantech.c: Probably not a real Elantech touchpad. 
Aborting.\n");556575 return -1;557576 }558577···619600 int i, error;620601 unsigned char param[3];621602622622- etd = kzalloc(sizeof(struct elantech_data), GFP_KERNEL);623623- psmouse->private = etd;603603+ psmouse->private = etd = kzalloc(sizeof(struct elantech_data), GFP_KERNEL);624604 if (!etd)625605 return -1;626606···628610 etd->parity[i] = etd->parity[i & (i - 1)] ^ 1;629611630612 /*631631- * Find out what version hardware this is613613+ * Do the version query again so we can store the result632614 */633615 if (synaptics_send_cmd(psmouse, ETP_FW_VERSION_QUERY, param)) {634616 pr_err("elantech.c: failed to query firmware version.\n");635617 goto init_fail;636618 }637637- pr_info("elantech.c: Elantech version query result 0x%02x, 0x%02x, 0x%02x.\n",638638- param[0], param[1], param[2]);639619 etd->fw_version_maj = param[0];640620 etd->fw_version_min = param[2];641621
+1-1
drivers/input/mouse/pxa930_trkball.c
···83838484 __raw_writel(v, trkball->mmio_base + TBCR);85858686- while (i--) {8686+ while (--i) {8787 if (__raw_readl(trkball->mmio_base + TBCR) == v)8888 break;8989 msleep(1);
+4-5
drivers/input/mouse/synaptics.c
···182182183183static int synaptics_query_hardware(struct psmouse *psmouse)184184{185185- int retries = 0;186186-187187- while ((retries++ < 3) && psmouse_reset(psmouse))188188- /* empty */;189189-190185 if (synaptics_identify(psmouse))191186 return -1;192187 if (synaptics_model_id(psmouse))···577582 struct synaptics_data *priv = psmouse->private;578583 struct synaptics_data old_priv = *priv;579584585585+ psmouse_reset(psmouse);586586+580587 if (synaptics_detect(psmouse, 0))581588 return -1;582589···636639 psmouse->private = priv = kzalloc(sizeof(struct synaptics_data), GFP_KERNEL);637640 if (!priv)638641 return -1;642642+643643+ psmouse_reset(psmouse);639644640645 if (synaptics_query_hardware(psmouse)) {641646 printk(KERN_ERR "Unable to query Synaptics hardware.\n");
···9191 controllers (default=0)");92929393static int mpt_msi_enable_sas;9494-module_param(mpt_msi_enable_sas, int, 1);9494+module_param(mpt_msi_enable_sas, int, 0);9595MODULE_PARM_DESC(mpt_msi_enable_sas, " Enable MSI Support for SAS \9696- controllers (default=1)");9696+ controllers (default=0)");979798989999static int mpt_channel_mapping;
···12911291 if (host->cmd->data)12921292 DBG("Cannot wait for busy signal when also "12931293 "doing a data transfer");12941294- else12941294+ else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))12951295 return;12961296+12971297+ /* The controller does not support the end-of-busy IRQ,12981298+ * fall through and take the SDHCI_INT_RESPONSE */12961299 }1297130012981301 if (intmask & SDHCI_INT_RESPONSE)
+2
drivers/mmc/host/sdhci.h
···208208#define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12)209209/* Controller has an issue with buffer bits for small transfers */210210#define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13)211211+/* Controller does not provide transfer-complete interrupt when not busy */212212+#define SDHCI_QUIRK_NO_BUSY_IRQ (1<<14)211213212214 int irq; /* Device IRQ */213215 void __iomem * ioaddr; /* Mapped address */
···560560 msleep(1);561561 }562562563563- if (reset_timeout == 0) {563563+ if (reset_timeout < 0) {564564 dev_crit(ksp->dev,565565 "Timeout waiting for DMA engines to reset\n");566566 /* And blithely carry on */
···10401040 action = ACTION_FAIL;10411041 break;10421042 case ABORTED_COMMAND:10431043+ action = ACTION_FAIL;10431044 if (sshdr.asc == 0x10) { /* DIF */10441045 description = "Target Data Integrity Failure";10451045- action = ACTION_FAIL;10461046 error = -EILSEQ;10471047- } else10481048- action = ACTION_RETRY;10471047+ }10491048 break;10501049 case NOT_READY:10511050 /* If the device is in the process of becoming
+7
drivers/scsi/sd.c
···107107static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);108108static void sd_print_result(struct scsi_disk *, int);109109110110+static DEFINE_SPINLOCK(sd_index_lock);110111static DEFINE_IDA(sd_index_ida);111112112113/* This semaphore is used to mediate the 0->1 reference get in the···19151914 if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))19161915 goto out_put;1917191619171917+ spin_lock(&sd_index_lock);19181918 error = ida_get_new(&sd_index_ida, &index);19191919+ spin_unlock(&sd_index_lock);19191920 } while (error == -EAGAIN);1920192119211922 if (error)···19391936 return 0;1940193719411938 out_free_index:19391939+ spin_lock(&sd_index_lock);19421940 ida_remove(&sd_index_ida, index);19411941+ spin_unlock(&sd_index_lock);19431942 out_put:19441943 put_disk(gd);19451944 out_free:···19911986 struct scsi_disk *sdkp = to_scsi_disk(dev);19921987 struct gendisk *disk = sdkp->disk;1993198819891989+ spin_lock(&sd_index_lock);19941990 ida_remove(&sd_index_ida, sdkp->index);19911991+ spin_unlock(&sd_index_lock);1995199219961993 disk->private_data = NULL;19971994 put_disk(disk);
···6969# Do not add any filesystems before this line7070obj-$(CONFIG_REISERFS_FS) += reiserfs/7171obj-$(CONFIG_EXT3_FS) += ext3/ # Before ext2 so root fs can be ext37272-obj-$(CONFIG_EXT4_FS) += ext4/ # Before ext2 so root fs can be ext47272+obj-$(CONFIG_EXT2_FS) += ext2/7373+# We place ext4 after ext2 so plain ext2 root fs's are mounted using ext27474+# unless explicitly requested by rootfstype7575+obj-$(CONFIG_EXT4_FS) += ext4/7376obj-$(CONFIG_JBD) += jbd/7477obj-$(CONFIG_JBD2) += jbd2/7575-obj-$(CONFIG_EXT2_FS) += ext2/7678obj-$(CONFIG_CRAMFS) += cramfs/7779obj-$(CONFIG_SQUASHFS) += squashfs/7880obj-y += ramfs/
+3-1
fs/ext4/balloc.c
···609609 */610610int ext4_should_retry_alloc(struct super_block *sb, int *retries)611611{612612- if (!ext4_has_free_blocks(EXT4_SB(sb), 1) || (*retries)++ > 3)612612+ if (!ext4_has_free_blocks(EXT4_SB(sb), 1) ||613613+ (*retries)++ > 3 ||614614+ !EXT4_SB(sb)->s_journal)613615 return 0;614616615617 jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
+1-1
fs/ext4/inode.c
···2544254425452545 ext4_journal_stop(handle);2546254625472547- if (mpd.retval == -ENOSPC) {25472547+ if ((mpd.retval == -ENOSPC) && sbi->s_journal) {25482548 /* commit the transaction which would25492549 * free blocks released in the transaction25502550 * and try again
···10851085extern int register_netdevice_notifier(struct notifier_block *nb);10861086extern int unregister_netdevice_notifier(struct notifier_block *nb);10871087extern int init_dummy_netdev(struct net_device *dev);10881088+extern void netdev_resync_ops(struct net_device *dev);1088108910891090extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);10901091extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
+6
include/linux/rcuclassic.h
···181181#define rcu_enter_nohz() do { } while (0)182182#define rcu_exit_nohz() do { } while (0)183183184184+/* A context switch is a grace period for rcuclassic. */185185+static inline int rcu_blocking_is_gp(void)186186+{187187+ return num_online_cpus() == 1;188188+}189189+184190#endif /* __LINUX_RCUCLASSIC_H */
+4
include/linux/rcupdate.h
···5252 void (*func)(struct rcu_head *head);5353};54545555+/* Internal to kernel, but needed by rcupreempt.h. */5656+extern int rcu_scheduler_active;5757+5558#if defined(CONFIG_CLASSIC_RCU)5659#include <linux/rcuclassic.h>5760#elif defined(CONFIG_TREE_RCU)···268265269266/* Internal to kernel */270267extern void rcu_init(void);268268+extern void rcu_scheduler_starting(void);271269extern int rcu_needs_cpu(int cpu);272270273271#endif /* __LINUX_RCUPDATE_H */
+15
include/linux/rcupreempt.h
···142142#define rcu_exit_nohz() do { } while (0)143143#endif /* CONFIG_NO_HZ */144144145145+/*146146+ * A context switch is a grace period for rcupreempt synchronize_rcu()147147+ * only during early boot, before the scheduler has been initialized.148148+ * So, how the heck do we get a context switch? Well, if the caller149149+ * invokes synchronize_rcu(), they are willing to accept a context150150+ * switch, so we simply pretend that one happened.151151+ *152152+ * After boot, there might be a blocked or preempted task in an RCU153153+ * read-side critical section, so we cannot then take the fastpath.154154+ */155155+static inline int rcu_blocking_is_gp(void)156156+{157157+ return num_online_cpus() == 1 && !rcu_scheduler_active;158158+}159159+145160#endif /* __LINUX_RCUPREEMPT_H */
+6
include/linux/rcutree.h
···326326}327327#endif /* CONFIG_NO_HZ */328328329329+/* A context switch is a grace period for rcutree. */330330+static inline int rcu_blocking_is_gp(void)331331+{332332+ return num_online_cpus() == 1;333333+}334334+329335#endif /* __LINUX_RCUTREE_H */
+4
include/linux/sched.h
···22912291extern int sched_group_set_rt_period(struct task_group *tg,22922292 long rt_period_us);22932293extern long sched_group_rt_period(struct task_group *tg);22942294+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);22942295#endif22952296#endif22972297+22982298+extern int task_can_switch_user(struct user_struct *up,22992299+ struct task_struct *tsk);2296230022972301#ifdef CONFIG_TASK_XACCT22982302static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
+2-1
init/main.c
···9797extern void tc_init(void);9898#endif9999100100-enum system_states system_state;100100+enum system_states system_state __read_mostly;101101EXPORT_SYMBOL(system_state);102102103103/*···463463 * at least once to get things moving:464464 */465465 init_idle_bootup_task(current);466466+ rcu_scheduler_starting();466467 preempt_enable_no_resched();467468 schedule();468469 preempt_disable();
+2-2
kernel/rcuclassic.c
···679679void rcu_check_callbacks(int cpu, int user)680680{681681 if (user ||682682- (idle_cpu(cpu) && !in_softirq() &&683683- hardirq_count() <= (1 << HARDIRQ_SHIFT))) {682682+ (idle_cpu(cpu) && rcu_scheduler_active &&683683+ !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {684684685685 /*686686 * Get here if this CPU took its interrupt from user
+12
kernel/rcupdate.c
···4444#include <linux/cpu.h>4545#include <linux/mutex.h>4646#include <linux/module.h>4747+#include <linux/kernel_stat.h>47484849enum rcu_barrier {4950 RCU_BARRIER_STD,···5655static atomic_t rcu_barrier_cpu_count;5756static DEFINE_MUTEX(rcu_barrier_mutex);5857static struct completion rcu_barrier_completion;5858+int rcu_scheduler_active __read_mostly;59596060/*6161 * Awaken the corresponding synchronize_rcu() instance now that a···8280void synchronize_rcu(void)8381{8482 struct rcu_synchronize rcu;8383+8484+ if (rcu_blocking_is_gp())8585+ return;8686+8587 init_completion(&rcu.completion);8688 /* Will wake me after RCU finished. */8789 call_rcu(&rcu.head, wakeme_after_rcu);···181175 __rcu_init();182176}183177178178+void rcu_scheduler_starting(void)179179+{180180+ WARN_ON(num_online_cpus() != 1);181181+ WARN_ON(nr_context_switches() > 0);182182+ rcu_scheduler_active = 1;183183+}
+3
kernel/rcupreempt.c
···11811181{11821182 struct rcu_synchronize rcu;1183118311841184+ if (num_online_cpus() == 1)11851185+ return; /* blocking is gp if only one CPU! */11861186+11841187 init_completion(&rcu.completion);11851188 /* Will wake me after RCU finished. */11861189 call_rcu_sched(&rcu.head, wakeme_after_rcu);
+2-2
kernel/rcutree.c
···948948void rcu_check_callbacks(int cpu, int user)949949{950950 if (user ||951951- (idle_cpu(cpu) && !in_softirq() &&952952- hardirq_count() <= (1 << HARDIRQ_SHIFT))) {951951+ (idle_cpu(cpu) && rcu_scheduler_active &&952952+ !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {953953954954 /*955955 * Get here if this CPU took its interrupt from user
+12-3
kernel/sched.c
···223223{224224 ktime_t now;225225226226- if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF)226226+ if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)227227 return;228228229229 if (hrtimer_active(&rt_b->rt_period_timer))···9224922492259225 return ret;92269226}92279227+92289228+int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)92299229+{92309230+ /* Don't accept realtime tasks when there is no way for them to run */92319231+ if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)92329232+ return 0;92339233+92349234+ return 1;92359235+}92369236+92279237#else /* !CONFIG_RT_GROUP_SCHED */92289238static int sched_rt_global_constraints(void)92299239{···93279317 struct task_struct *tsk)93289318{93299319#ifdef CONFIG_RT_GROUP_SCHED93309330- /* Don't accept realtime tasks when there is no way for them to run */93319331- if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)93209320+ if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))93329321 return -EINVAL;93339322#else93349323 /* We don't support RT-tasks being in separate groups */
+4-3
kernel/seccomp.c
···8899#include <linux/seccomp.h>1010#include <linux/sched.h>1111+#include <linux/compat.h>11121213/* #define SECCOMP_DEBUG 1 */1314#define NR_SECCOMP_MODES 1···2322 0, /* null terminated */2423};25242626-#ifdef TIF_32BIT2525+#ifdef CONFIG_COMPAT2726static int mode1_syscalls_32[] = {2827 __NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,2928 0, /* null terminated */···3837 switch (mode) {3938 case 1:4039 syscall = mode1_syscalls;4141-#ifdef TIF_32BIT4242- if (test_thread_flag(TIF_32BIT))4040+#ifdef CONFIG_COMPAT4141+ if (is_compat_task())4342 syscall = mode1_syscalls_32;4443#endif4544 do {
+20-11
kernel/sys.c
···559559 abort_creds(new);560560 return retval;561561}562562-562562+563563/*564564 * change the user struct in a credentials set to match the new UID565565 */···570570 new_user = alloc_uid(current_user_ns(), new->uid);571571 if (!new_user)572572 return -EAGAIN;573573+574574+ if (!task_can_switch_user(new_user, current)) {575575+ free_uid(new_user);576576+ return -EINVAL;577577+ }573578574579 if (atomic_read(&new_user->processes) >=575580 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&···636631 goto error;637632 }638633639639- retval = -EAGAIN;640640- if (new->uid != old->uid && set_user(new) < 0)641641- goto error;642642-634634+ if (new->uid != old->uid) {635635+ retval = set_user(new);636636+ if (retval < 0)637637+ goto error;638638+ }643639 if (ruid != (uid_t) -1 ||644640 (euid != (uid_t) -1 && euid != old->uid))645641 new->suid = new->euid;···686680 retval = -EPERM;687681 if (capable(CAP_SETUID)) {688682 new->suid = new->uid = uid;689689- if (uid != old->uid && set_user(new) < 0) {690690- retval = -EAGAIN;691691- goto error;683683+ if (uid != old->uid) {684684+ retval = set_user(new);685685+ if (retval < 0)686686+ goto error;692687 }693688 } else if (uid != old->uid && uid != new->suid) {694689 goto error;···741734 goto error;742735 }743736744744- retval = -EAGAIN;745737 if (ruid != (uid_t) -1) {746738 new->uid = ruid;747747- if (ruid != old->uid && set_user(new) < 0)748748- goto error;739739+ if (ruid != old->uid) {740740+ retval = set_user(new);741741+ if (retval < 0)742742+ goto error;743743+ }749744 }750745 if (euid != (uid_t) -1)751746 new->euid = euid;
+18
kernel/user.c
···362362363363#endif364364365365+#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)366366+/*367367+ * We need to check if a setuid can take place. This function should be called368368+ * before successfully completing the setuid.369369+ */370370+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)371371+{372372+373373+ return sched_rt_can_attach(up->tg, tsk);374374+375375+}376376+#else377377+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)378378+{379379+ return 1;380380+}381381+#endif382382+365383/*366384 * Locate the user_struct for the passed UID. If found, take a ref on it. The367385 * caller must undo that ref with free_uid().
···43364336}43374337EXPORT_SYMBOL(netdev_fix_features);4338433843394339+/* Some devices need to (re-)set their netdev_ops inside43404340+ * ->init() or similar. If that happens, we have to setup43414341+ * the compat pointers again.43424342+ */43434343+void netdev_resync_ops(struct net_device *dev)43444344+{43454345+#ifdef CONFIG_COMPAT_NET_DEV_OPS43464346+ const struct net_device_ops *ops = dev->netdev_ops;43474347+43484348+ dev->init = ops->ndo_init;43494349+ dev->uninit = ops->ndo_uninit;43504350+ dev->open = ops->ndo_open;43514351+ dev->change_rx_flags = ops->ndo_change_rx_flags;43524352+ dev->set_rx_mode = ops->ndo_set_rx_mode;43534353+ dev->set_multicast_list = ops->ndo_set_multicast_list;43544354+ dev->set_mac_address = ops->ndo_set_mac_address;43554355+ dev->validate_addr = ops->ndo_validate_addr;43564356+ dev->do_ioctl = ops->ndo_do_ioctl;43574357+ dev->set_config = ops->ndo_set_config;43584358+ dev->change_mtu = ops->ndo_change_mtu;43594359+ dev->neigh_setup = ops->ndo_neigh_setup;43604360+ dev->tx_timeout = ops->ndo_tx_timeout;43614361+ dev->get_stats = ops->ndo_get_stats;43624362+ dev->vlan_rx_register = ops->ndo_vlan_rx_register;43634363+ dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;43644364+ dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;43654365+#ifdef CONFIG_NET_POLL_CONTROLLER43664366+ dev->poll_controller = ops->ndo_poll_controller;43674367+#endif43684368+#endif43694369+}43704370+EXPORT_SYMBOL(netdev_resync_ops);43714371+43394372/**43404373 * register_netdevice - register a network device43414374 * @dev: device to register···44134380 * This is temporary until all network devices are converted.44144381 */44154382 if (dev->netdev_ops) {44164416- const struct net_device_ops *ops = dev->netdev_ops;44174417-44184418- dev->init = ops->ndo_init;44194419- dev->uninit = ops->ndo_uninit;44204420- dev->open = ops->ndo_open;44214421- dev->change_rx_flags = ops->ndo_change_rx_flags;44224422- dev->set_rx_mode = ops->ndo_set_rx_mode;44234423- 
dev->set_multicast_list = ops->ndo_set_multicast_list;44244424- dev->set_mac_address = ops->ndo_set_mac_address;44254425- dev->validate_addr = ops->ndo_validate_addr;44264426- dev->do_ioctl = ops->ndo_do_ioctl;44274427- dev->set_config = ops->ndo_set_config;44284428- dev->change_mtu = ops->ndo_change_mtu;44294429- dev->tx_timeout = ops->ndo_tx_timeout;44304430- dev->get_stats = ops->ndo_get_stats;44314431- dev->vlan_rx_register = ops->ndo_vlan_rx_register;44324432- dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;44334433- dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;44344434-#ifdef CONFIG_NET_POLL_CONTROLLER44354435- dev->poll_controller = ops->ndo_poll_controller;44364436-#endif43834383+ netdev_resync_ops(dev);44374384 } else {44384385 char drivername[64];44394386 pr_info("%s (%s): not using net_device_ops yet\n",
+3-1
net/core/net-sysfs.c
···7777 if (endp == buf)7878 goto err;79798080- rtnl_lock();8080+ if (!rtnl_trylock())8181+ return -ERESTARTSYS;8282+8183 if (dev_isalive(net)) {8284 if ((ret = (*set)(net, new)) == 0)8385 ret = len;
+17-36
net/ipv6/addrconf.c
···493493 read_unlock(&dev_base_lock);494494}495495496496-static void addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)496496+static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)497497{498498 struct net *net;499499500500 net = (struct net *)table->extra2;501501 if (p == &net->ipv6.devconf_dflt->forwarding)502502- return;502502+ return 0;503503504504- rtnl_lock();504504+ if (!rtnl_trylock())505505+ return -ERESTARTSYS;506506+505507 if (p == &net->ipv6.devconf_all->forwarding) {506508 __s32 newf = net->ipv6.devconf_all->forwarding;507509 net->ipv6.devconf_dflt->forwarding = newf;···514512515513 if (*p)516514 rt6_purge_dflt_routers(net);515515+ return 1;517516}518517#endif519518···2602259926032600 ASSERT_RTNL();2604260126052605- if ((dev->flags & IFF_LOOPBACK) && how == 1)26062606- how = 0;26072607-26082602 rt6_ifdown(net, dev);26092603 neigh_ifdown(&nd_tbl, dev);26102604···39773977 ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);3978397839793979 if (write)39803980- addrconf_fixup_forwarding(ctl, valp, val);39803980+ ret = addrconf_fixup_forwarding(ctl, valp, val);39813981 return ret;39823982}39833983···40134013 }4014401440154015 *valp = new;40164016- addrconf_fixup_forwarding(table, valp, val);40174017- return 1;40164016+ return addrconf_fixup_forwarding(table, valp, val);40184017}4019401840204019static struct addrconf_sysctl_table···4439444044404441EXPORT_SYMBOL(unregister_inet6addr_notifier);4441444244424442-static void addrconf_net_exit(struct net *net)44434443-{44444444- struct net_device *dev;44454445-44464446- rtnl_lock();44474447- /* clean dev list */44484448- for_each_netdev(net, dev) {44494449- if (__in6_dev_get(dev) == NULL)44504450- continue;44514451- addrconf_ifdown(dev, 1);44524452- }44534453- addrconf_ifdown(net->loopback_dev, 2);44544454- rtnl_unlock();44554455-}44564456-44574457-static struct pernet_operations addrconf_net_ops = {44584458- .exit = 
addrconf_net_exit,44594459-};44604460-44614443/*44624444 * Init / cleanup code44634445 */···44804500 if (err)44814501 goto errlo;4482450244834483- err = register_pernet_device(&addrconf_net_ops);44844484- if (err)44854485- return err;44864486-44874503 register_netdevice_notifier(&ipv6_dev_notf);4488450444894505 addrconf_verify(0);···45094533void addrconf_cleanup(void)45104534{45114535 struct inet6_ifaddr *ifa;45364536+ struct net_device *dev;45124537 int i;4513453845144539 unregister_netdevice_notifier(&ipv6_dev_notf);45154515- unregister_pernet_device(&addrconf_net_ops);45164516-45174540 unregister_pernet_subsys(&addrconf_ops);4518454145194542 rtnl_lock();45434543+45444544+ /* clean dev list */45454545+ for_each_netdev(&init_net, dev) {45464546+ if (__in6_dev_get(dev) == NULL)45474547+ continue;45484548+ addrconf_ifdown(dev, 1);45494549+ }45504550+ addrconf_ifdown(init_net.loopback_dev, 2);4520455145214552 /*45224553 * Check hash table.···4545456245464563 del_timer(&addr_chk_timer);45474564 rtnl_unlock();45484548-45494549- unregister_pernet_subsys(&addrconf_net_ops);45504565}
+16-5
net/ipv6/af_inet6.c
···7272static struct list_head inetsw6[SOCK_MAX];7373static DEFINE_SPINLOCK(inetsw6_lock);74747575+static int disable_ipv6 = 0;7676+module_param_named(disable, disable_ipv6, int, 0);7777+MODULE_PARM_DESC(disable, "Disable IPv6 such that it is non-functional");7878+7579static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)7680{7781 const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo);···10051001{10061002 struct sk_buff *dummy_skb;10071003 struct list_head *r;10081008- int err;10041004+ int err = 0;1009100510101006 BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb));10071007+10081008+ /* Register the socket-side information for inet6_create. */10091009+ for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)10101010+ INIT_LIST_HEAD(r);10111011+10121012+ if (disable_ipv6) {10131013+ printk(KERN_INFO10141014+ "IPv6: Loaded, but administratively disabled, "10151015+ "reboot required to enable\n");10161016+ goto out;10171017+ }1011101810121019 err = proto_register(&tcpv6_prot, 1);10131020 if (err)···10361021 if (err)10371022 goto out_unregister_udplite_proto;1038102310391039-10401040- /* Register the socket-side information for inet6_create. */10411041- for(r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)10421042- INIT_LIST_HEAD(r);1043102410441025 /* We MUST register RAW sockets before we create the ICMP6,10451026 * IGMP6, or NDISC control sockets.
+9-1
net/netlink/af_netlink.c
···10911091 return 0;10921092}1093109310941094+/**10951095+ * netlink_set_err - report error to broadcast listeners10961096+ * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()10971097+ * @pid: the PID of a process that we want to skip (if any)10981098+ * @group: the broadcast group that will notice the error10991099+ * @code: error code, must be negative (as usual in kernelspace)11001100+ */10941101void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)10951102{10961103 struct netlink_set_err_data info;···11071100 info.exclude_sk = ssk;11081101 info.pid = pid;11091102 info.group = group;11101110- info.code = code;11031103+ /* sk->sk_err wants a positive error value */11041104+ info.code = -code;1111110511121106 read_lock(&nl_table_lock);
+6-7
net/sched/act_police.c
···183183 if (R_tab == NULL)184184 goto failure;185185186186- if (!est && (ret == ACT_P_CREATED ||187187- !gen_estimator_active(&police->tcf_bstats,188188- &police->tcf_rate_est))) {189189- err = -EINVAL;190190- goto failure;191191- }192192-193186 if (parm->peakrate.rate) {194187 P_tab = qdisc_get_rtab(&parm->peakrate,195188 tb[TCA_POLICE_PEAKRATE]);···198205 &police->tcf_lock, est);199206 if (err)200207 goto failure_unlock;208208+ } else if (tb[TCA_POLICE_AVRATE] &&209209+ (ret == ACT_P_CREATED ||210210+ !gen_estimator_active(&police->tcf_bstats,211211+ &police->tcf_rate_est))) {212212+ err = -EINVAL;213213+ goto failure_unlock;201214 }202215203216 /* No failure allowed after this point */
+9-7
net/sctp/protocol.c
···692692static int sctp_ctl_sock_init(void)693693{694694 int err;695695- sa_family_t family;695695+ sa_family_t family = PF_INET;696696697697 if (sctp_get_pf_specific(PF_INET6))698698 family = PF_INET6;699699- else700700- family = PF_INET;701699702700 err = inet_ctl_sock_create(&sctp_ctl_sock, family,703701 SOCK_SEQPACKET, IPPROTO_SCTP, &init_net);702702+703703+ /* If IPv6 socket could not be created, try the IPv4 socket */704704+ if (err < 0 && family == PF_INET6)705705+ err = inet_ctl_sock_create(&sctp_ctl_sock, AF_INET,706706+ SOCK_SEQPACKET, IPPROTO_SCTP,707707+ &init_net);708708+704709 if (err < 0) {705710 printk(KERN_ERR706711 "SCTP: Failed to create the SCTP control socket.\n");···13021297out:13031298 return status;13041299err_v6_add_protocol:13051305- sctp_v6_del_protocol();13061306-err_add_protocol:13071300 sctp_v4_del_protocol();13011301+err_add_protocol:13081302 inet_ctl_sock_destroy(sctp_ctl_sock);13091303err_ctl_sock_init:13101304 sctp_v6_protosw_exit();···13141310 sctp_v4_pf_exit();13151311 sctp_v6_pf_exit();13161312 sctp_sysctl_unregister();13171317- list_del(&sctp_af_inet.list);13181313 free_pages((unsigned long)sctp_port_hashtable,13191314 get_order(sctp_port_hashsize *13201315 sizeof(struct sctp_bind_hashbucket)));···13611358 sctp_v4_pf_exit();1362135913631360 sctp_sysctl_unregister();13641364- list_del(&sctp_af_inet.list);1365136113661362 free_pages((unsigned long)sctp_assoc_hashtable,13671363 get_order(sctp_assoc_hashsize *
+33-21
net/sctp/sm_sideeffect.c
···787787 struct sctp_association *asoc,788788 struct sctp_chunk *chunk)789789{790790- struct sctp_operr_chunk *operr_chunk;791790 struct sctp_errhdr *err_hdr;791791+ struct sctp_ulpevent *ev;792792793793- operr_chunk = (struct sctp_operr_chunk *)chunk->chunk_hdr;794794- err_hdr = &operr_chunk->err_hdr;793793+ while (chunk->chunk_end > chunk->skb->data) {794794+ err_hdr = (struct sctp_errhdr *)(chunk->skb->data);795795796796- switch (err_hdr->cause) {797797- case SCTP_ERROR_UNKNOWN_CHUNK:798798- {799799- struct sctp_chunkhdr *unk_chunk_hdr;796796+ ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,797797+ GFP_ATOMIC);798798+ if (!ev)799799+ return;800800801801- unk_chunk_hdr = (struct sctp_chunkhdr *)err_hdr->variable;802802- switch (unk_chunk_hdr->type) {803803- /* ADDIP 4.1 A9) If the peer responds to an ASCONF with an804804- * ERROR chunk reporting that it did not recognized the ASCONF805805- * chunk type, the sender of the ASCONF MUST NOT send any806806- * further ASCONF chunks and MUST stop its T-4 timer.807807- */808808- case SCTP_CID_ASCONF:809809- asoc->peer.asconf_capable = 0;810810- sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,801801+ sctp_ulpq_tail_event(&asoc->ulpq, ev);802802+803803+ switch (err_hdr->cause) {804804+ case SCTP_ERROR_UNKNOWN_CHUNK:805805+ {806806+ sctp_chunkhdr_t *unk_chunk_hdr;807807+808808+ unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable;809809+ switch (unk_chunk_hdr->type) {810810+ /* ADDIP 4.1 A9) If the peer responds to an ASCONF with811811+ * an ERROR chunk reporting that it did not recognize812812+ * the ASCONF chunk type, the sender of the ASCONF MUST813813+ * NOT send any further ASCONF chunks and MUST stop its814814+ * T-4 timer.815815+ */816816+ case SCTP_CID_ASCONF:817817+ if (asoc->peer.asconf_capable == 0)818818+ break;819819+820820+ asoc->peer.asconf_capable = 0;821821+ sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,811822 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));823823+ break;824824+ default:825825+ break;826826+ }812827 
break;828828+ }813829 default:814830 break;815831 }816816- break;817817- }818818- default:819819- break;820832 }821833}822834