···1142114211431143 pids.max1144114411451145- A read-write single value file which exists on non-root cgroups. The11461146- default is "max".11451145+ A read-write single value file which exists on non-root11461146+ cgroups. The default is "max".1147114711481148- Hard limit of number of processes.11481148+ Hard limit of number of processes.1149114911501150 pids.current1151115111521152- A read-only single value file which exists on all cgroups.11521152+ A read-only single value file which exists on all cgroups.1153115311541154- The number of processes currently in the cgroup and its descendants.11541154+ The number of processes currently in the cgroup and its11551155+ descendants.1155115611561157Organisational operations are not blocked by cgroup policies, so it is11571158possible to have pids.current > pids.max. This can be done by either
···7171 For Axon it can be absent, though my current driver7272 doesn't handle phy-address yet so for now, keep7373 0x00ffffff in it.7474+ - phy-handle : Used to describe configurations where a external PHY7575+ is used. Please refer to:7676+ Documentation/devicetree/bindings/net/ethernet.txt7477 - rx-fifo-size-gige : 1 cell, Rx fifo size in bytes for 1000 Mb/sec7578 operations (if absent the value is the same as7679 rx-fifo-size). For Axon, either absent or 2048.···8481 offload, phandle of the TAH device node.8582 - tah-channel : 1 cell, optional. If appropriate, channel used on the8683 TAH engine.8484+ - fixed-link : Fixed-link subnode describing a link to a non-MDIO8585+ managed entity. See8686+ Documentation/devicetree/bindings/net/fixed-link.txt8787+ for details.8888+ - mdio subnode : When the EMAC has a phy connected to its local8989+ mdio, which us supported by the kernel's network9090+ PHY library in drivers/net/phy, there must be device9191+ tree subnode with the following required properties:9292+ - #address-cells: Must be <1>.9393+ - #size-cells: Must be <0>.87948888- Example:9595+ For PHY definitions: Please refer to9696+ Documentation/devicetree/bindings/net/phy.txt and9797+ Documentation/devicetree/bindings/net/ethernet.txt9898+9999+ Examples:8910090101 EMAC0: ethernet@40000800 {91102 device_type = "network";···120103 zmii-device = <&ZMII0>;121104 zmii-channel = <0>;122105 };106106+107107+ EMAC1: ethernet@ef600c00 {108108+ device_type = "network";109109+ compatible = "ibm,emac-apm821xx", "ibm,emac4sync";110110+ interrupt-parent = <&EMAC1>;111111+ interrupts = <0 1>;112112+ #interrupt-cells = <1>;113113+ #address-cells = <0>;114114+ #size-cells = <0>;115115+ interrupt-map = <0 &UIC2 0x10 IRQ_TYPE_LEVEL_HIGH /* Status */116116+ 1 &UIC2 0x14 IRQ_TYPE_LEVEL_HIGH /* Wake */>;117117+ reg = <0xef600c00 0x000000c4>;118118+ local-mac-address = [000000000000]; /* Filled in by U-Boot */119119+ mal-device = <&MAL0>;120120+ mal-tx-channel = <0>;121121+ 
mal-rx-channel = <0>;122122+ cell-index = <0>;123123+ max-frame-size = <9000>;124124+ rx-fifo-size = <16384>;125125+ tx-fifo-size = <2048>;126126+ fifo-entry-size = <10>;127127+ phy-mode = "rgmii";128128+ phy-handle = <&phy0>;129129+ phy-map = <0x00000000>;130130+ rgmii-device = <&RGMII0>;131131+ rgmii-channel = <0>;132132+ tah-device = <&TAH0>;133133+ tah-channel = <0>;134134+ has-inverted-stacr-oc;135135+ has-new-stacr-staopc;136136+137137+ mdio {138138+ #address-cells = <1>;139139+ #size-cells = <0>;140140+141141+ phy0: ethernet-phy@0 {142142+ compatible = "ethernet-phy-ieee802.3-c22";143143+ reg = <0>;144144+ };145145+ };146146+ };147147+123148124149 ii) McMAL node125150···204145 - revision : as provided by the RGMII new version register if205146 available.206147 For Axon: 0x0000012a207207-
+2-1
Documentation/networking/ip-sysctl.txt
···10061006 FALSE (router)1007100710081008forwarding - BOOLEAN10091009- Enable IP forwarding on this interface.10091009+ Enable IP forwarding on this interface. This controls whether packets10101010+ received _on_ this interface can be forwarded.1010101110111012mc_forwarding - BOOLEAN10121013 Do multicast routing. The kernel needs to be compiled with CONFIG_MROUTE
···5151#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))5252#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))53535454+/* Put a PPC bit into a "normal" bit position */5555+#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \5656+ ((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))5757+5458#include <asm/barrier.h>55595660/* Macro for generating the ***_bits() functions */
···6565 return !(event & ~valid_mask);6666}67676868-static u64 mmcra_sdar_mode(u64 event)6868+static inline bool is_event_marked(u64 event)6969{7070- if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))7171- return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;7070+ if (event & EVENT_IS_MARKED)7171+ return true;72727373- return MMCRA_SDAR_MODE_TLB;7373+ return false;7474+}7575+7676+static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)7777+{7878+ /*7979+ * MMCRA[SDAR_MODE] specifices how the SDAR should be updated in8080+ * continous sampling mode.8181+ *8282+ * Incase of Power8:8383+ * MMCRA[SDAR_MODE] will be programmed as "0b01" for continous sampling8484+ * mode and will be un-changed when setting MMCRA[63] (Marked events).8585+ *8686+ * Incase of Power9:8787+ * Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'),8888+ * or if group already have any marked events.8989+ * Non-Marked events (for DD1):9090+ * MMCRA[SDAR_MODE] will be set to 0b019191+ * For rest9292+ * MMCRA[SDAR_MODE] will be set from event code.9393+ */9494+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {9595+ if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))9696+ *mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;9797+ else if (!cpu_has_feature(CPU_FTR_POWER9_DD1))9898+ *mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;9999+ else if (cpu_has_feature(CPU_FTR_POWER9_DD1))100100+ *mmcra |= MMCRA_SDAR_MODE_TLB;101101+ } else102102+ *mmcra |= MMCRA_SDAR_MODE_TLB;74103}7510476105static u64 thresh_cmp_val(u64 value)···209180 value |= CNST_L1_QUAL_VAL(cache);210181 }211182212212- if (event & EVENT_IS_MARKED) {183183+ if (is_event_marked(event)) {213184 mask |= CNST_SAMPLE_MASK;214185 value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);215186 }···305276 }306277307278 /* In continuous sampling mode, update SDAR on TLB miss */308308- mmcra |= mmcra_sdar_mode(event[i]);279279+ mmcra_sdar_mode(event[i], &mmcra);309280310281 if (event[i] & EVENT_IS_L1) {311282 cache 
= event[i] >> EVENT_CACHE_SEL_SHIFT;···314285 mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;315286 }316287317317- if (event[i] & EVENT_IS_MARKED) {288288+ if (is_event_marked(event[i])) {318289 mmcra |= MMCRA_SAMPLE_ENABLE;319290320291 val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
···395395 struct machine_check_event *evt)396396{397397 int recovered = 0;398398- uint64_t ea = get_mce_fault_addr(evt);399398400399 if (!(regs->msr & MSR_RI)) {401400 /* If MSR_RI isn't set, we cannot recover */···403404 } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {404405 /* Platform corrected itself */405406 recovered = 1;406406- } else if (ea && !is_kernel_addr(ea)) {407407+ } else if (evt->severity == MCE_SEV_FATAL) {408408+ /* Fatal machine check */409409+ pr_err("Machine check interrupt is fatal\n");410410+ recovered = 0;411411+ } else if ((evt->severity == MCE_SEV_ERROR_SYNC) &&412412+ (user_mode(regs) && !is_global_init(current))) {407413 /*408408- * Faulting address is not in kernel text. We should be fine.409409- * We need to find which process uses this address.410414 * For now, kill the task if we have received exception when411415 * in userspace.412416 *413417 * TODO: Queue up this address for hwpoisioning later.414414- */415415- if (user_mode(regs) && !is_global_init(current)) {416416- _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);417417- recovered = 1;418418- } else419419- recovered = 0;420420- } else if (user_mode(regs) && !is_global_init(current) &&421421- evt->severity == MCE_SEV_ERROR_SYNC) {422422- /*423423- * If we have received a synchronous error when in userspace424424- * kill the task.425418 */426419 _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);427420 recovered = 1;
···376376 bio_list_init(&punt);377377 bio_list_init(&nopunt);378378379379- while ((bio = bio_list_pop(current->bio_list)))379379+ while ((bio = bio_list_pop(¤t->bio_list[0])))380380 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);381381+ current->bio_list[0] = nopunt;381382382382- *current->bio_list = nopunt;383383+ bio_list_init(&nopunt);384384+ while ((bio = bio_list_pop(¤t->bio_list[1])))385385+ bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);386386+ current->bio_list[1] = nopunt;383387384388 spin_lock(&bs->rescue_lock);385389 bio_list_merge(&bs->rescue_list, &punt);···470466 * we retry with the original gfp_flags.471467 */472468473473- if (current->bio_list && !bio_list_empty(current->bio_list))469469+ if (current->bio_list &&470470+ (!bio_list_empty(¤t->bio_list[0]) ||471471+ !bio_list_empty(¤t->bio_list[1])))474472 gfp_mask &= ~__GFP_DIRECT_RECLAIM;475473476474 p = mempool_alloc(bs->bio_pool, gfp_mask);
+18-12
block/blk-core.c
···19731973 */19741974blk_qc_t generic_make_request(struct bio *bio)19751975{19761976- struct bio_list bio_list_on_stack;19761976+ /*19771977+ * bio_list_on_stack[0] contains bios submitted by the current19781978+ * make_request_fn.19791979+ * bio_list_on_stack[1] contains bios that were submitted before19801980+ * the current make_request_fn, but that haven't been processed19811981+ * yet.19821982+ */19831983+ struct bio_list bio_list_on_stack[2];19771984 blk_qc_t ret = BLK_QC_T_NONE;1978198519791986 if (!generic_make_request_checks(bio))···19971990 * should be added at the tail19981991 */19991992 if (current->bio_list) {20002000- bio_list_add(current->bio_list, bio);19931993+ bio_list_add(¤t->bio_list[0], bio);20011994 goto out;20021995 }20031996···20162009 * bio_list, and call into ->make_request() again.20172010 */20182011 BUG_ON(bio->bi_next);20192019- bio_list_init(&bio_list_on_stack);20202020- current->bio_list = &bio_list_on_stack;20122012+ bio_list_init(&bio_list_on_stack[0]);20132013+ current->bio_list = bio_list_on_stack;20212014 do {20222015 struct request_queue *q = bdev_get_queue(bio->bi_bdev);2023201620242017 if (likely(blk_queue_enter(q, false) == 0)) {20252025- struct bio_list hold;20262018 struct bio_list lower, same;2027201920282020 /* Create a fresh bio_list for all subordinate requests */20292029- hold = bio_list_on_stack;20302030- bio_list_init(&bio_list_on_stack);20212021+ bio_list_on_stack[1] = bio_list_on_stack[0];20222022+ bio_list_init(&bio_list_on_stack[0]);20312023 ret = q->make_request_fn(q, bio);2032202420332025 blk_queue_exit(q);···20362030 */20372031 bio_list_init(&lower);20382032 bio_list_init(&same);20392039- while ((bio = bio_list_pop(&bio_list_on_stack)) != NULL)20332033+ while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)20402034 if (q == bdev_get_queue(bio->bi_bdev))20412035 bio_list_add(&same, bio);20422036 else20432037 bio_list_add(&lower, bio);20442038 /* now assemble so we handle the lowest level first */20452045- 
bio_list_merge(&bio_list_on_stack, &lower);20462046- bio_list_merge(&bio_list_on_stack, &same);20472047- bio_list_merge(&bio_list_on_stack, &hold);20392039+ bio_list_merge(&bio_list_on_stack[0], &lower);20402040+ bio_list_merge(&bio_list_on_stack[0], &same);20412041+ bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);20482042 } else {20492043 bio_io_error(bio);20502044 }20512051- bio = bio_list_pop(current->bio_list);20452045+ bio = bio_list_pop(&bio_list_on_stack[0]);20522046 } while (bio);20532047 current->bio_list = NULL; /* deactivate */20542048
+3
block/blk-mq-tag.c
···295295 for (i = 0; i < set->nr_hw_queues; i++) {296296 struct blk_mq_tags *tags = set->tags[i];297297298298+ if (!tags)299299+ continue;300300+298301 for (j = 0; j < tags->nr_tags; j++) {299302 if (!tags->static_rqs[j])300303 continue;
···397397 irq, err);398398 return err;399399 }400400- omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);401400402402- priv->clk = of_clk_get(pdev->dev.of_node, 0);401401+ priv->clk = devm_clk_get(&pdev->dev, NULL);403402 if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)404403 return -EPROBE_DEFER;405404 if (!IS_ERR(priv->clk)) {···407408 dev_err(&pdev->dev, "unable to enable the clk, "408409 "err = %d\n", err);409410 }411411+412412+ /*413413+ * On OMAP4, enabling the shutdown_oflo interrupt is414414+ * done in the interrupt mask register. There is no415415+ * such register on EIP76, and it's enabled by the416416+ * same bit in the control register417417+ */418418+ if (priv->pdata->regs[RNG_INTMASK_REG])419419+ omap_rng_write(priv, RNG_INTMASK_REG,420420+ RNG_SHUTDOWN_OFLO_MASK);421421+ else422422+ omap_rng_write(priv, RNG_CONTROL_REG,423423+ RNG_SHUTDOWN_OFLO_MASK);410424 }411425 return 0;412426}
+85-47
drivers/crypto/s5p-sss.c
···270270 scatterwalk_done(&walk, out, 0);271271}272272273273-static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)273273+static void s5p_sg_done(struct s5p_aes_dev *dev)274274{275275 if (dev->sg_dst_cpy) {276276 dev_dbg(dev->dev,···281281 }282282 s5p_free_sg_cpy(dev, &dev->sg_src_cpy);283283 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);284284+}284285285285- /* holding a lock outside */286286+/* Calls the completion. Cannot be called with dev->lock hold. */287287+static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)288288+{286289 dev->req->base.complete(&dev->req->base, err);287290 dev->busy = false;288291}···371368}372369373370/*374374- * Returns true if new transmitting (output) data is ready and its375375- * address+length have to be written to device (by calling376376- * s5p_set_dma_outdata()). False otherwise.371371+ * Returns -ERRNO on error (mapping of new data failed).372372+ * On success returns:373373+ * - 0 if there is no more data,374374+ * - 1 if new transmitting (output) data is ready and its address+length375375+ * have to be written to device (by calling s5p_set_dma_outdata()).377376 */378378-static bool s5p_aes_tx(struct s5p_aes_dev *dev)377377+static int s5p_aes_tx(struct s5p_aes_dev *dev)379378{380380- int err = 0;381381- bool ret = false;379379+ int ret = 0;382380383381 s5p_unset_outdata(dev);384382385383 if (!sg_is_last(dev->sg_dst)) {386386- err = s5p_set_outdata(dev, sg_next(dev->sg_dst));387387- if (err)388388- s5p_aes_complete(dev, err);389389- else390390- ret = true;391391- } else {392392- s5p_aes_complete(dev, err);393393-394394- dev->busy = true;395395- tasklet_schedule(&dev->tasklet);384384+ ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));385385+ if (!ret)386386+ ret = 1;396387 }397388398389 return ret;399390}400391401392/*402402- * Returns true if new receiving (input) data is ready and its403403- * address+length have to be written to device (by calling404404- * s5p_set_dma_indata()). 
False otherwise.393393+ * Returns -ERRNO on error (mapping of new data failed).394394+ * On success returns:395395+ * - 0 if there is no more data,396396+ * - 1 if new receiving (input) data is ready and its address+length397397+ * have to be written to device (by calling s5p_set_dma_indata()).405398 */406406-static bool s5p_aes_rx(struct s5p_aes_dev *dev)399399+static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)407400{408408- int err;409409- bool ret = false;401401+ int ret = 0;410402411403 s5p_unset_indata(dev);412404413405 if (!sg_is_last(dev->sg_src)) {414414- err = s5p_set_indata(dev, sg_next(dev->sg_src));415415- if (err)416416- s5p_aes_complete(dev, err);417417- else418418- ret = true;406406+ ret = s5p_set_indata(dev, sg_next(dev->sg_src));407407+ if (!ret)408408+ ret = 1;419409 }420410421411 return ret;···418422{419423 struct platform_device *pdev = dev_id;420424 struct s5p_aes_dev *dev = platform_get_drvdata(pdev);421421- bool set_dma_tx = false;422422- bool set_dma_rx = false;425425+ int err_dma_tx = 0;426426+ int err_dma_rx = 0;427427+ bool tx_end = false;423428 unsigned long flags;424429 uint32_t status;430430+ int err;425431426432 spin_lock_irqsave(&dev->lock, flags);427433434434+ /*435435+ * Handle rx or tx interrupt. 
If there is still data (scatterlist did not436436+ * reach end), then map next scatterlist entry.437437+ * In case of such mapping error, s5p_aes_complete() should be called.438438+ *439439+ * If there is no more data in tx scatter list, call s5p_aes_complete()440440+ * and schedule new tasklet.441441+ */428442 status = SSS_READ(dev, FCINTSTAT);429443 if (status & SSS_FCINTSTAT_BRDMAINT)430430- set_dma_rx = s5p_aes_rx(dev);431431- if (status & SSS_FCINTSTAT_BTDMAINT)432432- set_dma_tx = s5p_aes_tx(dev);444444+ err_dma_rx = s5p_aes_rx(dev);445445+446446+ if (status & SSS_FCINTSTAT_BTDMAINT) {447447+ if (sg_is_last(dev->sg_dst))448448+ tx_end = true;449449+ err_dma_tx = s5p_aes_tx(dev);450450+ }433451434452 SSS_WRITE(dev, FCINTPEND, status);435453436436- /*437437- * Writing length of DMA block (either receiving or transmitting)438438- * will start the operation immediately, so this should be done439439- * at the end (even after clearing pending interrupts to not miss the440440- * interrupt).441441- */442442- if (set_dma_tx)443443- s5p_set_dma_outdata(dev, dev->sg_dst);444444- if (set_dma_rx)445445- s5p_set_dma_indata(dev, dev->sg_src);454454+ if (err_dma_rx < 0) {455455+ err = err_dma_rx;456456+ goto error;457457+ }458458+ if (err_dma_tx < 0) {459459+ err = err_dma_tx;460460+ goto error;461461+ }446462463463+ if (tx_end) {464464+ s5p_sg_done(dev);465465+466466+ spin_unlock_irqrestore(&dev->lock, flags);467467+468468+ s5p_aes_complete(dev, 0);469469+ dev->busy = true;470470+ tasklet_schedule(&dev->tasklet);471471+ } else {472472+ /*473473+ * Writing length of DMA block (either receiving or474474+ * transmitting) will start the operation immediately, so this475475+ * should be done at the end (even after clearing pending476476+ * interrupts to not miss the interrupt).477477+ */478478+ if (err_dma_tx == 1)479479+ s5p_set_dma_outdata(dev, dev->sg_dst);480480+ if (err_dma_rx == 1)481481+ s5p_set_dma_indata(dev, dev->sg_src);482482+483483+ 
spin_unlock_irqrestore(&dev->lock, flags);484484+ }485485+486486+ return IRQ_HANDLED;487487+488488+error:489489+ s5p_sg_done(dev);447490 spin_unlock_irqrestore(&dev->lock, flags);491491+ s5p_aes_complete(dev, err);448492449493 return IRQ_HANDLED;450494}···633597 s5p_unset_indata(dev);634598635599indata_error:636636- s5p_aes_complete(dev, err);600600+ s5p_sg_done(dev);637601 spin_unlock_irqrestore(&dev->lock, flags);602602+ s5p_aes_complete(dev, err);638603}639604640605static void s5p_tasklet_cb(unsigned long data)···842805 dev_warn(dev, "feed control interrupt is not available.\n");843806 goto err_irq;844807 }845845- err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,846846- IRQF_SHARED, pdev->name, pdev);808808+ err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,809809+ s5p_aes_interrupt, IRQF_ONESHOT,810810+ pdev->name, pdev);847811 if (err < 0) {848812 dev_warn(dev, "feed control interrupt is not available.\n");849813 goto err_irq;
···392392 * To get all the fields, copy all archdata393393 */394394 dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;395395+ dev->ofdev.dev.dma_ops = chip->lbus.pdev->dev.dma_ops;395396#endif /* CONFIG_PCI */396397397398#ifdef DEBUG
+15-12
drivers/md/dm.c
···989989 struct dm_offload *o = container_of(cb, struct dm_offload, cb);990990 struct bio_list list;991991 struct bio *bio;992992+ int i;992993993994 INIT_LIST_HEAD(&o->cb.list);994995995996 if (unlikely(!current->bio_list))996997 return;997998998998- list = *current->bio_list;999999- bio_list_init(current->bio_list);999999+ for (i = 0; i < 2; i++) {10001000+ list = current->bio_list[i];10011001+ bio_list_init(¤t->bio_list[i]);1000100210011001- while ((bio = bio_list_pop(&list))) {10021002- struct bio_set *bs = bio->bi_pool;10031003- if (unlikely(!bs) || bs == fs_bio_set) {10041004- bio_list_add(current->bio_list, bio);10051005- continue;10031003+ while ((bio = bio_list_pop(&list))) {10041004+ struct bio_set *bs = bio->bi_pool;10051005+ if (unlikely(!bs) || bs == fs_bio_set) {10061006+ bio_list_add(¤t->bio_list[i], bio);10071007+ continue;10081008+ }10091009+10101010+ spin_lock(&bs->rescue_lock);10111011+ bio_list_add(&bs->rescue_list, bio);10121012+ queue_work(bs->rescue_workqueue, &bs->rescue_work);10131013+ spin_unlock(&bs->rescue_lock);10061014 }10071007-10081008- spin_lock(&bs->rescue_lock);10091009- bio_list_add(&bs->rescue_list, bio);10101010- queue_work(bs->rescue_workqueue, &bs->rescue_work);10111011- spin_unlock(&bs->rescue_lock);10121015 }10131016}10141017
···1329213292 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |1329313293 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;13294132941329513295- /* VF with OLD Hypervisor or old PF do not support filtering */1329613295 if (IS_PF(bp)) {1329713296 if (chip_is_e1x)1329813297 bp->accept_any_vlan = true;1329913298 else1330013299 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;1330113301-#ifdef CONFIG_BNX2X_SRIOV1330213302- } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {1330313303- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;1330413304-#endif1330513300 }1330113301+ /* For VF we'll know whether to enable VLAN filtering after1330213302+ * getting a response to CHANNEL_TLV_ACQUIRE from PF.1330313303+ */13306133041330713305 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;1330813306 dev->features |= NETIF_F_HIGHDMA;···1373613738 if (!netif_running(bp->dev)) {1373713739 DP(BNX2X_MSG_PTP,1373813740 "PTP adjfreq called while the interface is down\n");1373913739- return -EFAULT;1374113741+ return -ENETDOWN;1374013742 }13741137431374213744 if (ppb < 0) {···1379513797{1379613798 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);13797137991380013800+ if (!netif_running(bp->dev)) {1380113801+ DP(BNX2X_MSG_PTP,1380213802+ "PTP adjtime called while the interface is down\n");1380313803+ return -ENETDOWN;1380413804+ }1380513805+1379813806 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);13799138071380013808 timecounter_adjtime(&bp->timecounter, delta);···1381213808{1381313809 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);1381413810 u64 ns;1381113811+1381213812+ if (!netif_running(bp->dev)) {1381313813+ DP(BNX2X_MSG_PTP,1381413814+ "PTP gettime called while the interface is down\n");1381513815+ return -ENETDOWN;1381613816+ }13815138171381613818 ns = timecounter_read(&bp->timecounter);1381713819···1383313823{1383413824 struct bnx2x *bp = container_of(ptp, struct 
bnx2x, ptp_clock_info);1383513825 u64 ns;1382613826+1382713827+ if (!netif_running(bp->dev)) {1382813828+ DP(BNX2X_MSG_PTP,1382913829+ "PTP settime called while the interface is down\n");1383013830+ return -ENETDOWN;1383113831+ }13836138321383713833 ns = timespec64_to_ns(ts);1383813834···1400713991 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);1400813992 if (rc)1400913993 goto init_one_freemem;1399413994+1399513995+#ifdef CONFIG_BNX2X_SRIOV1399613996+ /* VF with OLD Hypervisor or old PF do not support filtering */1399713997+ if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {1399813998+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;1399913999+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;1400014000+ }1400114001+#endif1401014002 }14011140031401214004 /* Enable SRIOV if capability found in configuration space */
+16-8
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
···434434435435 /* Add/Remove the filter */436436 rc = bnx2x_config_vlan_mac(bp, &ramrod);437437- if (rc && rc != -EEXIST) {437437+ if (rc == -EEXIST)438438+ return 0;439439+ if (rc) {438440 BNX2X_ERR("Failed to %s %s\n",439441 filter->add ? "add" : "delete",440442 (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?···445443 "MAC" : "VLAN");446444 return rc;447445 }446446+447447+ filter->applied = true;448448449449 return 0;450450}···473469 /* Rollback if needed */474470 if (i != filters->count) {475471 BNX2X_ERR("Managed only %d/%d filters - rolling back\n",476476- i, filters->count + 1);472472+ i, filters->count);477473 while (--i >= 0) {474474+ if (!filters->filters[i].applied)475475+ continue;478476 filters->filters[i].add = !filters->filters[i].add;479477 bnx2x_vf_mac_vlan_config(bp, vf, qid,480478 &filters->filters[i],···19051899 continue;19061900 }1907190119081908- DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);19021902+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),19031903+ "add addresses for vf %d\n", vf->abs_vfid);19091904 for_each_vfq(vf, j) {19101905 struct bnx2x_vf_queue *rxq = vfq_get(vf, j);19111906···19271920 cpu_to_le32(U64_HI(q_stats_addr));19281921 cur_query_entry->address.lo =19291922 cpu_to_le32(U64_LO(q_stats_addr));19301930- DP(BNX2X_MSG_IOV,19311931- "added address %x %x for vf %d queue %d client %d\n",19321932- cur_query_entry->address.hi,19331933- cur_query_entry->address.lo, cur_query_entry->funcID,19341934- j, cur_query_entry->index);19231923+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),19241924+ "added address %x %x for vf %d queue %d client %d\n",19251925+ cur_query_entry->address.hi,19261926+ cur_query_entry->address.lo,19271927+ cur_query_entry->funcID,19281928+ j, cur_query_entry->index);19351929 cur_query_entry++;19361930 cur_data_offset += sizeof(struct per_queue_stats);19371931 stats_count++;
···474474 return;475475476476 bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;477477- if (BNXT_PF(bp))477477+ if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))478478 bp->dcbx_cap |= DCB_CAP_DCBX_HOST;479479 else480480 bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
+147-59
drivers/net/ethernet/broadcom/genet/bcmgenet.c
···11/*22 * Broadcom GENET (Gigabit Ethernet) controller driver33 *44- * Copyright (c) 2014 Broadcom Corporation44+ * Copyright (c) 2014-2017 Broadcom55 *66 * This program is free software; you can redistribute it and/or modify77 * it under the terms of the GNU General Public License version 2 as···450450 genet_dma_ring_regs[r]);451451}452452453453+static int bcmgenet_begin(struct net_device *dev)454454+{455455+ struct bcmgenet_priv *priv = netdev_priv(dev);456456+457457+ /* Turn on the clock */458458+ return clk_prepare_enable(priv->clk);459459+}460460+461461+static void bcmgenet_complete(struct net_device *dev)462462+{463463+ struct bcmgenet_priv *priv = netdev_priv(dev);464464+465465+ /* Turn off the clock */466466+ clk_disable_unprepare(priv->clk);467467+}468468+453469static int bcmgenet_get_link_ksettings(struct net_device *dev,454470 struct ethtool_link_ksettings *cmd)455471{···794778 STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),795779 /* Misc UniMAC counters */796780 STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,797797- UMAC_RBUF_OVFL_CNT),798798- STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),781781+ UMAC_RBUF_OVFL_CNT_V1),782782+ STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,783783+ UMAC_RBUF_ERR_CNT_V1),799784 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),800785 STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),801786 STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),···838821 }839822}840823824824+static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)825825+{826826+ u16 new_offset;827827+ u32 val;828828+829829+ switch (offset) {830830+ case UMAC_RBUF_OVFL_CNT_V1:831831+ if (GENET_IS_V2(priv))832832+ new_offset = RBUF_OVFL_CNT_V2;833833+ else834834+ new_offset = RBUF_OVFL_CNT_V3PLUS;835835+836836+ val = bcmgenet_rbuf_readl(priv, new_offset);837837+ /* clear if overflowed */838838+ if (val == ~0)839839+ bcmgenet_rbuf_writel(priv, 0, 
new_offset);840840+ break;841841+ case UMAC_RBUF_ERR_CNT_V1:842842+ if (GENET_IS_V2(priv))843843+ new_offset = RBUF_ERR_CNT_V2;844844+ else845845+ new_offset = RBUF_ERR_CNT_V3PLUS;846846+847847+ val = bcmgenet_rbuf_readl(priv, new_offset);848848+ /* clear if overflowed */849849+ if (val == ~0)850850+ bcmgenet_rbuf_writel(priv, 0, new_offset);851851+ break;852852+ default:853853+ val = bcmgenet_umac_readl(priv, offset);854854+ /* clear if overflowed */855855+ if (val == ~0)856856+ bcmgenet_umac_writel(priv, 0, offset);857857+ break;858858+ }859859+860860+ return val;861861+}862862+841863static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)842864{843865 int i, j = 0;···892836 case BCMGENET_STAT_NETDEV:893837 case BCMGENET_STAT_SOFT:894838 continue;895895- case BCMGENET_STAT_MIB_RX:896896- case BCMGENET_STAT_MIB_TX:897839 case BCMGENET_STAT_RUNT:898898- if (s->type != BCMGENET_STAT_MIB_RX)899899- offset = BCMGENET_STAT_OFFSET;840840+ offset += BCMGENET_STAT_OFFSET;841841+ /* fall through */842842+ case BCMGENET_STAT_MIB_TX:843843+ offset += BCMGENET_STAT_OFFSET;844844+ /* fall through */845845+ case BCMGENET_STAT_MIB_RX:900846 val = bcmgenet_umac_readl(priv,901847 UMAC_MIB_START + j + offset);848848+ offset = 0; /* Reset Offset */902849 break;903850 case BCMGENET_STAT_MISC:904904- val = bcmgenet_umac_readl(priv, s->reg_offset);905905- /* clear if overflowed */906906- if (val == ~0)907907- bcmgenet_umac_writel(priv, 0, s->reg_offset);851851+ if (GENET_IS_V1(priv)) {852852+ val = bcmgenet_umac_readl(priv, s->reg_offset);853853+ /* clear if overflowed */854854+ if (val == ~0)855855+ bcmgenet_umac_writel(priv, 0,856856+ s->reg_offset);857857+ } else {858858+ val = bcmgenet_update_stat_misc(priv,859859+ s->reg_offset);860860+ }908861 break;909862 }910863···10389731039974/* standard ethtool support functions. 
*/1040975static const struct ethtool_ops bcmgenet_ethtool_ops = {976976+ .begin = bcmgenet_begin,977977+ .complete = bcmgenet_complete,1041978 .get_strings = bcmgenet_get_strings,1042979 .get_sset_count = bcmgenet_get_sset_count,1043980 .get_ethtool_stats = bcmgenet_get_ethtool_stats,···12341167 struct bcmgenet_priv *priv = netdev_priv(dev);12351168 struct device *kdev = &priv->pdev->dev;12361169 struct enet_cb *tx_cb_ptr;12371237- struct netdev_queue *txq;12381170 unsigned int pkts_compl = 0;12391171 unsigned int bytes_compl = 0;12401172 unsigned int c_index;···12851219 dev->stats.tx_packets += pkts_compl;12861220 dev->stats.tx_bytes += bytes_compl;1287122112881288- txq = netdev_get_tx_queue(dev, ring->queue);12891289- netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);12901290-12911291- if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {12921292- if (netif_tx_queue_stopped(txq))12931293- netif_tx_wake_queue(txq);12941294- }12221222+ netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),12231223+ pkts_compl, bytes_compl);1295122412961225 return pkts_compl;12971226}···13091248 struct bcmgenet_tx_ring *ring =13101249 container_of(napi, struct bcmgenet_tx_ring, napi);13111250 unsigned int work_done = 0;12511251+ struct netdev_queue *txq;12521252+ unsigned long flags;1312125313131313- work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);12541254+ spin_lock_irqsave(&ring->lock, flags);12551255+ work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);12561256+ if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {12571257+ txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);12581258+ netif_tx_wake_queue(txq);12591259+ }12601260+ spin_unlock_irqrestore(&ring->lock, flags);1314126113151262 if (work_done == 0) {13161263 napi_complete(napi);···25262457/* Interrupt bottom half */25272458static void bcmgenet_irq_task(struct work_struct *work)25282459{24602460+ unsigned long flags;24612461+ unsigned int status;25292462 struct bcmgenet_priv *priv = container_of(25302463 
work, struct bcmgenet_priv, bcmgenet_irq_work);2531246425322465 netif_dbg(priv, intr, priv->dev, "%s\n", __func__);2533246625342534- if (priv->irq0_stat & UMAC_IRQ_MPD_R) {25352535- priv->irq0_stat &= ~UMAC_IRQ_MPD_R;24672467+ spin_lock_irqsave(&priv->lock, flags);24682468+ status = priv->irq0_stat;24692469+ priv->irq0_stat = 0;24702470+ spin_unlock_irqrestore(&priv->lock, flags);24712471+24722472+ if (status & UMAC_IRQ_MPD_R) {25362473 netif_dbg(priv, wol, priv->dev,25372474 "magic packet detected, waking up\n");25382475 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);25392476 }2540247725412478 /* Link UP/DOWN event */25422542- if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {24792479+ if (status & UMAC_IRQ_LINK_EVENT)25432480 phy_mac_interrupt(priv->phydev,25442544- !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));25452545- priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;25462546- }24812481+ !!(status & UMAC_IRQ_LINK_UP));25472482}2548248325492484/* bcmgenet_isr1: handle Rx and Tx priority queues */···25562483 struct bcmgenet_priv *priv = dev_id;25572484 struct bcmgenet_rx_ring *rx_ring;25582485 struct bcmgenet_tx_ring *tx_ring;25592559- unsigned int index;24862486+ unsigned int index, status;2560248725612561- /* Save irq status for bottom-half processing. 
*/25622562- priv->irq1_stat =25632563- bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &24882488+ /* Read irq status */24892489+ status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &25642490 ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);2565249125662492 /* clear interrupts */25672567- bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);24932493+ bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);2568249425692495 netif_dbg(priv, intr, priv->dev,25702570- "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);24962496+ "%s: IRQ=0x%x\n", __func__, status);2571249725722498 /* Check Rx priority queue interrupts */25732499 for (index = 0; index < priv->hw_params->rx_queues; index++) {25742574- if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))25002500+ if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))25752501 continue;2576250225772503 rx_ring = &priv->rx_rings[index];···2583251125842512 /* Check Tx priority queue interrupts */25852513 for (index = 0; index < priv->hw_params->tx_queues; index++) {25862586- if (!(priv->irq1_stat & BIT(index)))25142514+ if (!(status & BIT(index)))25872515 continue;2588251625892517 tx_ring = &priv->tx_rings[index];···26032531 struct bcmgenet_priv *priv = dev_id;26042532 struct bcmgenet_rx_ring *rx_ring;26052533 struct bcmgenet_tx_ring *tx_ring;25342534+ unsigned int status;25352535+ unsigned long flags;2606253626072607- /* Save irq status for bottom-half processing. 
*/26082608- priv->irq0_stat =26092609- bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &25372537+ /* Read irq status */25382538+ status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &26102539 ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);2611254026122541 /* clear interrupts */26132613- bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);25422542+ bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);2614254326152544 netif_dbg(priv, intr, priv->dev,26162616- "IRQ=0x%x\n", priv->irq0_stat);25452545+ "IRQ=0x%x\n", status);2617254626182618- if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {25472547+ if (status & UMAC_IRQ_RXDMA_DONE) {26192548 rx_ring = &priv->rx_rings[DESC_INDEX];2620254926212550 if (likely(napi_schedule_prep(&rx_ring->napi))) {···26252552 }26262553 }2627255426282628- if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {25552555+ if (status & UMAC_IRQ_TXDMA_DONE) {26292556 tx_ring = &priv->tx_rings[DESC_INDEX];2630255726312558 if (likely(napi_schedule_prep(&tx_ring->napi))) {···26342561 }26352562 }2636256326372637- if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |26382638- UMAC_IRQ_PHY_DET_F |26392639- UMAC_IRQ_LINK_EVENT |26402640- UMAC_IRQ_HFB_SM |26412641- UMAC_IRQ_HFB_MM |26422642- UMAC_IRQ_MPD_R)) {26432643- /* all other interested interrupts handled in bottom half */26442644- schedule_work(&priv->bcmgenet_irq_work);25642564+ if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&25652565+ status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {25662566+ wake_up(&priv->wq);26452567 }2646256826472647- if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&26482648- priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {26492649- priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);26502650- wake_up(&priv->wq);25692569+ /* all other interested interrupts handled in bottom half */25702570+ status &= (UMAC_IRQ_LINK_EVENT |25712571+ UMAC_IRQ_MPD_R);25722572+ if (status) {25732573+ /* Save irq status for bottom-half processing. 
*/25742574+ spin_lock_irqsave(&priv->lock, flags);25752575+ priv->irq0_stat |= status;25762576+ spin_unlock_irqrestore(&priv->lock, flags);25772577+25782578+ schedule_work(&priv->bcmgenet_irq_work);26512579 }2652258026532581 return IRQ_HANDLED;···28752801err_fini_dma:28762802 bcmgenet_fini_dma(priv);28772803err_clk_disable:28042804+ if (priv->internal_phy)28052805+ bcmgenet_power_down(priv, GENET_POWER_PASSIVE);28782806 clk_disable_unprepare(priv->clk);28792807 return ret;28802808}···32533177 */32543178 gphy_rev = reg & 0xffff;3255317931803180+ /* This is reserved so should require special treatment */31813181+ if (gphy_rev == 0 || gphy_rev == 0x01ff) {31823182+ pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);31833183+ return;31843184+ }31853185+32563186 /* This is the good old scheme, just GPHY major, no minor nor patch */32573187 if ((gphy_rev & 0xf0) != 0)32583188 priv->gphy_rev = gphy_rev << 8;···32663184 /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */32673185 else if ((gphy_rev & 0xff00) != 0)32683186 priv->gphy_rev = gphy_rev;32693269-32703270- /* This is reserved so should require special treatment */32713271- else if (gphy_rev == 0 || gphy_rev == 0x01ff) {32723272- pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);32733273- return;32743274- }3275318732763188#ifdef CONFIG_PHYS_ADDR_T_64BIT32773189 if (!(params->flags & GENET_HAS_40BITS))···33093233 const void *macaddr;33103234 struct resource *r;33113235 int err = -EIO;32363236+ const char *phy_mode_str;3312323733133238 /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */33143239 dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,···33523275 err = PTR_ERR(priv->base);33533276 goto err;33543277 }32783278+32793279+ spin_lock_init(&priv->lock);3355328033563281 SET_NETDEV_DEV(dev, &pdev->dev);33573282 dev_set_drvdata(&pdev->dev, dev);···34163337 dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");34173338 priv->clk_eee = NULL;34183339 
}33403340+33413341+ /* If this is an internal GPHY, power it on now, before UniMAC is33423342+ * brought out of reset as absolutely no UniMAC activity is allowed33433343+ */33443344+ if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&33453345+ !strcasecmp(phy_mode_str, "internal"))33463346+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);3419334734203348 err = reset_umac(priv);34213349 if (err)···35883502 return 0;3589350335903504out_clk_disable:35053505+ if (priv->internal_phy)35063506+ bcmgenet_power_down(priv, GENET_POWER_PASSIVE);35913507 clk_disable_unprepare(priv->clk);35923508 return ret;35933509}
+11-5
drivers/net/ethernet/broadcom/genet/bcmgenet.h
···11/*22- * Copyright (c) 2014 Broadcom Corporation22+ * Copyright (c) 2014-2017 Broadcom33 *44 * This program is free software; you can redistribute it and/or modify55 * it under the terms of the GNU General Public License version 2 as···214214#define MDIO_REG_SHIFT 16215215#define MDIO_REG_MASK 0x1F216216217217-#define UMAC_RBUF_OVFL_CNT 0x61C217217+#define UMAC_RBUF_OVFL_CNT_V1 0x61C218218+#define RBUF_OVFL_CNT_V2 0x80219219+#define RBUF_OVFL_CNT_V3PLUS 0x94218220219221#define UMAC_MPD_CTRL 0x620220222#define MPD_EN (1 << 0)···226224227225#define UMAC_MPD_PW_MS 0x624228226#define UMAC_MPD_PW_LS 0x628229229-#define UMAC_RBUF_ERR_CNT 0x634227227+#define UMAC_RBUF_ERR_CNT_V1 0x634228228+#define RBUF_ERR_CNT_V2 0x84229229+#define RBUF_ERR_CNT_V3PLUS 0x98230230#define UMAC_MDF_ERR_CNT 0x638231231#define UMAC_MDF_CTRL 0x650232232#define UMAC_MDF_ADDR 0x654···623619 struct work_struct bcmgenet_irq_work;624620 int irq0;625621 int irq1;626626- unsigned int irq0_stat;627627- unsigned int irq1_stat;628622 int wol_irq;629623 bool wol_irq_disabled;624624+625625+ /* shared status */626626+ spinlock_t lock;627627+ unsigned int irq0_stat;630628631629 /* HW descriptors/checksum variables */632630 bool desc_64b_en;
+55-55
drivers/net/ethernet/cavium/liquidio/lio_main.c
···152152 */153153 struct octeon_sg_entry *sg;154154155155- u64 sg_dma_ptr;155155+ dma_addr_t sg_dma_ptr;156156};157157158158struct handshake {···734734 struct octnic_gather *g;735735 int i;736736737737+ kfree(lio->glist_lock);738738+ lio->glist_lock = NULL;739739+737740 if (!lio->glist)738741 return;739742···744741 do {745742 g = (struct octnic_gather *)746743 list_delete_head(&lio->glist[i]);747747- if (g) {748748- if (g->sg) {749749- dma_unmap_single(&lio->oct_dev->750750- pci_dev->dev,751751- g->sg_dma_ptr,752752- g->sg_size,753753- DMA_TO_DEVICE);754754- kfree((void *)((unsigned long)g->sg -755755- g->adjust));756756- }744744+ if (g)757745 kfree(g);758758- }759746 } while (g);747747+748748+ if (lio->glists_virt_base && lio->glists_virt_base[i]) {749749+ lio_dma_free(lio->oct_dev,750750+ lio->glist_entry_size * lio->tx_qsize,751751+ lio->glists_virt_base[i],752752+ lio->glists_dma_base[i]);753753+ }760754 }761755762762- kfree((void *)lio->glist);763763- kfree((void *)lio->glist_lock);756756+ kfree(lio->glists_virt_base);757757+ lio->glists_virt_base = NULL;758758+759759+ kfree(lio->glists_dma_base);760760+ lio->glists_dma_base = NULL;761761+762762+ kfree(lio->glist);763763+ lio->glist = NULL;764764}765765766766/**···778772 lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),779773 GFP_KERNEL);780774 if (!lio->glist_lock)781781- return 1;775775+ return -ENOMEM;782776783777 lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),784778 GFP_KERNEL);785779 if (!lio->glist) {786786- kfree((void *)lio->glist_lock);787787- return 1;780780+ kfree(lio->glist_lock);781781+ lio->glist_lock = NULL;782782+ return -ENOMEM;783783+ }784784+785785+ lio->glist_entry_size =786786+ ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);787787+788788+ /* allocate memory to store virtual and dma base address of789789+ * per glist consistent memory790790+ */791791+ lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),792792+ GFP_KERNEL);793793+ 
lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),794794+ GFP_KERNEL);795795+796796+ if (!lio->glists_virt_base || !lio->glists_dma_base) {797797+ delete_glists(lio);798798+ return -ENOMEM;788799 }789800790801 for (i = 0; i < num_iqs; i++) {···811788812789 INIT_LIST_HEAD(&lio->glist[i]);813790791791+ lio->glists_virt_base[i] =792792+ lio_dma_alloc(oct,793793+ lio->glist_entry_size * lio->tx_qsize,794794+ &lio->glists_dma_base[i]);795795+796796+ if (!lio->glists_virt_base[i]) {797797+ delete_glists(lio);798798+ return -ENOMEM;799799+ }800800+814801 for (j = 0; j < lio->tx_qsize; j++) {815802 g = kzalloc_node(sizeof(*g), GFP_KERNEL,816803 numa_node);···829796 if (!g)830797 break;831798832832- g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *833833- OCT_SG_ENTRY_SIZE);799799+ g->sg = lio->glists_virt_base[i] +800800+ (j * lio->glist_entry_size);834801835835- g->sg = kmalloc_node(g->sg_size + 8,836836- GFP_KERNEL, numa_node);837837- if (!g->sg)838838- g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);839839- if (!g->sg) {840840- kfree(g);841841- break;842842- }843843-844844- /* The gather component should be aligned on 64-bit845845- * boundary846846- */847847- if (((unsigned long)g->sg) & 7) {848848- g->adjust = 8 - (((unsigned long)g->sg) & 7);849849- g->sg = (struct octeon_sg_entry *)850850- ((unsigned long)g->sg + g->adjust);851851- }852852- g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,853853- g->sg, g->sg_size,854854- DMA_TO_DEVICE);855855- if (dma_mapping_error(&oct->pci_dev->dev,856856- g->sg_dma_ptr)) {857857- kfree((void *)((unsigned long)g->sg -858858- g->adjust));859859- kfree(g);860860- break;861861- }802802+ g->sg_dma_ptr = lio->glists_dma_base[i] +803803+ (j * lio->glist_entry_size);862804863805 list_add_tail(&g->list, &lio->glist[i]);864806 }865807866808 if (j != lio->tx_qsize) {867809 delete_glists(lio);868868- return 1;810810+ return -ENOMEM;869811 }870812 }871813···18931885 i++;18941886 }1895188718961896- 
dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,18971897- g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);18981898-18991888 iq = skb_iq(lio, skb);19001889 spin_lock(&lio->glist_lock[iq]);19011890 list_add_tail(&g->list, &lio->glist[iq]);···19371932 frag->size, DMA_TO_DEVICE);19381933 i++;19391934 }19401940-19411941- dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,19421942- g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);1943193519441936 iq = skb_iq(lio, skb);19451937···32753273 i++;32763274 }3277327532783278- dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,32793279- g->sg_size, DMA_TO_DEVICE);32803276 dptr = g->sg_dma_ptr;3281327732823278 if (OCTEON_CN23XX_PF(oct))
···325325 size_t desc_ring_dma;326326327327 /** Info ptr list are allocated at this virtual address. */328328- size_t info_base_addr;328328+ void *info_base_addr;329329330330 /** DMA mapped address of the info list */331331- size_t info_list_dma;331331+ dma_addr_t info_list_dma;332332333333 /** Allocated size of info list. */334334 u32 info_alloc_size;
···13521352 if (err)13531353 goto clean_load;1354135413551355+ pci_save_state(pdev);13551356 return 0;1356135713571358clean_load:···1408140714091408 mlx5_enter_error_state(dev);14101409 mlx5_unload_one(dev, priv, false);14111411- /* In case of kernel call save the pci state and drain the health wq */14101410+ /* In case of kernel call drain the health wq */14121411 if (state) {14131413- pci_save_state(pdev);14141412 mlx5_drain_health_wq(dev);14151413 mlx5_pci_disable_device(dev);14161414 }···1461146114621462 pci_set_master(pdev);14631463 pci_restore_state(pdev);14641464+ pci_save_state(pdev);1464146514651466 if (wait_vital(pdev)) {14661467 dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
+2-2
drivers/net/ethernet/mellanox/mlxsw/reg.h
···769769#define MLXSW_REG_SPVM_ID 0x200F770770#define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */771771#define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */772772-#define MLXSW_REG_SPVM_REC_MAX_COUNT 256772772+#define MLXSW_REG_SPVM_REC_MAX_COUNT 255773773#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \774774 MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)775775···17021702#define MLXSW_REG_SPVMLR_ID 0x202017031703#define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */17041704#define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */17051705-#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 25617051705+#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 25517061706#define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \17071707 MLXSW_REG_SPVMLR_REC_LEN * \17081708 MLXSW_REG_SPVMLR_REC_MAX_COUNT)
···822822/* Net device open. */823823static int tun_net_open(struct net_device *dev)824824{825825+ struct tun_struct *tun = netdev_priv(dev);826826+ int i;827827+825828 netif_tx_start_all_queues(dev);829829+830830+ for (i = 0; i < tun->numqueues; i++) {831831+ struct tun_file *tfile;832832+833833+ tfile = rtnl_dereference(tun->tfiles[i]);834834+ tfile->socket.sk->sk_write_space(tfile->socket.sk);835835+ }836836+826837 return 0;827838}828839···11141103 if (!skb_array_empty(&tfile->tx_array))11151104 mask |= POLLIN | POLLRDNORM;1116110511171117- if (sock_writeable(sk) ||11181118- (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&11191119- sock_writeable(sk)))11061106+ if (tun->dev->flags & IFF_UP &&11071107+ (sock_writeable(sk) ||11081108+ (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&11091109+ sock_writeable(sk))))11201110 mask |= POLLOUT | POLLWRNORM;1121111111221112 if (tun->dev->reg_state != NETREG_REGISTERED)···25822570 int ret = 0;2583257125842572 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);25852585- pr_info("%s\n", DRV_COPYRIGHT);2586257325872574 ret = rtnl_link_register(&tun_link_ops);25882575 if (ret) {
+2-1
drivers/net/vrf.c
···340340341341static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)342342{343343+ int len = skb->len;343344 netdev_tx_t ret = is_ip_tx_frame(skb, dev);344345345346 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {···348347349348 u64_stats_update_begin(&dstats->syncp);350349 dstats->tx_pkts++;351351- dstats->tx_bytes += skb->len;350350+ dstats->tx_bytes += len;352351 u64_stats_update_end(&dstats->syncp);353352 } else {354353 this_cpu_inc(dev->dstats->tx_drps);
+40-33
drivers/net/vxlan.c
···29762976 return 0;29772977}2978297829792979+static int __vxlan_dev_create(struct net *net, struct net_device *dev,29802980+ struct vxlan_config *conf)29812981+{29822982+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);29832983+ struct vxlan_dev *vxlan = netdev_priv(dev);29842984+ int err;29852985+29862986+ err = vxlan_dev_configure(net, dev, conf, false);29872987+ if (err)29882988+ return err;29892989+29902990+ dev->ethtool_ops = &vxlan_ethtool_ops;29912991+29922992+ /* create an fdb entry for a valid default destination */29932993+ if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {29942994+ err = vxlan_fdb_create(vxlan, all_zeros_mac,29952995+ &vxlan->default_dst.remote_ip,29962996+ NUD_REACHABLE | NUD_PERMANENT,29972997+ NLM_F_EXCL | NLM_F_CREATE,29982998+ vxlan->cfg.dst_port,29992999+ vxlan->default_dst.remote_vni,30003000+ vxlan->default_dst.remote_vni,30013001+ vxlan->default_dst.remote_ifindex,30023002+ NTF_SELF);30033003+ if (err)30043004+ return err;30053005+ }30063006+30073007+ err = register_netdevice(dev);30083008+ if (err) {30093009+ vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);30103010+ return err;30113011+ }30123012+30133013+ list_add(&vxlan->next, &vn->vxlan_list);30143014+ return 0;30153015+}30163016+29793017static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],29803018 struct net_device *dev, struct vxlan_config *conf,29813019 bool changelink)···32103172static int vxlan_newlink(struct net *src_net, struct net_device *dev,32113173 struct nlattr *tb[], struct nlattr *data[])32123174{32133213- struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);32143214- struct vxlan_dev *vxlan = netdev_priv(dev);32153175 struct vxlan_config conf;32163176 int err;32173177···32173181 if (err)32183182 return err;3219318332203220- err = vxlan_dev_configure(src_net, dev, &conf, false);32213221- if (err)32223222- return err;32233223-32243224- dev->ethtool_ops = &vxlan_ethtool_ops;32253225-32263226- /* create an fdb entry 
for a valid default destination */32273227- if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {32283228- err = vxlan_fdb_create(vxlan, all_zeros_mac,32293229- &vxlan->default_dst.remote_ip,32303230- NUD_REACHABLE | NUD_PERMANENT,32313231- NLM_F_EXCL | NLM_F_CREATE,32323232- vxlan->cfg.dst_port,32333233- vxlan->default_dst.remote_vni,32343234- vxlan->default_dst.remote_vni,32353235- vxlan->default_dst.remote_ifindex,32363236- NTF_SELF);32373237- if (err)32383238- return err;32393239- }32403240-32413241- err = register_netdevice(dev);32423242- if (err) {32433243- vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);32443244- return err;32453245- }32463246-32473247- list_add(&vxlan->next, &vn->vxlan_list);32483248-32493249- return 0;31843184+ return __vxlan_dev_create(src_net, dev, &conf);32503185}3251318632523187static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],···34473440 if (IS_ERR(dev))34483441 return dev;3449344234503450- err = vxlan_dev_configure(net, dev, conf, false);34433443+ err = __vxlan_dev_create(net, dev, conf);34513444 if (err < 0) {34523445 free_netdev(dev);34533446 return ERR_PTR(err);
+2-2
drivers/net/wan/fsl_ucc_hdlc.c
···381381 /* set bd status and length */382382 bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;383383384384- iowrite16be(bd_status, &bd->status);385384 iowrite16be(skb->len, &bd->length);385385+ iowrite16be(bd_status, &bd->status);386386387387 /* Move to next BD in the ring */388388 if (!(bd_status & T_W_S))···457457 struct sk_buff *skb;458458 hdlc_device *hdlc = dev_to_hdlc(dev);459459 struct qe_bd *bd;460460- u32 bd_status;460460+ u16 bd_status;461461 u16 length, howmany = 0;462462 u8 *bdbuffer;463463 int i;
+3
drivers/net/wimax/i2400m/usb.c
···467467 struct i2400mu *i2400mu;468468 struct usb_device *usb_dev = interface_to_usbdev(iface);469469470470+ if (iface->cur_altsetting->desc.bNumEndpoints < 4)471471+ return -ENODEV;472472+470473 if (usb_dev->speed != USB_SPEED_HIGH)471474 dev_err(dev, "device not connected as high speed\n");472475
+17-9
drivers/net/xen-netback/interface.c
···165165{166166 struct xenvif *vif = netdev_priv(dev);167167 struct xenvif_queue *queue = NULL;168168- unsigned int num_queues = vif->num_queues;168168+ unsigned int num_queues;169169 u16 index;170170 struct xenvif_rx_cb *cb;171171172172 BUG_ON(skb->dev != dev);173173174174- /* Drop the packet if queues are not set up */174174+ /* Drop the packet if queues are not set up.175175+ * This handler should be called inside an RCU read section176176+ * so we don't need to enter it here explicitly.177177+ */178178+ num_queues = READ_ONCE(vif->num_queues);175179 if (num_queues < 1)176180 goto drop;177181···226222{227223 struct xenvif *vif = netdev_priv(dev);228224 struct xenvif_queue *queue = NULL;225225+ unsigned int num_queues;229226 u64 rx_bytes = 0;230227 u64 rx_packets = 0;231228 u64 tx_bytes = 0;232229 u64 tx_packets = 0;233230 unsigned int index;234231235235- spin_lock(&vif->lock);236236- if (vif->queues == NULL)237237- goto out;232232+ rcu_read_lock();233233+ num_queues = READ_ONCE(vif->num_queues);238234239235 /* Aggregate tx and rx stats from each queue */240240- for (index = 0; index < vif->num_queues; ++index) {236236+ for (index = 0; index < num_queues; ++index) {241237 queue = &vif->queues[index];242238 rx_bytes += queue->stats.rx_bytes;243239 rx_packets += queue->stats.rx_packets;···245241 tx_packets += queue->stats.tx_packets;246242 }247243248248-out:249249- spin_unlock(&vif->lock);244244+ rcu_read_unlock();250245251246 vif->dev->stats.rx_bytes = rx_bytes;252247 vif->dev->stats.rx_packets = rx_packets;···381378 struct ethtool_stats *stats, u64 * data)382379{383380 struct xenvif *vif = netdev_priv(dev);384384- unsigned int num_queues = vif->num_queues;381381+ unsigned int num_queues;385382 int i;386383 unsigned int queue_index;384384+385385+ rcu_read_lock();386386+ num_queues = READ_ONCE(vif->num_queues);387387388388 for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {389389 unsigned long accum = 0;···396390 }397391 data[i] = accum;398392 }393393+394394+ 
rcu_read_unlock();399395}400396401397static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+1-1
drivers/net/xen-netback/netback.c
···214214 netdev_err(vif->dev, "fatal error; disabling device\n");215215 vif->disabled = true;216216 /* Disable the vif from queue 0's kthread */217217- if (vif->queues)217217+ if (vif->num_queues)218218 xenvif_kick_thread(&vif->queues[0]);219219}220220
+10-10
drivers/net/xen-netback/xenbus.c
···495495 struct xenvif *vif = be->vif;496496497497 if (vif) {498498+ unsigned int num_queues = vif->num_queues;498499 unsigned int queue_index;499499- struct xenvif_queue *queues;500500501501 xen_unregister_watchers(vif);502502#ifdef CONFIG_DEBUG_FS503503 xenvif_debugfs_delif(vif);504504#endif /* CONFIG_DEBUG_FS */505505 xenvif_disconnect_data(vif);506506- for (queue_index = 0;507507- queue_index < vif->num_queues;508508- ++queue_index)506506+507507+ /* At this point some of the handlers may still be active508508+ * so we need to have additional synchronization here.509509+ */510510+ vif->num_queues = 0;511511+ synchronize_net();512512+513513+ for (queue_index = 0; queue_index < num_queues; ++queue_index)509514 xenvif_deinit_queue(&vif->queues[queue_index]);510515511511- spin_lock(&vif->lock);512512- queues = vif->queues;513513- vif->num_queues = 0;516516+ vfree(vif->queues);514517 vif->queues = NULL;515515- spin_unlock(&vif->lock);516516-517517- vfree(queues);518518519519 xenvif_disconnect_ctrl(vif);520520 }
···78787979#define FUJITSU_LCD_N_LEVELS 880808181-#define ACPI_FUJITSU_CLASS "fujitsu"8282-#define ACPI_FUJITSU_HID "FUJ02B1"8383-#define ACPI_FUJITSU_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver"8484-#define ACPI_FUJITSU_DEVICE_NAME "Fujitsu FUJ02B1"8585-#define ACPI_FUJITSU_HOTKEY_HID "FUJ02E3"8686-#define ACPI_FUJITSU_HOTKEY_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver"8787-#define ACPI_FUJITSU_HOTKEY_DEVICE_NAME "Fujitsu FUJ02E3"8181+#define ACPI_FUJITSU_CLASS "fujitsu"8282+#define ACPI_FUJITSU_BL_HID "FUJ02B1"8383+#define ACPI_FUJITSU_BL_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver"8484+#define ACPI_FUJITSU_BL_DEVICE_NAME "Fujitsu FUJ02B1"8585+#define ACPI_FUJITSU_LAPTOP_HID "FUJ02E3"8686+#define ACPI_FUJITSU_LAPTOP_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver"8787+#define ACPI_FUJITSU_LAPTOP_DEVICE_NAME "Fujitsu FUJ02E3"88888989#define ACPI_FUJITSU_NOTIFY_CODE1 0x8090909191/* FUNC interface - command values */9292-#define FUNC_RFKILL 0x10009292+#define FUNC_FLAGS 0x10009393#define FUNC_LEDS 0x10019494#define FUNC_BUTTONS 0x10029595#define FUNC_BACKLIGHT 0x100496969797/* FUNC interface - responses */9898#define UNSUPPORTED_CMD 0x800000009999+100100+/* FUNC interface - status flags */101101+#define FLAG_RFKILL 0x020102102+#define FLAG_LID 0x100103103+#define FLAG_DOCK 0x20099104100105#if IS_ENABLED(CONFIG_LEDS_CLASS)101106/* FUNC interface - LED control */···141136#endif142137143138/* Device controlling the backlight and associated keys */144144-struct fujitsu_t {139139+struct fujitsu_bl {145140 acpi_handle acpi_handle;146141 struct acpi_device *dev;147142 struct input_dev *input;···155150 unsigned int brightness_level;156151};157152158158-static struct fujitsu_t *fujitsu;153153+static struct fujitsu_bl *fujitsu_bl;159154static int use_alt_lcd_levels = -1;160155static int disable_brightness_adjust = -1;161156162162-/* Device used to access other hotkeys on the laptop */163163-struct fujitsu_hotkey_t {157157+/* 
Device used to access hotkeys and other features on the laptop */158158+struct fujitsu_laptop {164159 acpi_handle acpi_handle;165160 struct acpi_device *dev;166161 struct input_dev *input;···168163 struct platform_device *pf_device;169164 struct kfifo fifo;170165 spinlock_t fifo_lock;171171- int rfkill_supported;172172- int rfkill_state;166166+ int flags_supported;167167+ int flags_state;173168 int logolamp_registered;174169 int kblamps_registered;175170 int radio_led_registered;176171 int eco_led_registered;177172};178173179179-static struct fujitsu_hotkey_t *fujitsu_hotkey;180180-181181-static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);174174+static struct fujitsu_laptop *fujitsu_laptop;182175183176#if IS_ENABLED(CONFIG_LEDS_CLASS)184177static enum led_brightness logolamp_get(struct led_classdev *cdev);···225222static u32 dbg_level = 0x03;226223#endif227224228228-static void acpi_fujitsu_notify(struct acpi_device *device, u32 event);229229-230225/* Fujitsu ACPI interface function */231226232227static int call_fext_func(int cmd, int arg0, int arg1, int arg2)···240239 unsigned long long value;241240 acpi_handle handle = NULL;242241243243- status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle);242242+ status = acpi_get_handle(fujitsu_laptop->acpi_handle, "FUNC", &handle);244243 if (ACPI_FAILURE(status)) {245244 vdbg_printk(FUJLAPTOP_DBG_ERROR,246245 "FUNC interface is not present\n");···301300 enum led_brightness brightness)302301{303302 if (brightness >= LED_FULL)304304- return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON);303303+ return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, RADIO_LED_ON);305304 else306306- return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);305305+ return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, 0x0);307306}308307309308static int eco_led_set(struct led_classdev *cdev,···347346{348347 enum led_brightness brightness = LED_OFF;349348350350- if (call_fext_func(FUNC_RFKILL, 
0x4, 0x0, 0x0) & RADIO_LED_ON)349349+ if (call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0) & RADIO_LED_ON)351350 brightness = LED_FULL;352351353352 return brightness;···374373 vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n",375374 level);376375377377- if (level < 0 || level >= fujitsu->max_brightness)376376+ if (level < 0 || level >= fujitsu_bl->max_brightness)378377 return -EINVAL;379378380380- status = acpi_get_handle(fujitsu->acpi_handle, "SBLL", &handle);379379+ status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBLL", &handle);381380 if (ACPI_FAILURE(status)) {382381 vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n");383382 return -ENODEV;···399398 vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n",400399 level);401400402402- if (level < 0 || level >= fujitsu->max_brightness)401401+ if (level < 0 || level >= fujitsu_bl->max_brightness)403402 return -EINVAL;404403405405- status = acpi_get_handle(fujitsu->acpi_handle, "SBL2", &handle);404404+ status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBL2", &handle);406405 if (ACPI_FAILURE(status)) {407406 vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n");408407 return -ENODEV;···422421423422 vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n");424423425425- status =426426- acpi_evaluate_integer(fujitsu->acpi_handle, "GBLL", NULL, &state);424424+ status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "GBLL", NULL,425425+ &state);427426 if (ACPI_FAILURE(status))428427 return 0;429428430430- fujitsu->brightness_level = state & 0x0fffffff;429429+ fujitsu_bl->brightness_level = state & 0x0fffffff;431430432431 if (state & 0x80000000)433433- fujitsu->brightness_changed = 1;432432+ fujitsu_bl->brightness_changed = 1;434433 else435435- fujitsu->brightness_changed = 0;434434+ fujitsu_bl->brightness_changed = 0;436435437437- return fujitsu->brightness_level;436436+ return fujitsu_bl->brightness_level;438437}439438440439static int get_max_brightness(void)···444443445444 
vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n");446445447447- status =448448- acpi_evaluate_integer(fujitsu->acpi_handle, "RBLL", NULL, &state);446446+ status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "RBLL", NULL,447447+ &state);449448 if (ACPI_FAILURE(status))450449 return -1;451450452452- fujitsu->max_brightness = state;451451+ fujitsu_bl->max_brightness = state;453452454454- return fujitsu->max_brightness;453453+ return fujitsu_bl->max_brightness;455454}456455457456/* Backlight device stuff */···484483 return ret;485484}486485487487-static const struct backlight_ops fujitsubl_ops = {486486+static const struct backlight_ops fujitsu_bl_ops = {488487 .get_brightness = bl_get_brightness,489488 .update_status = bl_update_status,490489};···512511513512 int ret;514513515515- ret = fujitsu->brightness_changed;514514+ ret = fujitsu_bl->brightness_changed;516515 if (ret < 0)517516 return ret;518517···540539 int level, ret;541540542541 if (sscanf(buf, "%i", &level) != 1543543- || (level < 0 || level >= fujitsu->max_brightness))542542+ || (level < 0 || level >= fujitsu_bl->max_brightness))544543 return -EINVAL;545544546545 if (use_alt_lcd_levels)···568567show_lid_state(struct device *dev,569568 struct device_attribute *attr, char *buf)570569{571571- if (!(fujitsu_hotkey->rfkill_supported & 0x100))570570+ if (!(fujitsu_laptop->flags_supported & FLAG_LID))572571 return sprintf(buf, "unknown\n");573573- if (fujitsu_hotkey->rfkill_state & 0x100)572572+ if (fujitsu_laptop->flags_state & FLAG_LID)574573 return sprintf(buf, "open\n");575574 else576575 return sprintf(buf, "closed\n");···580579show_dock_state(struct device *dev,581580 struct device_attribute *attr, char *buf)582581{583583- if (!(fujitsu_hotkey->rfkill_supported & 0x200))582582+ if (!(fujitsu_laptop->flags_supported & FLAG_DOCK))584583 return sprintf(buf, "unknown\n");585585- if (fujitsu_hotkey->rfkill_state & 0x200)584584+ if (fujitsu_laptop->flags_state & FLAG_DOCK)586585 return 
sprintf(buf, "docked\n");587586 else588587 return sprintf(buf, "undocked\n");···592591show_radios_state(struct device *dev,593592 struct device_attribute *attr, char *buf)594593{595595- if (!(fujitsu_hotkey->rfkill_supported & 0x20))594594+ if (!(fujitsu_laptop->flags_supported & FLAG_RFKILL))596595 return sprintf(buf, "unknown\n");597597- if (fujitsu_hotkey->rfkill_state & 0x20)596596+ if (fujitsu_laptop->flags_state & FLAG_RFKILL)598597 return sprintf(buf, "on\n");599598 else600599 return sprintf(buf, "killed\n");···608607static DEVICE_ATTR(dock, 0444, show_dock_state, ignore_store);609608static DEVICE_ATTR(radios, 0444, show_radios_state, ignore_store);610609611611-static struct attribute *fujitsupf_attributes[] = {610610+static struct attribute *fujitsu_pf_attributes[] = {612611 &dev_attr_brightness_changed.attr,613612 &dev_attr_max_brightness.attr,614613 &dev_attr_lcd_level.attr,···618617 NULL619618};620619621621-static struct attribute_group fujitsupf_attribute_group = {622622- .attrs = fujitsupf_attributes620620+static struct attribute_group fujitsu_pf_attribute_group = {621621+ .attrs = fujitsu_pf_attributes623622};624623625625-static struct platform_driver fujitsupf_driver = {624624+static struct platform_driver fujitsu_pf_driver = {626625 .driver = {627626 .name = "fujitsu-laptop",628627 }···631630static void __init dmi_check_cb_common(const struct dmi_system_id *id)632631{633632 pr_info("Identified laptop model '%s'\n", id->ident);634634- if (use_alt_lcd_levels == -1) {635635- if (acpi_has_method(NULL,636636- "\\_SB.PCI0.LPCB.FJEX.SBL2"))637637- use_alt_lcd_levels = 1;638638- else639639- use_alt_lcd_levels = 0;640640- vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as "641641- "%i\n", use_alt_lcd_levels);642642- }643633}644634645635static int __init dmi_check_cb_s6410(const struct dmi_system_id *id)646636{647637 dmi_check_cb_common(id);648648- fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */649649- fujitsu->keycode2 = KEY_HELP; /* "Mobility 
Center" */638638+ fujitsu_bl->keycode1 = KEY_SCREENLOCK; /* "Lock" */639639+ fujitsu_bl->keycode2 = KEY_HELP; /* "Mobility Center" */650640 return 1;651641}652642653643static int __init dmi_check_cb_s6420(const struct dmi_system_id *id)654644{655645 dmi_check_cb_common(id);656656- fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */657657- fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */646646+ fujitsu_bl->keycode1 = KEY_SCREENLOCK; /* "Lock" */647647+ fujitsu_bl->keycode2 = KEY_HELP; /* "Mobility Center" */658648 return 1;659649}660650661651static int __init dmi_check_cb_p8010(const struct dmi_system_id *id)662652{663653 dmi_check_cb_common(id);664664- fujitsu->keycode1 = KEY_HELP; /* "Support" */665665- fujitsu->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */666666- fujitsu->keycode4 = KEY_WWW; /* "Internet" */654654+ fujitsu_bl->keycode1 = KEY_HELP; /* "Support" */655655+ fujitsu_bl->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */656656+ fujitsu_bl->keycode4 = KEY_WWW; /* "Internet" */667657 return 1;668658}669659···685693686694/* ACPI device for LCD brightness control */687695688688-static int acpi_fujitsu_add(struct acpi_device *device)696696+static int acpi_fujitsu_bl_add(struct acpi_device *device)689697{690698 int state = 0;691699 struct input_dev *input;···694702 if (!device)695703 return -EINVAL;696704697697- fujitsu->acpi_handle = device->handle;698698- sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_DEVICE_NAME);705705+ fujitsu_bl->acpi_handle = device->handle;706706+ sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_BL_DEVICE_NAME);699707 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);700700- device->driver_data = fujitsu;708708+ device->driver_data = fujitsu_bl;701709702702- fujitsu->input = input = input_allocate_device();710710+ fujitsu_bl->input = input = input_allocate_device();703711 if (!input) {704712 error = -ENOMEM;705713 goto err_stop;706714 }707715708708- snprintf(fujitsu->phys, sizeof(fujitsu->phys),716716+ 
snprintf(fujitsu_bl->phys, sizeof(fujitsu_bl->phys),709717 "%s/video/input0", acpi_device_hid(device));710718711719 input->name = acpi_device_name(device);712712- input->phys = fujitsu->phys;720720+ input->phys = fujitsu_bl->phys;713721 input->id.bustype = BUS_HOST;714722 input->id.product = 0x06;715723 input->dev.parent = &device->dev;···722730 if (error)723731 goto err_free_input_dev;724732725725- error = acpi_bus_update_power(fujitsu->acpi_handle, &state);733733+ error = acpi_bus_update_power(fujitsu_bl->acpi_handle, &state);726734 if (error) {727735 pr_err("Error reading power state\n");728736 goto err_unregister_input_dev;···732740 acpi_device_name(device), acpi_device_bid(device),733741 !device->power.state ? "on" : "off");734742735735- fujitsu->dev = device;743743+ fujitsu_bl->dev = device;736744737745 if (acpi_has_method(device->handle, METHOD_NAME__INI)) {738746 vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");···740748 (acpi_evaluate_object741749 (device->handle, METHOD_NAME__INI, NULL, NULL)))742750 pr_err("_INI Method failed\n");751751+ }752752+753753+ if (use_alt_lcd_levels == -1) {754754+ if (acpi_has_method(NULL, "\\_SB.PCI0.LPCB.FJEX.SBL2"))755755+ use_alt_lcd_levels = 1;756756+ else757757+ use_alt_lcd_levels = 0;758758+ vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as %i\n",759759+ use_alt_lcd_levels);743760 }744761745762 /* do config (detect defaults) */···759758 use_alt_lcd_levels, disable_brightness_adjust);760759761760 if (get_max_brightness() <= 0)762762- fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS;761761+ fujitsu_bl->max_brightness = FUJITSU_LCD_N_LEVELS;763762 get_lcd_level();764763765764 return 0;···773772 return error;774773}775774776776-static int acpi_fujitsu_remove(struct acpi_device *device)775775+static int acpi_fujitsu_bl_remove(struct acpi_device *device)777776{778778- struct fujitsu_t *fujitsu = acpi_driver_data(device);779779- struct input_dev *input = fujitsu->input;777777+ struct fujitsu_bl *fujitsu_bl = 
acpi_driver_data(device);778778+ struct input_dev *input = fujitsu_bl->input;780779781780 input_unregister_device(input);782781783783- fujitsu->acpi_handle = NULL;782782+ fujitsu_bl->acpi_handle = NULL;784783785784 return 0;786785}787786788787/* Brightness notify */789788790790-static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)789789+static void acpi_fujitsu_bl_notify(struct acpi_device *device, u32 event)791790{792791 struct input_dev *input;793792 int keycode;794793 int oldb, newb;795794796796- input = fujitsu->input;795795+ input = fujitsu_bl->input;797796798797 switch (event) {799798 case ACPI_FUJITSU_NOTIFY_CODE1:800799 keycode = 0;801801- oldb = fujitsu->brightness_level;800800+ oldb = fujitsu_bl->brightness_level;802801 get_lcd_level();803803- newb = fujitsu->brightness_level;802802+ newb = fujitsu_bl->brightness_level;804803805804 vdbg_printk(FUJLAPTOP_DBG_TRACE,806805 "brightness button event [%i -> %i (%i)]\n",807807- oldb, newb, fujitsu->brightness_changed);806806+ oldb, newb, fujitsu_bl->brightness_changed);808807809808 if (oldb < newb) {810809 if (disable_brightness_adjust != 1) {···841840842841/* ACPI device for hotkey handling */843842844844-static int acpi_fujitsu_hotkey_add(struct acpi_device *device)843843+static int acpi_fujitsu_laptop_add(struct acpi_device *device)845844{846845 int result = 0;847846 int state = 0;···852851 if (!device)853852 return -EINVAL;854853855855- fujitsu_hotkey->acpi_handle = device->handle;854854+ fujitsu_laptop->acpi_handle = device->handle;856855 sprintf(acpi_device_name(device), "%s",857857- ACPI_FUJITSU_HOTKEY_DEVICE_NAME);856856+ ACPI_FUJITSU_LAPTOP_DEVICE_NAME);858857 sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);859859- device->driver_data = fujitsu_hotkey;858858+ device->driver_data = fujitsu_laptop;860859861860 /* kfifo */862862- spin_lock_init(&fujitsu_hotkey->fifo_lock);863863- error = kfifo_alloc(&fujitsu_hotkey->fifo, RINGBUFFERSIZE * sizeof(int),861861+ 
spin_lock_init(&fujitsu_laptop->fifo_lock);862862+ error = kfifo_alloc(&fujitsu_laptop->fifo, RINGBUFFERSIZE * sizeof(int),864863 GFP_KERNEL);865864 if (error) {866865 pr_err("kfifo_alloc failed\n");867866 goto err_stop;868867 }869868870870- fujitsu_hotkey->input = input = input_allocate_device();869869+ fujitsu_laptop->input = input = input_allocate_device();871870 if (!input) {872871 error = -ENOMEM;873872 goto err_free_fifo;874873 }875874876876- snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys),875875+ snprintf(fujitsu_laptop->phys, sizeof(fujitsu_laptop->phys),877876 "%s/video/input0", acpi_device_hid(device));878877879878 input->name = acpi_device_name(device);880880- input->phys = fujitsu_hotkey->phys;879879+ input->phys = fujitsu_laptop->phys;881880 input->id.bustype = BUS_HOST;882881 input->id.product = 0x06;883882 input->dev.parent = &device->dev;884883885884 set_bit(EV_KEY, input->evbit);886886- set_bit(fujitsu->keycode1, input->keybit);887887- set_bit(fujitsu->keycode2, input->keybit);888888- set_bit(fujitsu->keycode3, input->keybit);889889- set_bit(fujitsu->keycode4, input->keybit);890890- set_bit(fujitsu->keycode5, input->keybit);885885+ set_bit(fujitsu_bl->keycode1, input->keybit);886886+ set_bit(fujitsu_bl->keycode2, input->keybit);887887+ set_bit(fujitsu_bl->keycode3, input->keybit);888888+ set_bit(fujitsu_bl->keycode4, input->keybit);889889+ set_bit(fujitsu_bl->keycode5, input->keybit);891890 set_bit(KEY_TOUCHPAD_TOGGLE, input->keybit);892891 set_bit(KEY_UNKNOWN, input->keybit);893892···895894 if (error)896895 goto err_free_input_dev;897896898898- error = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);897897+ error = acpi_bus_update_power(fujitsu_laptop->acpi_handle, &state);899898 if (error) {900899 pr_err("Error reading power state\n");901900 goto err_unregister_input_dev;···905904 acpi_device_name(device), acpi_device_bid(device),906905 !device->power.state ? 
"on" : "off");907906908908- fujitsu_hotkey->dev = device;907907+ fujitsu_laptop->dev = device;909908910909 if (acpi_has_method(device->handle, METHOD_NAME__INI)) {911910 vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");···921920 ; /* No action, result is discarded */922921 vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i);923922924924- fujitsu_hotkey->rfkill_supported =925925- call_fext_func(FUNC_RFKILL, 0x0, 0x0, 0x0);923923+ fujitsu_laptop->flags_supported =924924+ call_fext_func(FUNC_FLAGS, 0x0, 0x0, 0x0);926925927926 /* Make sure our bitmask of supported functions is cleared if the928927 RFKILL function block is not implemented, like on the S7020. */929929- if (fujitsu_hotkey->rfkill_supported == UNSUPPORTED_CMD)930930- fujitsu_hotkey->rfkill_supported = 0;928928+ if (fujitsu_laptop->flags_supported == UNSUPPORTED_CMD)929929+ fujitsu_laptop->flags_supported = 0;931930932932- if (fujitsu_hotkey->rfkill_supported)933933- fujitsu_hotkey->rfkill_state =934934- call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);931931+ if (fujitsu_laptop->flags_supported)932932+ fujitsu_laptop->flags_state =933933+ call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0);935934936935 /* Suspect this is a keymap of the application panel, print it */937936 pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));938937939938#if IS_ENABLED(CONFIG_LEDS_CLASS)940939 if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {941941- result = led_classdev_register(&fujitsu->pf_device->dev,940940+ result = led_classdev_register(&fujitsu_bl->pf_device->dev,942941 &logolamp_led);943942 if (result == 0) {944944- fujitsu_hotkey->logolamp_registered = 1;943943+ fujitsu_laptop->logolamp_registered = 1;945944 } else {946945 pr_err("Could not register LED handler for logo lamp, error %i\n",947946 result);···950949951950 if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) &&952951 (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) {953953- result = 
led_classdev_register(&fujitsu->pf_device->dev,952952+ result = led_classdev_register(&fujitsu_bl->pf_device->dev,954953 &kblamps_led);955954 if (result == 0) {956956- fujitsu_hotkey->kblamps_registered = 1;955955+ fujitsu_laptop->kblamps_registered = 1;957956 } else {958957 pr_err("Could not register LED handler for keyboard lamps, error %i\n",959958 result);···967966 * that an RF LED is present.968967 */969968 if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {970970- result = led_classdev_register(&fujitsu->pf_device->dev,969969+ result = led_classdev_register(&fujitsu_bl->pf_device->dev,971970 &radio_led);972971 if (result == 0) {973973- fujitsu_hotkey->radio_led_registered = 1;972972+ fujitsu_laptop->radio_led_registered = 1;974973 } else {975974 pr_err("Could not register LED handler for radio LED, error %i\n",976975 result);···984983 */985984 if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) &&986985 (call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) {987987- result = led_classdev_register(&fujitsu->pf_device->dev,986986+ result = led_classdev_register(&fujitsu_bl->pf_device->dev,988987 &eco_led);989988 if (result == 0) {990990- fujitsu_hotkey->eco_led_registered = 1;989989+ fujitsu_laptop->eco_led_registered = 1;991990 } else {992991 pr_err("Could not register LED handler for eco LED, error %i\n",993992 result);···10031002err_free_input_dev:10041003 input_free_device(input);10051004err_free_fifo:10061006- kfifo_free(&fujitsu_hotkey->fifo);10051005+ kfifo_free(&fujitsu_laptop->fifo);10071006err_stop:10081007 return error;10091008}1010100910111011-static int acpi_fujitsu_hotkey_remove(struct acpi_device *device)10101010+static int acpi_fujitsu_laptop_remove(struct acpi_device *device)10121011{10131013- struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device);10141014- struct input_dev *input = fujitsu_hotkey->input;10121012+ struct fujitsu_laptop *fujitsu_laptop = acpi_driver_data(device);10131013+ struct input_dev 
*input = fujitsu_laptop->input;1015101410161015#if IS_ENABLED(CONFIG_LEDS_CLASS)10171017- if (fujitsu_hotkey->logolamp_registered)10161016+ if (fujitsu_laptop->logolamp_registered)10181017 led_classdev_unregister(&logolamp_led);1019101810201020- if (fujitsu_hotkey->kblamps_registered)10191019+ if (fujitsu_laptop->kblamps_registered)10211020 led_classdev_unregister(&kblamps_led);1022102110231023- if (fujitsu_hotkey->radio_led_registered)10221022+ if (fujitsu_laptop->radio_led_registered)10241023 led_classdev_unregister(&radio_led);1025102410261026- if (fujitsu_hotkey->eco_led_registered)10251025+ if (fujitsu_laptop->eco_led_registered)10271026 led_classdev_unregister(&eco_led);10281027#endif1029102810301029 input_unregister_device(input);1031103010321032- kfifo_free(&fujitsu_hotkey->fifo);10311031+ kfifo_free(&fujitsu_laptop->fifo);1033103210341034- fujitsu_hotkey->acpi_handle = NULL;10331033+ fujitsu_laptop->acpi_handle = NULL;1035103410361035 return 0;10371036}1038103710391039-static void acpi_fujitsu_hotkey_press(int keycode)10381038+static void acpi_fujitsu_laptop_press(int keycode)10401039{10411041- struct input_dev *input = fujitsu_hotkey->input;10401040+ struct input_dev *input = fujitsu_laptop->input;10421041 int status;1043104210441044- status = kfifo_in_locked(&fujitsu_hotkey->fifo,10431043+ status = kfifo_in_locked(&fujitsu_laptop->fifo,10451044 (unsigned char *)&keycode, sizeof(keycode),10461046- &fujitsu_hotkey->fifo_lock);10451045+ &fujitsu_laptop->fifo_lock);10471046 if (status != sizeof(keycode)) {10481047 vdbg_printk(FUJLAPTOP_DBG_WARN,10491048 "Could not push keycode [0x%x]\n", keycode);···10551054 "Push keycode into ringbuffer [%d]\n", keycode);10561055}1057105610581058-static void acpi_fujitsu_hotkey_release(void)10571057+static void acpi_fujitsu_laptop_release(void)10591058{10601060- struct input_dev *input = fujitsu_hotkey->input;10591059+ struct input_dev *input = fujitsu_laptop->input;10611060 int keycode, status;1062106110631062 while (true) 
{10641064- status = kfifo_out_locked(&fujitsu_hotkey->fifo,10631063+ status = kfifo_out_locked(&fujitsu_laptop->fifo,10651064 (unsigned char *)&keycode,10661065 sizeof(keycode),10671067- &fujitsu_hotkey->fifo_lock);10661066+ &fujitsu_laptop->fifo_lock);10681067 if (status != sizeof(keycode))10691068 return;10701069 input_report_key(input, keycode, 0);···10741073 }10751074}1076107510771077-static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)10761076+static void acpi_fujitsu_laptop_notify(struct acpi_device *device, u32 event)10781077{10791078 struct input_dev *input;10801079 int keycode;10811080 unsigned int irb = 1;10821081 int i;1083108210841084- input = fujitsu_hotkey->input;10831083+ input = fujitsu_laptop->input;1085108410861085 if (event != ACPI_FUJITSU_NOTIFY_CODE1) {10871086 keycode = KEY_UNKNOWN;···10941093 return;10951094 }1096109510971097- if (fujitsu_hotkey->rfkill_supported)10981098- fujitsu_hotkey->rfkill_state =10991099- call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);10961096+ if (fujitsu_laptop->flags_supported)10971097+ fujitsu_laptop->flags_state =10981098+ call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0);1100109911011100 i = 0;11021101 while ((irb =···11041103 && (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) {11051104 switch (irb & 0x4ff) {11061105 case KEY1_CODE:11071107- keycode = fujitsu->keycode1;11061106+ keycode = fujitsu_bl->keycode1;11081107 break;11091108 case KEY2_CODE:11101110- keycode = fujitsu->keycode2;11091109+ keycode = fujitsu_bl->keycode2;11111110 break;11121111 case KEY3_CODE:11131113- keycode = fujitsu->keycode3;11121112+ keycode = fujitsu_bl->keycode3;11141113 break;11151114 case KEY4_CODE:11161116- keycode = fujitsu->keycode4;11151115+ keycode = fujitsu_bl->keycode4;11171116 break;11181117 case KEY5_CODE:11191119- keycode = fujitsu->keycode5;11181118+ keycode = fujitsu_bl->keycode5;11201119 break;11211120 case 0:11221121 keycode = 0;···11291128 }1130112911311130 if (keycode > 0)11321132- 
acpi_fujitsu_hotkey_press(keycode);11311131+ acpi_fujitsu_laptop_press(keycode);11331132 else if (keycode == 0)11341134- acpi_fujitsu_hotkey_release();11331133+ acpi_fujitsu_laptop_release();11351134 }1136113511371136 /* On some models (first seen on the Skylake-based Lifebook11381137 * E736/E746/E756), the touchpad toggle hotkey (Fn+F4) is11391139- * handled in software; its state is queried using FUNC_RFKILL11381138+ * handled in software; its state is queried using FUNC_FLAGS11401139 */11411141- if ((fujitsu_hotkey->rfkill_supported & BIT(26)) &&11421142- (call_fext_func(FUNC_RFKILL, 0x1, 0x0, 0x0) & BIT(26))) {11401140+ if ((fujitsu_laptop->flags_supported & BIT(26)) &&11411141+ (call_fext_func(FUNC_FLAGS, 0x1, 0x0, 0x0) & BIT(26))) {11431142 keycode = KEY_TOUCHPAD_TOGGLE;11441143 input_report_key(input, keycode, 1);11451144 input_sync(input);···1151115011521151/* Initialization */1153115211541154-static const struct acpi_device_id fujitsu_device_ids[] = {11551155- {ACPI_FUJITSU_HID, 0},11531153+static const struct acpi_device_id fujitsu_bl_device_ids[] = {11541154+ {ACPI_FUJITSU_BL_HID, 0},11561155 {"", 0},11571156};1158115711591159-static struct acpi_driver acpi_fujitsu_driver = {11601160- .name = ACPI_FUJITSU_DRIVER_NAME,11581158+static struct acpi_driver acpi_fujitsu_bl_driver = {11591159+ .name = ACPI_FUJITSU_BL_DRIVER_NAME,11611160 .class = ACPI_FUJITSU_CLASS,11621162- .ids = fujitsu_device_ids,11611161+ .ids = fujitsu_bl_device_ids,11631162 .ops = {11641164- .add = acpi_fujitsu_add,11651165- .remove = acpi_fujitsu_remove,11661166- .notify = acpi_fujitsu_notify,11631163+ .add = acpi_fujitsu_bl_add,11641164+ .remove = acpi_fujitsu_bl_remove,11651165+ .notify = acpi_fujitsu_bl_notify,11671166 },11681167};1169116811701170-static const struct acpi_device_id fujitsu_hotkey_device_ids[] = {11711171- {ACPI_FUJITSU_HOTKEY_HID, 0},11691169+static const struct acpi_device_id fujitsu_laptop_device_ids[] = {11701170+ {ACPI_FUJITSU_LAPTOP_HID, 0},11721171 {"", 
0},11731172};1174117311751175-static struct acpi_driver acpi_fujitsu_hotkey_driver = {11761176- .name = ACPI_FUJITSU_HOTKEY_DRIVER_NAME,11741174+static struct acpi_driver acpi_fujitsu_laptop_driver = {11751175+ .name = ACPI_FUJITSU_LAPTOP_DRIVER_NAME,11771176 .class = ACPI_FUJITSU_CLASS,11781178- .ids = fujitsu_hotkey_device_ids,11771177+ .ids = fujitsu_laptop_device_ids,11791178 .ops = {11801180- .add = acpi_fujitsu_hotkey_add,11811181- .remove = acpi_fujitsu_hotkey_remove,11821182- .notify = acpi_fujitsu_hotkey_notify,11791179+ .add = acpi_fujitsu_laptop_add,11801180+ .remove = acpi_fujitsu_laptop_remove,11811181+ .notify = acpi_fujitsu_laptop_notify,11831182 },11841183};1185118411861185static const struct acpi_device_id fujitsu_ids[] __used = {11871187- {ACPI_FUJITSU_HID, 0},11881188- {ACPI_FUJITSU_HOTKEY_HID, 0},11861186+ {ACPI_FUJITSU_BL_HID, 0},11871187+ {ACPI_FUJITSU_LAPTOP_HID, 0},11891188 {"", 0}11901189};11911190MODULE_DEVICE_TABLE(acpi, fujitsu_ids);1192119111931192static int __init fujitsu_init(void)11941193{11951195- int ret, result, max_brightness;11941194+ int ret, max_brightness;1196119511971196 if (acpi_disabled)11981197 return -ENODEV;1199119812001200- fujitsu = kzalloc(sizeof(struct fujitsu_t), GFP_KERNEL);12011201- if (!fujitsu)11991199+ fujitsu_bl = kzalloc(sizeof(struct fujitsu_bl), GFP_KERNEL);12001200+ if (!fujitsu_bl)12021201 return -ENOMEM;12031203- fujitsu->keycode1 = KEY_PROG1;12041204- fujitsu->keycode2 = KEY_PROG2;12051205- fujitsu->keycode3 = KEY_PROG3;12061206- fujitsu->keycode4 = KEY_PROG4;12071207- fujitsu->keycode5 = KEY_RFKILL;12021202+ fujitsu_bl->keycode1 = KEY_PROG1;12031203+ fujitsu_bl->keycode2 = KEY_PROG2;12041204+ fujitsu_bl->keycode3 = KEY_PROG3;12051205+ fujitsu_bl->keycode4 = KEY_PROG4;12061206+ fujitsu_bl->keycode5 = KEY_RFKILL;12081207 dmi_check_system(fujitsu_dmi_table);1209120812101210- result = acpi_bus_register_driver(&acpi_fujitsu_driver);12111211- if (result < 0) {12121212- ret = -ENODEV;12091209+ ret = 
acpi_bus_register_driver(&acpi_fujitsu_bl_driver);12101210+ if (ret)12131211 goto fail_acpi;12141214- }1215121212161213 /* Register platform stuff */1217121412181218- fujitsu->pf_device = platform_device_alloc("fujitsu-laptop", -1);12191219- if (!fujitsu->pf_device) {12151215+ fujitsu_bl->pf_device = platform_device_alloc("fujitsu-laptop", -1);12161216+ if (!fujitsu_bl->pf_device) {12201217 ret = -ENOMEM;12211218 goto fail_platform_driver;12221219 }1223122012241224- ret = platform_device_add(fujitsu->pf_device);12211221+ ret = platform_device_add(fujitsu_bl->pf_device);12251222 if (ret)12261223 goto fail_platform_device1;1227122412281225 ret =12291229- sysfs_create_group(&fujitsu->pf_device->dev.kobj,12301230- &fujitsupf_attribute_group);12261226+ sysfs_create_group(&fujitsu_bl->pf_device->dev.kobj,12271227+ &fujitsu_pf_attribute_group);12311228 if (ret)12321229 goto fail_platform_device2;12331230···12351236 struct backlight_properties props;1236123712371238 memset(&props, 0, sizeof(struct backlight_properties));12381238- max_brightness = fujitsu->max_brightness;12391239+ max_brightness = fujitsu_bl->max_brightness;12391240 props.type = BACKLIGHT_PLATFORM;12401241 props.max_brightness = max_brightness - 1;12411241- fujitsu->bl_device = backlight_device_register("fujitsu-laptop",12421242- NULL, NULL,12431243- &fujitsubl_ops,12441244- &props);12451245- if (IS_ERR(fujitsu->bl_device)) {12461246- ret = PTR_ERR(fujitsu->bl_device);12471247- fujitsu->bl_device = NULL;12421242+ fujitsu_bl->bl_device = backlight_device_register("fujitsu-laptop",12431243+ NULL, NULL,12441244+ &fujitsu_bl_ops,12451245+ &props);12461246+ if (IS_ERR(fujitsu_bl->bl_device)) {12471247+ ret = PTR_ERR(fujitsu_bl->bl_device);12481248+ fujitsu_bl->bl_device = NULL;12481249 goto fail_sysfs_group;12491250 }12501250- fujitsu->bl_device->props.brightness = fujitsu->brightness_level;12511251+ fujitsu_bl->bl_device->props.brightness = fujitsu_bl->brightness_level;12511252 }1252125312531253- ret = 
platform_driver_register(&fujitsupf_driver);12541254+ ret = platform_driver_register(&fujitsu_pf_driver);12541255 if (ret)12551256 goto fail_backlight;1256125712571257- /* Register hotkey driver */12581258+ /* Register laptop driver */1258125912591259- fujitsu_hotkey = kzalloc(sizeof(struct fujitsu_hotkey_t), GFP_KERNEL);12601260- if (!fujitsu_hotkey) {12601260+ fujitsu_laptop = kzalloc(sizeof(struct fujitsu_laptop), GFP_KERNEL);12611261+ if (!fujitsu_laptop) {12611262 ret = -ENOMEM;12621262- goto fail_hotkey;12631263+ goto fail_laptop;12631264 }1264126512651265- result = acpi_bus_register_driver(&acpi_fujitsu_hotkey_driver);12661266- if (result < 0) {12671267- ret = -ENODEV;12681268- goto fail_hotkey1;12691269- }12661266+ ret = acpi_bus_register_driver(&acpi_fujitsu_laptop_driver);12671267+ if (ret)12681268+ goto fail_laptop1;1270126912711270 /* Sync backlight power status (needs FUJ02E3 device, hence deferred) */12721271 if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {12731272 if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3)12741274- fujitsu->bl_device->props.power = FB_BLANK_POWERDOWN;12731273+ fujitsu_bl->bl_device->props.power = FB_BLANK_POWERDOWN;12751274 else12761276- fujitsu->bl_device->props.power = FB_BLANK_UNBLANK;12751275+ fujitsu_bl->bl_device->props.power = FB_BLANK_UNBLANK;12771276 }1278127712791278 pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");1280127912811280 return 0;1282128112831283-fail_hotkey1:12841284- kfree(fujitsu_hotkey);12851285-fail_hotkey:12861286- platform_driver_unregister(&fujitsupf_driver);12821282+fail_laptop1:12831283+ kfree(fujitsu_laptop);12841284+fail_laptop:12851285+ platform_driver_unregister(&fujitsu_pf_driver);12871286fail_backlight:12881288- backlight_device_unregister(fujitsu->bl_device);12871287+ backlight_device_unregister(fujitsu_bl->bl_device);12891288fail_sysfs_group:12901290- sysfs_remove_group(&fujitsu->pf_device->dev.kobj,12911291- &fujitsupf_attribute_group);12891289+ 
sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj,12901290+ &fujitsu_pf_attribute_group);12921291fail_platform_device2:12931293- platform_device_del(fujitsu->pf_device);12921292+ platform_device_del(fujitsu_bl->pf_device);12941293fail_platform_device1:12951295- platform_device_put(fujitsu->pf_device);12941294+ platform_device_put(fujitsu_bl->pf_device);12961295fail_platform_driver:12971297- acpi_bus_unregister_driver(&acpi_fujitsu_driver);12961296+ acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);12981297fail_acpi:12991299- kfree(fujitsu);12981298+ kfree(fujitsu_bl);1300129913011300 return ret;13021301}1303130213041303static void __exit fujitsu_cleanup(void)13051304{13061306- acpi_bus_unregister_driver(&acpi_fujitsu_hotkey_driver);13051305+ acpi_bus_unregister_driver(&acpi_fujitsu_laptop_driver);1307130613081308- kfree(fujitsu_hotkey);13071307+ kfree(fujitsu_laptop);1309130813101310- platform_driver_unregister(&fujitsupf_driver);13091309+ platform_driver_unregister(&fujitsu_pf_driver);1311131013121312- backlight_device_unregister(fujitsu->bl_device);13111311+ backlight_device_unregister(fujitsu_bl->bl_device);1313131213141314- sysfs_remove_group(&fujitsu->pf_device->dev.kobj,13151315- &fujitsupf_attribute_group);13131313+ sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj,13141314+ &fujitsu_pf_attribute_group);1316131513171317- platform_device_unregister(fujitsu->pf_device);13161316+ platform_device_unregister(fujitsu_bl->pf_device);1318131713191319- acpi_bus_unregister_driver(&acpi_fujitsu_driver);13181318+ acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);1320131913211321- kfree(fujitsu);13201320+ kfree(fujitsu_bl);1322132113231322 pr_info("driver unloaded\n");13241323}···13381341MODULE_DESCRIPTION("Fujitsu laptop extras 
support");13391342MODULE_VERSION(FUJITSU_DRIVER_VERSION);13401343MODULE_LICENSE("GPL");13411341-13421342-MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");13431343-MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*");13441344-MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
+16-3
drivers/scsi/Kconfig
···12411241 tristate "Emulex LightPulse Fibre Channel Support"12421242 depends on PCI && SCSI12431243 depends on SCSI_FC_ATTRS12441244- depends on NVME_FC && NVME_TARGET_FC12451244 select CRC_T10DIF12461246- help12451245+ ---help---12471246 This lpfc driver supports the Emulex LightPulse12481247 Family of Fibre Channel PCI host adapters.1249124812501249config SCSI_LPFC_DEBUG_FS12511250 bool "Emulex LightPulse Fibre Channel debugfs Support"12521251 depends on SCSI_LPFC && DEBUG_FS12531253- help12521252+ ---help---12541253 This makes debugging information from the lpfc driver12551254 available via the debugfs filesystem.12551255+12561256+config LPFC_NVME_INITIATOR12571257+ bool "Emulex LightPulse Fibre Channel NVME Initiator Support"12581258+ depends on SCSI_LPFC && NVME_FC12591259+ ---help---12601260+ This enables NVME Initiator support in the Emulex lpfc driver.12611261+12621262+config LPFC_NVME_TARGET12631263+ bool "Emulex LightPulse Fibre Channel NVME Initiator Support"12641264+ depends on SCSI_LPFC && NVME_TARGET_FC12651265+ ---help---12661266+ This enables NVME Target support in the Emulex lpfc driver.12671267+ Target enablement must still be enabled on a per adapter12681268+ basis by module parameters.1256126912571270config SCSI_SIM71012581271 tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
···561561 WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);562562 task->state = state;563563564564- if (!list_empty(&task->running))564564+ spin_lock_bh(&conn->taskqueuelock);565565+ if (!list_empty(&task->running)) {566566+ pr_debug_once("%s while task on list", __func__);565567 list_del_init(&task->running);568568+ }569569+ spin_unlock_bh(&conn->taskqueuelock);566570567571 if (conn->task == task)568572 conn->task = NULL;···788784 if (session->tt->xmit_task(task))789785 goto free_task;790786 } else {787787+ spin_lock_bh(&conn->taskqueuelock);791788 list_add_tail(&task->running, &conn->mgmtqueue);789789+ spin_unlock_bh(&conn->taskqueuelock);792790 iscsi_conn_queue_work(conn);793791 }794792···14811475 * this may be on the requeue list already if the xmit_task callout14821476 * is handling the r2ts while we are adding new ones14831477 */14781478+ spin_lock_bh(&conn->taskqueuelock);14841479 if (list_empty(&task->running))14851480 list_add_tail(&task->running, &conn->requeue);14811481+ spin_unlock_bh(&conn->taskqueuelock);14861482 iscsi_conn_queue_work(conn);14871483}14881484EXPORT_SYMBOL_GPL(iscsi_requeue_task);···15211513 * only have one nop-out as a ping from us and targets should not15221514 * overflow us with nop-ins15231515 */15161516+ spin_lock_bh(&conn->taskqueuelock);15241517check_mgmt:15251518 while (!list_empty(&conn->mgmtqueue)) {15261519 conn->task = list_entry(conn->mgmtqueue.next,15271520 struct iscsi_task, running);15281521 list_del_init(&conn->task->running);15221522+ spin_unlock_bh(&conn->taskqueuelock);15291523 if (iscsi_prep_mgmt_task(conn, conn->task)) {15301524 /* regular RX path uses back_lock */15311525 spin_lock_bh(&conn->session->back_lock);15321526 __iscsi_put_task(conn->task);15331527 spin_unlock_bh(&conn->session->back_lock);15341528 conn->task = NULL;15291529+ spin_lock_bh(&conn->taskqueuelock);15351530 continue;15361531 }15371532 rc = iscsi_xmit_task(conn);15381533 if (rc)15391534 goto done;15351535+ 
spin_lock_bh(&conn->taskqueuelock);15401536 }1541153715421538 /* process pending command queue */···15481536 conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,15491537 running);15501538 list_del_init(&conn->task->running);15391539+ spin_unlock_bh(&conn->taskqueuelock);15511540 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {15521541 fail_scsi_task(conn->task, DID_IMM_RETRY);15421542+ spin_lock_bh(&conn->taskqueuelock);15531543 continue;15541544 }15551545 rc = iscsi_prep_scsi_cmd_pdu(conn->task);15561546 if (rc) {15571547 if (rc == -ENOMEM || rc == -EACCES) {15481548+ spin_lock_bh(&conn->taskqueuelock);15581549 list_add_tail(&conn->task->running,15591550 &conn->cmdqueue);15601551 conn->task = NULL;15521552+ spin_unlock_bh(&conn->taskqueuelock);15611553 goto done;15621554 } else15631555 fail_scsi_task(conn->task, DID_ABORT);15561556+ spin_lock_bh(&conn->taskqueuelock);15641557 continue;15651558 }15661559 rc = iscsi_xmit_task(conn);···15761559 * we need to check the mgmt queue for nops that need to15771560 * be sent to aviod starvation15781561 */15621562+ spin_lock_bh(&conn->taskqueuelock);15791563 if (!list_empty(&conn->mgmtqueue))15801564 goto check_mgmt;15811565 }···15961578 conn->task = task;15971579 list_del_init(&conn->task->running);15981580 conn->task->state = ISCSI_TASK_RUNNING;15811581+ spin_unlock_bh(&conn->taskqueuelock);15991582 rc = iscsi_xmit_task(conn);16001583 if (rc)16011584 goto done;15851585+ spin_lock_bh(&conn->taskqueuelock);16021586 if (!list_empty(&conn->mgmtqueue))16031587 goto check_mgmt;16041588 }15891589+ spin_unlock_bh(&conn->taskqueuelock);16051590 spin_unlock_bh(&conn->session->frwd_lock);16061591 return -ENODATA;16071592···17601739 goto prepd_reject;17611740 }17621741 } else {17421742+ spin_lock_bh(&conn->taskqueuelock);17631743 list_add_tail(&task->running, &conn->cmdqueue);17441744+ spin_unlock_bh(&conn->taskqueuelock);17641745 iscsi_conn_queue_work(conn);17651746 }17661747···29202897 
INIT_LIST_HEAD(&conn->mgmtqueue);29212898 INIT_LIST_HEAD(&conn->cmdqueue);29222899 INIT_LIST_HEAD(&conn->requeue);29002900+ spin_lock_init(&conn->taskqueuelock);29232901 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);2924290229252903 /* allocate login_task used for the login/text sequences */
+3-1
drivers/scsi/lpfc/lpfc.h
···9999#define FC_MAX_ADPTMSG 64100100101101#define MAX_HBAEVT 32102102+#define MAX_HBAS_NO_RESET 16102103103104/* Number of MSI-X vectors the driver uses */104105#define LPFC_MSIX_VECTORS 2105106106107/* lpfc wait event data ready flag */107107-#define LPFC_DATA_READY (1<<0)108108+#define LPFC_DATA_READY 0 /* bit 0 */108109109110/* queue dump line buffer size */110111#define LPFC_LBUF_SZ 128···693692 * capability694693 */695694#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */695695+#define NVME_XRI_ABORT_EVENT 0x100000696696697697 uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/698698 struct lpfc_dmabuf slim2p;
+8-1
drivers/scsi/lpfc/lpfc_attr.c
···30103010static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,30113011 lpfc_poll_show, lpfc_poll_store);3012301230133013+int lpfc_no_hba_reset_cnt;30143014+unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {30153015+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};30163016+module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);30173017+MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");30183018+30133019LPFC_ATTR(sli_mode, 0, 0, 3,30143020 "SLI mode selector:"30153021 " 0 - auto (SLI-3 if supported),"···44574451 return -EINVAL;4458445244594453 phba->cfg_fcp_imax = (uint32_t)val;44604460- for (i = 0; i < phba->io_channel_irqs; i++)44544454+44554455+ for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)44614456 lpfc_modify_hba_eq_delay(phba, i);4462445744634458 return strlen(buf);
+3-1
drivers/scsi/lpfc/lpfc_crtn.h
···384384extern struct device_attribute *lpfc_hba_attrs[];385385extern struct device_attribute *lpfc_vport_attrs[];386386extern struct scsi_host_template lpfc_template;387387-extern struct scsi_host_template lpfc_template_s3;387387+extern struct scsi_host_template lpfc_template_no_hr;388388extern struct scsi_host_template lpfc_template_nvme;389389extern struct scsi_host_template lpfc_vport_template;390390extern struct fc_function_template lpfc_transport_functions;···554554 struct lpfc_wcqe_complete *abts_cmpl);555555extern int lpfc_enable_nvmet_cnt;556556extern unsigned long long lpfc_enable_nvmet[];557557+extern int lpfc_no_hba_reset_cnt;558558+extern unsigned long lpfc_no_hba_reset[];
···11+12/*******************************************************************23 * This file is part of the Emulex Linux Device Driver for *34 * Fibre Channel Host Bus Adapters. *···953952 start_sglq = sglq;954953 while (!found) {955954 if (!sglq)956956- return NULL;955955+ break;957956 if (ndlp && ndlp->active_rrqs_xri_bitmap &&958957 test_bit(sglq->sli4_lxritag,959958 ndlp->active_rrqs_xri_bitmap)) {···1221412213}12215122141221612215/**1221612216+ * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event1221712217+ * @phba: pointer to lpfc hba data structure.1221812218+ *1221912219+ * This routine is invoked by the worker thread to process all the pending1222012220+ * SLI4 NVME abort XRI events.1222112221+ **/1222212222+void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)1222312223+{1222412224+ struct lpfc_cq_event *cq_event;1222512225+1222612226+ /* First, declare the fcp xri abort event has been handled */1222712227+ spin_lock_irq(&phba->hbalock);1222812228+ phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;1222912229+ spin_unlock_irq(&phba->hbalock);1223012230+ /* Now, handle all the fcp xri abort events */1223112231+ while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {1223212232+ /* Get the first event from the head of the event queue */1223312233+ spin_lock_irq(&phba->hbalock);1223412234+ list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,1223512235+ cq_event, struct lpfc_cq_event, list);1223612236+ spin_unlock_irq(&phba->hbalock);1223712237+ /* Notify aborted XRI for NVME work queue */1223812238+ if (phba->nvmet_support) {1223912239+ lpfc_sli4_nvmet_xri_aborted(phba,1224012240+ &cq_event->cqe.wcqe_axri);1224112241+ } else {1224212242+ lpfc_sli4_nvme_xri_aborted(phba,1224312243+ &cq_event->cqe.wcqe_axri);1224412244+ }1224512245+ /* Free the event processed back to the free pool */1224612246+ lpfc_sli4_cq_event_release(phba, cq_event);1224712247+ }1224812248+}1224912249+1225012250+/**1221712251 * 
lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event1221812252 * @phba: pointer to lpfc hba data structure.1221912253 *···1274512709 spin_unlock_irqrestore(&phba->hbalock, iflags);1274612710 workposted = true;1274712711 break;1271212712+ case LPFC_NVME:1271312713+ spin_lock_irqsave(&phba->hbalock, iflags);1271412714+ list_add_tail(&cq_event->list,1271512715+ &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);1271612716+ /* Set the nvme xri abort event flag */1271712717+ phba->hba_flag |= NVME_XRI_ABORT_EVENT;1271812718+ spin_unlock_irqrestore(&phba->hbalock, iflags);1271912719+ workposted = true;1272012720+ break;1274812721 default:1274912722 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,1275012750- "0603 Invalid work queue CQE subtype (x%x)\n",1275112751- cq->subtype);1272312723+ "0603 Invalid CQ subtype %d: "1272412724+ "%08x %08x %08x %08x\n",1272512725+ cq->subtype, wcqe->word0, wcqe->parameter,1272612726+ wcqe->word2, wcqe->word3);1272712727+ lpfc_sli4_cq_event_release(phba, cq_event);1275212728 workposted = false;1275312729 break;1275412730 }···1387513827 * @startq: The starting FCP EQ to modify1387613828 *1387713829 * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA.1383013830+ * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be1383113831+ * updated in one mailbox command.1387813832 *1387913833 * The @phba struct is used to send mailbox command to HBA. 
The @startq1388013834 * is used to get the starting FCP EQ to change.···1392913879 eq_delay->u.request.eq[cnt].phase = 0;1393013880 eq_delay->u.request.eq[cnt].delay_multi = dmult;1393113881 cnt++;1393213932- if (cnt >= LPFC_MAX_EQ_DELAY)1388213882+ if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT)1393313883 break;1393413884 }1393513885 eq_delay->u.request.num_eq = cnt;···1523515185 drq = drqp[idx];1523615186 cq = cqp[idx];15237151871523815238- if (hrq->entry_count != drq->entry_count) {1523915239- status = -EINVAL;1524015240- goto out;1524115241- }1524215242-1524315188 /* sanity check on queue memory */1524415189 if (!hrq || !drq || !cq) {1524515190 status = -ENODEV;1519115191+ goto out;1519215192+ }1519315193+1519415194+ if (hrq->entry_count != drq->entry_count) {1519515195+ status = -EINVAL;1524615196 goto out;1524715197 }1524815198
···2020 * included with this package. *2121 *******************************************************************/22222323-#define LPFC_DRIVER_VERSION "11.2.0.7"2323+#define LPFC_DRIVER_VERSION "11.2.0.10"2424#define LPFC_DRIVER_NAME "lpfc"25252626/* Used for SLI 2/3 */
···28592859 sas_device_priv_data->sas_target->handle);28602860 sas_device_priv_data->block = 1;2861286128622862- r = scsi_internal_device_block(sdev);28622862+ r = scsi_internal_device_block(sdev, false);28632863 if (r == -EINVAL)28642864 sdev_printk(KERN_WARNING, sdev,28652865 "device_block failed with return(%d) for handle(0x%04x)\n",···28952895 "performing a block followed by an unblock\n",28962896 r, sas_device_priv_data->sas_target->handle);28972897 sas_device_priv_data->block = 1;28982898- r = scsi_internal_device_block(sdev);28982898+ r = scsi_internal_device_block(sdev, false);28992899 if (r)29002900 sdev_printk(KERN_WARNING, sdev, "retried device_block "29012901 "failed with return(%d) for handle(0x%04x)\n",···46774677 struct MPT3SAS_DEVICE *sas_device_priv_data;46784678 u32 response_code = 0;46794679 unsigned long flags;46804680- unsigned int sector_sz;4681468046824681 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);46834682···47414742 }4742474347434744 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);47444744-47454745- /* In case of bogus fw or device, we could end up having47464746- * unaligned partial completion. We can force alignment here,47474747- * then scsi-ml does not need to handle this misbehavior.47484748- */47494749- sector_sz = scmd->device->sector_size;47504750- if (unlikely(!blk_rq_is_passthrough(scmd->request) && sector_sz &&47514751- xfer_cnt % sector_sz)) {47524752- sdev_printk(KERN_INFO, scmd->device,47534753- "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",47544754- xfer_cnt, sector_sz);47554755- xfer_cnt = round_down(xfer_cnt, sector_sz);47564756- }47574757-47584745 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);47594746 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)47604747 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
···27072707 "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size);27082708 ql_dbg(level, vha, id,27092709 "----- -----------------------------------------------\n");27102710- for (cnt = 0; cnt < size; cnt++, buf++) {27112711- if (cnt % 16 == 0)27122712- ql_dbg(level, vha, id, "%04x:", cnt & ~0xFU);27132713- printk(" %02x", *buf);27142714- if (cnt % 16 == 15)27152715- printk("\n");27102710+ for (cnt = 0; cnt < size; cnt += 16) {27112711+ ql_dbg(level, vha, id, "%04x: ", cnt);27122712+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,27132713+ buf + cnt, min(16U, size - cnt), false);27162714 }27172717- if (cnt % 16 != 0)27182718- printk("\n");27192715}
+10-4
drivers/scsi/scsi_lib.c
···29322932/**29332933 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state29342934 * @sdev: device to block29352935+ * @wait: Whether or not to wait until ongoing .queuecommand() /29362936+ * .queue_rq() calls have finished.29352937 *29362938 * Block request made by scsi lld's to temporarily stop all29372939 * scsi commands on the specified device. May sleep.···29512949 * remove the rport mutex lock and unlock calls from srp_queuecommand().29522950 */29532951int29542954-scsi_internal_device_block(struct scsi_device *sdev)29522952+scsi_internal_device_block(struct scsi_device *sdev, bool wait)29552953{29562954 struct request_queue *q = sdev->request_queue;29572955 unsigned long flags;···29712969 * request queue. 29722970 */29732971 if (q->mq_ops) {29742974- blk_mq_quiesce_queue(q);29722972+ if (wait)29732973+ blk_mq_quiesce_queue(q);29742974+ else29752975+ blk_mq_stop_hw_queues(q);29752976 } else {29762977 spin_lock_irqsave(q->queue_lock, flags);29772978 blk_stop_queue(q);29782979 spin_unlock_irqrestore(q->queue_lock, flags);29792979- scsi_wait_for_queuecommand(sdev);29802980+ if (wait)29812981+ scsi_wait_for_queuecommand(sdev);29802982 }2981298329822984 return 0;···30423036static void30433037device_block(struct scsi_device *sdev, void *data)30443038{30453045- scsi_internal_device_block(sdev);30393039+ scsi_internal_device_block(sdev, true);30463040}3047304130483042static int
-3
drivers/scsi/scsi_priv.h
···188188 */189189190190#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600 /* units in seconds */191191-extern int scsi_internal_device_block(struct scsi_device *sdev);192192-extern int scsi_internal_device_unblock(struct scsi_device *sdev,193193- enum scsi_device_state new_state);194191195192#endif /* _SCSI_PRIV_H */
+17
drivers/scsi/sd.c
···17831783{17841784 int result = SCpnt->result;17851785 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);17861786+ unsigned int sector_size = SCpnt->device->sector_size;17871787+ unsigned int resid;17861788 struct scsi_sense_hdr sshdr;17871789 struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);17881790 struct request *req = SCpnt->request;···18151813 scsi_set_resid(SCpnt, blk_rq_bytes(req));18161814 }18171815 break;18161816+ default:18171817+ /*18181818+ * In case of bogus fw or device, we could end up having18191819+ * an unaligned partial completion. Check this here and force18201820+ * alignment.18211821+ */18221822+ resid = scsi_get_resid(SCpnt);18231823+ if (resid & (sector_size - 1)) {18241824+ sd_printk(KERN_INFO, sdkp,18251825+ "Unaligned partial completion (resid=%u, sector_sz=%u)\n",18261826+ resid, sector_size);18271827+ resid = min(scsi_bufflen(SCpnt),18281828+ round_up(resid, sector_size));18291829+ scsi_set_resid(SCpnt, resid);18301830+ }18181831 }1819183218201833 if (result) {
+17-10
drivers/scsi/storvsc_drv.c
···400400 */401401static int storvsc_timeout = 180;402402403403-static int msft_blist_flags = BLIST_TRY_VPD_PAGES;404404-405403#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)406404static struct scsi_transport_template *fc_transport_template;407405#endif···13811383 return ret;13821384}1383138513861386+static int storvsc_device_alloc(struct scsi_device *sdevice)13871387+{13881388+ /*13891389+ * Set blist flag to permit the reading of the VPD pages even when13901390+ * the target may claim SPC-2 compliance. MSFT targets currently13911391+ * claim SPC-2 compliance while they implement post SPC-2 features.13921392+ * With this flag we can correctly handle WRITE_SAME_16 issues.13931393+ *13941394+ * Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but13951395+ * still supports REPORT LUN.13961396+ */13971397+ sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;13981398+13991399+ return 0;14001400+}14011401+13841402static int storvsc_device_configure(struct scsi_device *sdevice)13851403{13861404···14081394 blk_queue_virt_boundary(sdevice->request_queue, PAGE_SIZE - 1);1409139514101396 sdevice->no_write_same = 1;14111411-14121412- /*14131413- * Add blist flags to permit the reading of the VPD pages even when14141414- * the target may claim SPC-2 compliance. MSFT targets currently14151415- * claim SPC-2 compliance while they implement post SPC-2 features.14161416- * With this patch we can correctly handle WRITE_SAME_16 issues.14171417- */14181418- sdevice->sdev_bflags |= msft_blist_flags;1419139714201398 /*14211399 * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3···16671661 .eh_host_reset_handler = storvsc_host_reset_handler,16681662 .proc_name = "storvsc_host",16691663 .eh_timed_out = storvsc_eh_timed_out,16641664+ .slave_alloc = storvsc_device_alloc,16701665 .slave_configure = storvsc_device_configure,16711666 .cmd_per_lun = 255,16721667 .this_id = -1,
···156156 ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \157157 pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))158158159159+/**160160+ * hlist_nulls_for_each_entry_safe -161161+ * iterate over list of given type safe against removal of list entry162162+ * @tpos: the type * to use as a loop cursor.163163+ * @pos: the &struct hlist_nulls_node to use as a loop cursor.164164+ * @head: the head for your list.165165+ * @member: the name of the hlist_nulls_node within the struct.166166+ */167167+#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \168168+ for (({barrier();}), \169169+ pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \170170+ (!is_a_nulls(pos)) && \171171+ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); \172172+ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });)159173#endif160174#endif
+2-1
include/net/inet_common.h
···2020 int addr_len, int flags, int is_sendmsg);2121int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,2222 int addr_len, int flags);2323-int inet_accept(struct socket *sock, struct socket *newsock, int flags);2323+int inet_accept(struct socket *sock, struct socket *newsock, int flags,2424+ bool kern);2425int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);2526ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,2627 size_t size, int flags);
+1-1
include/net/inet_connection_sock.h
···258258 return (unsigned long)min_t(u64, when, max_when);259259}260260261261-struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);261261+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);262262263263int inet_csk_get_port(struct sock *sk, unsigned short snum);264264
···229229 /* Only log the first time events_limit is incremented. */230230 if (atomic64_inc_return(&pids->events_limit) == 1) {231231 pr_info("cgroup: fork rejected by pids controller in ");232232- pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id));232232+ pr_cont_cgroup_path(css->cgroup);233233 pr_cont("\n");234234 }235235 cgroup_file_notify(&pids->events_file);
+103-98
kernel/sched/core.c
···8686cpumask_var_t cpu_isolated_map;87878888/*8989- * this_rq_lock - lock this runqueue and disable interrupts.9090- */9191-static struct rq *this_rq_lock(void)9292- __acquires(rq->lock)9393-{9494- struct rq *rq;9595-9696- local_irq_disable();9797- rq = this_rq();9898- raw_spin_lock(&rq->lock);9999-100100- return rq;101101-}102102-103103-/*10489 * __task_rq_lock - lock the rq @p resides on.10590 */10691struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)···218233 return;219234220235#ifdef CONFIG_SCHED_DEBUG236236+ if (sched_feat(WARN_DOUBLE_CLOCK))237237+ SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);221238 rq->clock_update_flags |= RQCF_UPDATED;222239#endif240240+223241 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;224242 if (delta < 0)225243 return;···249261static enum hrtimer_restart hrtick(struct hrtimer *timer)250262{251263 struct rq *rq = container_of(timer, struct rq, hrtick_timer);264264+ struct rq_flags rf;252265253266 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());254267255255- raw_spin_lock(&rq->lock);268268+ rq_lock(rq, &rf);256269 update_rq_clock(rq);257270 rq->curr->sched_class->task_tick(rq, rq->curr, 1);258258- raw_spin_unlock(&rq->lock);271271+ rq_unlock(rq, &rf);259272260273 return HRTIMER_NORESTART;261274}···276287static void __hrtick_start(void *arg)277288{278289 struct rq *rq = arg;290290+ struct rq_flags rf;279291280280- raw_spin_lock(&rq->lock);292292+ rq_lock(rq, &rf);281293 __hrtick_restart(rq);282294 rq->hrtick_csd_pending = 0;283283- raw_spin_unlock(&rq->lock);295295+ rq_unlock(rq, &rf);284296}285297286298/*···752762753763static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)754764{755755- update_rq_clock(rq);765765+ if (!(flags & ENQUEUE_NOCLOCK))766766+ update_rq_clock(rq);767767+756768 if (!(flags & ENQUEUE_RESTORE))757769 sched_info_queued(rq, p);770770+758771 p->sched_class->enqueue_task(rq, p, flags);759772}760773761774static inline void dequeue_task(struct rq *rq, struct 
task_struct *p, int flags)762775{763763- update_rq_clock(rq);776776+ if (!(flags & DEQUEUE_NOCLOCK))777777+ update_rq_clock(rq);778778+764779 if (!(flags & DEQUEUE_SAVE))765780 sched_info_dequeued(rq, p);781781+766782 p->sched_class->dequeue_task(rq, p, flags);767783}768784···942946 *943947 * Returns (locked) new rq. Old rq's lock is released.944948 */945945-static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)949949+static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,950950+ struct task_struct *p, int new_cpu)946951{947952 lockdep_assert_held(&rq->lock);948953949954 p->on_rq = TASK_ON_RQ_MIGRATING;950950- dequeue_task(rq, p, 0);955955+ dequeue_task(rq, p, DEQUEUE_NOCLOCK);951956 set_task_cpu(p, new_cpu);952952- raw_spin_unlock(&rq->lock);957957+ rq_unlock(rq, rf);953958954959 rq = cpu_rq(new_cpu);955960956956- raw_spin_lock(&rq->lock);961961+ rq_lock(rq, rf);957962 BUG_ON(task_cpu(p) != new_cpu);958963 enqueue_task(rq, p, 0);959964 p->on_rq = TASK_ON_RQ_QUEUED;···977980 * So we race with normal scheduler movements, but that's OK, as long978981 * as the task is no longer on this CPU.979982 */980980-static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)983983+static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,984984+ struct task_struct *p, int dest_cpu)981985{982986 if (unlikely(!cpu_active(dest_cpu)))983987 return rq;···987989 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))988990 return rq;989991990990- rq = move_queued_task(rq, p, dest_cpu);992992+ update_rq_clock(rq);993993+ rq = move_queued_task(rq, rf, p, dest_cpu);991994992995 return rq;993996}···10031004 struct migration_arg *arg = data;10041005 struct task_struct *p = arg->task;10051006 struct rq *rq = this_rq();10071007+ struct rq_flags rf;1006100810071009 /*10081010 * The original target CPU might have gone down and we might···10181018 sched_ttwu_pending();1019101910201020 
raw_spin_lock(&p->pi_lock);10211021- raw_spin_lock(&rq->lock);10211021+ rq_lock(rq, &rf);10221022 /*10231023 * If task_rq(p) != rq, it cannot be migrated here, because we're10241024 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because···10261026 */10271027 if (task_rq(p) == rq) {10281028 if (task_on_rq_queued(p))10291029- rq = __migrate_task(rq, p, arg->dest_cpu);10291029+ rq = __migrate_task(rq, &rf, p, arg->dest_cpu);10301030 else10311031 p->wake_cpu = arg->dest_cpu;10321032 }10331033- raw_spin_unlock(&rq->lock);10331033+ rq_unlock(rq, &rf);10341034 raw_spin_unlock(&p->pi_lock);1035103510361036 local_irq_enable();···10631063 * holding rq->lock.10641064 */10651065 lockdep_assert_held(&rq->lock);10661066- dequeue_task(rq, p, DEQUEUE_SAVE);10661066+ dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);10671067 }10681068 if (running)10691069 put_prev_task(rq, p);···10711071 p->sched_class->set_cpus_allowed(p, new_mask);1072107210731073 if (queued)10741074- enqueue_task(rq, p, ENQUEUE_RESTORE);10741074+ enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);10751075 if (running)10761076 set_curr_task(rq, p);10771077}···11501150 * OK, since we're going to drop the lock immediately11511151 * afterwards anyway.11521152 */11531153- rq_unpin_lock(rq, &rf);11541154- rq = move_queued_task(rq, p, dest_cpu);11551155- rq_repin_lock(rq, &rf);11531153+ rq = move_queued_task(rq, &rf, p, dest_cpu);11561154 }11571155out:11581156 task_rq_unlock(rq, p, &rf);···12151217{12161218 if (task_on_rq_queued(p)) {12171219 struct rq *src_rq, *dst_rq;12201220+ struct rq_flags srf, drf;1218122112191222 src_rq = task_rq(p);12201223 dst_rq = cpu_rq(cpu);12241224+12251225+ rq_pin_lock(src_rq, &srf);12261226+ rq_pin_lock(dst_rq, &drf);1221122712221228 p->on_rq = TASK_ON_RQ_MIGRATING;12231229 deactivate_task(src_rq, p, 0);···12291227 activate_task(dst_rq, p, 0);12301228 p->on_rq = TASK_ON_RQ_QUEUED;12311229 check_preempt_curr(dst_rq, p, 0);12301230+12311231+ rq_unpin_lock(dst_rq, 
&drf);12321232+ rq_unpin_lock(src_rq, &srf);12331233+12321234 } else {12331235 /*12341236 * Task isn't running anymore; make it appear like we migrated···16861680ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,16871681 struct rq_flags *rf)16881682{16891689- int en_flags = ENQUEUE_WAKEUP;16831683+ int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;1690168416911685 lockdep_assert_held(&rq->lock);16921686···17321726 struct rq *rq = this_rq();17331727 struct llist_node *llist = llist_del_all(&rq->wake_list);17341728 struct task_struct *p;17351735- unsigned long flags;17361729 struct rq_flags rf;1737173017381731 if (!llist)17391732 return;1740173317411741- raw_spin_lock_irqsave(&rq->lock, flags);17421742- rq_pin_lock(rq, &rf);17341734+ rq_lock_irqsave(rq, &rf);17351735+ update_rq_clock(rq);1743173617441737 while (llist) {17451738 int wake_flags = 0;···17521747 ttwu_do_activate(rq, p, wake_flags, &rf);17531748 }1754174917551755- rq_unpin_lock(rq, &rf);17561756- raw_spin_unlock_irqrestore(&rq->lock, flags);17501750+ rq_unlock_irqrestore(rq, &rf);17571751}1758175217591753void scheduler_ipi(void)···18101806void wake_up_if_idle(int cpu)18111807{18121808 struct rq *rq = cpu_rq(cpu);18131813- unsigned long flags;18091809+ struct rq_flags rf;1814181018151811 rcu_read_lock();18161812···18201816 if (set_nr_if_polling(rq->idle)) {18211817 trace_sched_wake_idle_without_ipi(cpu);18221818 } else {18231823- raw_spin_lock_irqsave(&rq->lock, flags);18191819+ rq_lock_irqsave(rq, &rf);18241820 if (is_idle_task(rq->curr))18251821 smp_send_reschedule(cpu);18261822 /* Else CPU is not idle, do nothing here: */18271827- raw_spin_unlock_irqrestore(&rq->lock, flags);18231823+ rq_unlock_irqrestore(rq, &rf);18281824 }1829182518301826out:···18501846 }18511847#endif1852184818531853- raw_spin_lock(&rq->lock);18541854- rq_pin_lock(rq, &rf);18491849+ rq_lock(rq, &rf);18501850+ update_rq_clock(rq);18551851 ttwu_do_activate(rq, p, wake_flags, &rf);18561856- rq_unpin_lock(rq, 
&rf);18571857- raw_spin_unlock(&rq->lock);18521852+ rq_unlock(rq, &rf);18581853}1859185418601855/*···21002097 * disabled avoiding further scheduler activity on it and we've21012098 * not yet picked a replacement task.21022099 */21032103- rq_unpin_lock(rq, rf);21042104- raw_spin_unlock(&rq->lock);21002100+ rq_unlock(rq, rf);21052101 raw_spin_lock(&p->pi_lock);21062106- raw_spin_lock(&rq->lock);21072107- rq_repin_lock(rq, rf);21022102+ rq_relock(rq, rf);21082103 }2109210421102105 if (!(p->state & TASK_NORMAL))···21152114 delayacct_blkio_end();21162115 atomic_dec(&rq->nr_iowait);21172116 }21182118- ttwu_activate(rq, p, ENQUEUE_WAKEUP);21172117+ ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK);21192118 }2120211921212120 ttwu_do_wakeup(rq, p, 0, rf);···25562555 update_rq_clock(rq);25572556 post_init_entity_util_avg(&p->se);2558255725592559- activate_task(rq, p, 0);25582558+ activate_task(rq, p, ENQUEUE_NOCLOCK);25602559 p->on_rq = TASK_ON_RQ_QUEUED;25612560 trace_sched_wakeup_new(p);25622561 check_preempt_curr(rq, p, WF_FORK);···30943093 int cpu = smp_processor_id();30953094 struct rq *rq = cpu_rq(cpu);30963095 struct task_struct *curr = rq->curr;30963096+ struct rq_flags rf;3097309730983098 sched_clock_tick();3099309931003100- raw_spin_lock(&rq->lock);31003100+ rq_lock(rq, &rf);31013101+31013102 update_rq_clock(rq);31023103 curr->sched_class->task_tick(rq, curr, 0);31033104 cpu_load_update_active(rq);31043105 calc_global_load_tick(rq);31053105- raw_spin_unlock(&rq->lock);31063106+31073107+ rq_unlock(rq, &rf);3106310831073109 perf_event_task_tick();31083110···33903386 * done by the caller to avoid the race with signal_wake_up().33913387 */33923388 smp_mb__before_spinlock();33933393- raw_spin_lock(&rq->lock);33943394- rq_pin_lock(rq, &rf);33893389+ rq_lock(rq, &rf);3395339033963391 /* Promote REQ to ACT */33973392 rq->clock_update_flags <<= 1;33933393+ update_rq_clock(rq);3398339433993395 switch_count = &prev->nivcsw;34003396 if (!preempt && prev->state) {34013397 
if (unlikely(signal_pending_state(prev->state, prev))) {34023398 prev->state = TASK_RUNNING;34033399 } else {34043404- deactivate_task(rq, prev, DEQUEUE_SLEEP);34003400+ deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);34053401 prev->on_rq = 0;3406340234073403 if (prev->in_iowait) {···34253421 switch_count = &prev->nvcsw;34263422 }3427342334283428- if (task_on_rq_queued(prev))34293429- update_rq_clock(rq);34303430-34313424 next = pick_next_task(rq, prev, &rf);34323425 clear_tsk_need_resched(prev);34333426 clear_preempt_need_resched();···34403439 rq = context_switch(rq, prev, next, &rf);34413440 } else {34423441 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);34433443- rq_unpin_lock(rq, &rf);34443444- raw_spin_unlock_irq(&rq->lock);34423442+ rq_unlock_irq(rq, &rf);34453443 }3446344434473445 balance_callback(rq);···36843684 */36853685void rt_mutex_setprio(struct task_struct *p, int prio)36863686{36873687- int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;36873687+ int oldprio, queued, running, queue_flag =36883688+ DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;36883689 const struct sched_class *prev_class;36893690 struct rq_flags rf;36903691 struct rq *rq;···38063805 queued = task_on_rq_queued(p);38073806 running = task_current(rq, p);38083807 if (queued)38093809- dequeue_task(rq, p, DEQUEUE_SAVE);38083808+ dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);38103809 if (running)38113810 put_prev_task(rq, p);38123811···38173816 delta = p->prio - old_prio;3818381738193818 if (queued) {38203820- enqueue_task(rq, p, ENQUEUE_RESTORE);38193819+ enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);38213820 /*38223821 * If the task increased its priority or is running and38233822 * lowered its priority, then reschedule its CPU:···41274126 const struct sched_class *prev_class;41284127 struct rq_flags rf;41294128 int reset_on_fork;41304130- int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;41294129+ int queue_flags = DEQUEUE_SAVE | 
DEQUEUE_MOVE | DEQUEUE_NOCLOCK;41314130 struct rq *rq;4132413141334132 /* May grab non-irq protected spin_locks: */···49244923 */49254924SYSCALL_DEFINE0(sched_yield)49264925{49274927- struct rq *rq = this_rq_lock();49264926+ struct rq_flags rf;49274927+ struct rq *rq;49284928+49294929+ local_irq_disable();49304930+ rq = this_rq();49314931+ rq_lock(rq, &rf);4928493249294933 schedstat_inc(rq->yld_count);49304934 current->sched_class->yield_task(rq);···49384932 * Since we are going to call schedule() anyway, there's49394933 * no need to preempt or enable interrupts:49404934 */49414941- __release(rq->lock);49424942- spin_release(&rq->lock.dep_map, 1, _THIS_IP_);49434943- do_raw_spin_unlock(&rq->lock);49354935+ preempt_disable();49364936+ rq_unlock(rq, &rf);49444937 sched_preempt_enable_no_resched();4945493849464939 schedule();···55195514 p->numa_preferred_nid = nid;5520551555215516 if (queued)55225522- enqueue_task(rq, p, ENQUEUE_RESTORE);55175517+ enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);55235518 if (running)55245519 set_curr_task(rq, p);55255520 task_rq_unlock(rq, p, &rf);···55845579 * there's no concurrency possible, we hold the required locks anyway55855580 * because of lock validation efforts.55865581 */55875587-static void migrate_tasks(struct rq *dead_rq)55825582+static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf)55885583{55895584 struct rq *rq = dead_rq;55905585 struct task_struct *next, *stop = rq->stop;55915591- struct rq_flags rf;55865586+ struct rq_flags orf = *rf;55925587 int dest_cpu;5593558855945589 /*···56075602 * class method both need to have an up-to-date56085603 * value of rq->clock[_task]56095604 */56105610- rq_pin_lock(rq, &rf);56115605 update_rq_clock(rq);56125612- rq_unpin_lock(rq, &rf);5613560656145607 for (;;) {56155608 /*···56205617 /*56215618 * pick_next_task() assumes pinned rq->lock:56225619 */56235623- rq_repin_lock(rq, &rf);56245624- next = pick_next_task(rq, &fake_task, &rf);56205620+ next = 
pick_next_task(rq, &fake_task, rf);56255621 BUG_ON(!next);56265622 next->sched_class->put_prev_task(rq, next);56275623···56335631 * because !cpu_active at this point, which means load-balance56345632 * will not interfere. Also, stop-machine.56355633 */56365636- rq_unpin_lock(rq, &rf);56375637- raw_spin_unlock(&rq->lock);56345634+ rq_unlock(rq, rf);56385635 raw_spin_lock(&next->pi_lock);56395639- raw_spin_lock(&rq->lock);56365636+ rq_relock(rq, rf);5640563756415638 /*56425639 * Since we're inside stop-machine, _nothing_ should have···5649564856505649 /* Find suitable destination for @next, with force if needed. */56515650 dest_cpu = select_fallback_rq(dead_rq->cpu, next);56525652-56535653- rq = __migrate_task(rq, next, dest_cpu);56515651+ rq = __migrate_task(rq, rf, next, dest_cpu);56545652 if (rq != dead_rq) {56555655- raw_spin_unlock(&rq->lock);56535653+ rq_unlock(rq, rf);56565654 rq = dead_rq;56575657- raw_spin_lock(&rq->lock);56555655+ *rf = orf;56565656+ rq_relock(rq, rf);56585657 }56595658 raw_spin_unlock(&next->pi_lock);56605659 }···57675766int sched_cpu_activate(unsigned int cpu)57685767{57695768 struct rq *rq = cpu_rq(cpu);57705770- unsigned long flags;57695769+ struct rq_flags rf;5771577057725771 set_cpu_active(cpu, true);57735772···57855784 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the57865785 * domains.57875786 */57885788- raw_spin_lock_irqsave(&rq->lock, flags);57875787+ rq_lock_irqsave(rq, &rf);57895788 if (rq->rd) {57905789 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));57915790 set_rq_online(rq);57925791 }57935793- raw_spin_unlock_irqrestore(&rq->lock, flags);57925792+ rq_unlock_irqrestore(rq, &rf);5794579357955794 update_max_interval();57965795···58485847int sched_cpu_dying(unsigned int cpu)58495848{58505849 struct rq *rq = cpu_rq(cpu);58515851- unsigned long flags;58505850+ struct rq_flags rf;5852585158535852 /* Handle pending wakeups and then migrate everything off */58545853 sched_ttwu_pending();58555855- 
raw_spin_lock_irqsave(&rq->lock, flags);58545854+58555855+ rq_lock_irqsave(rq, &rf);58565856 if (rq->rd) {58575857 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));58585858 set_rq_offline(rq);58595859 }58605860- migrate_tasks(rq);58605860+ migrate_tasks(rq, &rf);58615861 BUG_ON(rq->nr_running != 1);58625862- raw_spin_unlock_irqrestore(&rq->lock, flags);58625862+ rq_unlock_irqrestore(rq, &rf);58635863+58635864 calc_load_migrate(rq);58645865 update_max_interval();58655866 nohz_balance_exit_idle(cpu);···64156412 */64166413void sched_move_task(struct task_struct *tsk)64176414{64186418- int queued, running;64156415+ int queued, running, queue_flags =64166416+ DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;64196417 struct rq_flags rf;64206418 struct rq *rq;64216419···64276423 queued = task_on_rq_queued(tsk);6428642464296425 if (queued)64306430- dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE);64266426+ dequeue_task(rq, tsk, queue_flags);64316427 if (running)64326428 put_prev_task(rq, tsk);6433642964346430 sched_change_group(tsk, TASK_MOVE_GROUP);6435643164366432 if (queued)64376437- enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE);64336433+ enqueue_task(rq, tsk, queue_flags);64386434 if (running)64396435 set_curr_task(rq, tsk);64406436···70127008 for_each_online_cpu(i) {70137009 struct cfs_rq *cfs_rq = tg->cfs_rq[i];70147010 struct rq *rq = cfs_rq->rq;70117011+ struct rq_flags rf;7015701270167016- raw_spin_lock_irq(&rq->lock);70137013+ rq_lock_irq(rq, &rf);70177014 cfs_rq->runtime_enabled = runtime_enabled;70187015 cfs_rq->runtime_remaining = 0;7019701670207017 if (cfs_rq->throttled)70217018 unthrottle_cfs_rq(cfs_rq);70227022- raw_spin_unlock_irq(&rq->lock);70197019+ rq_unlock_irq(rq, &rf);70237020 }70247021 if (runtime_was_enabled && !runtime_enabled)70257022 cfs_bandwidth_usage_dec();
+57-6
kernel/sched/deadline.c
···445445 *446446 * This function returns true if:447447 *448448- * runtime / (deadline - t) > dl_runtime / dl_period ,448448+ * runtime / (deadline - t) > dl_runtime / dl_deadline ,449449 *450450 * IOW we can't recycle current parameters.451451 *452452- * Notice that the bandwidth check is done against the period. For452452+ * Notice that the bandwidth check is done against the deadline. For453453 * task with deadline equal to period this is the same of using454454- * dl_deadline instead of dl_period in the equation above.454454+ * dl_period instead of dl_deadline in the equation above.455455 */456456static bool dl_entity_overflow(struct sched_dl_entity *dl_se,457457 struct sched_dl_entity *pi_se, u64 t)···476476 * of anything below microseconds resolution is actually fiction477477 * (but still we want to give the user that illusion >;).478478 */479479- left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);479479+ left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);480480 right = ((dl_se->deadline - t) >> DL_SCALE) *481481 (pi_se->dl_runtime >> DL_SCALE);482482···505505 }506506}507507508508+static inline u64 dl_next_period(struct sched_dl_entity *dl_se)509509+{510510+ return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;511511+}512512+508513/*509514 * If the entity depleted all its runtime, and if we want it to sleep510515 * while waiting for some new execution time to become available, we511511- * set the bandwidth enforcement timer to the replenishment instant516516+ * set the bandwidth replenishment timer to the replenishment instant512517 * and try to activate it.513518 *514519 * Notice that it is important for the caller to know if the timer···535530 * that it is actually coming from rq->clock and not from536531 * hrtimer's time base reading.537532 */538538- act = ns_to_ktime(dl_se->deadline);533533+ act = ns_to_ktime(dl_next_period(dl_se));539534 now = hrtimer_cb_get_time(timer);540535 delta = ktime_to_ns(now) - 
rq_clock(rq);541536 act = ktime_add_ns(act, delta);···643638 lockdep_unpin_lock(&rq->lock, rf.cookie);644639 rq = dl_task_offline_migration(rq, p);645640 rf.cookie = lockdep_pin_lock(&rq->lock);641641+ update_rq_clock(rq);646642647643 /*648644 * Now that the task has been migrated to the new RQ and we···693687694688 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);695689 timer->function = dl_task_timer;690690+}691691+692692+/*693693+ * During the activation, CBS checks if it can reuse the current task's694694+ * runtime and period. If the deadline of the task is in the past, CBS695695+ * cannot use the runtime, and so it replenishes the task. This rule696696+ * works fine for implicit deadline tasks (deadline == period), and the697697+ * CBS was designed for implicit deadline tasks. However, a task with698698+ * constrained deadline (deadine < period) might be awakened after the699699+ * deadline, but before the next period. In this case, replenishing the700700+ * task would allow it to run for runtime / deadline. As in this case701701+ * deadline < period, CBS enables a task to run for more than the702702+ * runtime / period. 
In a very loaded system, this can cause a domino703703+ * effect, making other tasks miss their deadlines.704704+ *705705+ * To avoid this problem, in the activation of a constrained deadline706706+ * task after the deadline but before the next period, throttle the707707+ * task and set the replenishing timer to the begin of the next period,708708+ * unless it is boosted.709709+ */710710+static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)711711+{712712+ struct task_struct *p = dl_task_of(dl_se);713713+ struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));714714+715715+ if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&716716+ dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {717717+ if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))718718+ return;719719+ dl_se->dl_throttled = 1;720720+ }696721}697722698723static···959922 __dequeue_dl_entity(dl_se);960923}961924925925+static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)926926+{927927+ return dl_se->dl_deadline < dl_se->dl_period;928928+}929929+962930static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)963931{964932 struct task_struct *pi_task = rt_mutex_get_top_task(p);···988946 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);989947 return;990948 }949949+950950+ /*951951+ * Check if a constrained deadline task was activated952952+ * after the deadline but before the next period.953953+ * If that is the case, the task will be throttled and954954+ * the replenishment timer will be set to the next period.955955+ */956956+ if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))957957+ dl_check_constrained_dl(&p->dl);991958992959 /*993960 * If p is throttled, we do nothing. In fact, if it exhausted
+238-149
kernel/sched/fair.c
···27672767 * Approximate:27682768 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)27692769 */27702770-static __always_inline u64 decay_load(u64 val, u64 n)27702770+static u64 decay_load(u64 val, u64 n)27712771{27722772 unsigned int local_n;27732773···27952795 return val;27962796}2797279727982798-/*27992799- * For updates fully spanning n periods, the contribution to runnable28002800- * average will be: \Sum 1024*y^n28012801- *28022802- * We can compute this reasonably efficiently by combining:28032803- * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}28042804- */28052805-static u32 __compute_runnable_contrib(u64 n)27982798+static u32 __accumulate_sum(u64 periods, u32 period_contrib, u32 remainder)28062799{28072807- u32 contrib = 0;28002800+ u32 c1, c2, c3 = remainder; /* y^0 == 1 */2808280128092809- if (likely(n <= LOAD_AVG_PERIOD))28102810- return runnable_avg_yN_sum[n];28112811- else if (unlikely(n >= LOAD_AVG_MAX_N))28022802+ if (!periods)28032803+ return remainder - period_contrib;28042804+28052805+ if (unlikely(periods >= LOAD_AVG_MAX_N))28122806 return LOAD_AVG_MAX;2813280728142814- /* Since n < LOAD_AVG_MAX_N, n/LOAD_AVG_PERIOD < 11 */28152815- contrib = __accumulated_sum_N32[n/LOAD_AVG_PERIOD];28162816- n %= LOAD_AVG_PERIOD;28172817- contrib = decay_load(contrib, n);28182818- return contrib + runnable_avg_yN_sum[n];28082808+ /*28092809+ * c1 = d1 y^(p+1)28102810+ */28112811+ c1 = decay_load((u64)(1024 - period_contrib), periods);28122812+28132813+ periods -= 1;28142814+ /*28152815+ * For updates fully spanning n periods, the contribution to runnable28162816+ * average will be:28172817+ *28182818+ * c2 = 1024 \Sum y^n28192819+ *28202820+ * We can compute this reasonably efficiently by combining:28212821+ *28222822+ * y^PERIOD = 1/2 with precomputed 1024 \Sum y^n {for: n < PERIOD}28232823+ */28242824+ if (likely(periods <= LOAD_AVG_PERIOD)) {28252825+ c2 = runnable_avg_yN_sum[periods];28262826+ } else {28272827+ c2 = 
__accumulated_sum_N32[periods/LOAD_AVG_PERIOD];28282828+ periods %= LOAD_AVG_PERIOD;28292829+ c2 = decay_load(c2, periods);28302830+ c2 += runnable_avg_yN_sum[periods];28312831+ }28322832+28332833+ return c1 + c2 + c3;28192834}2820283528212836#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)28372837+28382838+/*28392839+ * Accumulate the three separate parts of the sum; d1 the remainder28402840+ * of the last (incomplete) period, d2 the span of full periods and d328412841+ * the remainder of the (incomplete) current period.28422842+ *28432843+ * d1 d2 d328442844+ * ^ ^ ^28452845+ * | | |28462846+ * |<->|<----------------->|<--->|28472847+ * ... |---x---|------| ... |------|-----x (now)28482848+ *28492849+ * p28502850+ * u' = (u + d1) y^(p+1) + 1024 \Sum y^n + d3 y^028512851+ * n=128522852+ *28532853+ * = u y^(p+1) + (Step 1)28542854+ *28552855+ * p28562856+ * d1 y^(p+1) + 1024 \Sum y^n + d3 y^0 (Step 2)28572857+ * n=128582858+ */28592859+static __always_inline u3228602860+accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,28612861+ unsigned long weight, int running, struct cfs_rq *cfs_rq)28622862+{28632863+ unsigned long scale_freq, scale_cpu;28642864+ u64 periods;28652865+ u32 contrib;28662866+28672867+ scale_freq = arch_scale_freq_capacity(NULL, cpu);28682868+ scale_cpu = arch_scale_cpu_capacity(NULL, cpu);28692869+28702870+ delta += sa->period_contrib;28712871+ periods = delta / 1024; /* A period is 1024us (~1ms) */28722872+28732873+ /*28742874+ * Step 1: decay old *_sum if we crossed period boundaries.28752875+ */28762876+ if (periods) {28772877+ sa->load_sum = decay_load(sa->load_sum, periods);28782878+ if (cfs_rq) {28792879+ cfs_rq->runnable_load_sum =28802880+ decay_load(cfs_rq->runnable_load_sum, periods);28812881+ }28822882+ sa->util_sum = decay_load((u64)(sa->util_sum), periods);28832883+ }28842884+28852885+ /*28862886+ * Step 228872887+ */28882888+ delta %= 1024;28892889+ contrib = __accumulate_sum(periods, sa->period_contrib, 
delta);28902890+ sa->period_contrib = delta;28912891+28922892+ contrib = cap_scale(contrib, scale_freq);28932893+ if (weight) {28942894+ sa->load_sum += weight * contrib;28952895+ if (cfs_rq)28962896+ cfs_rq->runnable_load_sum += weight * contrib;28972897+ }28982898+ if (running)28992899+ sa->util_sum += contrib * scale_cpu;29002900+29012901+ return periods;29022902+}2822290328232904/*28242905 * We can represent the historical contribution to runnable average as the···29302849 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]29312850 */29322851static __always_inline int29332933-__update_load_avg(u64 now, int cpu, struct sched_avg *sa,28522852+___update_load_avg(u64 now, int cpu, struct sched_avg *sa,29342853 unsigned long weight, int running, struct cfs_rq *cfs_rq)29352854{29362936- u64 delta, scaled_delta, periods;29372937- u32 contrib;29382938- unsigned int delta_w, scaled_delta_w, decayed = 0;29392939- unsigned long scale_freq, scale_cpu;28552855+ u64 delta;2940285629412857 delta = now - sa->last_update_time;29422858 /*···29542876 return 0;29552877 sa->last_update_time = now;2956287829572957- scale_freq = arch_scale_freq_capacity(NULL, cpu);29582958- scale_cpu = arch_scale_cpu_capacity(NULL, cpu);28792879+ /*28802880+ * Now we know we crossed measurement unit boundaries. The *_avg28812881+ * accrues by two steps:28822882+ *28832883+ * Step 1: accumulate *_sum since last_update_time. 
If we haven't28842884+ * crossed period boundaries, finish.28852885+ */28862886+ if (!accumulate_sum(delta, cpu, sa, weight, running, cfs_rq))28872887+ return 0;2959288829602960- /* delta_w is the amount already accumulated against our next period */29612961- delta_w = sa->period_contrib;29622962- if (delta + delta_w >= 1024) {29632963- decayed = 1;29642964-29652965- /* how much left for next period will start over, we don't know yet */29662966- sa->period_contrib = 0;29672967-29682968- /*29692969- * Now that we know we're crossing a period boundary, figure29702970- * out how much from delta we need to complete the current29712971- * period and accrue it.29722972- */29732973- delta_w = 1024 - delta_w;29742974- scaled_delta_w = cap_scale(delta_w, scale_freq);29752975- if (weight) {29762976- sa->load_sum += weight * scaled_delta_w;29772977- if (cfs_rq) {29782978- cfs_rq->runnable_load_sum +=29792979- weight * scaled_delta_w;29802980- }29812981- }29822982- if (running)29832983- sa->util_sum += scaled_delta_w * scale_cpu;29842984-29852985- delta -= delta_w;29862986-29872987- /* Figure out how many additional periods this update spans */29882988- periods = delta / 1024;29892989- delta %= 1024;29902990-29912991- sa->load_sum = decay_load(sa->load_sum, periods + 1);29922992- if (cfs_rq) {29932993- cfs_rq->runnable_load_sum =29942994- decay_load(cfs_rq->runnable_load_sum, periods + 1);29952995- }29962996- sa->util_sum = decay_load((u64)(sa->util_sum), periods + 1);29972997-29982998- /* Efficiently calculate \sum (1..n_period) 1024*y^i */29992999- contrib = __compute_runnable_contrib(periods);30003000- contrib = cap_scale(contrib, scale_freq);30013001- if (weight) {30023002- sa->load_sum += weight * contrib;30033003- if (cfs_rq)30043004- cfs_rq->runnable_load_sum += weight * contrib;30053005- }30063006- if (running)30073007- sa->util_sum += contrib * scale_cpu;28892889+ /*28902890+ * Step 2: update *_avg.28912891+ */28922892+ sa->load_avg = div_u64(sa->load_sum, 
LOAD_AVG_MAX);28932893+ if (cfs_rq) {28942894+ cfs_rq->runnable_load_avg =28952895+ div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);30082896 }28972897+ sa->util_avg = sa->util_sum / LOAD_AVG_MAX;3009289830103010- /* Remainder of delta accrued against u_0` */30113011- scaled_delta = cap_scale(delta, scale_freq);30123012- if (weight) {30133013- sa->load_sum += weight * scaled_delta;30143014- if (cfs_rq)30153015- cfs_rq->runnable_load_sum += weight * scaled_delta;30163016- }30173017- if (running)30183018- sa->util_sum += scaled_delta * scale_cpu;28992899+ return 1;29002900+}3019290130203020- sa->period_contrib += delta;29022902+static int29032903+__update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se)29042904+{29052905+ return ___update_load_avg(now, cpu, &se->avg, 0, 0, NULL);29062906+}3021290730223022- if (decayed) {30233023- sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX);30243024- if (cfs_rq) {30253025- cfs_rq->runnable_load_avg =30263026- div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);30273027- }30283028- sa->util_avg = sa->util_sum / LOAD_AVG_MAX;30293029- }29082908+static int29092909+__update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)29102910+{29112911+ return ___update_load_avg(now, cpu, &se->avg,29122912+ se->on_rq * scale_load_down(se->load.weight),29132913+ cfs_rq->curr == se, NULL);29142914+}3030291530313031- return decayed;29162916+static int29172917+__update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)29182918+{29192919+ return ___update_load_avg(now, cpu, &cfs_rq->avg,29202920+ scale_load_down(cfs_rq->load.weight),29212921+ cfs_rq->curr != NULL, cfs_rq);30322922}3033292330342924/*···30603014void set_task_rq_fair(struct sched_entity *se,30613015 struct cfs_rq *prev, struct cfs_rq *next)30623016{30173017+ u64 p_last_update_time;30183018+ u64 n_last_update_time;30193019+30633020 if (!sched_feat(ATTACH_AGE_LOAD))30643021 return;30653022···30733024 * time. 
This will result in the wakee task is less decayed, but giving30743025 * the wakee more load sounds not bad.30753026 */30763076- if (se->avg.last_update_time && prev) {30773077- u64 p_last_update_time;30783078- u64 n_last_update_time;30273027+ if (!(se->avg.last_update_time && prev))30283028+ return;3079302930803030#ifndef CONFIG_64BIT30313031+ {30813032 u64 p_last_update_time_copy;30823033 u64 n_last_update_time_copy;30833034···3092304330933044 } while (p_last_update_time != p_last_update_time_copy ||30943045 n_last_update_time != n_last_update_time_copy);30953095-#else30963096- p_last_update_time = prev->avg.last_update_time;30973097- n_last_update_time = next->avg.last_update_time;30983098-#endif30993099- __update_load_avg(p_last_update_time, cpu_of(rq_of(prev)),31003100- &se->avg, 0, 0, NULL);31013101- se->avg.last_update_time = n_last_update_time;31023046 }30473047+#else30483048+ p_last_update_time = prev->avg.last_update_time;30493049+ n_last_update_time = next->avg.last_update_time;30503050+#endif30513051+ __update_load_avg_blocked_se(p_last_update_time, cpu_of(rq_of(prev)), se);30523052+ se->avg.last_update_time = n_last_update_time;31033053}3104305431053055/* Take into account change of utilization of a child task group */···32213173 return 1;32223174}3223317531763176+/*31773177+ * Check if we need to update the load and the utilization of a blocked31783178+ * group_entity:31793179+ */31803180+static inline bool skip_blocked_update(struct sched_entity *se)31813181+{31823182+ struct cfs_rq *gcfs_rq = group_cfs_rq(se);31833183+31843184+ /*31853185+ * If sched_entity still have not zero load or utilization, we have to31863186+ * decay it:31873187+ */31883188+ if (se->avg.load_avg || se->avg.util_avg)31893189+ return false;31903190+31913191+ /*31923192+ * If there is a pending propagation, we have to update the load and31933193+ * the utilization of the sched_entity:31943194+ */31953195+ if (gcfs_rq->propagate_avg)31963196+ return false;31973197+31983198+ 
/*31993199+ * Otherwise, the load and the utilization of the sched_entity is32003200+ * already zero and there is no pending propagation, so it will be a32013201+ * waste of time to try to decay it:32023202+ */32033203+ return true;32043204+}32053205+32243206#else /* CONFIG_FAIR_GROUP_SCHED */3225320732263208static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}···33433265 set_tg_cfs_propagate(cfs_rq);33443266 }3345326733463346- decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,33473347- scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);32683268+ decayed = __update_load_avg_cfs_rq(now, cpu_of(rq_of(cfs_rq)), cfs_rq);3348326933493270#ifndef CONFIG_64BIT33503271 smp_wmb();···33753298 * Track task load average for carrying it to new CPU after migrated, and33763299 * track group sched_entity load average for task_h_load calc in migration33773300 */33783378- if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) {33793379- __update_load_avg(now, cpu, &se->avg,33803380- se->on_rq * scale_load_down(se->load.weight),33813381- cfs_rq->curr == se, NULL);33823382- }33013301+ if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))33023302+ __update_load_avg_se(now, cpu, cfs_rq, se);3383330333843304 decayed = update_cfs_rq_load_avg(now, cfs_rq, true);33853305 decayed |= propagate_entity_load_avg(se);···34813407 u64 last_update_time;3482340834833409 last_update_time = cfs_rq_last_update_time(cfs_rq);34843484- __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);34103410+ __update_load_avg_blocked_se(last_update_time, cpu_of(rq_of(cfs_rq)), se);34853411}3486341234873413/*···43454271 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,43464272 throttled_list) {43474273 struct rq *rq = rq_of(cfs_rq);42744274+ struct rq_flags rf;4348427543494349- raw_spin_lock(&rq->lock);42764276+ rq_lock(rq, &rf);43504277 if (!cfs_rq_throttled(cfs_rq))43514278 goto next;43524279···43644289 
unthrottle_cfs_rq(cfs_rq);4365429043664291next:43674367- raw_spin_unlock(&rq->lock);42924292+ rq_unlock(rq, &rf);4368429343694294 if (!remaining)43704295 break;···51725097 unsigned long curr_jiffies = READ_ONCE(jiffies);51735098 struct rq *this_rq = this_rq();51745099 unsigned long load;51005100+ struct rq_flags rf;5175510151765102 if (curr_jiffies == this_rq->last_load_update_tick)51775103 return;5178510451795105 load = weighted_cpuload(cpu_of(this_rq));51805180- raw_spin_lock(&this_rq->lock);51065106+ rq_lock(this_rq, &rf);51815107 update_rq_clock(this_rq);51825108 cpu_load_update_nohz(this_rq, curr_jiffies, load);51835183- raw_spin_unlock(&this_rq->lock);51095109+ rq_unlock(this_rq, &rf);51845110}51855111#else /* !CONFIG_NO_HZ_COMMON */51865112static inline void cpu_load_update_nohz(struct rq *this_rq,···68456769 lockdep_assert_held(&env->src_rq->lock);6846677068476771 p->on_rq = TASK_ON_RQ_MIGRATING;68486848- deactivate_task(env->src_rq, p, 0);67726772+ deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);68496773 set_task_cpu(p, env->dst_cpu);68506774}68516775···69786902 lockdep_assert_held(&rq->lock);6979690369806904 BUG_ON(task_rq(p) != rq);69816981- activate_task(rq, p, 0);69056905+ activate_task(rq, p, ENQUEUE_NOCLOCK);69826906 p->on_rq = TASK_ON_RQ_QUEUED;69836907 check_preempt_curr(rq, p, 0);69846908}···69896913 */69906914static void attach_one_task(struct rq *rq, struct task_struct *p)69916915{69926992- raw_spin_lock(&rq->lock);69166916+ struct rq_flags rf;69176917+69186918+ rq_lock(rq, &rf);69196919+ update_rq_clock(rq);69936920 attach_task(rq, p);69946994- raw_spin_unlock(&rq->lock);69216921+ rq_unlock(rq, &rf);69956922}6996692369976924/*···70056926{70066927 struct list_head *tasks = &env->tasks;70076928 struct task_struct *p;69296929+ struct rq_flags rf;7008693070097009- raw_spin_lock(&env->dst_rq->lock);69316931+ rq_lock(env->dst_rq, &rf);69326932+ update_rq_clock(env->dst_rq);7010693370116934 while (!list_empty(tasks)) {70126935 p = 
list_first_entry(tasks, struct task_struct, se.group_node);···70176936 attach_task(env->dst_rq, p);70186937 }7019693870207020- raw_spin_unlock(&env->dst_rq->lock);69396939+ rq_unlock(env->dst_rq, &rf);70216940}7022694170236942#ifdef CONFIG_FAIR_GROUP_SCHED···70256944{70266945 struct rq *rq = cpu_rq(cpu);70276946 struct cfs_rq *cfs_rq;70287028- unsigned long flags;69476947+ struct rq_flags rf;7029694870307030- raw_spin_lock_irqsave(&rq->lock, flags);69496949+ rq_lock_irqsave(rq, &rf);70316950 update_rq_clock(rq);7032695170336952 /*···70356954 * list_add_leaf_cfs_rq() for details.70366955 */70376956 for_each_leaf_cfs_rq(rq, cfs_rq) {69576957+ struct sched_entity *se;69586958+70386959 /* throttled entities do not contribute to load */70396960 if (throttled_hierarchy(cfs_rq))70406961 continue;···70446961 if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))70456962 update_tg_load_avg(cfs_rq, 0);7046696370477047- /* Propagate pending load changes to the parent */70487048- if (cfs_rq->tg->se[cpu])70497049- update_load_avg(cfs_rq->tg->se[cpu], 0);69646964+ /* Propagate pending load changes to the parent, if any: */69656965+ se = cfs_rq->tg->se[cpu];69666966+ if (se && !skip_blocked_update(se))69676967+ update_load_avg(se, 0);70506968 }70517051- raw_spin_unlock_irqrestore(&rq->lock, flags);69696969+ rq_unlock_irqrestore(rq, &rf);70526970}7053697170546972/*···71037019{71047020 struct rq *rq = cpu_rq(cpu);71057021 struct cfs_rq *cfs_rq = &rq->cfs;71067106- unsigned long flags;70227022+ struct rq_flags rf;7107702371087108- raw_spin_lock_irqsave(&rq->lock, flags);70247024+ rq_lock_irqsave(rq, &rf);71097025 update_rq_clock(rq);71107026 update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);71117111- raw_spin_unlock_irqrestore(&rq->lock, flags);70277027+ rq_unlock_irqrestore(rq, &rf);71127028}7113702971147030static unsigned long task_h_load(struct task_struct *p)···76097525{76107526 struct sched_domain *child = env->sd->child;76117527 struct sched_group 
*sg = env->sd->groups;75287528+ struct sg_lb_stats *local = &sds->local_stat;76127529 struct sg_lb_stats tmp_sgs;76137530 int load_idx, prefer_sibling = 0;76147531 bool overload = false;···76267541 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));76277542 if (local_group) {76287543 sds->local = sg;76297629- sgs = &sds->local_stat;75447544+ sgs = local;7630754576317546 if (env->idle != CPU_NEWLY_IDLE ||76327547 time_after_eq(jiffies, sg->sgc->next_update))···76507565 * the tasks on the system).76517566 */76527567 if (prefer_sibling && sds->local &&76537653- group_has_capacity(env, &sds->local_stat) &&76547654- (sgs->sum_nr_running > 1)) {75687568+ group_has_capacity(env, local) &&75697569+ (sgs->sum_nr_running > local->sum_nr_running + 1)) {76557570 sgs->group_no_capacity = 1;76567571 sgs->group_type = group_classify(sg, sgs);76577572 }···81278042 struct sched_domain *sd_parent = sd->parent;81288043 struct sched_group *group;81298044 struct rq *busiest;81308130- unsigned long flags;80458045+ struct rq_flags rf;81318046 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);8132804781338048 struct lb_env env = {···81908105 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);8191810681928107more_balance:81938193- raw_spin_lock_irqsave(&busiest->lock, flags);81088108+ rq_lock_irqsave(busiest, &rf);81948109 update_rq_clock(busiest);8195811081968111 /*···82078122 * See task_rq_lock() family for the details.82088123 */8209812482108210- raw_spin_unlock(&busiest->lock);81258125+ rq_unlock(busiest, &rf);8211812682128127 if (cur_ld_moved) {82138128 attach_tasks(&env);82148129 ld_moved += cur_ld_moved;82158130 }8216813182178217- local_irq_restore(flags);81328132+ local_irq_restore(rf.flags);8218813382198134 if (env.flags & LBF_NEED_BREAK) {82208135 env.flags &= ~LBF_NEED_BREAK;···82928207 sd->nr_balance_failed++;8293820882948209 if (need_active_balance(&env)) {82108210+ unsigned long flags;82118211+82958212 
raw_spin_lock_irqsave(&busiest->lock, flags);8296821382978214 /* don't kick the active_load_balance_cpu_stop,···85318444 struct rq *target_rq = cpu_rq(target_cpu);85328445 struct sched_domain *sd;85338446 struct task_struct *p = NULL;84478447+ struct rq_flags rf;8534844885358535- raw_spin_lock_irq(&busiest_rq->lock);84498449+ rq_lock_irq(busiest_rq, &rf);8536845085378451 /* make sure the requested cpu hasn't gone down in the meantime */85388452 if (unlikely(busiest_cpu != smp_processor_id() ||···85848496 rcu_read_unlock();85858497out_unlock:85868498 busiest_rq->active_balance = 0;85878587- raw_spin_unlock(&busiest_rq->lock);84998499+ rq_unlock(busiest_rq, &rf);8588850085898501 if (p)85908502 attach_one_task(target_rq, p);···88828794 * do the balance.88838795 */88848796 if (time_after_eq(jiffies, rq->next_balance)) {88858885- raw_spin_lock_irq(&rq->lock);87978797+ struct rq_flags rf;87988798+87998799+ rq_lock_irq(rq, &rf);88868800 update_rq_clock(rq);88878801 cpu_load_update_idle(rq);88888888- raw_spin_unlock_irq(&rq->lock);88028802+ rq_unlock_irq(rq, &rf);88038803+88898804 rebalance_domains(rq, CPU_IDLE);88908805 }88918806···90798988 struct cfs_rq *cfs_rq;90808989 struct sched_entity *se = &p->se, *curr;90818990 struct rq *rq = this_rq();89918991+ struct rq_flags rf;9082899290839083- raw_spin_lock(&rq->lock);89938993+ rq_lock(rq, &rf);90848994 update_rq_clock(rq);9085899590868996 cfs_rq = task_cfs_rq(current);···91029010 }9103901191049012 se->vruntime -= cfs_rq->min_vruntime;91059105- raw_spin_unlock(&rq->lock);90139013+ rq_unlock(rq, &rf);91069014}9107901591089016/*···94649372int sched_group_set_shares(struct task_group *tg, unsigned long shares)94659373{94669374 int i;94679467- unsigned long flags;9468937594699376 /*94709377 * We can't change the weight of the root cgroup.···94809389 tg->shares = shares;94819390 for_each_possible_cpu(i) {94829391 struct rq *rq = cpu_rq(i);94839483- struct sched_entity *se;93929392+ struct sched_entity *se = tg->se[i];93939393+ 
struct rq_flags rf;9484939494859485- se = tg->se[i];94869395 /* Propagate contribution to hierarchy */94879487- raw_spin_lock_irqsave(&rq->lock, flags);94889488-94899489- /* Possible calls to update_curr() need rq clock */93969396+ rq_lock_irqsave(rq, &rf);94909397 update_rq_clock(rq);94919398 for_each_sched_entity(se) {94929399 update_load_avg(se, UPDATE_TG);94939400 update_cfs_shares(se);94949401 }94959495- raw_spin_unlock_irqrestore(&rq->lock, flags);94029402+ rq_unlock_irqrestore(rq, &rf);94969403 }9497940494989405done:
+7
kernel/sched/features.h
···5656 */5757SCHED_FEAT(SIS_AVG_CPU, false)58585959+/*6060+ * Issue a WARN when we do multiple update_rq_clock() calls6161+ * in a single rq->lock section. Default disabled because the6262+ * annotations are not complete.6363+ */6464+SCHED_FEAT(WARN_DOUBLE_CLOCK, false)6565+5966#ifdef HAVE_RT_PUSH_IPI6067/*6168 * In order to avoid a thundering herd attack of CPUs that are
+12-8
kernel/sched/loadavg.c
···169169 * If the folding window started, make sure we start writing in the170170 * next idle-delta.171171 */172172- if (!time_before(jiffies, calc_load_update))172172+ if (!time_before(jiffies, READ_ONCE(calc_load_update)))173173 idx++;174174175175 return idx & 1;···202202 struct rq *this_rq = this_rq();203203204204 /*205205- * If we're still before the sample window, we're done.205205+ * If we're still before the pending sample window, we're done.206206 */207207+ this_rq->calc_load_update = READ_ONCE(calc_load_update);207208 if (time_before(jiffies, this_rq->calc_load_update))208209 return;209210···213212 * accounted through the nohz accounting, so skip the entire deal and214213 * sync up for the next window.215214 */216216- this_rq->calc_load_update = calc_load_update;217215 if (time_before(jiffies, this_rq->calc_load_update + 10))218216 this_rq->calc_load_update += LOAD_FREQ;219217}···308308 */309309static void calc_global_nohz(void)310310{311311+ unsigned long sample_window;311312 long delta, active, n;312313313313- if (!time_before(jiffies, calc_load_update + 10)) {314314+ sample_window = READ_ONCE(calc_load_update);315315+ if (!time_before(jiffies, sample_window + 10)) {314316 /*315317 * Catch-up, fold however many we are behind still316318 */317317- delta = jiffies - calc_load_update - 10;319319+ delta = jiffies - sample_window - 10;318320 n = 1 + (delta / LOAD_FREQ);319321320322 active = atomic_long_read(&calc_load_tasks);···326324 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);327325 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);328326329329- calc_load_update += n * LOAD_FREQ;327327+ WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ);330328 }331329332330 /*···354352 */355353void calc_global_load(unsigned long ticks)356354{355355+ unsigned long sample_window;357356 long active, delta;358357359359- if (time_before(jiffies, calc_load_update + 10))358358+ sample_window = READ_ONCE(calc_load_update);359359+ if (time_before(jiffies, 
sample_window + 10))360360 return;361361362362 /*···375371 avenrun[1] = calc_load(avenrun[1], EXP_5, active);376372 avenrun[2] = calc_load(avenrun[2], EXP_15, active);377373378378- calc_load_update += LOAD_FREQ;374374+ WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ);379375380376 /*381377 * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
+81
kernel/sched/rt.c
···19271927#define RT_PUSH_IPI_EXECUTING 119281928#define RT_PUSH_IPI_RESTART 21929192919301930+/*19311931+ * When a high priority task schedules out from a CPU and a lower priority19321932+ * task is scheduled in, a check is made to see if there's any RT tasks19331933+ * on other CPUs that are waiting to run because a higher priority RT task19341934+ * is currently running on its CPU. In this case, the CPU with multiple RT19351935+ * tasks queued on it (overloaded) needs to be notified that a CPU has opened19361936+ * up that may be able to run one of its non-running queued RT tasks.19371937+ *19381938+ * On large CPU boxes, there's the case that several CPUs could schedule19391939+ * a lower priority task at the same time, in which case it will look for19401940+ * any overloaded CPUs that it could pull a task from. To do this, the runqueue19411941+ * lock must be taken from that overloaded CPU. Having 10s of CPUs all fighting19421942+ * for a single overloaded CPU's runqueue lock can produce a large latency.19431943+ * (This has actually been observed on large boxes running cyclictest).19441944+ * Instead of taking the runqueue lock of the overloaded CPU, each of the19451945+ * CPUs that scheduled a lower priority task simply sends an IPI to the19461946+ * overloaded CPU. An IPI is much cheaper than taking an runqueue lock with19471947+ * lots of contention. The overloaded CPU will look to push its non-running19481948+ * RT task off, and if it does, it can then ignore the other IPIs coming19491949+ * in, and just pass those IPIs off to any other overloaded CPU.19501950+ *19511951+ * When a CPU schedules a lower priority task, it only sends an IPI to19521952+ * the "next" CPU that has overloaded RT tasks. 
This prevents IPI storms,19531953+ * as having 10 CPUs scheduling lower priority tasks and 10 CPUs with19541954+ * RT overloaded tasks, would cause 100 IPIs to go out at once.19551955+ *19561956+ * The overloaded RT CPU, when receiving an IPI, will try to push off its19571957+ * overloaded RT tasks and then send an IPI to the next CPU that has19581958+ * overloaded RT tasks. This stops when all CPUs with overloaded RT tasks19591959+ * have completed. Just because a CPU may have pushed off its own overloaded19601960+ * RT task does not mean it should stop sending the IPI around to other19611961+ * overloaded CPUs. There may be another RT task waiting to run on one of19621962+ * those CPUs that are of higher priority than the one that was just19631963+ * pushed.19641964+ *19651965+ * An optimization that could possibly be made is to make a CPU array similar19661966+ * to the cpupri array mask of all running RT tasks, but for the overloaded19671967+ * case, then the IPI could be sent to only the CPU with the highest priority19681968+ * RT task waiting, and that CPU could send off further IPIs to the CPU with19691969+ * the next highest waiting task. Since the overloaded case is much less likely19701970+ * to happen, the complexity of this implementation may not be worth it.19711971+ * Instead, just send an IPI around to all overloaded CPUs.19721972+ *19731973+ * The rq->rt.push_flags holds the status of the IPI that is going around.19741974+ * A run queue can only send out a single IPI at a time. The possible flags19751975+ * for rq->rt.push_flags are:19761976+ *19771977+ * (None or zero): No IPI is going around for the current rq19781978+ * RT_PUSH_IPI_EXECUTING: An IPI for the rq is being passed around19791979+ * RT_PUSH_IPI_RESTART: The priority of the running task for the rq19801980+ * has changed, and the IPI should restart19811981+ * circulating the overloaded CPUs again.19821982+ *19831983+ * rq->rt.push_cpu contains the CPU that is being sent the IPI. 
It is updated19841984+ * before sending to the next CPU.19851985+ *19861986+ * Instead of having all CPUs that schedule a lower priority task send19871987+ * an IPI to the same "first" CPU in the RT overload mask, they send it19881988+ * to the next overloaded CPU after their own CPU. This helps distribute19891989+ * the work when there's more than one overloaded CPU and multiple CPUs19901990+ * scheduling in lower priority tasks.19911991+ *19921992+ * When a rq schedules a lower priority task than what was currently19931993+ * running, the next CPU with overloaded RT tasks is examined first.19941994+ * That is, if CPU 1 and 5 are overloaded, and CPU 3 schedules a lower19951995+ * priority task, it will send an IPI first to CPU 5, then CPU 5 will19961996+ * send to CPU 1 if it is still overloaded. CPU 1 will clear the19971997+ * rq->rt.push_flags if RT_PUSH_IPI_RESTART is not set.19981998+ *19991999+ * The first CPU to notice IPI_RESTART is set, will clear that flag and then20002000+ * send an IPI to the next overloaded CPU after the rq->cpu and not the next20012001+ * CPU after push_cpu. That is, if CPU 1, 4 and 5 are overloaded when CPU 320022002+ * schedules a lower priority task, and the IPI_RESTART gets set while the20032003+ * handling is being done on CPU 5, it will clear the flag and send it back to20042004+ * CPU 4 instead of CPU 1.20052005+ *20062006+ * Note, the above logic can be disabled by turning off the sched_feature20072007+ * RT_PUSH_IPI. Then the rq lock of the overloaded CPU will simply be20082008+ * taken by the CPU requesting a pull and the waiting RT task will be pulled20092009+ * by that CPU. This may be fine for machines with few CPUs.20102010+ */19302011static void tell_cpu_to_push(struct rq *rq)19312012{19322013 int cpu;
···14551455 if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,14561456 P4D_SHIFT, next, write, pages, nr))14571457 return 0;14581458- } else if (!gup_p4d_range(p4d, addr, next, write, pages, nr))14581458+ } else if (!gup_pud_range(p4d, addr, next, write, pages, nr))14591459 return 0;14601460 } while (p4dp++, addr = next, addr != end);14611461
+3-4
mm/percpu-vm.c
···21212222/**2323 * pcpu_get_pages - get temp pages array2424- * @chunk: chunk of interest2524 *2625 * Returns pointer to array of pointers to struct page which can be indexed2726 * with pcpu_page_idx(). Note that there is only one array and accesses···2930 * RETURNS:3031 * Pointer to temp pages array on success.3132 */3232-static struct page **pcpu_get_pages(struct pcpu_chunk *chunk_alloc)3333+static struct page **pcpu_get_pages(void)3334{3435 static struct page **pages;3536 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);···274275{275276 struct page **pages;276277277277- pages = pcpu_get_pages(chunk);278278+ pages = pcpu_get_pages();278279 if (!pages)279280 return -ENOMEM;280281···312313 * successful population attempt so the temp pages array must313314 * be available now.314315 */315315- pages = pcpu_get_pages(chunk);316316+ pages = pcpu_get_pages();316317 BUG_ON(!pages);317318318319 /* unmap and free */
+4-1
mm/percpu.c
···10111011 mutex_unlock(&pcpu_alloc_mutex);10121012 }1013101310141014- if (chunk != pcpu_reserved_chunk)10141014+ if (chunk != pcpu_reserved_chunk) {10151015+ spin_lock_irqsave(&pcpu_lock, flags);10151016 pcpu_nr_empty_pop_pages -= occ_pages;10171017+ spin_unlock_irqrestore(&pcpu_lock, flags);10181018+ }1016101910171020 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)10181021 pcpu_schedule_balance_work();
+3-2
net/atm/svc.c
···318318 return error;319319}320320321321-static int svc_accept(struct socket *sock, struct socket *newsock, int flags)321321+static int svc_accept(struct socket *sock, struct socket *newsock, int flags,322322+ bool kern)322323{323324 struct sock *sk = sock->sk;324325 struct sk_buff *skb;···330329331330 lock_sock(sk);332331333333- error = svc_create(sock_net(sk), newsock, 0, 0);332332+ error = svc_create(sock_net(sk), newsock, 0, kern);334333 if (error)335334 goto out;336335
+2-1
net/ax25/af_ax25.c
···13201320 return err;13211321}1322132213231323-static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)13231323+static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,13241324+ bool kern)13241325{13251326 struct sk_buff *skb;13261327 struct sock *newsk;
+1-1
net/bluetooth/l2cap_sock.c
···301301}302302303303static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,304304- int flags)304304+ int flags, bool kern)305305{306306 DEFINE_WAIT_FUNC(wait, woken_wake_function);307307 struct sock *sk = sock->sk, *nsk;
+2-1
net/bluetooth/rfcomm/sock.c
···471471 return err;472472}473473474474-static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)474474+static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags,475475+ bool kern)475476{476477 DEFINE_WAIT_FUNC(wait, woken_wake_function);477478 struct sock *sk = sock->sk, *nsk;
+1-1
net/bluetooth/sco.c
···627627}628628629629static int sco_sock_accept(struct socket *sock, struct socket *newsock,630630- int flags)630630+ int flags, bool kern)631631{632632 DEFINE_WAIT_FUNC(wait, woken_wake_function);633633 struct sock *sk = sock->sk, *ch;
···521521}522522523523524524-/* PF_BRIDGE/LOCAL_IN ************************************************/525525-/* The packet is locally destined, which requires a real526526- * dst_entry, so detach the fake one. On the way up, the527527- * packet would pass through PRE_ROUTING again (which already528528- * took place when the packet entered the bridge), but we529529- * register an IPv4 PRE_ROUTING 'sabotage' hook that will530530- * prevent this from happening. */531531-static unsigned int br_nf_local_in(void *priv,532532- struct sk_buff *skb,533533- const struct nf_hook_state *state)534534-{535535- br_drop_fake_rtable(skb);536536- return NF_ACCEPT;537537-}538538-539524/* PF_BRIDGE/FORWARD *************************************************/540525static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)541526{···890905 .hook = br_nf_pre_routing,891906 .pf = NFPROTO_BRIDGE,892907 .hooknum = NF_BR_PRE_ROUTING,893893- .priority = NF_BR_PRI_BRNF,894894- },895895- {896896- .hook = br_nf_local_in,897897- .pf = NFPROTO_BRIDGE,898898- .hooknum = NF_BR_LOCAL_IN,899908 .priority = NF_BR_PRI_BRNF,900909 },901910 {
···953953 while (--i >= new_num) {954954 struct kobject *kobj = &dev->_rx[i].kobj;955955956956- if (!list_empty(&dev_net(dev)->exit_list))956956+ if (!atomic_read(&dev_net(dev)->count))957957 kobj->uevent_suppress = 1;958958 if (dev->sysfs_rx_queue_group)959959 sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);···13711371 while (--i >= new_num) {13721372 struct netdev_queue *queue = dev->_tx + i;1373137313741374- if (!list_empty(&dev_net(dev)->exit_list))13741374+ if (!atomic_read(&dev_net(dev)->count))13751375 queue->kobj.uevent_suppress = 1;13761376#ifdef CONFIG_BQL13771377 sysfs_remove_group(&queue->kobj, &dql_group);···15581558{15591559 struct device *dev = &(ndev->dev);1560156015611561- if (!list_empty(&dev_net(ndev)->exit_list))15611561+ if (!atomic_read(&dev_net(ndev)->count))15621562 dev_set_uevent_suppress(dev, 1);1563156315641564 kobject_get(&dev->kobj);
+16-14
net/core/skbuff.c
···38283828 if (!skb_may_tx_timestamp(sk, false))38293829 return;3830383038313831- /* take a reference to prevent skb_orphan() from freeing the socket */38323832- sock_hold(sk);38333833-38343834- *skb_hwtstamps(skb) = *hwtstamps;38353835- __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);38363836-38373837- sock_put(sk);38313831+ /* Take a reference to prevent skb_orphan() from freeing the socket,38323832+ * but only if the socket refcount is not zero.38333833+ */38343834+ if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {38353835+ *skb_hwtstamps(skb) = *hwtstamps;38363836+ __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);38373837+ sock_put(sk);38383838+ }38383839}38393840EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);38403841···38943893{38953894 struct sock *sk = skb->sk;38963895 struct sock_exterr_skb *serr;38973897- int err;38963896+ int err = 1;3898389738993898 skb->wifi_acked_valid = 1;39003899 skb->wifi_acked = acked;···39043903 serr->ee.ee_errno = ENOMSG;39053904 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;3906390539073907- /* take a reference to prevent skb_orphan() from freeing the socket */39083908- sock_hold(sk);39093909-39103910- err = sock_queue_err_skb(sk, skb);39063906+ /* Take a reference to prevent skb_orphan() from freeing the socket,39073907+ * but only if the socket refcount is not zero.39083908+ */39093909+ if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {39103910+ err = sock_queue_err_skb(sk, skb);39113911+ sock_put(sk);39123912+ }39113913 if (err)39123914 kfree_skb(skb);39133913-39143914- sock_put(sk);39153915}39163916EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);39173917
+57-49
net/core/sock.c
···197197198198/*199199 * Each address family might have different locking rules, so we have200200- * one slock key per address family:200200+ * one slock key per address family and separate keys for internal and201201+ * userspace sockets.201202 */202203static struct lock_class_key af_family_keys[AF_MAX];204204+static struct lock_class_key af_family_kern_keys[AF_MAX];203205static struct lock_class_key af_family_slock_keys[AF_MAX];206206+static struct lock_class_key af_family_kern_slock_keys[AF_MAX];204207205208/*206209 * Make lock validator output more readable. (we pre-construct these207210 * strings build-time, so that runtime initialization of socket208211 * locks is fast):209212 */213213+214214+#define _sock_locks(x) \215215+ x "AF_UNSPEC", x "AF_UNIX" , x "AF_INET" , \216216+ x "AF_AX25" , x "AF_IPX" , x "AF_APPLETALK", \217217+ x "AF_NETROM", x "AF_BRIDGE" , x "AF_ATMPVC" , \218218+ x "AF_X25" , x "AF_INET6" , x "AF_ROSE" , \219219+ x "AF_DECnet", x "AF_NETBEUI" , x "AF_SECURITY" , \220220+ x "AF_KEY" , x "AF_NETLINK" , x "AF_PACKET" , \221221+ x "AF_ASH" , x "AF_ECONET" , x "AF_ATMSVC" , \222222+ x "AF_RDS" , x "AF_SNA" , x "AF_IRDA" , \223223+ x "AF_PPPOX" , x "AF_WANPIPE" , x "AF_LLC" , \224224+ x "27" , x "28" , x "AF_CAN" , \225225+ x "AF_TIPC" , x "AF_BLUETOOTH", x "IUCV" , \226226+ x "AF_RXRPC" , x "AF_ISDN" , x "AF_PHONET" , \227227+ x "AF_IEEE802154", x "AF_CAIF" , x "AF_ALG" , \228228+ x "AF_NFC" , x "AF_VSOCK" , x "AF_KCM" , \229229+ x "AF_QIPCRTR", x "AF_SMC" , x "AF_MAX"230230+210231static const char *const af_family_key_strings[AF_MAX+1] = {211211- "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,212212- "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",213213- "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,214214- "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,215215- "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,216216- "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , 
"sk_lock-AF_PACKET" ,217217- "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,218218- "sk_lock-AF_RDS" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,219219- "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,220220- "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,221221- "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,222222- "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,223223- "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,224224- "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_KCM" ,225225- "sk_lock-AF_QIPCRTR", "sk_lock-AF_SMC" , "sk_lock-AF_MAX"232232+ _sock_locks("sk_lock-")226233};227234static const char *const af_family_slock_key_strings[AF_MAX+1] = {228228- "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,229229- "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",230230- "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,231231- "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,232232- "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,233233- "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,234234- "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,235235- "slock-AF_RDS" , "slock-AF_SNA" , "slock-AF_IRDA" ,236236- "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,237237- "slock-27" , "slock-28" , "slock-AF_CAN" ,238238- "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,239239- "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,240240- "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,241241- "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_KCM" ,242242- "slock-AF_QIPCRTR", "slock-AF_SMC" , "slock-AF_MAX"235235+ _sock_locks("slock-")243236};244237static const char *const af_family_clock_key_strings[AF_MAX+1] = {245245- "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,246246- "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",247247- "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,248248- "clock-AF_X25" , 
"clock-AF_INET6" , "clock-AF_ROSE" ,249249- "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,250250- "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,251251- "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,252252- "clock-AF_RDS" , "clock-AF_SNA" , "clock-AF_IRDA" ,253253- "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,254254- "clock-27" , "clock-28" , "clock-AF_CAN" ,255255- "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,256256- "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,257257- "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,258258- "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" ,259259- "clock-AF_QIPCRTR", "clock-AF_SMC" , "clock-AF_MAX"238238+ _sock_locks("clock-")239239+};240240+241241+static const char *const af_family_kern_key_strings[AF_MAX+1] = {242242+ _sock_locks("k-sk_lock-")243243+};244244+static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {245245+ _sock_locks("k-slock-")246246+};247247+static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {248248+ _sock_locks("k-clock-")260249};261250262251/*···253264 * so split the lock classes by using a per-AF key:254265 */255266static struct lock_class_key af_callback_keys[AF_MAX];267267+static struct lock_class_key af_kern_callback_keys[AF_MAX];256268257269/* Take into consideration the size of the struct sk_buff overhead in the258270 * determination of these values, since that is non-constant across···12831293 */12841294static inline void sock_lock_init(struct sock *sk)12851295{12861286- sock_lock_init_class_and_name(sk,12961296+ if (sk->sk_kern_sock)12971297+ sock_lock_init_class_and_name(12981298+ sk,12991299+ af_family_kern_slock_key_strings[sk->sk_family],13001300+ af_family_kern_slock_keys + sk->sk_family,13011301+ af_family_kern_key_strings[sk->sk_family],13021302+ af_family_kern_keys + sk->sk_family);13031303+ else13041304+ sock_lock_init_class_and_name(13051305+ sk,12871306 
af_family_slock_key_strings[sk->sk_family],12881307 af_family_slock_keys + sk->sk_family,12891308 af_family_key_strings[sk->sk_family],···13981399 * why we need sk_prot_creator -acme13991400 */14001401 sk->sk_prot = sk->sk_prot_creator = prot;14021402+ sk->sk_kern_sock = kern;14011403 sock_lock_init(sk);14021404 sk->sk_net_refcnt = kern ? 0 : 1;14031405 if (likely(sk->sk_net_refcnt))···22772277}22782278EXPORT_SYMBOL(sock_no_socketpair);2279227922802280-int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)22802280+int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,22812281+ bool kern)22812282{22822283 return -EOPNOTSUPP;22832284}···24822481 }2483248224842483 rwlock_init(&sk->sk_callback_lock);24852485- lockdep_set_class_and_name(&sk->sk_callback_lock,24842484+ if (sk->sk_kern_sock)24852485+ lockdep_set_class_and_name(24862486+ &sk->sk_callback_lock,24872487+ af_kern_callback_keys + sk->sk_family,24882488+ af_family_kern_clock_key_strings[sk->sk_family]);24892489+ else24902490+ lockdep_set_class_and_name(24912491+ &sk->sk_callback_lock,24862492 af_callback_keys + sk->sk_family,24872493 af_family_clock_key_strings[sk->sk_family]);24882494
+1
net/dccp/ccids/ccid2.c
···749749 for (i = 0; i < hc->tx_seqbufc; i++)750750 kfree(hc->tx_seqbuf[i]);751751 hc->tx_seqbufc = 0;752752+ dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);752753}753754754755static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
+2-1
net/dccp/ipv4.c
···289289290290 switch (type) {291291 case ICMP_REDIRECT:292292- dccp_do_redirect(skb, sk);292292+ if (!sock_owned_by_user(sk))293293+ dccp_do_redirect(skb, sk);293294 goto out;294295 case ICMP_SOURCE_QUENCH:295296 /* Just silently ignore these. */
+5-3
net/dccp/ipv6.c
···122122 np = inet6_sk(sk);123123124124 if (type == NDISC_REDIRECT) {125125- struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);125125+ if (!sock_owned_by_user(sk)) {126126+ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);126127127127- if (dst)128128- dst->ops->redirect(dst, sk, skb);128128+ if (dst)129129+ dst->ops->redirect(dst, sk, skb);130130+ }129131 goto out;130132 }131133
+16-8
net/dccp/minisocks.c
···142142 struct dccp_request_sock *dreq = dccp_rsk(req);143143 bool own_req;144144145145+ /* TCP/DCCP listeners became lockless.146146+ * DCCP stores complex state in its request_sock, so we need147147+ * a protection for them, now this code runs without being protected148148+ * by the parent (listener) lock.149149+ */150150+ spin_lock_bh(&dreq->dreq_lock);151151+145152 /* Check for retransmitted REQUEST */146153 if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {147154···163156 inet_rtx_syn_ack(sk, req);164157 }165158 /* Network Duplicate, discard packet */166166- return NULL;159159+ goto out;167160 }168161169162 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;···189182190183 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,191184 req, &own_req);192192- if (!child)193193- goto listen_overflow;185185+ if (child) {186186+ child = inet_csk_complete_hashdance(sk, child, req, own_req);187187+ goto out;188188+ }194189195195- return inet_csk_complete_hashdance(sk, child, req, own_req);196196-197197-listen_overflow:198198- dccp_pr_debug("listen_overflow!\n");199190 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;200191drop:201192 if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)202193 req->rsk_ops->send_reset(sk, skb);203194204195 inet_csk_reqsk_queue_drop(sk, req);205205- return NULL;196196+out:197197+ spin_unlock_bh(&dreq->dreq_lock);198198+ return child;206199}207200208201EXPORT_SYMBOL_GPL(dccp_check_req);···253246{254247 struct dccp_request_sock *dreq = dccp_rsk(req);255248249249+ spin_lock_init(&dreq->dreq_lock);256250 inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;257251 inet_rsk(req)->ir_num = ntohs(dccp_hdr(skb)->dccph_dport);258252 inet_rsk(req)->acked = 0;
···689689 * Accept a pending connection. The TCP layer now gives BSD semantics.690690 */691691692692-int inet_accept(struct socket *sock, struct socket *newsock, int flags)692692+int inet_accept(struct socket *sock, struct socket *newsock, int flags,693693+ bool kern)693694{694695 struct sock *sk1 = sock->sk;695696 int err = -EINVAL;696696- struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err);697697+ struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err, kern);697698698699 if (!sk2)699700 goto do_err;···14881487 int proto = iph->protocol;14891488 int err = -ENOSYS;1490148914911491- if (skb->encapsulation)14901490+ if (skb->encapsulation) {14911491+ skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));14921492 skb_set_inner_network_header(skb, nhoff);14931493+ }1493149414941495 csum_replace2(&iph->check, iph->tot_len, newlen);14951496 iph->tot_len = newlen;
+1-1
net/ipv4/inet_connection_sock.c
···424424/*425425 * This will accept the next outstanding connection.426426 */427427-struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)427427+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)428428{429429 struct inet_connection_sock *icsk = inet_csk(sk);430430 struct request_sock_queue *queue = &icsk->icsk_accept_queue;
···279279 */280280void tcp_v4_mtu_reduced(struct sock *sk)281281{282282- struct dst_entry *dst;283282 struct inet_sock *inet = inet_sk(sk);284284- u32 mtu = tcp_sk(sk)->mtu_info;283283+ struct dst_entry *dst;284284+ u32 mtu;285285286286+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))287287+ return;288288+ mtu = tcp_sk(sk)->mtu_info;286289 dst = inet_csk_update_pmtu(sk, mtu);287290 if (!dst)288291 return;···431428432429 switch (type) {433430 case ICMP_REDIRECT:434434- do_redirect(icmp_skb, sk);431431+ if (!sock_owned_by_user(sk))432432+ do_redirect(icmp_skb, sk);435433 goto out;436434 case ICMP_SOURCE_QUENCH:437435 /* Just silently ignore these. */
+4-2
net/ipv4/tcp_timer.c
···249249250250 sk_mem_reclaim_partial(sk);251251252252- if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))252252+ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||253253+ !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))253254 goto out;254255255256 if (time_after(icsk->icsk_ack.timeout, jiffies)) {···553552 struct inet_connection_sock *icsk = inet_csk(sk);554553 int event;555554556556- if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)555555+ if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||556556+ !icsk->icsk_pending)557557 goto out;558558559559 if (time_after(icsk->icsk_timeout, jiffies)) {
+5-5
net/ipv6/af_inet6.c
···920920 err = register_pernet_subsys(&inet6_net_ops);921921 if (err)922922 goto register_pernet_fail;923923- err = icmpv6_init();924924- if (err)925925- goto icmp_fail;926923 err = ip6_mr_init();927924 if (err)928925 goto ipmr_fail;926926+ err = icmpv6_init();927927+ if (err)928928+ goto icmp_fail;929929 err = ndisc_init();930930 if (err)931931 goto ndisc_fail;···10611061 ndisc_cleanup();10621062ndisc_fail:10631063 ip6_mr_cleanup();10641064-ipmr_fail:10651065- icmpv6_cleanup();10661064icmp_fail:10671065 unregister_pernet_subsys(&inet6_net_ops);10661066+ipmr_fail:10671067+ icmpv6_cleanup();10681068register_pernet_fail:10691069 sock_unregister(PF_INET6);10701070 rtnl_unregister_all(PF_INET6);
+2
net/ipv6/ip6_fib.c
···923923 ins = &rt->dst.rt6_next;924924 iter = *ins;925925 while (iter) {926926+ if (iter->rt6i_metric > rt->rt6i_metric)927927+ break;926928 if (rt6_qualify_for_ecmp(iter)) {927929 *ins = iter->dst.rt6_next;928930 fib6_purge_rt(iter, fn, info->nl_net);
···133133134134 new_sock->type = sock->type;135135 new_sock->ops = sock->ops;136136- ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);136136+ ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true);137137 if (ret < 0)138138 goto out;139139···223223 * before it has been accepted and the accepter has set up their224224 * data_ready.. we only want to queue listen work for our listening225225 * socket226226+ *227227+ * (*ready)() may be null if we are racing with netns delete, and228228+ * the listen socket is being torn down.226229 */227230 if (sk->sk_state == TCP_LISTEN)228231 rds_tcp_accept_work(sk);···234231235232out:236233 read_unlock_bh(&sk->sk_callback_lock);237237- ready(sk);234234+ if (ready)235235+ ready(sk);238236}239237240238struct socket *rds_tcp_listen_init(struct net *net)···275271 return NULL;276272}277273278278-void rds_tcp_listen_stop(struct socket *sock)274274+void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor)279275{280276 struct sock *sk;281277···296292297293 /* wait for accepts to stop and close the socket */298294 flush_workqueue(rds_wq);295295+ flush_work(acceptor);299296 sock_release(sock);300297}
+2-1
net/rose/af_rose.c
···871871 return err;872872}873873874874-static int rose_accept(struct socket *sock, struct socket *newsock, int flags)874874+static int rose_accept(struct socket *sock, struct socket *newsock, int flags,875875+ bool kern)875876{876877 struct sk_buff *skb;877878 struct sock *newsk;
+19-8
net/rxrpc/input.c
···420420 u16 skew)421421{422422 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);423423+ enum rxrpc_call_state state;423424 unsigned int offset = sizeof(struct rxrpc_wire_header);424425 unsigned int ix;425426 rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;···435434 _proto("Rx DATA %%%u { #%u f=%02x }",436435 sp->hdr.serial, seq, sp->hdr.flags);437436438438- if (call->state >= RXRPC_CALL_COMPLETE)437437+ state = READ_ONCE(call->state);438438+ if (state >= RXRPC_CALL_COMPLETE)439439 return;440440441441 /* Received data implicitly ACKs all of the request packets we sent442442 * when we're acting as a client.443443 */444444- if ((call->state == RXRPC_CALL_CLIENT_SEND_REQUEST ||445445- call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&444444+ if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||445445+ state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&446446 !rxrpc_receiving_reply(call))447447 return;448448···652650 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);653651 struct rxrpc_peer *peer;654652 unsigned int mtu;653653+ bool wake = false;655654 u32 rwind = ntohl(ackinfo->rwind);656655657656 _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",···660657 ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),661658 rwind, ntohl(ackinfo->jumbo_max));662659663663- if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)664664- rwind = RXRPC_RXTX_BUFF_SIZE - 1;665665- call->tx_winsize = rwind;660660+ if (call->tx_winsize != rwind) {661661+ if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)662662+ rwind = RXRPC_RXTX_BUFF_SIZE - 1;663663+ if (rwind > call->tx_winsize)664664+ wake = true;665665+ call->tx_winsize = rwind;666666+ }667667+666668 if (call->cong_ssthresh > rwind)667669 call->cong_ssthresh = rwind;668670···681673 spin_unlock_bh(&peer->lock);682674 _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);683675 }676676+677677+ if (wake)678678+ wake_up(&call->waitq);684679}685680686681/*···810799 return rxrpc_proto_abort("AK0", call, 0);811800812801 /* Ignore ACKs unless we are or have just been transmitting. 
*/813813- switch (call->state) {802802+ switch (READ_ONCE(call->state)) {814803 case RXRPC_CALL_CLIENT_SEND_REQUEST:815804 case RXRPC_CALL_CLIENT_AWAIT_REPLY:816805 case RXRPC_CALL_SERVER_SEND_REPLY:···951940static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,952941 struct rxrpc_call *call)953942{954954- switch (call->state) {943943+ switch (READ_ONCE(call->state)) {955944 case RXRPC_CALL_SERVER_AWAIT_ACK:956945 rxrpc_call_completed(call);957946 break;
+2-2
net/rxrpc/recvmsg.c
···527527 msg->msg_namelen = len;528528 }529529530530- switch (call->state) {530530+ switch (READ_ONCE(call->state)) {531531 case RXRPC_CALL_SERVER_ACCEPTING:532532 ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);533533 break;···640640641641 mutex_lock(&call->user_mutex);642642643643- switch (call->state) {643643+ switch (READ_ONCE(call->state)) {644644 case RXRPC_CALL_CLIENT_RECV_REPLY:645645 case RXRPC_CALL_SERVER_RECV_REQUEST:646646 case RXRPC_CALL_SERVER_ACK_REQUEST:
+31-18
net/rxrpc/sendmsg.c
···488488int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)489489 __releases(&rx->sk.sk_lock.slock)490490{491491+ enum rxrpc_call_state state;491492 enum rxrpc_command cmd;492493 struct rxrpc_call *call;493494 unsigned long user_call_ID = 0;···527526 return PTR_ERR(call);528527 /* ... and we have the call lock. */529528 } else {530530- ret = -EBUSY;531531- if (call->state == RXRPC_CALL_UNINITIALISED ||532532- call->state == RXRPC_CALL_CLIENT_AWAIT_CONN ||533533- call->state == RXRPC_CALL_SERVER_PREALLOC ||534534- call->state == RXRPC_CALL_SERVER_SECURING ||535535- call->state == RXRPC_CALL_SERVER_ACCEPTING)529529+ switch (READ_ONCE(call->state)) {530530+ case RXRPC_CALL_UNINITIALISED:531531+ case RXRPC_CALL_CLIENT_AWAIT_CONN:532532+ case RXRPC_CALL_SERVER_PREALLOC:533533+ case RXRPC_CALL_SERVER_SECURING:534534+ case RXRPC_CALL_SERVER_ACCEPTING:535535+ ret = -EBUSY;536536 goto error_release_sock;537537+ default:538538+ break;539539+ }537540538541 ret = mutex_lock_interruptible(&call->user_mutex);539542 release_sock(&rx->sk);···547542 }548543 }549544545545+ state = READ_ONCE(call->state);550546 _debug("CALL %d USR %lx ST %d on CONN %p",551551- call->debug_id, call->user_call_ID, call->state, call->conn);547547+ call->debug_id, call->user_call_ID, state, call->conn);552548553553- if (call->state >= RXRPC_CALL_COMPLETE) {549549+ if (state >= RXRPC_CALL_COMPLETE) {554550 /* it's too late for this call */555551 ret = -ESHUTDOWN;556552 } else if (cmd == RXRPC_CMD_SEND_ABORT) {···561555 } else if (cmd != RXRPC_CMD_SEND_DATA) {562556 ret = -EINVAL;563557 } else if (rxrpc_is_client_call(call) &&564564- call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {558558+ state != RXRPC_CALL_CLIENT_SEND_REQUEST) {565559 /* request phase complete for this client call */566560 ret = -EPROTO;567561 } else if (rxrpc_is_service_call(call) &&568568- call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&569569- call->state != RXRPC_CALL_SERVER_SEND_REPLY) {562562+ state != 
RXRPC_CALL_SERVER_ACK_REQUEST &&563563+ state != RXRPC_CALL_SERVER_SEND_REPLY) {570564 /* Reply phase not begun or not complete for service call. */571565 ret = -EPROTO;572566 } else {···611605 _debug("CALL %d USR %lx ST %d on CONN %p",612606 call->debug_id, call->user_call_ID, call->state, call->conn);613607614614- if (call->state >= RXRPC_CALL_COMPLETE) {615615- ret = -ESHUTDOWN; /* it's too late for this call */616616- } else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&617617- call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&618618- call->state != RXRPC_CALL_SERVER_SEND_REPLY) {619619- ret = -EPROTO; /* request phase complete for this client call */620620- } else {608608+ switch (READ_ONCE(call->state)) {609609+ case RXRPC_CALL_CLIENT_SEND_REQUEST:610610+ case RXRPC_CALL_SERVER_ACK_REQUEST:611611+ case RXRPC_CALL_SERVER_SEND_REPLY:621612 ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len);613613+ break;614614+ case RXRPC_CALL_COMPLETE:615615+ read_lock_bh(&call->state_lock);616616+ ret = -call->error;617617+ read_unlock_bh(&call->state_lock);618618+ break;619619+ default:620620+ /* Request phase complete for this client call */621621+ ret = -EPROTO;622622+ break;622623 }623624624625 mutex_unlock(&call->user_mutex);
+3
net/sched/act_connmark.c
···113113 if (ret < 0)114114 return ret;115115116116+ if (!tb[TCA_CONNMARK_PARMS])117117+ return -EINVAL;118118+116119 parm = nla_data(tb[TCA_CONNMARK_PARMS]);117120118121 if (!tcf_hash_check(tn, parm->index, a, bind)) {
···640640641641/* Create and initialize a new sk for the socket to be returned by accept(). */642642static struct sock *sctp_v6_create_accept_sk(struct sock *sk,643643- struct sctp_association *asoc)643643+ struct sctp_association *asoc,644644+ bool kern)644645{645646 struct sock *newsk;646647 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);647648 struct sctp6_sock *newsctp6sk;648649 struct ipv6_txoptions *opt;649650650650- newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0);651651+ newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, kern);651652 if (!newsk)652653 goto out;653654
+3-2
net/sctp/protocol.c
···575575576576/* Create and initialize a new sk for the socket returned by accept(). */577577static struct sock *sctp_v4_create_accept_sk(struct sock *sk,578578- struct sctp_association *asoc)578578+ struct sctp_association *asoc,579579+ bool kern)579580{580581 struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,581581- sk->sk_prot, 0);582582+ sk->sk_prot, kern);582583 struct inet_sock *newinet;583584584585 if (!newsk)
+2-2
net/sctp/socket.c
···41164116 * descriptor will be returned from accept() to represent the newly41174117 * formed association.41184118 */41194119-static struct sock *sctp_accept(struct sock *sk, int flags, int *err)41194119+static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)41204120{41214121 struct sctp_sock *sp;41224122 struct sctp_endpoint *ep;···41514151 */41524152 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);4153415341544154- newsk = sp->pf->create_accept_sk(sk, asoc);41544154+ newsk = sp->pf->create_accept_sk(sk, asoc, kern);41554155 if (!newsk) {41564156 error = -ENOMEM;41574157 goto out;
+1-1
net/smc/af_smc.c
@@ -944,7 +944,7 @@
 }
 
 static int smc_accept(struct socket *sock, struct socket *new_sock,
-		      int flags)
+		      int flags, bool kern)
 {
 	struct sock *sk = sock->sk, *nsk;
 	DECLARE_WAITQUEUE(wait, current);
+3-2
net/socket.c
@@ -1506,7 +1506,7 @@
 	if (err)
 		goto out_fd;
 
-	err = sock->ops->accept(sock, newsock, sock->file->f_flags);
+	err = sock->ops->accept(sock, newsock, sock->file->f_flags, false);
 	if (err < 0)
 		goto out_fd;
 
@@ -1731,6 +1732,7 @@
 	/* We assume all kernel code knows the size of sockaddr_storage */
 	msg.msg_namelen = 0;
 	msg.msg_iocb = NULL;
+	msg.msg_flags = 0;
 	if (sock->file->f_flags & O_NONBLOCK)
 		flags |= MSG_DONTWAIT;
 	err = sock_recvmsg(sock, &msg, flags);
@@ -3239,7 +3238,7 @@
 	if (err < 0)
 		goto done;
 
-	err = sock->ops->accept(sock, *newsock, flags);
+	err = sock->ops->accept(sock, *newsock, flags, true);
 	if (err < 0) {
 		sock_release(*newsock);
 		*newsock = NULL;
+5-3
net/tipc/socket.c
@@ -115,7 +115,8 @@
 static void tipc_write_space(struct sock *sk);
 static void tipc_sock_destruct(struct sock *sk);
 static int tipc_release(struct socket *sock);
-static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
+		       bool kern);
 static void tipc_sk_timeout(unsigned long data);
 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
 			   struct tipc_name_seq const *seq);
@@ -2030,7 +2029,8 @@
  *
  * Returns 0 on success, errno otherwise
  */
-static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
+		       bool kern)
 {
 	struct sock *new_sk, *sk = sock->sk;
 	struct sk_buff *buf;
@@ -2053,7 +2051,7 @@
 
 	buf = skb_peek(&sk->sk_receive_queue);
 
-	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 0);
+	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
 	if (res)
 		goto exit;
 	security_sk_clone(sock->sk, new_sock->sk);
+3-2
net/unix/af_unix.c
@@ -636,7 +636,7 @@
 static int unix_stream_connect(struct socket *, struct sockaddr *,
 			       int addr_len, int flags);
 static int unix_socketpair(struct socket *, struct socket *);
-static int unix_accept(struct socket *, struct socket *, int);
+static int unix_accept(struct socket *, struct socket *, int, bool);
 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
 static unsigned int unix_dgram_poll(struct file *, struct socket *,
@@ -1402,7 +1402,8 @@
 	set_bit(SOCK_PASSSEC, &new->flags);
 }
 
-static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
+static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
+		       bool kern)
 {
 	struct sock *sk = sock->sk;
 	struct sock *tsk;
+2-1
net/vmw_vsock/af_vsock.c
@@ -1250,7 +1250,8 @@
 	return err;
 }
 
-static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
+static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
+			bool kern)
 {
 	struct sock *listener;
 	int err;
+2-1
net/x25/af_x25.c
@@ -852,7 +852,8 @@
 	return rc;
 }
 
-static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
+static int x25_accept(struct socket *sock, struct socket *newsock, int flags,
+		      bool kern)
 {
 	struct sock *sk = sock->sk;
 	struct sock *newsk;
+9-10
net/xfrm/xfrm_policy.c
@@ -1243,7 +1243,7 @@
 }
 
 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
-						 const struct flowi *fl)
+						 const struct flowi *fl, u16 family)
 {
 	struct xfrm_policy *pol;
 
@@ -1251,8 +1251,7 @@
  again:
 	pol = rcu_dereference(sk->sk_policy[dir]);
 	if (pol != NULL) {
-		bool match = xfrm_selector_match(&pol->selector, fl,
-						 sk->sk_family);
+		bool match = xfrm_selector_match(&pol->selector, fl, family);
 		int err = 0;
 
 		if (match) {
@@ -2239,7 +2240,7 @@
 	sk = sk_const_to_full_sk(sk);
 	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
 		num_pols = 1;
-		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
+		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
 		err = xfrm_expand_policies(fl, family, pols,
 					   &num_pols, &num_xfrms);
 		if (err < 0)
@@ -2518,7 +2519,7 @@
 	pol = NULL;
 	sk = sk_to_full_sk(sk);
 	if (sk && sk->sk_policy[dir]) {
-		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
+		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
 		if (IS_ERR(pol)) {
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
 			return 0;
@@ -3069,6 +3070,11 @@
 {
 	int rv;
 
+	/* Initialize the per-net locks here */
+	spin_lock_init(&net->xfrm.xfrm_state_lock);
+	spin_lock_init(&net->xfrm.xfrm_policy_lock);
+	mutex_init(&net->xfrm.xfrm_cfg_mutex);
+
 	rv = xfrm_statistics_init(net);
 	if (rv < 0)
 		goto out_statistics;
@@ -3088,11 +3084,6 @@
 	rv = flow_cache_init(net);
 	if (rv < 0)
 		goto out;
-
-	/* Initialize the per-net locks here */
-	spin_lock_init(&net->xfrm.xfrm_state_lock);
-	spin_lock_init(&net->xfrm.xfrm_policy_lock);
-	mutex_init(&net->xfrm.xfrm_cfg_mutex);
 
 	return 0;
 
+18
tools/include/uapi/linux/bpf_perf_event.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__
+#define _UAPI__LINUX_BPF_PERF_EVENT_H__
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+
+struct bpf_perf_event_data {
+	struct pt_regs regs;
+	__u64 sample_period;
+};
+
+#endif /* _UAPI__LINUX_BPF_PERF_EVENT_H__ */