Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'platform-drivers-x86-v5.2-1' of git://git.infradead.org/linux-platform-drivers-x86

Pull x86 platform driver updates from Andy Shevchenko:
"Gathered pile of patches for Platform Drivers x86. No surprises and no
merge conflicts. Business as usual.

Summary:

- New power button driver for the Basin Cove PMIC.

- ASUS WMI driver has got a Fn lock mode switch support.

- Resolve a never-ending story with non-working Wi-Fi on newer Lenovo
Ideapad computers. The blacklist is now replaced with a whitelist.

- New facility to debug S0ix failures on Intel Atom platforms. The
Intel PMC and accompanying drivers are cleaned up.

- Mellanox got a new TmFifo driver. Besides that, the tachometer sensor
and watchdog are enabled on Mellanox platforms.

- Embedded controller information is now recognized on newer
Thinkpads. The Bluetooth driver on Thinkpads is blacklisted for some
models.

- Touchscreen DMI driver extended to support 'jumper ezpad 6 pro b'
and Myria MY8307 2-in-1.

- Additionally, a few small fixes here and there for WMI and ACPI
laptop drivers.

- The following is an automated git shortlog grouped by driver:

- alienware-wmi:
- printing the wrong error code
- fix kfree on potentially uninitialized pointer

- asus-wmi:
- Add fn-lock mode switch support

- dell-laptop:
- fix rfkill functionality

- dell-rbtn:
- Add missing #include

- ideapad-laptop:
- Remove no_hw_rfkill_list

- intel_pmc_core:
- Allow to dump debug registers on S0ix failure
- Convert to a platform_driver
- Mark local function static

- intel_pmc_ipc:
- Don't map non-used optional resources
- Apply same width for offset definitions
- Use BIT() macro
- adding error handling

- intel_punit_ipc:
- Revert "Fix resource ioremap warning"

- mlx-platform:
- Add mlx-wdt platform driver activation
- Add support for tachometer speed register
- Add TmFifo driver for Mellanox BlueField Soc

- sony-laptop:
- Fix unintentional fall-through

- thinkpad_acpi:
- cleanup for Thinkpad ACPI led
- Mark expected switch fall-throughs
- fix spelling mistake "capabilites" -> "capabilities"
- Read EC information on newer models
- Disable Bluetooth for some machines

- touchscreen_dmi:
- Add info for 'jumper ezpad 6 pro b' touchscreen
- Add info for Myria MY8307 2-in-1"

* tag 'platform-drivers-x86-v5.2-1' of git://git.infradead.org/linux-platform-drivers-x86: (26 commits)
platform/x86: Add support for Basin Cove power button
platform/x86: asus-wmi: Add fn-lock mode switch support
platform/x86: ideapad-laptop: Remove no_hw_rfkill_list
platform/x86: touchscreen_dmi: Add info for 'jumper ezpad 6 pro b' touchscreen
platform/x86: thinkpad_acpi: cleanup for Thinkpad ACPI led
platform/x86: thinkpad_acpi: Mark expected switch fall-throughs
platform/x86: sony-laptop: Fix unintentional fall-through
platform/x86: alienware-wmi: printing the wrong error code
platform/x86: intel_pmc_core: Allow to dump debug registers on S0ix failure
platform/x86: intel_pmc_core: Convert to a platform_driver
platform/x86: mlx-platform: Add mlx-wdt platform driver activation
platform/x86: mlx-platform: Add support for tachometer speed register
platform/mellanox: Add TmFifo driver for Mellanox BlueField Soc
platform/x86: thinkpad_acpi: fix spelling mistake "capabilites" -> "capabilities"
platform/x86: intel_punit_ipc: Revert "Fix resource ioremap warning"
platform/x86: intel_pmc_ipc: Don't map non-used optional resources
platform/x86: intel_pmc_ipc: Apply same width for offset definitions
platform/x86: intel_pmc_ipc: Use BIT() macro
platform/x86: alienware-wmi: fix kfree on potentially uninitialized pointer
platform/x86: dell-laptop: fix rfkill functionality
...

+2149 -383
+11 -1
drivers/platform/mellanox/Kconfig
··· 5 5 6 6 menuconfig MELLANOX_PLATFORM 7 7 bool "Platform support for Mellanox hardware" 8 - depends on X86 || ARM || COMPILE_TEST 8 + depends on X86 || ARM || ARM64 || COMPILE_TEST 9 9 ---help--- 10 10 Say Y here to get to see options for platform support for 11 11 Mellanox systems. This option alone does not add any kernel code. ··· 33 33 are defined per system type bases and include the registers related 34 34 to system resets operation, system reset causes monitoring and some 35 35 kinds of mux selection. 36 + 37 + config MLXBF_TMFIFO 38 + tristate "Mellanox BlueField SoC TmFifo platform driver" 39 + depends on ARM64 40 + depends on ACPI 41 + depends on VIRTIO_CONSOLE && VIRTIO_NET 42 + help 43 + Say y here to enable TmFifo support. The TmFifo driver provides 44 + platform driver support for the TmFifo which supports console 45 + and networking based on the virtio framework. 36 46 37 47 endif # MELLANOX_PLATFORM
+1
drivers/platform/mellanox/Makefile
··· 3 3 # Makefile for linux/drivers/platform/mellanox 4 4 # Mellanox Platform-Specific Drivers 5 5 # 6 + obj-$(CONFIG_MLXBF_TMFIFO) += mlxbf-tmfifo.o 6 7 obj-$(CONFIG_MLXREG_HOTPLUG) += mlxreg-hotplug.o 7 8 obj-$(CONFIG_MLXREG_IO) += mlxreg-io.o
+63
drivers/platform/mellanox/mlxbf-tmfifo-regs.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2019, Mellanox Technologies. All rights reserved. 4 + */ 5 + 6 + #ifndef __MLXBF_TMFIFO_REGS_H__ 7 + #define __MLXBF_TMFIFO_REGS_H__ 8 + 9 + #include <linux/types.h> 10 + #include <linux/bits.h> 11 + 12 + #define MLXBF_TMFIFO_TX_DATA 0x00 13 + #define MLXBF_TMFIFO_TX_STS 0x08 14 + #define MLXBF_TMFIFO_TX_STS__LENGTH 0x0001 15 + #define MLXBF_TMFIFO_TX_STS__COUNT_SHIFT 0 16 + #define MLXBF_TMFIFO_TX_STS__COUNT_WIDTH 9 17 + #define MLXBF_TMFIFO_TX_STS__COUNT_RESET_VAL 0 18 + #define MLXBF_TMFIFO_TX_STS__COUNT_RMASK GENMASK_ULL(8, 0) 19 + #define MLXBF_TMFIFO_TX_STS__COUNT_MASK GENMASK_ULL(8, 0) 20 + #define MLXBF_TMFIFO_TX_CTL 0x10 21 + #define MLXBF_TMFIFO_TX_CTL__LENGTH 0x0001 22 + #define MLXBF_TMFIFO_TX_CTL__LWM_SHIFT 0 23 + #define MLXBF_TMFIFO_TX_CTL__LWM_WIDTH 8 24 + #define MLXBF_TMFIFO_TX_CTL__LWM_RESET_VAL 128 25 + #define MLXBF_TMFIFO_TX_CTL__LWM_RMASK GENMASK_ULL(7, 0) 26 + #define MLXBF_TMFIFO_TX_CTL__LWM_MASK GENMASK_ULL(7, 0) 27 + #define MLXBF_TMFIFO_TX_CTL__HWM_SHIFT 8 28 + #define MLXBF_TMFIFO_TX_CTL__HWM_WIDTH 8 29 + #define MLXBF_TMFIFO_TX_CTL__HWM_RESET_VAL 128 30 + #define MLXBF_TMFIFO_TX_CTL__HWM_RMASK GENMASK_ULL(7, 0) 31 + #define MLXBF_TMFIFO_TX_CTL__HWM_MASK GENMASK_ULL(15, 8) 32 + #define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_SHIFT 32 33 + #define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_WIDTH 9 34 + #define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_RESET_VAL 256 35 + #define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_RMASK GENMASK_ULL(8, 0) 36 + #define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK GENMASK_ULL(40, 32) 37 + #define MLXBF_TMFIFO_RX_DATA 0x00 38 + #define MLXBF_TMFIFO_RX_STS 0x08 39 + #define MLXBF_TMFIFO_RX_STS__LENGTH 0x0001 40 + #define MLXBF_TMFIFO_RX_STS__COUNT_SHIFT 0 41 + #define MLXBF_TMFIFO_RX_STS__COUNT_WIDTH 9 42 + #define MLXBF_TMFIFO_RX_STS__COUNT_RESET_VAL 0 43 + #define MLXBF_TMFIFO_RX_STS__COUNT_RMASK GENMASK_ULL(8, 0) 44 + #define MLXBF_TMFIFO_RX_STS__COUNT_MASK 
GENMASK_ULL(8, 0) 45 + #define MLXBF_TMFIFO_RX_CTL 0x10 46 + #define MLXBF_TMFIFO_RX_CTL__LENGTH 0x0001 47 + #define MLXBF_TMFIFO_RX_CTL__LWM_SHIFT 0 48 + #define MLXBF_TMFIFO_RX_CTL__LWM_WIDTH 8 49 + #define MLXBF_TMFIFO_RX_CTL__LWM_RESET_VAL 128 50 + #define MLXBF_TMFIFO_RX_CTL__LWM_RMASK GENMASK_ULL(7, 0) 51 + #define MLXBF_TMFIFO_RX_CTL__LWM_MASK GENMASK_ULL(7, 0) 52 + #define MLXBF_TMFIFO_RX_CTL__HWM_SHIFT 8 53 + #define MLXBF_TMFIFO_RX_CTL__HWM_WIDTH 8 54 + #define MLXBF_TMFIFO_RX_CTL__HWM_RESET_VAL 128 55 + #define MLXBF_TMFIFO_RX_CTL__HWM_RMASK GENMASK_ULL(7, 0) 56 + #define MLXBF_TMFIFO_RX_CTL__HWM_MASK GENMASK_ULL(15, 8) 57 + #define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_SHIFT 32 58 + #define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_WIDTH 9 59 + #define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_RESET_VAL 256 60 + #define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_RMASK GENMASK_ULL(8, 0) 61 + #define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK GENMASK_ULL(40, 32) 62 + 63 + #endif /* !defined(__MLXBF_TMFIFO_REGS_H__) */
+1281
drivers/platform/mellanox/mlxbf-tmfifo.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * Mellanox BlueField SoC TmFifo driver 4 + * 5 + * Copyright (C) 2019 Mellanox Technologies 6 + */ 7 + 8 + #include <linux/acpi.h> 9 + #include <linux/bitfield.h> 10 + #include <linux/circ_buf.h> 11 + #include <linux/efi.h> 12 + #include <linux/irq.h> 13 + #include <linux/module.h> 14 + #include <linux/mutex.h> 15 + #include <linux/platform_device.h> 16 + #include <linux/types.h> 17 + 18 + #include <linux/virtio_config.h> 19 + #include <linux/virtio_console.h> 20 + #include <linux/virtio_ids.h> 21 + #include <linux/virtio_net.h> 22 + #include <linux/virtio_ring.h> 23 + 24 + #include "mlxbf-tmfifo-regs.h" 25 + 26 + /* Vring size. */ 27 + #define MLXBF_TMFIFO_VRING_SIZE SZ_1K 28 + 29 + /* Console Tx buffer size. */ 30 + #define MLXBF_TMFIFO_CON_TX_BUF_SIZE SZ_32K 31 + 32 + /* Console Tx buffer reserved space. */ 33 + #define MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE 8 34 + 35 + /* House-keeping timer interval. */ 36 + #define MLXBF_TMFIFO_TIMER_INTERVAL (HZ / 10) 37 + 38 + /* Virtual devices sharing the TM FIFO. */ 39 + #define MLXBF_TMFIFO_VDEV_MAX (VIRTIO_ID_CONSOLE + 1) 40 + 41 + /* 42 + * Reserve 1/16 of TmFifo space, so console messages are not starved by 43 + * the networking traffic. 44 + */ 45 + #define MLXBF_TMFIFO_RESERVE_RATIO 16 46 + 47 + /* Message with data needs at least two words (for header & data). 
*/ 48 + #define MLXBF_TMFIFO_DATA_MIN_WORDS 2 49 + 50 + struct mlxbf_tmfifo; 51 + 52 + /** 53 + * mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring 54 + * @va: virtual address of the ring 55 + * @dma: dma address of the ring 56 + * @vq: pointer to the virtio virtqueue 57 + * @desc: current descriptor of the pending packet 58 + * @desc_head: head descriptor of the pending packet 59 + * @cur_len: processed length of the current descriptor 60 + * @rem_len: remaining length of the pending packet 61 + * @pkt_len: total length of the pending packet 62 + * @next_avail: next avail descriptor id 63 + * @num: vring size (number of descriptors) 64 + * @align: vring alignment size 65 + * @index: vring index 66 + * @vdev_id: vring virtio id (VIRTIO_ID_xxx) 67 + * @fifo: pointer to the tmfifo structure 68 + */ 69 + struct mlxbf_tmfifo_vring { 70 + void *va; 71 + dma_addr_t dma; 72 + struct virtqueue *vq; 73 + struct vring_desc *desc; 74 + struct vring_desc *desc_head; 75 + int cur_len; 76 + int rem_len; 77 + u32 pkt_len; 78 + u16 next_avail; 79 + int num; 80 + int align; 81 + int index; 82 + int vdev_id; 83 + struct mlxbf_tmfifo *fifo; 84 + }; 85 + 86 + /* Interrupt types. */ 87 + enum { 88 + MLXBF_TM_RX_LWM_IRQ, 89 + MLXBF_TM_RX_HWM_IRQ, 90 + MLXBF_TM_TX_LWM_IRQ, 91 + MLXBF_TM_TX_HWM_IRQ, 92 + MLXBF_TM_MAX_IRQ 93 + }; 94 + 95 + /* Ring types (Rx & Tx). */ 96 + enum { 97 + MLXBF_TMFIFO_VRING_RX, 98 + MLXBF_TMFIFO_VRING_TX, 99 + MLXBF_TMFIFO_VRING_MAX 100 + }; 101 + 102 + /** 103 + * mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device 104 + * @vdev: virtio device, in which the vdev.id.device field has the 105 + * VIRTIO_ID_xxx id to distinguish the virtual device. 
106 + * @status: status of the device 107 + * @features: supported features of the device 108 + * @vrings: array of tmfifo vrings of this device 109 + * @config.cons: virtual console config - 110 + * select if vdev.id.device is VIRTIO_ID_CONSOLE 111 + * @config.net: virtual network config - 112 + * select if vdev.id.device is VIRTIO_ID_NET 113 + * @tx_buf: tx buffer used to buffer data before writing into the FIFO 114 + */ 115 + struct mlxbf_tmfifo_vdev { 116 + struct virtio_device vdev; 117 + u8 status; 118 + u64 features; 119 + struct mlxbf_tmfifo_vring vrings[MLXBF_TMFIFO_VRING_MAX]; 120 + union { 121 + struct virtio_console_config cons; 122 + struct virtio_net_config net; 123 + } config; 124 + struct circ_buf tx_buf; 125 + }; 126 + 127 + /** 128 + * mlxbf_tmfifo_irq_info - Structure of the interrupt information 129 + * @fifo: pointer to the tmfifo structure 130 + * @irq: interrupt number 131 + * @index: index into the interrupt array 132 + */ 133 + struct mlxbf_tmfifo_irq_info { 134 + struct mlxbf_tmfifo *fifo; 135 + int irq; 136 + int index; 137 + }; 138 + 139 + /** 140 + * mlxbf_tmfifo - Structure of the TmFifo 141 + * @vdev: array of the virtual devices running over the TmFifo 142 + * @lock: lock to protect the TmFifo access 143 + * @rx_base: mapped register base address for the Rx FIFO 144 + * @tx_base: mapped register base address for the Tx FIFO 145 + * @rx_fifo_size: number of entries of the Rx FIFO 146 + * @tx_fifo_size: number of entries of the Tx FIFO 147 + * @pend_events: pending bits for deferred events 148 + * @irq_info: interrupt information 149 + * @work: work struct for deferred process 150 + * @timer: background timer 151 + * @vring: Tx/Rx ring 152 + * @spin_lock: spin lock 153 + * @is_ready: ready flag 154 + */ 155 + struct mlxbf_tmfifo { 156 + struct mlxbf_tmfifo_vdev *vdev[MLXBF_TMFIFO_VDEV_MAX]; 157 + struct mutex lock; /* TmFifo lock */ 158 + void __iomem *rx_base; 159 + void __iomem *tx_base; 160 + int rx_fifo_size; 161 + int 
tx_fifo_size; 162 + unsigned long pend_events; 163 + struct mlxbf_tmfifo_irq_info irq_info[MLXBF_TM_MAX_IRQ]; 164 + struct work_struct work; 165 + struct timer_list timer; 166 + struct mlxbf_tmfifo_vring *vring[2]; 167 + spinlock_t spin_lock; /* spin lock */ 168 + bool is_ready; 169 + }; 170 + 171 + /** 172 + * mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header 173 + * @type: message type 174 + * @len: payload length in network byte order. Messages sent into the FIFO 175 + * will be read by the other side as data stream in the same byte order. 176 + * The length needs to be encoded into network order so both sides 177 + * could understand it. 178 + */ 179 + struct mlxbf_tmfifo_msg_hdr { 180 + u8 type; 181 + __be16 len; 182 + u8 unused[5]; 183 + } __packed __aligned(sizeof(u64)); 184 + 185 + /* 186 + * Default MAC. 187 + * This MAC address will be read from EFI persistent variable if configured. 188 + * It can also be reconfigured with standard Linux tools. 189 + */ 190 + static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = { 191 + 0x00, 0x1A, 0xCA, 0xFF, 0xFF, 0x01 192 + }; 193 + 194 + /* EFI variable name of the MAC address. */ 195 + static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr"; 196 + 197 + /* Maximum L2 header length. */ 198 + #define MLXBF_TMFIFO_NET_L2_OVERHEAD 36 199 + 200 + /* Supported virtio-net features. */ 201 + #define MLXBF_TMFIFO_NET_FEATURES \ 202 + (BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_STATUS) | \ 203 + BIT_ULL(VIRTIO_NET_F_MAC)) 204 + 205 + #define mlxbf_vdev_to_tmfifo(d) container_of(d, struct mlxbf_tmfifo_vdev, vdev) 206 + 207 + /* Free vrings of the FIFO device. 
*/ 208 + static void mlxbf_tmfifo_free_vrings(struct mlxbf_tmfifo *fifo, 209 + struct mlxbf_tmfifo_vdev *tm_vdev) 210 + { 211 + struct mlxbf_tmfifo_vring *vring; 212 + int i, size; 213 + 214 + for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) { 215 + vring = &tm_vdev->vrings[i]; 216 + if (vring->va) { 217 + size = vring_size(vring->num, vring->align); 218 + dma_free_coherent(tm_vdev->vdev.dev.parent, size, 219 + vring->va, vring->dma); 220 + vring->va = NULL; 221 + if (vring->vq) { 222 + vring_del_virtqueue(vring->vq); 223 + vring->vq = NULL; 224 + } 225 + } 226 + } 227 + } 228 + 229 + /* Allocate vrings for the FIFO. */ 230 + static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo, 231 + struct mlxbf_tmfifo_vdev *tm_vdev) 232 + { 233 + struct mlxbf_tmfifo_vring *vring; 234 + struct device *dev; 235 + dma_addr_t dma; 236 + int i, size; 237 + void *va; 238 + 239 + for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) { 240 + vring = &tm_vdev->vrings[i]; 241 + vring->fifo = fifo; 242 + vring->num = MLXBF_TMFIFO_VRING_SIZE; 243 + vring->align = SMP_CACHE_BYTES; 244 + vring->index = i; 245 + vring->vdev_id = tm_vdev->vdev.id.device; 246 + dev = &tm_vdev->vdev.dev; 247 + 248 + size = vring_size(vring->num, vring->align); 249 + va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL); 250 + if (!va) { 251 + mlxbf_tmfifo_free_vrings(fifo, tm_vdev); 252 + dev_err(dev->parent, "dma_alloc_coherent failed\n"); 253 + return -ENOMEM; 254 + } 255 + 256 + vring->va = va; 257 + vring->dma = dma; 258 + } 259 + 260 + return 0; 261 + } 262 + 263 + /* Disable interrupts of the FIFO device. */ 264 + static void mlxbf_tmfifo_disable_irqs(struct mlxbf_tmfifo *fifo) 265 + { 266 + int i, irq; 267 + 268 + for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) { 269 + irq = fifo->irq_info[i].irq; 270 + fifo->irq_info[i].irq = 0; 271 + disable_irq(irq); 272 + } 273 + } 274 + 275 + /* Interrupt handler. 
*/ 276 + static irqreturn_t mlxbf_tmfifo_irq_handler(int irq, void *arg) 277 + { 278 + struct mlxbf_tmfifo_irq_info *irq_info = arg; 279 + 280 + if (!test_and_set_bit(irq_info->index, &irq_info->fifo->pend_events)) 281 + schedule_work(&irq_info->fifo->work); 282 + 283 + return IRQ_HANDLED; 284 + } 285 + 286 + /* Get the next packet descriptor from the vring. */ 287 + static struct vring_desc * 288 + mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring) 289 + { 290 + const struct vring *vr = virtqueue_get_vring(vring->vq); 291 + struct virtio_device *vdev = vring->vq->vdev; 292 + unsigned int idx, head; 293 + 294 + if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx)) 295 + return NULL; 296 + 297 + idx = vring->next_avail % vr->num; 298 + head = virtio16_to_cpu(vdev, vr->avail->ring[idx]); 299 + if (WARN_ON(head >= vr->num)) 300 + return NULL; 301 + 302 + vring->next_avail++; 303 + 304 + return &vr->desc[head]; 305 + } 306 + 307 + /* Release virtio descriptor. */ 308 + static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring, 309 + struct vring_desc *desc, u32 len) 310 + { 311 + const struct vring *vr = virtqueue_get_vring(vring->vq); 312 + struct virtio_device *vdev = vring->vq->vdev; 313 + u16 idx, vr_idx; 314 + 315 + vr_idx = virtio16_to_cpu(vdev, vr->used->idx); 316 + idx = vr_idx % vr->num; 317 + vr->used->ring[idx].id = cpu_to_virtio32(vdev, desc - vr->desc); 318 + vr->used->ring[idx].len = cpu_to_virtio32(vdev, len); 319 + 320 + /* 321 + * Virtio could poll and check the 'idx' to decide whether the desc is 322 + * done or not. Add a memory barrier here to make sure the update above 323 + * completes before updating the idx. 324 + */ 325 + mb(); 326 + vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1); 327 + } 328 + 329 + /* Get the total length of the descriptor chain. 
*/ 330 + static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring, 331 + struct vring_desc *desc) 332 + { 333 + const struct vring *vr = virtqueue_get_vring(vring->vq); 334 + struct virtio_device *vdev = vring->vq->vdev; 335 + u32 len = 0, idx; 336 + 337 + while (desc) { 338 + len += virtio32_to_cpu(vdev, desc->len); 339 + if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) 340 + break; 341 + idx = virtio16_to_cpu(vdev, desc->next); 342 + desc = &vr->desc[idx]; 343 + } 344 + 345 + return len; 346 + } 347 + 348 + static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring) 349 + { 350 + struct vring_desc *desc_head; 351 + u32 len = 0; 352 + 353 + if (vring->desc_head) { 354 + desc_head = vring->desc_head; 355 + len = vring->pkt_len; 356 + } else { 357 + desc_head = mlxbf_tmfifo_get_next_desc(vring); 358 + len = mlxbf_tmfifo_get_pkt_len(vring, desc_head); 359 + } 360 + 361 + if (desc_head) 362 + mlxbf_tmfifo_release_desc(vring, desc_head, len); 363 + 364 + vring->pkt_len = 0; 365 + vring->desc = NULL; 366 + vring->desc_head = NULL; 367 + } 368 + 369 + static void mlxbf_tmfifo_init_net_desc(struct mlxbf_tmfifo_vring *vring, 370 + struct vring_desc *desc, bool is_rx) 371 + { 372 + struct virtio_device *vdev = vring->vq->vdev; 373 + struct virtio_net_hdr *net_hdr; 374 + 375 + net_hdr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr)); 376 + memset(net_hdr, 0, sizeof(*net_hdr)); 377 + } 378 + 379 + /* Get and initialize the next packet. */ 380 + static struct vring_desc * 381 + mlxbf_tmfifo_get_next_pkt(struct mlxbf_tmfifo_vring *vring, bool is_rx) 382 + { 383 + struct vring_desc *desc; 384 + 385 + desc = mlxbf_tmfifo_get_next_desc(vring); 386 + if (desc && is_rx && vring->vdev_id == VIRTIO_ID_NET) 387 + mlxbf_tmfifo_init_net_desc(vring, desc, is_rx); 388 + 389 + vring->desc_head = desc; 390 + vring->desc = desc; 391 + 392 + return desc; 393 + } 394 + 395 + /* House-keeping timer. 
*/ 396 + static void mlxbf_tmfifo_timer(struct timer_list *t) 397 + { 398 + struct mlxbf_tmfifo *fifo = container_of(t, struct mlxbf_tmfifo, timer); 399 + int rx, tx; 400 + 401 + rx = !test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events); 402 + tx = !test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events); 403 + 404 + if (rx || tx) 405 + schedule_work(&fifo->work); 406 + 407 + mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL); 408 + } 409 + 410 + /* Copy one console packet into the output buffer. */ 411 + static void mlxbf_tmfifo_console_output_one(struct mlxbf_tmfifo_vdev *cons, 412 + struct mlxbf_tmfifo_vring *vring, 413 + struct vring_desc *desc) 414 + { 415 + const struct vring *vr = virtqueue_get_vring(vring->vq); 416 + struct virtio_device *vdev = &cons->vdev; 417 + u32 len, idx, seg; 418 + void *addr; 419 + 420 + while (desc) { 421 + addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr)); 422 + len = virtio32_to_cpu(vdev, desc->len); 423 + 424 + seg = CIRC_SPACE_TO_END(cons->tx_buf.head, cons->tx_buf.tail, 425 + MLXBF_TMFIFO_CON_TX_BUF_SIZE); 426 + if (len <= seg) { 427 + memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, len); 428 + } else { 429 + memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, seg); 430 + addr += seg; 431 + memcpy(cons->tx_buf.buf, addr, len - seg); 432 + } 433 + cons->tx_buf.head = (cons->tx_buf.head + len) % 434 + MLXBF_TMFIFO_CON_TX_BUF_SIZE; 435 + 436 + if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) 437 + break; 438 + idx = virtio16_to_cpu(vdev, desc->next); 439 + desc = &vr->desc[idx]; 440 + } 441 + } 442 + 443 + /* Copy console data into the output buffer. */ 444 + static void mlxbf_tmfifo_console_output(struct mlxbf_tmfifo_vdev *cons, 445 + struct mlxbf_tmfifo_vring *vring) 446 + { 447 + struct vring_desc *desc; 448 + u32 len, avail; 449 + 450 + desc = mlxbf_tmfifo_get_next_desc(vring); 451 + while (desc) { 452 + /* Release the packet if not enough space. 
*/ 453 + len = mlxbf_tmfifo_get_pkt_len(vring, desc); 454 + avail = CIRC_SPACE(cons->tx_buf.head, cons->tx_buf.tail, 455 + MLXBF_TMFIFO_CON_TX_BUF_SIZE); 456 + if (len + MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE > avail) { 457 + mlxbf_tmfifo_release_desc(vring, desc, len); 458 + break; 459 + } 460 + 461 + mlxbf_tmfifo_console_output_one(cons, vring, desc); 462 + mlxbf_tmfifo_release_desc(vring, desc, len); 463 + desc = mlxbf_tmfifo_get_next_desc(vring); 464 + } 465 + } 466 + 467 + /* Get the number of available words in Rx FIFO for receiving. */ 468 + static int mlxbf_tmfifo_get_rx_avail(struct mlxbf_tmfifo *fifo) 469 + { 470 + u64 sts; 471 + 472 + sts = readq(fifo->rx_base + MLXBF_TMFIFO_RX_STS); 473 + return FIELD_GET(MLXBF_TMFIFO_RX_STS__COUNT_MASK, sts); 474 + } 475 + 476 + /* Get the number of available words in the TmFifo for sending. */ 477 + static int mlxbf_tmfifo_get_tx_avail(struct mlxbf_tmfifo *fifo, int vdev_id) 478 + { 479 + int tx_reserve; 480 + u32 count; 481 + u64 sts; 482 + 483 + /* Reserve some room in FIFO for console messages. */ 484 + if (vdev_id == VIRTIO_ID_NET) 485 + tx_reserve = fifo->tx_fifo_size / MLXBF_TMFIFO_RESERVE_RATIO; 486 + else 487 + tx_reserve = 1; 488 + 489 + sts = readq(fifo->tx_base + MLXBF_TMFIFO_TX_STS); 490 + count = FIELD_GET(MLXBF_TMFIFO_TX_STS__COUNT_MASK, sts); 491 + return fifo->tx_fifo_size - tx_reserve - count; 492 + } 493 + 494 + /* Console Tx (move data from the output buffer into the TmFifo). */ 495 + static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail) 496 + { 497 + struct mlxbf_tmfifo_msg_hdr hdr; 498 + struct mlxbf_tmfifo_vdev *cons; 499 + unsigned long flags; 500 + int size, seg; 501 + void *addr; 502 + u64 data; 503 + 504 + /* Return if not enough space available. */ 505 + if (avail < MLXBF_TMFIFO_DATA_MIN_WORDS) 506 + return; 507 + 508 + cons = fifo->vdev[VIRTIO_ID_CONSOLE]; 509 + if (!cons || !cons->tx_buf.buf) 510 + return; 511 + 512 + /* Return if no data to send. 
*/ 513 + size = CIRC_CNT(cons->tx_buf.head, cons->tx_buf.tail, 514 + MLXBF_TMFIFO_CON_TX_BUF_SIZE); 515 + if (size == 0) 516 + return; 517 + 518 + /* Adjust the size to available space. */ 519 + if (size + sizeof(hdr) > avail * sizeof(u64)) 520 + size = avail * sizeof(u64) - sizeof(hdr); 521 + 522 + /* Write header. */ 523 + hdr.type = VIRTIO_ID_CONSOLE; 524 + hdr.len = htons(size); 525 + writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA); 526 + 527 + /* Use spin-lock to protect the 'cons->tx_buf'. */ 528 + spin_lock_irqsave(&fifo->spin_lock, flags); 529 + 530 + while (size > 0) { 531 + addr = cons->tx_buf.buf + cons->tx_buf.tail; 532 + 533 + seg = CIRC_CNT_TO_END(cons->tx_buf.head, cons->tx_buf.tail, 534 + MLXBF_TMFIFO_CON_TX_BUF_SIZE); 535 + if (seg >= sizeof(u64)) { 536 + memcpy(&data, addr, sizeof(u64)); 537 + } else { 538 + memcpy(&data, addr, seg); 539 + memcpy((u8 *)&data + seg, cons->tx_buf.buf, 540 + sizeof(u64) - seg); 541 + } 542 + writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA); 543 + 544 + if (size >= sizeof(u64)) { 545 + cons->tx_buf.tail = (cons->tx_buf.tail + sizeof(u64)) % 546 + MLXBF_TMFIFO_CON_TX_BUF_SIZE; 547 + size -= sizeof(u64); 548 + } else { 549 + cons->tx_buf.tail = (cons->tx_buf.tail + size) % 550 + MLXBF_TMFIFO_CON_TX_BUF_SIZE; 551 + size = 0; 552 + } 553 + } 554 + 555 + spin_unlock_irqrestore(&fifo->spin_lock, flags); 556 + } 557 + 558 + /* Rx/Tx one word in the descriptor buffer. */ 559 + static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring, 560 + struct vring_desc *desc, 561 + bool is_rx, int len) 562 + { 563 + struct virtio_device *vdev = vring->vq->vdev; 564 + struct mlxbf_tmfifo *fifo = vring->fifo; 565 + void *addr; 566 + u64 data; 567 + 568 + /* Get the buffer address of this desc. */ 569 + addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr)); 570 + 571 + /* Read a word from FIFO for Rx. 
*/ 572 + if (is_rx) 573 + data = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA); 574 + 575 + if (vring->cur_len + sizeof(u64) <= len) { 576 + /* The whole word. */ 577 + if (is_rx) 578 + memcpy(addr + vring->cur_len, &data, sizeof(u64)); 579 + else 580 + memcpy(&data, addr + vring->cur_len, sizeof(u64)); 581 + vring->cur_len += sizeof(u64); 582 + } else { 583 + /* Leftover bytes. */ 584 + if (is_rx) 585 + memcpy(addr + vring->cur_len, &data, 586 + len - vring->cur_len); 587 + else 588 + memcpy(&data, addr + vring->cur_len, 589 + len - vring->cur_len); 590 + vring->cur_len = len; 591 + } 592 + 593 + /* Write the word into FIFO for Tx. */ 594 + if (!is_rx) 595 + writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA); 596 + } 597 + 598 + /* 599 + * Rx/Tx packet header. 600 + * 601 + * In Rx case, the packet might be found to belong to a different vring since 602 + * the TmFifo is shared by different services. In such case, the 'vring_change' 603 + * flag is set. 604 + */ 605 + static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring, 606 + struct vring_desc *desc, 607 + bool is_rx, bool *vring_change) 608 + { 609 + struct mlxbf_tmfifo *fifo = vring->fifo; 610 + struct virtio_net_config *config; 611 + struct mlxbf_tmfifo_msg_hdr hdr; 612 + int vdev_id, hdr_len; 613 + 614 + /* Read/Write packet header. */ 615 + if (is_rx) { 616 + /* Drain one word from the FIFO. */ 617 + *(u64 *)&hdr = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA); 618 + 619 + /* Skip the length 0 packets (keepalive). */ 620 + if (hdr.len == 0) 621 + return; 622 + 623 + /* Check packet type. 
*/ 624 + if (hdr.type == VIRTIO_ID_NET) { 625 + vdev_id = VIRTIO_ID_NET; 626 + hdr_len = sizeof(struct virtio_net_hdr); 627 + config = &fifo->vdev[vdev_id]->config.net; 628 + if (ntohs(hdr.len) > config->mtu + 629 + MLXBF_TMFIFO_NET_L2_OVERHEAD) 630 + return; 631 + } else { 632 + vdev_id = VIRTIO_ID_CONSOLE; 633 + hdr_len = 0; 634 + } 635 + 636 + /* 637 + * Check whether the new packet still belongs to this vring. 638 + * If not, update the pkt_len of the new vring. 639 + */ 640 + if (vdev_id != vring->vdev_id) { 641 + struct mlxbf_tmfifo_vdev *tm_dev2 = fifo->vdev[vdev_id]; 642 + 643 + if (!tm_dev2) 644 + return; 645 + vring->desc = desc; 646 + vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX]; 647 + *vring_change = true; 648 + } 649 + vring->pkt_len = ntohs(hdr.len) + hdr_len; 650 + } else { 651 + /* Network virtio has an extra header. */ 652 + hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ? 653 + sizeof(struct virtio_net_hdr) : 0; 654 + vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc); 655 + hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ? 656 + VIRTIO_ID_NET : VIRTIO_ID_CONSOLE; 657 + hdr.len = htons(vring->pkt_len - hdr_len); 658 + writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA); 659 + } 660 + 661 + vring->cur_len = hdr_len; 662 + vring->rem_len = vring->pkt_len; 663 + fifo->vring[is_rx] = vring; 664 + } 665 + 666 + /* 667 + * Rx/Tx one descriptor. 668 + * 669 + * Return true to indicate more data available. 670 + */ 671 + static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring, 672 + bool is_rx, int *avail) 673 + { 674 + const struct vring *vr = virtqueue_get_vring(vring->vq); 675 + struct mlxbf_tmfifo *fifo = vring->fifo; 676 + struct virtio_device *vdev; 677 + bool vring_change = false; 678 + struct vring_desc *desc; 679 + unsigned long flags; 680 + u32 len, idx; 681 + 682 + vdev = &fifo->vdev[vring->vdev_id]->vdev; 683 + 684 + /* Get the descriptor of the next packet. 
*/ 685 + if (!vring->desc) { 686 + desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx); 687 + if (!desc) 688 + return false; 689 + } else { 690 + desc = vring->desc; 691 + } 692 + 693 + /* Beginning of a packet. Start to Rx/Tx packet header. */ 694 + if (vring->pkt_len == 0) { 695 + mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change); 696 + (*avail)--; 697 + 698 + /* Return if new packet is for another ring. */ 699 + if (vring_change) 700 + return false; 701 + goto mlxbf_tmfifo_desc_done; 702 + } 703 + 704 + /* Get the length of this desc. */ 705 + len = virtio32_to_cpu(vdev, desc->len); 706 + if (len > vring->rem_len) 707 + len = vring->rem_len; 708 + 709 + /* Rx/Tx one word (8 bytes) if not done. */ 710 + if (vring->cur_len < len) { 711 + mlxbf_tmfifo_rxtx_word(vring, desc, is_rx, len); 712 + (*avail)--; 713 + } 714 + 715 + /* Check again whether it's done. */ 716 + if (vring->cur_len == len) { 717 + vring->cur_len = 0; 718 + vring->rem_len -= len; 719 + 720 + /* Get the next desc on the chain. */ 721 + if (vring->rem_len > 0 && 722 + (virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) { 723 + idx = virtio16_to_cpu(vdev, desc->next); 724 + desc = &vr->desc[idx]; 725 + goto mlxbf_tmfifo_desc_done; 726 + } 727 + 728 + /* Done and release the pending packet. */ 729 + mlxbf_tmfifo_release_pending_pkt(vring); 730 + desc = NULL; 731 + fifo->vring[is_rx] = NULL; 732 + 733 + /* Notify upper layer that packet is done. */ 734 + spin_lock_irqsave(&fifo->spin_lock, flags); 735 + vring_interrupt(0, vring->vq); 736 + spin_unlock_irqrestore(&fifo->spin_lock, flags); 737 + } 738 + 739 + mlxbf_tmfifo_desc_done: 740 + /* Save the current desc. */ 741 + vring->desc = desc; 742 + 743 + return true; 744 + } 745 + 746 + /* Rx & Tx processing of a queue. 
*/ 747 + static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx) 748 + { 749 + int avail = 0, devid = vring->vdev_id; 750 + struct mlxbf_tmfifo *fifo; 751 + bool more; 752 + 753 + fifo = vring->fifo; 754 + 755 + /* Return if vdev is not ready. */ 756 + if (!fifo->vdev[devid]) 757 + return; 758 + 759 + /* Return if another vring is running. */ 760 + if (fifo->vring[is_rx] && fifo->vring[is_rx] != vring) 761 + return; 762 + 763 + /* Only handle console and network for now. */ 764 + if (WARN_ON(devid != VIRTIO_ID_NET && devid != VIRTIO_ID_CONSOLE)) 765 + return; 766 + 767 + do { 768 + /* Get available FIFO space. */ 769 + if (avail == 0) { 770 + if (is_rx) 771 + avail = mlxbf_tmfifo_get_rx_avail(fifo); 772 + else 773 + avail = mlxbf_tmfifo_get_tx_avail(fifo, devid); 774 + if (avail <= 0) 775 + break; 776 + } 777 + 778 + /* Console output always comes from the Tx buffer. */ 779 + if (!is_rx && devid == VIRTIO_ID_CONSOLE) { 780 + mlxbf_tmfifo_console_tx(fifo, avail); 781 + break; 782 + } 783 + 784 + /* Handle one descriptor. */ 785 + more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail); 786 + } while (more); 787 + } 788 + 789 + /* Handle Rx or Tx queues. */ 790 + static void mlxbf_tmfifo_work_rxtx(struct mlxbf_tmfifo *fifo, int queue_id, 791 + int irq_id, bool is_rx) 792 + { 793 + struct mlxbf_tmfifo_vdev *tm_vdev; 794 + struct mlxbf_tmfifo_vring *vring; 795 + int i; 796 + 797 + if (!test_and_clear_bit(irq_id, &fifo->pend_events) || 798 + !fifo->irq_info[irq_id].irq) 799 + return; 800 + 801 + for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++) { 802 + tm_vdev = fifo->vdev[i]; 803 + if (tm_vdev) { 804 + vring = &tm_vdev->vrings[queue_id]; 805 + if (vring->vq) 806 + mlxbf_tmfifo_rxtx(vring, is_rx); 807 + } 808 + } 809 + } 810 + 811 + /* Work handler for Rx and Tx case. 
*/ 812 + static void mlxbf_tmfifo_work_handler(struct work_struct *work) 813 + { 814 + struct mlxbf_tmfifo *fifo; 815 + 816 + fifo = container_of(work, struct mlxbf_tmfifo, work); 817 + if (!fifo->is_ready) 818 + return; 819 + 820 + mutex_lock(&fifo->lock); 821 + 822 + /* Tx (Send data to the TmFifo). */ 823 + mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_TX, 824 + MLXBF_TM_TX_LWM_IRQ, false); 825 + 826 + /* Rx (Receive data from the TmFifo). */ 827 + mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_RX, 828 + MLXBF_TM_RX_HWM_IRQ, true); 829 + 830 + mutex_unlock(&fifo->lock); 831 + } 832 + 833 + /* The notify function is called when new buffers are posted. */ 834 + static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq) 835 + { 836 + struct mlxbf_tmfifo_vring *vring = vq->priv; 837 + struct mlxbf_tmfifo_vdev *tm_vdev; 838 + struct mlxbf_tmfifo *fifo; 839 + unsigned long flags; 840 + 841 + fifo = vring->fifo; 842 + 843 + /* 844 + * Virtio maintains vrings in pairs, even number ring for Rx 845 + * and odd number ring for Tx. 846 + */ 847 + if (vring->index & BIT(0)) { 848 + /* 849 + * Console could make blocking call with interrupts disabled. 850 + * In such case, the vring needs to be served right away. For 851 + * other cases, just set the TX LWM bit to start Tx in the 852 + * worker handler. 853 + */ 854 + if (vring->vdev_id == VIRTIO_ID_CONSOLE) { 855 + spin_lock_irqsave(&fifo->spin_lock, flags); 856 + tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE]; 857 + mlxbf_tmfifo_console_output(tm_vdev, vring); 858 + spin_unlock_irqrestore(&fifo->spin_lock, flags); 859 + } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, 860 + &fifo->pend_events)) { 861 + return true; 862 + } 863 + } else { 864 + if (test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events)) 865 + return true; 866 + } 867 + 868 + schedule_work(&fifo->work); 869 + 870 + return true; 871 + } 872 + 873 + /* Get the array of feature bits for this device. 
*/ 874 + static u64 mlxbf_tmfifo_virtio_get_features(struct virtio_device *vdev) 875 + { 876 + struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 877 + 878 + return tm_vdev->features; 879 + } 880 + 881 + /* Confirm device features to use. */ 882 + static int mlxbf_tmfifo_virtio_finalize_features(struct virtio_device *vdev) 883 + { 884 + struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 885 + 886 + tm_vdev->features = vdev->features; 887 + 888 + return 0; 889 + } 890 + 891 + /* Free virtqueues found by find_vqs(). */ 892 + static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev) 893 + { 894 + struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 895 + struct mlxbf_tmfifo_vring *vring; 896 + struct virtqueue *vq; 897 + int i; 898 + 899 + for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) { 900 + vring = &tm_vdev->vrings[i]; 901 + 902 + /* Release the pending packet. */ 903 + if (vring->desc) 904 + mlxbf_tmfifo_release_pending_pkt(vring); 905 + vq = vring->vq; 906 + if (vq) { 907 + vring->vq = NULL; 908 + vring_del_virtqueue(vq); 909 + } 910 + } 911 + } 912 + 913 + /* Create and initialize the virtual queues. 
*/ 914 + static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev, 915 + unsigned int nvqs, 916 + struct virtqueue *vqs[], 917 + vq_callback_t *callbacks[], 918 + const char * const names[], 919 + const bool *ctx, 920 + struct irq_affinity *desc) 921 + { 922 + struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 923 + struct mlxbf_tmfifo_vring *vring; 924 + struct virtqueue *vq; 925 + int i, ret, size; 926 + 927 + if (nvqs > ARRAY_SIZE(tm_vdev->vrings)) 928 + return -EINVAL; 929 + 930 + for (i = 0; i < nvqs; ++i) { 931 + if (!names[i]) { 932 + ret = -EINVAL; 933 + goto error; 934 + } 935 + vring = &tm_vdev->vrings[i]; 936 + 937 + /* zero vring */ 938 + size = vring_size(vring->num, vring->align); 939 + memset(vring->va, 0, size); 940 + vq = vring_new_virtqueue(i, vring->num, vring->align, vdev, 941 + false, false, vring->va, 942 + mlxbf_tmfifo_virtio_notify, 943 + callbacks[i], names[i]); 944 + if (!vq) { 945 + dev_err(&vdev->dev, "vring_new_virtqueue failed\n"); 946 + ret = -ENOMEM; 947 + goto error; 948 + } 949 + 950 + vqs[i] = vq; 951 + vring->vq = vq; 952 + vq->priv = vring; 953 + } 954 + 955 + return 0; 956 + 957 + error: 958 + mlxbf_tmfifo_virtio_del_vqs(vdev); 959 + return ret; 960 + } 961 + 962 + /* Read the status byte. */ 963 + static u8 mlxbf_tmfifo_virtio_get_status(struct virtio_device *vdev) 964 + { 965 + struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 966 + 967 + return tm_vdev->status; 968 + } 969 + 970 + /* Write the status byte. */ 971 + static void mlxbf_tmfifo_virtio_set_status(struct virtio_device *vdev, 972 + u8 status) 973 + { 974 + struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 975 + 976 + tm_vdev->status = status; 977 + } 978 + 979 + /* Reset the device. Not much here for now. 
*/ 980 + static void mlxbf_tmfifo_virtio_reset(struct virtio_device *vdev) 981 + { 982 + struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 983 + 984 + tm_vdev->status = 0; 985 + } 986 + 987 + /* Read the value of a configuration field. */ 988 + static void mlxbf_tmfifo_virtio_get(struct virtio_device *vdev, 989 + unsigned int offset, 990 + void *buf, 991 + unsigned int len) 992 + { 993 + struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 994 + 995 + if ((u64)offset + len > sizeof(tm_vdev->config)) 996 + return; 997 + 998 + memcpy(buf, (u8 *)&tm_vdev->config + offset, len); 999 + } 1000 + 1001 + /* Write the value of a configuration field. */ 1002 + static void mlxbf_tmfifo_virtio_set(struct virtio_device *vdev, 1003 + unsigned int offset, 1004 + const void *buf, 1005 + unsigned int len) 1006 + { 1007 + struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 1008 + 1009 + if ((u64)offset + len > sizeof(tm_vdev->config)) 1010 + return; 1011 + 1012 + memcpy((u8 *)&tm_vdev->config + offset, buf, len); 1013 + } 1014 + 1015 + static void tmfifo_virtio_dev_release(struct device *device) 1016 + { 1017 + struct virtio_device *vdev = 1018 + container_of(device, struct virtio_device, dev); 1019 + struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 1020 + 1021 + kfree(tm_vdev); 1022 + } 1023 + 1024 + /* Virtio config operations. */ 1025 + static const struct virtio_config_ops mlxbf_tmfifo_virtio_config_ops = { 1026 + .get_features = mlxbf_tmfifo_virtio_get_features, 1027 + .finalize_features = mlxbf_tmfifo_virtio_finalize_features, 1028 + .find_vqs = mlxbf_tmfifo_virtio_find_vqs, 1029 + .del_vqs = mlxbf_tmfifo_virtio_del_vqs, 1030 + .reset = mlxbf_tmfifo_virtio_reset, 1031 + .set_status = mlxbf_tmfifo_virtio_set_status, 1032 + .get_status = mlxbf_tmfifo_virtio_get_status, 1033 + .get = mlxbf_tmfifo_virtio_get, 1034 + .set = mlxbf_tmfifo_virtio_set, 1035 + }; 1036 + 1037 + /* Create vdev for the FIFO. 
*/ 1038 + static int mlxbf_tmfifo_create_vdev(struct device *dev, 1039 + struct mlxbf_tmfifo *fifo, 1040 + int vdev_id, u64 features, 1041 + void *config, u32 size) 1042 + { 1043 + struct mlxbf_tmfifo_vdev *tm_vdev, *reg_dev = NULL; 1044 + int ret; 1045 + 1046 + mutex_lock(&fifo->lock); 1047 + 1048 + tm_vdev = fifo->vdev[vdev_id]; 1049 + if (tm_vdev) { 1050 + dev_err(dev, "vdev %d already exists\n", vdev_id); 1051 + ret = -EEXIST; 1052 + goto fail; 1053 + } 1054 + 1055 + tm_vdev = kzalloc(sizeof(*tm_vdev), GFP_KERNEL); 1056 + if (!tm_vdev) { 1057 + ret = -ENOMEM; 1058 + goto fail; 1059 + } 1060 + 1061 + tm_vdev->vdev.id.device = vdev_id; 1062 + tm_vdev->vdev.config = &mlxbf_tmfifo_virtio_config_ops; 1063 + tm_vdev->vdev.dev.parent = dev; 1064 + tm_vdev->vdev.dev.release = tmfifo_virtio_dev_release; 1065 + tm_vdev->features = features; 1066 + if (config) 1067 + memcpy(&tm_vdev->config, config, size); 1068 + 1069 + if (mlxbf_tmfifo_alloc_vrings(fifo, tm_vdev)) { 1070 + dev_err(dev, "unable to allocate vring\n"); 1071 + ret = -ENOMEM; 1072 + goto vdev_fail; 1073 + } 1074 + 1075 + /* Allocate an output buffer for the console device. */ 1076 + if (vdev_id == VIRTIO_ID_CONSOLE) 1077 + tm_vdev->tx_buf.buf = devm_kmalloc(dev, 1078 + MLXBF_TMFIFO_CON_TX_BUF_SIZE, 1079 + GFP_KERNEL); 1080 + fifo->vdev[vdev_id] = tm_vdev; 1081 + 1082 + /* Register the virtio device. */ 1083 + ret = register_virtio_device(&tm_vdev->vdev); 1084 + reg_dev = tm_vdev; 1085 + if (ret) { 1086 + dev_err(dev, "register_virtio_device failed\n"); 1087 + goto vdev_fail; 1088 + } 1089 + 1090 + mutex_unlock(&fifo->lock); 1091 + return 0; 1092 + 1093 + vdev_fail: 1094 + mlxbf_tmfifo_free_vrings(fifo, tm_vdev); 1095 + fifo->vdev[vdev_id] = NULL; 1096 + if (reg_dev) 1097 + put_device(&tm_vdev->vdev.dev); 1098 + else 1099 + kfree(tm_vdev); 1100 + fail: 1101 + mutex_unlock(&fifo->lock); 1102 + return ret; 1103 + } 1104 + 1105 + /* Delete vdev for the FIFO. 
*/ 1106 + static int mlxbf_tmfifo_delete_vdev(struct mlxbf_tmfifo *fifo, int vdev_id) 1107 + { 1108 + struct mlxbf_tmfifo_vdev *tm_vdev; 1109 + 1110 + mutex_lock(&fifo->lock); 1111 + 1112 + /* Unregister vdev. */ 1113 + tm_vdev = fifo->vdev[vdev_id]; 1114 + if (tm_vdev) { 1115 + unregister_virtio_device(&tm_vdev->vdev); 1116 + mlxbf_tmfifo_free_vrings(fifo, tm_vdev); 1117 + fifo->vdev[vdev_id] = NULL; 1118 + } 1119 + 1120 + mutex_unlock(&fifo->lock); 1121 + 1122 + return 0; 1123 + } 1124 + 1125 + /* Read the configured network MAC address from efi variable. */ 1126 + static void mlxbf_tmfifo_get_cfg_mac(u8 *mac) 1127 + { 1128 + efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID; 1129 + unsigned long size = ETH_ALEN; 1130 + u8 buf[ETH_ALEN]; 1131 + efi_status_t rc; 1132 + 1133 + rc = efi.get_variable(mlxbf_tmfifo_efi_name, &guid, NULL, &size, buf); 1134 + if (rc == EFI_SUCCESS && size == ETH_ALEN) 1135 + ether_addr_copy(mac, buf); 1136 + else 1137 + ether_addr_copy(mac, mlxbf_tmfifo_net_default_mac); 1138 + } 1139 + 1140 + /* Set TmFifo thresolds which is used to trigger interrupts. */ 1141 + static void mlxbf_tmfifo_set_threshold(struct mlxbf_tmfifo *fifo) 1142 + { 1143 + u64 ctl; 1144 + 1145 + /* Get Tx FIFO size and set the low/high watermark. */ 1146 + ctl = readq(fifo->tx_base + MLXBF_TMFIFO_TX_CTL); 1147 + fifo->tx_fifo_size = 1148 + FIELD_GET(MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK, ctl); 1149 + ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__LWM_MASK) | 1150 + FIELD_PREP(MLXBF_TMFIFO_TX_CTL__LWM_MASK, 1151 + fifo->tx_fifo_size / 2); 1152 + ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__HWM_MASK) | 1153 + FIELD_PREP(MLXBF_TMFIFO_TX_CTL__HWM_MASK, 1154 + fifo->tx_fifo_size - 1); 1155 + writeq(ctl, fifo->tx_base + MLXBF_TMFIFO_TX_CTL); 1156 + 1157 + /* Get Rx FIFO size and set the low/high watermark. 
*/ 1158 + ctl = readq(fifo->rx_base + MLXBF_TMFIFO_RX_CTL); 1159 + fifo->rx_fifo_size = 1160 + FIELD_GET(MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK, ctl); 1161 + ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__LWM_MASK) | 1162 + FIELD_PREP(MLXBF_TMFIFO_RX_CTL__LWM_MASK, 0); 1163 + ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__HWM_MASK) | 1164 + FIELD_PREP(MLXBF_TMFIFO_RX_CTL__HWM_MASK, 1); 1165 + writeq(ctl, fifo->rx_base + MLXBF_TMFIFO_RX_CTL); 1166 + } 1167 + 1168 + static void mlxbf_tmfifo_cleanup(struct mlxbf_tmfifo *fifo) 1169 + { 1170 + int i; 1171 + 1172 + fifo->is_ready = false; 1173 + del_timer_sync(&fifo->timer); 1174 + mlxbf_tmfifo_disable_irqs(fifo); 1175 + cancel_work_sync(&fifo->work); 1176 + for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++) 1177 + mlxbf_tmfifo_delete_vdev(fifo, i); 1178 + } 1179 + 1180 + /* Probe the TMFIFO. */ 1181 + static int mlxbf_tmfifo_probe(struct platform_device *pdev) 1182 + { 1183 + struct virtio_net_config net_config; 1184 + struct device *dev = &pdev->dev; 1185 + struct mlxbf_tmfifo *fifo; 1186 + int i, rc; 1187 + 1188 + fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL); 1189 + if (!fifo) 1190 + return -ENOMEM; 1191 + 1192 + spin_lock_init(&fifo->spin_lock); 1193 + INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler); 1194 + mutex_init(&fifo->lock); 1195 + 1196 + /* Get the resource of the Rx FIFO. */ 1197 + fifo->rx_base = devm_platform_ioremap_resource(pdev, 0); 1198 + if (IS_ERR(fifo->rx_base)) 1199 + return PTR_ERR(fifo->rx_base); 1200 + 1201 + /* Get the resource of the Tx FIFO. 
*/ 1202 + fifo->tx_base = devm_platform_ioremap_resource(pdev, 1); 1203 + if (IS_ERR(fifo->tx_base)) 1204 + return PTR_ERR(fifo->tx_base); 1205 + 1206 + platform_set_drvdata(pdev, fifo); 1207 + 1208 + timer_setup(&fifo->timer, mlxbf_tmfifo_timer, 0); 1209 + 1210 + for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) { 1211 + fifo->irq_info[i].index = i; 1212 + fifo->irq_info[i].fifo = fifo; 1213 + fifo->irq_info[i].irq = platform_get_irq(pdev, i); 1214 + rc = devm_request_irq(dev, fifo->irq_info[i].irq, 1215 + mlxbf_tmfifo_irq_handler, 0, 1216 + "tmfifo", &fifo->irq_info[i]); 1217 + if (rc) { 1218 + dev_err(dev, "devm_request_irq failed\n"); 1219 + fifo->irq_info[i].irq = 0; 1220 + return rc; 1221 + } 1222 + } 1223 + 1224 + mlxbf_tmfifo_set_threshold(fifo); 1225 + 1226 + /* Create the console vdev. */ 1227 + rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_CONSOLE, 0, NULL, 0); 1228 + if (rc) 1229 + goto fail; 1230 + 1231 + /* Create the network vdev. */ 1232 + memset(&net_config, 0, sizeof(net_config)); 1233 + net_config.mtu = ETH_DATA_LEN; 1234 + net_config.status = VIRTIO_NET_S_LINK_UP; 1235 + mlxbf_tmfifo_get_cfg_mac(net_config.mac); 1236 + rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET, 1237 + MLXBF_TMFIFO_NET_FEATURES, &net_config, 1238 + sizeof(net_config)); 1239 + if (rc) 1240 + goto fail; 1241 + 1242 + mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL); 1243 + 1244 + fifo->is_ready = true; 1245 + return 0; 1246 + 1247 + fail: 1248 + mlxbf_tmfifo_cleanup(fifo); 1249 + return rc; 1250 + } 1251 + 1252 + /* Device remove function. 
*/ 1253 + static int mlxbf_tmfifo_remove(struct platform_device *pdev) 1254 + { 1255 + struct mlxbf_tmfifo *fifo = platform_get_drvdata(pdev); 1256 + 1257 + mlxbf_tmfifo_cleanup(fifo); 1258 + 1259 + return 0; 1260 + } 1261 + 1262 + static const struct acpi_device_id mlxbf_tmfifo_acpi_match[] = { 1263 + { "MLNXBF01", 0 }, 1264 + {} 1265 + }; 1266 + MODULE_DEVICE_TABLE(acpi, mlxbf_tmfifo_acpi_match); 1267 + 1268 + static struct platform_driver mlxbf_tmfifo_driver = { 1269 + .probe = mlxbf_tmfifo_probe, 1270 + .remove = mlxbf_tmfifo_remove, 1271 + .driver = { 1272 + .name = "bf-tmfifo", 1273 + .acpi_match_table = mlxbf_tmfifo_acpi_match, 1274 + }, 1275 + }; 1276 + 1277 + module_platform_driver(mlxbf_tmfifo_driver); 1278 + 1279 + MODULE_DESCRIPTION("Mellanox BlueField SoC TmFifo Driver"); 1280 + MODULE_LICENSE("GPL v2"); 1281 + MODULE_AUTHOR("Mellanox Technologies");
+11
drivers/platform/x86/Kconfig
··· 1263 1263 To compile this driver as a module, choose M here: the module 1264 1264 will be called intel_chtdc_ti_pwrbtn. 1265 1265 1266 + config INTEL_MRFLD_PWRBTN 1267 + tristate "Intel Merrifield Basin Cove power button driver" 1268 + depends on INTEL_SOC_PMIC_MRFLD 1269 + depends on INPUT 1270 + ---help--- 1271 + This option adds a power button driver for Basin Cove PMIC 1272 + on Intel Merrifield devices. 1273 + 1274 + To compile this driver as a module, choose M here: the module 1275 + will be called intel_mrfld_pwrbtn. 1276 + 1266 1277 config I2C_MULTI_INSTANTIATE 1267 1278 tristate "I2C multi instantiate pseudo device driver" 1268 1279 depends on I2C && ACPI
+1
drivers/platform/x86/Makefile
··· 94 94 obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o 95 95 obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o 96 96 obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o 97 + obj-$(CONFIG_INTEL_MRFLD_PWRBTN) += intel_mrfld_pwrbtn.o 97 98 obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o 98 99 obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o 99 100 obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
+9 -10
drivers/platform/x86/alienware-wmi.c
··· 522 522 523 523 input.length = (acpi_size) sizeof(*in_args); 524 524 input.pointer = in_args; 525 - if (out_data != NULL) { 525 + if (out_data) { 526 526 output.length = ACPI_ALLOCATE_BUFFER; 527 527 output.pointer = NULL; 528 528 status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0, 529 529 command, &input, &output); 530 - } else 530 + if (ACPI_SUCCESS(status)) { 531 + obj = (union acpi_object *)output.pointer; 532 + if (obj && obj->type == ACPI_TYPE_INTEGER) 533 + *out_data = (u32)obj->integer.value; 534 + } 535 + kfree(output.pointer); 536 + } else { 531 537 status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0, 532 538 command, &input, NULL); 533 - 534 - if (ACPI_SUCCESS(status) && out_data != NULL) { 535 - obj = (union acpi_object *)output.pointer; 536 - if (obj && obj->type == ACPI_TYPE_INTEGER) 537 - *out_data = (u32) obj->integer.value; 538 539 } 539 - kfree(output.pointer); 540 540 return status; 541 - 542 541 } 543 542 544 543 /* ··· 587 588 return scnprintf(buf, PAGE_SIZE, 588 589 "input [gpu] unknown\n"); 589 590 } 590 - pr_err("alienware-wmi: unknown HDMI source status: %d\n", out_data); 591 + pr_err("alienware-wmi: unknown HDMI source status: %u\n", status); 591 592 return scnprintf(buf, PAGE_SIZE, "input gpu [unknown]\n"); 592 593 } 593 594
+37
drivers/platform/x86/asus-wmi.c
··· 66 66 #define NOTIFY_BRNUP_MAX 0x1f 67 67 #define NOTIFY_BRNDOWN_MIN 0x20 68 68 #define NOTIFY_BRNDOWN_MAX 0x2e 69 + #define NOTIFY_FNLOCK_TOGGLE 0x4e 69 70 #define NOTIFY_KBD_BRTUP 0xc4 70 71 #define NOTIFY_KBD_BRTDWN 0xc5 71 72 #define NOTIFY_KBD_BRTTOGGLE 0xc7 73 + 74 + #define ASUS_WMI_FNLOCK_BIOS_DISABLED BIT(0) 72 75 73 76 #define ASUS_FAN_DESC "cpu_fan" 74 77 #define ASUS_FAN_MFUN 0x13 ··· 179 176 struct mutex wmi_lock; 180 177 struct workqueue_struct *hotplug_workqueue; 181 178 struct work_struct hotplug_work; 179 + 180 + bool fnlock_locked; 182 181 183 182 struct asus_wmi_debug debug; 184 183 ··· 1624 1619 return 0; 1625 1620 } 1626 1621 1622 + static bool asus_wmi_has_fnlock_key(struct asus_wmi *asus) 1623 + { 1624 + u32 result; 1625 + 1626 + asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FNLOCK, &result); 1627 + 1628 + return (result & ASUS_WMI_DSTS_PRESENCE_BIT) && 1629 + !(result & ASUS_WMI_FNLOCK_BIOS_DISABLED); 1630 + } 1631 + 1632 + static void asus_wmi_fnlock_update(struct asus_wmi *asus) 1633 + { 1634 + int mode = asus->fnlock_locked; 1635 + 1636 + asus_wmi_set_devstate(ASUS_WMI_DEVID_FNLOCK, mode, NULL); 1637 + } 1638 + 1627 1639 static void asus_wmi_notify(u32 value, void *context) 1628 1640 { 1629 1641 struct asus_wmi *asus = context; ··· 1699 1677 kbd_led_set_by_kbd(asus, 0); 1700 1678 else 1701 1679 kbd_led_set_by_kbd(asus, asus->kbd_led_wk + 1); 1680 + goto exit; 1681 + } 1682 + 1683 + if (code == NOTIFY_FNLOCK_TOGGLE) { 1684 + asus->fnlock_locked = !asus->fnlock_locked; 1685 + asus_wmi_fnlock_update(asus); 1702 1686 goto exit; 1703 1687 } 1704 1688 ··· 2162 2134 } else 2163 2135 err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL); 2164 2136 2137 + if (asus_wmi_has_fnlock_key(asus)) { 2138 + asus->fnlock_locked = true; 2139 + asus_wmi_fnlock_update(asus); 2140 + } 2141 + 2165 2142 status = wmi_install_notify_handler(asus->driver->event_guid, 2166 2143 asus_wmi_notify, asus); 2167 2144 if (ACPI_FAILURE(status)) { ··· 2246 2213 if 
(!IS_ERR_OR_NULL(asus->kbd_led.dev)) 2247 2214 kbd_led_update(asus); 2248 2215 2216 + if (asus_wmi_has_fnlock_key(asus)) 2217 + asus_wmi_fnlock_update(asus); 2249 2218 return 0; 2250 2219 } 2251 2220 ··· 2284 2249 if (!IS_ERR_OR_NULL(asus->kbd_led.dev)) 2285 2250 kbd_led_update(asus); 2286 2251 2252 + if (asus_wmi_has_fnlock_key(asus)) 2253 + asus_wmi_fnlock_update(asus); 2287 2254 return 0; 2288 2255 } 2289 2256
+3 -3
drivers/platform/x86/dell-laptop.c
··· 531 531 return; 532 532 } 533 533 534 - dell_fill_request(&buffer, 0, 0x2, 0, 0); 534 + dell_fill_request(&buffer, 0x2, 0, 0, 0); 535 535 ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL); 536 536 hwswitch = buffer.output[1]; 537 537 ··· 562 562 return ret; 563 563 status = buffer.output[1]; 564 564 565 - dell_fill_request(&buffer, 0, 0x2, 0, 0); 565 + dell_fill_request(&buffer, 0x2, 0, 0, 0); 566 566 hwswitch_ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL); 567 567 if (hwswitch_ret) 568 568 return hwswitch_ret; ··· 647 647 if (ret != 0) 648 648 return; 649 649 650 - dell_fill_request(&buffer, 0, 0x2, 0, 0); 650 + dell_fill_request(&buffer, 0x2, 0, 0, 0); 651 651 ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL); 652 652 653 653 if (ret == 0 && (status & BIT(0)))
+2
drivers/platform/x86/dell-rbtn.c
··· 18 18 #include <linux/rfkill.h> 19 19 #include <linux/input.h> 20 20 21 + #include "dell-rbtn.h" 22 + 21 23 enum rbtn_type { 22 24 RBTN_UNKNOWN, 23 25 RBTN_TOGGLE,
+15 -306
drivers/platform/x86/ideapad-laptop.c
··· 980 980 #endif 981 981 982 982 /* 983 - * Some ideapads don't have a hardware rfkill switch, reading VPCCMD_R_RF 984 - * always results in 0 on these models, causing ideapad_laptop to wrongly 985 - * report all radios as hardware-blocked. 983 + * Some ideapads have a hardware rfkill switch, but most do not have one. 984 + * Reading VPCCMD_R_RF always results in 0 on models without a hardware rfkill, 985 + * switch causing ideapad_laptop to wrongly report all radios as hw-blocked. 986 + * There used to be a long list of DMI ids for models without a hw rfkill 987 + * switch here, but that resulted in playing whack a mole. 988 + * More importantly wrongly reporting the wifi radio as hw-blocked, results in 989 + * non working wifi. Whereas not reporting it hw-blocked, when it actually is 990 + * hw-blocked results in an empty SSID list, which is a much more benign 991 + * failure mode. 992 + * So the default now is the much safer option of assuming there is no 993 + * hardware rfkill switch. This default also actually matches most hardware, 994 + * since having a hw rfkill switch is quite rare on modern hardware, so this 995 + * also leads to a much shorter list. 
986 996 */ 987 - static const struct dmi_system_id no_hw_rfkill_list[] = { 988 - { 989 - .ident = "Lenovo RESCUER R720-15IKBN", 990 - .matches = { 991 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 992 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo R720-15IKBN"), 993 - }, 994 - }, 995 - { 996 - .ident = "Lenovo G40-30", 997 - .matches = { 998 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 999 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G40-30"), 1000 - }, 1001 - }, 1002 - { 1003 - .ident = "Lenovo G50-30", 1004 - .matches = { 1005 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1006 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G50-30"), 1007 - }, 1008 - }, 1009 - { 1010 - .ident = "Lenovo V310-14IKB", 1011 - .matches = { 1012 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1013 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-14IKB"), 1014 - }, 1015 - }, 1016 - { 1017 - .ident = "Lenovo V310-14ISK", 1018 - .matches = { 1019 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1020 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-14ISK"), 1021 - }, 1022 - }, 1023 - { 1024 - .ident = "Lenovo V310-15IKB", 1025 - .matches = { 1026 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1027 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-15IKB"), 1028 - }, 1029 - }, 1030 - { 1031 - .ident = "Lenovo V310-15ISK", 1032 - .matches = { 1033 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1034 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-15ISK"), 1035 - }, 1036 - }, 1037 - { 1038 - .ident = "Lenovo V510-15IKB", 1039 - .matches = { 1040 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1041 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V510-15IKB"), 1042 - }, 1043 - }, 1044 - { 1045 - .ident = "Lenovo ideapad 300-15IBR", 1046 - .matches = { 1047 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1048 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300-15IBR"), 1049 - }, 1050 - }, 1051 - { 1052 - .ident = "Lenovo ideapad 300-15IKB", 1053 - .matches = { 1054 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1055 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300-15IKB"), 1056 - }, 1057 - }, 1058 - { 1059 
- .ident = "Lenovo ideapad 300S-11IBR", 1060 - .matches = { 1061 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1062 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300S-11BR"), 1063 - }, 1064 - }, 1065 - { 1066 - .ident = "Lenovo ideapad 310-15ABR", 1067 - .matches = { 1068 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1069 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15ABR"), 1070 - }, 1071 - }, 1072 - { 1073 - .ident = "Lenovo ideapad 310-15IAP", 1074 - .matches = { 1075 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1076 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15IAP"), 1077 - }, 1078 - }, 1079 - { 1080 - .ident = "Lenovo ideapad 310-15IKB", 1081 - .matches = { 1082 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1083 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15IKB"), 1084 - }, 1085 - }, 1086 - { 1087 - .ident = "Lenovo ideapad 310-15ISK", 1088 - .matches = { 1089 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1090 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15ISK"), 1091 - }, 1092 - }, 1093 - { 1094 - .ident = "Lenovo ideapad 330-15ICH", 1095 - .matches = { 1096 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1097 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 330-15ICH"), 1098 - }, 1099 - }, 1100 - { 1101 - .ident = "Lenovo ideapad 530S-14ARR", 1102 - .matches = { 1103 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1104 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 530S-14ARR"), 1105 - }, 1106 - }, 1107 - { 1108 - .ident = "Lenovo ideapad S130-14IGM", 1109 - .matches = { 1110 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1111 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad S130-14IGM"), 1112 - }, 1113 - }, 1114 - { 1115 - .ident = "Lenovo ideapad Y700-14ISK", 1116 - .matches = { 1117 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1118 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-14ISK"), 1119 - }, 1120 - }, 1121 - { 1122 - .ident = "Lenovo ideapad Y700-15ACZ", 1123 - .matches = { 1124 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1125 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo 
ideapad Y700-15ACZ"), 1126 - }, 1127 - }, 1128 - { 1129 - .ident = "Lenovo ideapad Y700-15ISK", 1130 - .matches = { 1131 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1132 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ISK"), 1133 - }, 1134 - }, 1135 - { 1136 - .ident = "Lenovo ideapad Y700 Touch-15ISK", 1137 - .matches = { 1138 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1139 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700 Touch-15ISK"), 1140 - }, 1141 - }, 1142 - { 1143 - .ident = "Lenovo ideapad Y700-17ISK", 1144 - .matches = { 1145 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1146 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-17ISK"), 1147 - }, 1148 - }, 1149 - { 1150 - .ident = "Lenovo ideapad MIIX 720-12IKB", 1151 - .matches = { 1152 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1153 - DMI_MATCH(DMI_PRODUCT_VERSION, "MIIX 720-12IKB"), 1154 - }, 1155 - }, 1156 - { 1157 - .ident = "Lenovo Legion Y520-15IKB", 1158 - .matches = { 1159 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1160 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKB"), 1161 - }, 1162 - }, 1163 - { 1164 - .ident = "Lenovo Y520-15IKBM", 1165 - .matches = { 1166 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1167 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKBM"), 1168 - }, 1169 - }, 1170 - { 1171 - .ident = "Lenovo Legion Y530-15ICH", 1172 - .matches = { 1173 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1174 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion Y530-15ICH"), 1175 - }, 1176 - }, 1177 - { 1178 - .ident = "Lenovo Legion Y530-15ICH-1060", 1179 - .matches = { 1180 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1181 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion Y530-15ICH-1060"), 1182 - }, 1183 - }, 1184 - { 1185 - .ident = "Lenovo Legion Y720-15IKB", 1186 - .matches = { 1187 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1188 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y720-15IKB"), 1189 - }, 1190 - }, 1191 - { 1192 - .ident = "Lenovo Legion Y720-15IKBN", 1193 - .matches = { 1194 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1195 - 
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y720-15IKBN"), 1196 - }, 1197 - }, 1198 - { 1199 - .ident = "Lenovo Y720-15IKBM", 1200 - .matches = { 1201 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1202 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y720-15IKBM"), 1203 - }, 1204 - }, 1205 - { 1206 - .ident = "Lenovo Yoga 2 11 / 13 / Pro", 1207 - .matches = { 1208 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1209 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2"), 1210 - }, 1211 - }, 1212 - { 1213 - .ident = "Lenovo Yoga 2 11 / 13 / Pro", 1214 - .matches = { 1215 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1216 - DMI_MATCH(DMI_BOARD_NAME, "Yoga2"), 1217 - }, 1218 - }, 1219 - { 1220 - .ident = "Lenovo Yoga 2 13", 1221 - .matches = { 1222 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1223 - DMI_MATCH(DMI_PRODUCT_VERSION, "Yoga 2 13"), 1224 - }, 1225 - }, 1226 - { 1227 - .ident = "Lenovo Yoga 3 1170 / 1470", 1228 - .matches = { 1229 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1230 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 3"), 1231 - }, 1232 - }, 1233 - { 1234 - .ident = "Lenovo Yoga 3 Pro 1370", 1235 - .matches = { 1236 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1237 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 3"), 1238 - }, 1239 - }, 1240 - { 1241 - .ident = "Lenovo Yoga 700", 1242 - .matches = { 1243 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1244 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 700"), 1245 - }, 1246 - }, 1247 - { 1248 - .ident = "Lenovo Yoga 900", 1249 - .matches = { 1250 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1251 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"), 1252 - }, 1253 - }, 1254 - { 1255 - .ident = "Lenovo Yoga 900", 1256 - .matches = { 1257 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1258 - DMI_MATCH(DMI_BOARD_NAME, "VIUU4"), 1259 - }, 1260 - }, 1261 - { 1262 - .ident = "Lenovo YOGA 910-13IKB", 1263 - .matches = { 1264 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1265 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 910-13IKB"), 1266 - }, 1267 - }, 1268 - { 1269 - .ident = "Lenovo YOGA 
920-13IKB", 1270 - .matches = { 1271 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1272 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920-13IKB"), 1273 - }, 1274 - }, 1275 - { 1276 - .ident = "Lenovo YOGA C930-13IKB", 1277 - .matches = { 1278 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1279 - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA C930-13IKB"), 1280 - }, 1281 - }, 1282 - { 1283 - .ident = "Lenovo Zhaoyang E42-80", 1284 - .matches = { 1285 - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1286 - DMI_MATCH(DMI_PRODUCT_VERSION, "ZHAOYANG E42-80"), 1287 - }, 1288 - }, 997 + static const struct dmi_system_id hw_rfkill_list[] = { 1289 998 {} 1290 999 }; 1291 1000 ··· 1020 1311 priv->cfg = cfg; 1021 1312 priv->adev = adev; 1022 1313 priv->platform_device = pdev; 1023 - priv->has_hw_rfkill_switch = !dmi_check_system(no_hw_rfkill_list); 1314 + priv->has_hw_rfkill_switch = dmi_check_system(hw_rfkill_list); 1024 1315 1025 1316 ret = ideapad_sysfs_init(priv); 1026 1317 if (ret)
+107
drivers/platform/x86/intel_mrfld_pwrbtn.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Power-button driver for Basin Cove PMIC 4 + * 5 + * Copyright (c) 2019, Intel Corporation. 6 + * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 7 + */ 8 + 9 + #include <linux/input.h> 10 + #include <linux/interrupt.h> 11 + #include <linux/device.h> 12 + #include <linux/mfd/intel_soc_pmic.h> 13 + #include <linux/mfd/intel_soc_pmic_mrfld.h> 14 + #include <linux/module.h> 15 + #include <linux/platform_device.h> 16 + #include <linux/pm_wakeirq.h> 17 + #include <linux/slab.h> 18 + 19 + #define BCOVE_PBSTATUS 0x27 20 + #define BCOVE_PBSTATUS_PBLVL BIT(4) /* 1 - release, 0 - press */ 21 + 22 + static irqreturn_t mrfld_pwrbtn_interrupt(int irq, void *dev_id) 23 + { 24 + struct input_dev *input = dev_id; 25 + struct device *dev = input->dev.parent; 26 + struct regmap *regmap = dev_get_drvdata(dev); 27 + unsigned int state; 28 + int ret; 29 + 30 + ret = regmap_read(regmap, BCOVE_PBSTATUS, &state); 31 + if (ret) 32 + return IRQ_NONE; 33 + 34 + dev_dbg(dev, "PBSTATUS=0x%x\n", state); 35 + input_report_key(input, KEY_POWER, !(state & BCOVE_PBSTATUS_PBLVL)); 36 + input_sync(input); 37 + 38 + regmap_update_bits(regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_PWRBTN, 0); 39 + return IRQ_HANDLED; 40 + } 41 + 42 + static int mrfld_pwrbtn_probe(struct platform_device *pdev) 43 + { 44 + struct device *dev = &pdev->dev; 45 + struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent); 46 + struct input_dev *input; 47 + int irq, ret; 48 + 49 + irq = platform_get_irq(pdev, 0); 50 + if (irq < 0) 51 + return irq; 52 + 53 + input = devm_input_allocate_device(dev); 54 + if (!input) 55 + return -ENOMEM; 56 + input->name = pdev->name; 57 + input->phys = "power-button/input0"; 58 + input->id.bustype = BUS_HOST; 59 + input->dev.parent = dev; 60 + input_set_capability(input, EV_KEY, KEY_POWER); 61 + ret = input_register_device(input); 62 + if (ret) 63 + return ret; 64 + 65 + dev_set_drvdata(dev, pmic->regmap); 66 + 67 + ret = 
devm_request_threaded_irq(dev, irq, NULL, mrfld_pwrbtn_interrupt, 68 + IRQF_ONESHOT | IRQF_SHARED, pdev->name, 69 + input); 70 + if (ret) 71 + return ret; 72 + 73 + regmap_update_bits(pmic->regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_PWRBTN, 0); 74 + regmap_update_bits(pmic->regmap, BCOVE_MPBIRQ, BCOVE_PBIRQ_PBTN, 0); 75 + 76 + device_init_wakeup(dev, true); 77 + dev_pm_set_wake_irq(dev, irq); 78 + return 0; 79 + } 80 + 81 + static int mrfld_pwrbtn_remove(struct platform_device *pdev) 82 + { 83 + struct device *dev = &pdev->dev; 84 + 85 + dev_pm_clear_wake_irq(dev); 86 + device_init_wakeup(dev, false); 87 + return 0; 88 + } 89 + 90 + static const struct platform_device_id mrfld_pwrbtn_id_table[] = { 91 + { .name = "mrfld_bcove_pwrbtn" }, 92 + {} 93 + }; 94 + MODULE_DEVICE_TABLE(platform, mrfld_pwrbtn_id_table); 95 + 96 + static struct platform_driver mrfld_pwrbtn_driver = { 97 + .driver = { 98 + .name = "mrfld_bcove_pwrbtn", 99 + }, 100 + .probe = mrfld_pwrbtn_probe, 101 + .remove = mrfld_pwrbtn_remove, 102 + .id_table = mrfld_pwrbtn_id_table, 103 + }; 104 + module_platform_driver(mrfld_pwrbtn_driver); 105 + 106 + MODULE_DESCRIPTION("Power-button driver for Basin Cove PMIC"); 107 + MODULE_LICENSE("GPL v2");
+163 -9
drivers/platform/x86/intel_pmc_core.c
··· 19 19 #include <linux/io.h> 20 20 #include <linux/module.h> 21 21 #include <linux/pci.h> 22 + #include <linux/platform_device.h> 23 + #include <linux/suspend.h> 22 24 #include <linux/uaccess.h> 23 25 24 26 #include <asm/cpu_device_id.h> ··· 830 828 * the platform BIOS enforces 24Mhx Crystal to shutdown 831 829 * before PMC can assert SLP_S0#. 832 830 */ 833 - int quirk_xtal_ignore(const struct dmi_system_id *id) 831 + static int quirk_xtal_ignore(const struct dmi_system_id *id) 834 832 { 835 833 struct pmc_dev *pmcdev = &pmc; 836 834 u32 value; ··· 856 854 {} 857 855 }; 858 856 859 - static int __init pmc_core_probe(void) 857 + static int pmc_core_probe(struct platform_device *pdev) 860 858 { 859 + static bool device_initialized; 861 860 struct pmc_dev *pmcdev = &pmc; 862 861 const struct x86_cpu_id *cpu_id; 863 862 u64 slp_s0_addr; 864 863 int err; 864 + 865 + if (device_initialized) 866 + return -ENODEV; 865 867 866 868 cpu_id = x86_match_cpu(intel_pmc_core_ids); 867 869 if (!cpu_id) ··· 892 886 return -ENOMEM; 893 887 894 888 mutex_init(&pmcdev->lock); 889 + platform_set_drvdata(pdev, pmcdev); 895 890 pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(); 891 + dmi_check_system(pmc_core_dmi_table); 896 892 897 893 err = pmc_core_dbgfs_register(pmcdev); 898 894 if (err < 0) { 899 - pr_warn(" debugfs register failed.\n"); 895 + dev_warn(&pdev->dev, "debugfs register failed.\n"); 900 896 iounmap(pmcdev->regbase); 901 897 return err; 902 898 } 903 899 904 - dmi_check_system(pmc_core_dmi_table); 905 - pr_info(" initialized\n"); 900 + device_initialized = true; 901 + dev_info(&pdev->dev, " initialized\n"); 902 + 906 903 return 0; 907 904 } 908 - module_init(pmc_core_probe) 909 905 910 - static void __exit pmc_core_remove(void) 906 + static int pmc_core_remove(struct platform_device *pdev) 911 907 { 912 - struct pmc_dev *pmcdev = &pmc; 908 + struct pmc_dev *pmcdev = platform_get_drvdata(pdev); 913 909 914 910 pmc_core_dbgfs_unregister(pmcdev); 911 + 
platform_set_drvdata(pdev, NULL); 915 912 mutex_destroy(&pmcdev->lock); 916 913 iounmap(pmcdev->regbase); 914 + return 0; 917 915 } 918 - module_exit(pmc_core_remove) 916 + 917 + #ifdef CONFIG_PM_SLEEP 918 + 919 + static bool warn_on_s0ix_failures; 920 + module_param(warn_on_s0ix_failures, bool, 0644); 921 + MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures"); 922 + 923 + static int pmc_core_suspend(struct device *dev) 924 + { 925 + struct pmc_dev *pmcdev = dev_get_drvdata(dev); 926 + 927 + pmcdev->check_counters = false; 928 + 929 + /* No warnings on S0ix failures */ 930 + if (!warn_on_s0ix_failures) 931 + return 0; 932 + 933 + /* Check if the syspend will actually use S0ix */ 934 + if (pm_suspend_via_firmware()) 935 + return 0; 936 + 937 + /* Save PC10 residency for checking later */ 938 + if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter)) 939 + return -EIO; 940 + 941 + /* Save S0ix residency for checking later */ 942 + if (pmc_core_dev_state_get(pmcdev, &pmcdev->s0ix_counter)) 943 + return -EIO; 944 + 945 + pmcdev->check_counters = true; 946 + return 0; 947 + } 948 + 949 + static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev) 950 + { 951 + u64 pc10_counter; 952 + 953 + if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter)) 954 + return false; 955 + 956 + if (pc10_counter == pmcdev->pc10_counter) 957 + return true; 958 + 959 + return false; 960 + } 961 + 962 + static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev) 963 + { 964 + u64 s0ix_counter; 965 + 966 + if (pmc_core_dev_state_get(pmcdev, &s0ix_counter)) 967 + return false; 968 + 969 + if (s0ix_counter == pmcdev->s0ix_counter) 970 + return true; 971 + 972 + return false; 973 + } 974 + 975 + static int pmc_core_resume(struct device *dev) 976 + { 977 + struct pmc_dev *pmcdev = dev_get_drvdata(dev); 978 + const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps; 979 + int offset = pmcdev->map->slps0_dbg_offset; 980 + const struct pmc_bit_map *map; 
981 + u32 data; 982 + 983 + if (!pmcdev->check_counters) 984 + return 0; 985 + 986 + if (!pmc_core_is_s0ix_failed(pmcdev)) 987 + return 0; 988 + 989 + if (pmc_core_is_pc10_failed(pmcdev)) { 990 + /* S0ix failed because of PC10 entry failure */ 991 + dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n", 992 + pmcdev->pc10_counter); 993 + return 0; 994 + } 995 + 996 + /* The real interesting case - S0ix failed - lets ask PMC why. */ 997 + dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n", 998 + pmcdev->s0ix_counter); 999 + while (*maps) { 1000 + map = *maps; 1001 + data = pmc_core_reg_read(pmcdev, offset); 1002 + offset += 4; 1003 + while (map->name) { 1004 + dev_dbg(dev, "SLP_S0_DBG: %-32s\tState: %s\n", 1005 + map->name, 1006 + data & map->bit_mask ? "Yes" : "No"); 1007 + map++; 1008 + } 1009 + maps++; 1010 + } 1011 + return 0; 1012 + } 1013 + 1014 + #endif 1015 + 1016 + static const struct dev_pm_ops pmc_core_pm_ops = { 1017 + SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume) 1018 + }; 1019 + 1020 + static struct platform_driver pmc_core_driver = { 1021 + .driver = { 1022 + .name = "intel_pmc_core", 1023 + .pm = &pmc_core_pm_ops, 1024 + }, 1025 + .probe = pmc_core_probe, 1026 + .remove = pmc_core_remove, 1027 + }; 1028 + 1029 + static struct platform_device pmc_core_device = { 1030 + .name = "intel_pmc_core", 1031 + }; 1032 + 1033 + static int __init pmc_core_init(void) 1034 + { 1035 + int ret; 1036 + 1037 + if (!x86_match_cpu(intel_pmc_core_ids)) 1038 + return -ENODEV; 1039 + 1040 + ret = platform_driver_register(&pmc_core_driver); 1041 + if (ret) 1042 + return ret; 1043 + 1044 + ret = platform_device_register(&pmc_core_device); 1045 + if (ret) { 1046 + platform_driver_unregister(&pmc_core_driver); 1047 + return ret; 1048 + } 1049 + 1050 + return 0; 1051 + } 1052 + 1053 + static void __exit pmc_core_exit(void) 1054 + { 1055 + platform_device_unregister(&pmc_core_device); 1056 + platform_driver_unregister(&pmc_core_driver); 1057 
+ } 1058 + 1059 + module_init(pmc_core_init) 1060 + module_exit(pmc_core_exit) 919 1061 920 1062 MODULE_LICENSE("GPL v2"); 921 1063 MODULE_DESCRIPTION("Intel PMC Core Driver");
+7
drivers/platform/x86/intel_pmc_core.h
··· 241 241 * @pmc_xram_read_bit: flag to indicate whether PMC XRAM shadow registers 242 242 * used to read MPHY PG and PLL status are available 243 243 * @mutex_lock: mutex to complete one transcation 244 + * @check_counters: On resume, check if counters are getting incremented 245 + * @pc10_counter: PC10 residency counter 246 + * @s0ix_counter: S0ix residency (step adjusted) 244 247 * 245 248 * pmc_dev contains info about power management controller device. 246 249 */ ··· 256 253 #endif /* CONFIG_DEBUG_FS */ 257 254 int pmc_xram_read_bit; 258 255 struct mutex lock; /* generic mutex lock for PMC Core */ 256 + 257 + bool check_counters; /* Check for counter increments on resume */ 258 + u64 pc10_counter; 259 + u64 s0ix_counter; 259 260 }; 260 261 261 262 #endif /* PMC_CORE_H */
+24 -22
drivers/platform/x86/intel_pmc_ipc.c
··· 40 40 * The ARC handles the interrupt and services it, writing optional data to 41 41 * the IPC1 registers, updates the IPC_STS response register with the status. 42 42 */ 43 - #define IPC_CMD 0x0 44 - #define IPC_CMD_MSI 0x100 43 + #define IPC_CMD 0x00 44 + #define IPC_CMD_MSI BIT(8) 45 45 #define IPC_CMD_SIZE 16 46 46 #define IPC_CMD_SUBCMD 12 47 47 #define IPC_STATUS 0x04 48 - #define IPC_STATUS_IRQ 0x4 49 - #define IPC_STATUS_ERR 0x2 50 - #define IPC_STATUS_BUSY 0x1 48 + #define IPC_STATUS_IRQ BIT(2) 49 + #define IPC_STATUS_ERR BIT(1) 50 + #define IPC_STATUS_BUSY BIT(0) 51 51 #define IPC_SPTR 0x08 52 52 #define IPC_DPTR 0x0C 53 53 #define IPC_WRITE_BUFFER 0x80 ··· 101 101 #define TELEM_SSRAM_SIZE 240 102 102 #define TELEM_PMC_SSRAM_OFFSET 0x1B00 103 103 #define TELEM_PUNIT_SSRAM_OFFSET 0x1A00 104 - #define TCO_PMC_OFFSET 0x8 105 - #define TCO_PMC_SIZE 0x4 104 + #define TCO_PMC_OFFSET 0x08 105 + #define TCO_PMC_SIZE 0x04 106 106 107 107 /* PMC register bit definitions */ 108 108 109 109 /* PMC_CFG_REG bit masks */ 110 - #define PMC_CFG_NO_REBOOT_MASK (1 << 4) 110 + #define PMC_CFG_NO_REBOOT_MASK BIT_MASK(4) 111 111 #define PMC_CFG_NO_REBOOT_EN (1 << 4) 112 112 #define PMC_CFG_NO_REBOOT_DIS (0 << 4) 113 113 ··· 131 131 132 132 /* punit */ 133 133 struct platform_device *punit_dev; 134 + unsigned int punit_res_count; 134 135 135 136 /* Telemetry */ 136 137 resource_size_t telem_pmc_ssram_base; ··· 683 682 .name = PUNIT_DEVICE_NAME, 684 683 .id = -1, 685 684 .res = punit_res_array, 686 - .num_res = ARRAY_SIZE(punit_res_array), 685 + .num_res = ipcdev.punit_res_count, 687 686 }; 688 687 689 688 pdev = platform_device_register_full(&pdevinfo); ··· 772 771 if (ret) { 773 772 dev_err(ipcdev.dev, "Failed to add punit platform device\n"); 774 773 platform_device_unregister(ipcdev.tco_dev); 774 + return ret; 775 775 } 776 776 777 777 if (!ipcdev.telem_res_inval) { 778 778 ret = ipc_create_telemetry_device(); 779 - if (ret) 779 + if (ret) { 780 780 dev_warn(ipcdev.dev, 
781 781 "Failed to add telemetry platform device\n"); 782 + platform_device_unregister(ipcdev.punit_dev); 783 + platform_device_unregister(ipcdev.tco_dev); 784 + } 782 785 } 783 786 784 787 return ret; ··· 790 785 791 786 static int ipc_plat_get_res(struct platform_device *pdev) 792 787 { 793 - struct resource *res, *punit_res; 788 + struct resource *res, *punit_res = punit_res_array; 794 789 void __iomem *addr; 795 790 int size; 796 791 ··· 805 800 ipcdev.acpi_io_size = size; 806 801 dev_info(&pdev->dev, "io res: %pR\n", res); 807 802 808 - punit_res = punit_res_array; 803 + ipcdev.punit_res_count = 0; 804 + 809 805 /* This is index 0 to cover BIOS data register */ 810 806 res = platform_get_resource(pdev, IORESOURCE_MEM, 811 807 PLAT_RESOURCE_BIOS_DATA_INDEX); ··· 814 808 dev_err(&pdev->dev, "Failed to get res of punit BIOS data\n"); 815 809 return -ENXIO; 816 810 } 817 - *punit_res = *res; 811 + punit_res[ipcdev.punit_res_count++] = *res; 818 812 dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res); 819 813 820 814 /* This is index 1 to cover BIOS interface register */ ··· 824 818 dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n"); 825 819 return -ENXIO; 826 820 } 827 - *++punit_res = *res; 821 + punit_res[ipcdev.punit_res_count++] = *res; 828 822 dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res); 829 823 830 824 /* This is index 2 to cover ISP data register, optional */ 831 825 res = platform_get_resource(pdev, IORESOURCE_MEM, 832 826 PLAT_RESOURCE_ISP_DATA_INDEX); 833 - ++punit_res; 834 827 if (res) { 835 - *punit_res = *res; 828 + punit_res[ipcdev.punit_res_count++] = *res; 836 829 dev_info(&pdev->dev, "punit ISP data res: %pR\n", res); 837 830 } 838 831 839 832 /* This is index 3 to cover ISP interface register, optional */ 840 833 res = platform_get_resource(pdev, IORESOURCE_MEM, 841 834 PLAT_RESOURCE_ISP_IFACE_INDEX); 842 - ++punit_res; 843 835 if (res) { 844 - *punit_res = *res; 836 + punit_res[ipcdev.punit_res_count++] = *res; 
845 837 dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res); 846 838 } 847 839 848 840 /* This is index 4 to cover GTD data register, optional */ 849 841 res = platform_get_resource(pdev, IORESOURCE_MEM, 850 842 PLAT_RESOURCE_GTD_DATA_INDEX); 851 - ++punit_res; 852 843 if (res) { 853 - *punit_res = *res; 844 + punit_res[ipcdev.punit_res_count++] = *res; 854 845 dev_info(&pdev->dev, "punit GTD data res: %pR\n", res); 855 846 } 856 847 857 848 /* This is index 5 to cover GTD interface register, optional */ 858 849 res = platform_get_resource(pdev, IORESOURCE_MEM, 859 850 PLAT_RESOURCE_GTD_IFACE_INDEX); 860 - ++punit_res; 861 851 if (res) { 862 - *punit_res = *res; 852 + punit_res[ipcdev.punit_res_count++] = *res; 863 853 dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res); 864 854 } 865 855
+4 -4
drivers/platform/x86/intel_punit_ipc.c
··· 252 252 * - GTDRIVER_IPC BASE_IFACE 253 253 */ 254 254 res = platform_get_resource(pdev, IORESOURCE_MEM, 2); 255 - if (res && resource_size(res) > 1) { 255 + if (res) { 256 256 addr = devm_ioremap_resource(&pdev->dev, res); 257 257 if (!IS_ERR(addr)) 258 258 punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr; 259 259 } 260 260 261 261 res = platform_get_resource(pdev, IORESOURCE_MEM, 3); 262 - if (res && resource_size(res) > 1) { 262 + if (res) { 263 263 addr = devm_ioremap_resource(&pdev->dev, res); 264 264 if (!IS_ERR(addr)) 265 265 punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr; 266 266 } 267 267 268 268 res = platform_get_resource(pdev, IORESOURCE_MEM, 4); 269 - if (res && resource_size(res) > 1) { 269 + if (res) { 270 270 addr = devm_ioremap_resource(&pdev->dev, res); 271 271 if (!IS_ERR(addr)) 272 272 punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr; 273 273 } 274 274 275 275 res = platform_get_resource(pdev, IORESOURCE_MEM, 5); 276 - if (res && resource_size(res) > 1) { 276 + if (res) { 277 277 addr = devm_ioremap_resource(&pdev->dev, res); 278 278 if (!IS_ERR(addr)) 279 279 punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+226 -2
drivers/platform/x86/mlx-platform.c
··· 56 56 #define MLXPLAT_CPLD_LPC_REG_FAN_OFFSET 0x88 57 57 #define MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET 0x89 58 58 #define MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET 0x8a 59 + #define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET 0xc7 60 + #define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET 0xc8 61 + #define MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET 0xc9 62 + #define MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET 0xcb 63 + #define MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET 0xcd 64 + #define MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET 0xce 65 + #define MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET 0xcf 66 + #define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1 67 + #define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2 68 + #define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3 59 69 #define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET 0xe3 60 70 #define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET 0xe4 61 71 #define MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET 0xe5 ··· 82 72 #define MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET 0xf5 83 73 #define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET 0xf6 84 74 #define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET 0xf7 75 + #define MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET 0xf8 85 76 #define MLXPLAT_CPLD_LPC_IO_RANGE 0x100 86 77 #define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb 87 78 #define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda ··· 139 128 #define MLXPLAT_CPLD_FAN3_DEFAULT_NR 13 140 129 #define MLXPLAT_CPLD_FAN4_DEFAULT_NR 14 141 130 131 + /* Masks and default values for watchdogs */ 132 + #define MLXPLAT_CPLD_WD1_CLEAR_MASK GENMASK(7, 1) 133 + #define MLXPLAT_CPLD_WD2_CLEAR_MASK (GENMASK(7, 0) & ~BIT(1)) 134 + 135 + #define MLXPLAT_CPLD_WD_TYPE1_TO_MASK GENMASK(7, 4) 136 + #define MLXPLAT_CPLD_WD_TYPE2_TO_MASK 0 137 + #define MLXPLAT_CPLD_WD_RESET_ACT_MASK GENMASK(7, 1) 138 + #define MLXPLAT_CPLD_WD_FAN_ACT_MASK (GENMASK(7, 0) & ~BIT(4)) 139 + #define MLXPLAT_CPLD_WD_COUNT_ACT_MASK (GENMASK(7, 0) & ~BIT(7)) 140 + #define MLXPLAT_CPLD_WD_DFLT_TIMEOUT 30 141 + #define MLXPLAT_CPLD_WD_MAX_DEVS 2 142 + 142 143 /* mlxplat_priv - platform private data 143 144 * 
@pdev_i2c - i2c controller platform device 144 145 * @pdev_mux - array of mux platform devices ··· 158 135 * @pdev_led - led platform devices 159 136 * @pdev_io_regs - register access platform devices 160 137 * @pdev_fan - FAN platform devices 138 + * @pdev_wd - array of watchdog platform devices 161 139 */ 162 140 struct mlxplat_priv { 163 141 struct platform_device *pdev_i2c; ··· 167 143 struct platform_device *pdev_led; 168 144 struct platform_device *pdev_io_regs; 169 145 struct platform_device *pdev_fan; 146 + struct platform_device *pdev_wd[MLXPLAT_CPLD_WD_MAX_DEVS]; 170 147 }; 171 148 172 149 /* Regions for LPC I2C controller and LPC base register space */ ··· 1364 1339 .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET, 1365 1340 .bit = BIT(3), 1366 1341 }, 1342 + { 1343 + .label = "conf", 1344 + .capability = MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET, 1345 + }, 1367 1346 }; 1368 1347 1369 1348 static struct mlxreg_core_platform_data mlxplat_default_fan_data = { 1370 1349 .data = mlxplat_mlxcpld_default_fan_data, 1371 1350 .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_data), 1351 + }; 1352 + 1353 + /* Watchdog type1: hardware implementation version1 1354 + * (MSN2700, MSN2410, MSN2740, MSN2100 and MSN2140 systems). 
1355 + */ 1356 + static struct mlxreg_core_data mlxplat_mlxcpld_wd_main_regs_type1[] = { 1357 + { 1358 + .label = "action", 1359 + .reg = MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 1360 + .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK, 1361 + .bit = 0, 1362 + }, 1363 + { 1364 + .label = "timeout", 1365 + .reg = MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET, 1366 + .mask = MLXPLAT_CPLD_WD_TYPE1_TO_MASK, 1367 + .health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT, 1368 + }, 1369 + { 1370 + .label = "ping", 1371 + .reg = MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET, 1372 + .mask = MLXPLAT_CPLD_WD1_CLEAR_MASK, 1373 + .bit = 0, 1374 + }, 1375 + { 1376 + .label = "reset", 1377 + .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET, 1378 + .mask = GENMASK(7, 0) & ~BIT(6), 1379 + .bit = 6, 1380 + }, 1381 + }; 1382 + 1383 + static struct mlxreg_core_data mlxplat_mlxcpld_wd_aux_regs_type1[] = { 1384 + { 1385 + .label = "action", 1386 + .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 1387 + .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK, 1388 + .bit = 4, 1389 + }, 1390 + { 1391 + .label = "timeout", 1392 + .reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET, 1393 + .mask = MLXPLAT_CPLD_WD_TYPE1_TO_MASK, 1394 + .health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT, 1395 + }, 1396 + { 1397 + .label = "ping", 1398 + .reg = MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET, 1399 + .mask = MLXPLAT_CPLD_WD1_CLEAR_MASK, 1400 + .bit = 1, 1401 + }, 1402 + }; 1403 + 1404 + static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type1[] = { 1405 + { 1406 + .data = mlxplat_mlxcpld_wd_main_regs_type1, 1407 + .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_main_regs_type1), 1408 + .version = MLX_WDT_TYPE1, 1409 + .identity = "mlx-wdt-main", 1410 + }, 1411 + { 1412 + .data = mlxplat_mlxcpld_wd_aux_regs_type1, 1413 + .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_aux_regs_type1), 1414 + .version = MLX_WDT_TYPE1, 1415 + .identity = "mlx-wdt-aux", 1416 + }, 1417 + }; 1418 + 1419 + /* Watchdog type2: hardware implementation version 2 1420 + * (all systems except (MSN2700, MSN2410, MSN2740, 
MSN2100 and MSN2140). 1421 + */ 1422 + static struct mlxreg_core_data mlxplat_mlxcpld_wd_main_regs_type2[] = { 1423 + { 1424 + .label = "action", 1425 + .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 1426 + .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK, 1427 + .bit = 0, 1428 + }, 1429 + { 1430 + .label = "timeout", 1431 + .reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET, 1432 + .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK, 1433 + .health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT, 1434 + }, 1435 + { 1436 + .label = "timeleft", 1437 + .reg = MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET, 1438 + .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK, 1439 + }, 1440 + { 1441 + .label = "ping", 1442 + .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 1443 + .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK, 1444 + .bit = 0, 1445 + }, 1446 + { 1447 + .label = "reset", 1448 + .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET, 1449 + .mask = GENMASK(7, 0) & ~BIT(6), 1450 + .bit = 6, 1451 + }, 1452 + }; 1453 + 1454 + static struct mlxreg_core_data mlxplat_mlxcpld_wd_aux_regs_type2[] = { 1455 + { 1456 + .label = "action", 1457 + .reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 1458 + .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK, 1459 + .bit = 4, 1460 + }, 1461 + { 1462 + .label = "timeout", 1463 + .reg = MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET, 1464 + .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK, 1465 + .health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT, 1466 + }, 1467 + { 1468 + .label = "timeleft", 1469 + .reg = MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET, 1470 + .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK, 1471 + }, 1472 + { 1473 + .label = "ping", 1474 + .reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 1475 + .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK, 1476 + .bit = 4, 1477 + }, 1478 + }; 1479 + 1480 + static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type2[] = { 1481 + { 1482 + .data = mlxplat_mlxcpld_wd_main_regs_type2, 1483 + .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_main_regs_type2), 1484 + .version = MLX_WDT_TYPE2, 1485 + .identity = "mlx-wdt-main", 1486 + }, 1487 + { 1488 + 
.data = mlxplat_mlxcpld_wd_aux_regs_type2, 1489 + .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_aux_regs_type2), 1490 + .version = MLX_WDT_TYPE2, 1491 + .identity = "mlx-wdt-aux", 1492 + }, 1372 1493 }; 1373 1494 1374 1495 static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg) ··· 1539 1368 case MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET: 1540 1369 case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET: 1541 1370 case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET: 1371 + case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET: 1372 + case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET: 1373 + case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET: 1374 + case MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET: 1375 + case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET: 1376 + case MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET: 1377 + case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET: 1378 + case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET: 1542 1379 case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET: 1543 1380 case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET: 1544 1381 return true; ··· 1590 1411 case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET: 1591 1412 case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET: 1592 1413 case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET: 1414 + case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET: 1415 + case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET: 1416 + case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET: 1417 + case MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET: 1418 + case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET: 1419 + case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET: 1420 + case MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET: 1421 + case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET: 1422 + case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET: 1423 + case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET: 1593 1424 case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET: 1594 1425 case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET: 1595 1426 case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET: ··· 1617 1428 case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET: 1618 1429 case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET: 1619 1430 case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET: 1431 + case 
MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET: 1620 1432 return true; 1621 1433 } 1622 1434 return false; ··· 1657 1467 case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET: 1658 1468 case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET: 1659 1469 case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET: 1470 + case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET: 1471 + case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET: 1472 + case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET: 1473 + case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET: 1660 1474 case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET: 1661 1475 case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET: 1662 1476 case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET: ··· 1678 1484 case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET: 1679 1485 case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET: 1680 1486 case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET: 1487 + case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET: 1681 1488 return true; 1682 1489 } 1683 1490 return false; ··· 1688 1493 { MLXPLAT_CPLD_LPC_REG_WP1_OFFSET, 0x00 }, 1689 1494 { MLXPLAT_CPLD_LPC_REG_WP2_OFFSET, 0x00 }, 1690 1495 { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 }, 1496 + { MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 }, 1691 1497 }; 1692 1498 1693 1499 struct mlxplat_mlxcpld_regmap_context { ··· 1738 1542 static struct mlxreg_core_platform_data *mlxplat_led; 1739 1543 static struct mlxreg_core_platform_data *mlxplat_regs_io; 1740 1544 static struct mlxreg_core_platform_data *mlxplat_fan; 1545 + static struct mlxreg_core_platform_data 1546 + *mlxplat_wd_data[MLXPLAT_CPLD_WD_MAX_DEVS]; 1741 1547 1742 1548 static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi) 1743 1549 { ··· 1755 1557 mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1]; 1756 1558 mlxplat_led = &mlxplat_default_led_data; 1757 1559 mlxplat_regs_io = &mlxplat_default_regs_io_data; 1560 + mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; 1758 1561 1759 1562 return 1; 1760 1563 }; ··· 1774 1575 mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1]; 1775 1576 mlxplat_led = 
&mlxplat_msn21xx_led_data; 1776 1577 mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data; 1578 + mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; 1777 1579 1778 1580 return 1; 1779 1581 }; ··· 1793 1593 mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1]; 1794 1594 mlxplat_led = &mlxplat_default_led_data; 1795 1595 mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data; 1596 + mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; 1796 1597 1797 1598 return 1; 1798 1599 }; ··· 1812 1611 mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1]; 1813 1612 mlxplat_led = &mlxplat_msn21xx_led_data; 1814 1613 mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data; 1614 + mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; 1815 1615 1816 1616 return 1; 1817 1617 }; ··· 1832 1630 mlxplat_led = &mlxplat_default_ng_led_data; 1833 1631 mlxplat_regs_io = &mlxplat_default_ng_regs_io_data; 1834 1632 mlxplat_fan = &mlxplat_default_fan_data; 1633 + for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++) 1634 + mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i]; 1835 1635 1836 1636 return 1; 1837 1637 }; ··· 2116 1912 } 2117 1913 } 2118 1914 1915 + /* Add WD drivers. */ 1916 + for (j = 0; j < MLXPLAT_CPLD_WD_MAX_DEVS; j++) { 1917 + if (mlxplat_wd_data[j]) { 1918 + mlxplat_wd_data[j]->regmap = mlxplat_hotplug->regmap; 1919 + priv->pdev_wd[j] = platform_device_register_resndata( 1920 + &mlxplat_dev->dev, "mlx-wdt", 1921 + j, NULL, 0, 1922 + mlxplat_wd_data[j], 1923 + sizeof(*mlxplat_wd_data[j])); 1924 + if (IS_ERR(priv->pdev_wd[j])) { 1925 + err = PTR_ERR(priv->pdev_wd[j]); 1926 + goto fail_platform_wd_register; 1927 + } 1928 + } 1929 + } 1930 + 2119 1931 /* Sync registers with hardware. 
*/ 2120 1932 regcache_mark_dirty(mlxplat_hotplug->regmap); 2121 1933 err = regcache_sync(mlxplat_hotplug->regmap); 2122 1934 if (err) 2123 - goto fail_platform_fan_register; 1935 + goto fail_platform_wd_register; 2124 1936 2125 1937 return 0; 2126 1938 2127 - fail_platform_fan_register: 1939 + fail_platform_wd_register: 1940 + while (--j >= 0) 1941 + platform_device_unregister(priv->pdev_wd[j]); 2128 1942 if (mlxplat_fan) 2129 1943 platform_device_unregister(priv->pdev_fan); 2130 1944 fail_platform_io_regs_register: ··· 2168 1946 struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev); 2169 1947 int i; 2170 1948 1949 + for (i = MLXPLAT_CPLD_WD_MAX_DEVS - 1; i >= 0 ; i--) 1950 + platform_device_unregister(priv->pdev_wd[i]); 2171 1951 if (priv->pdev_fan) 2172 1952 platform_device_unregister(priv->pdev_fan); 2173 1953 if (priv->pdev_io_regs)
+6 -4
drivers/platform/x86/sony-laptop.c
··· 4424 4424 } 4425 4425 return AE_OK; 4426 4426 } 4427 - default: 4428 - dprintk("Resource %d isn't an IRQ nor an IO port\n", 4429 - resource->type); 4430 4427 4431 4428 case ACPI_RESOURCE_TYPE_END_TAG: 4432 4429 return AE_OK; 4430 + 4431 + default: 4432 + dprintk("Resource %d isn't an IRQ nor an IO port\n", 4433 + resource->type); 4434 + return AE_CTRL_TERMINATE; 4435 + 4433 4436 } 4434 - return AE_CTRL_TERMINATE; 4435 4437 } 4436 4438 4437 4439 static int sony_pic_possible_resources(struct acpi_device *device)
+126 -22
drivers/platform/x86/thinkpad_acpi.c
··· 79 79 #include <linux/jiffies.h> 80 80 #include <linux/workqueue.h> 81 81 #include <linux/acpi.h> 82 - #include <linux/pci_ids.h> 82 + #include <linux/pci.h> 83 83 #include <linux/power_supply.h> 84 84 #include <sound/core.h> 85 85 #include <sound/control.h> ··· 4212 4212 known_ev = true; 4213 4213 break; 4214 4214 } 4215 - /* fallthrough to default */ 4215 + /* fallthrough - to default */ 4216 4216 default: 4217 4217 known_ev = false; 4218 4218 } ··· 4501 4501 bluetooth_shutdown(); 4502 4502 } 4503 4503 4504 + static const struct dmi_system_id bt_fwbug_list[] __initconst = { 4505 + { 4506 + .ident = "ThinkPad E485", 4507 + .matches = { 4508 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 4509 + DMI_MATCH(DMI_BOARD_NAME, "20KU"), 4510 + }, 4511 + }, 4512 + { 4513 + .ident = "ThinkPad E585", 4514 + .matches = { 4515 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 4516 + DMI_MATCH(DMI_BOARD_NAME, "20KV"), 4517 + }, 4518 + }, 4519 + { 4520 + .ident = "ThinkPad A285 - 20MW", 4521 + .matches = { 4522 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 4523 + DMI_MATCH(DMI_BOARD_NAME, "20MW"), 4524 + }, 4525 + }, 4526 + { 4527 + .ident = "ThinkPad A285 - 20MX", 4528 + .matches = { 4529 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 4530 + DMI_MATCH(DMI_BOARD_NAME, "20MX"), 4531 + }, 4532 + }, 4533 + { 4534 + .ident = "ThinkPad A485 - 20MU", 4535 + .matches = { 4536 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 4537 + DMI_MATCH(DMI_BOARD_NAME, "20MU"), 4538 + }, 4539 + }, 4540 + { 4541 + .ident = "ThinkPad A485 - 20MV", 4542 + .matches = { 4543 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 4544 + DMI_MATCH(DMI_BOARD_NAME, "20MV"), 4545 + }, 4546 + }, 4547 + {} 4548 + }; 4549 + 4550 + static const struct pci_device_id fwbug_cards_ids[] __initconst = { 4551 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24F3) }, 4552 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24FD) }, 4553 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2526) }, 4554 + {} 4555 + }; 4556 + 4557 + 4558 + static int __init have_bt_fwbug(void) 4559 + { 4560 + /* 4561 + * Some AMD 
based ThinkPads have a firmware bug that calling 4562 + * "GBDC" will cause bluetooth on Intel wireless cards blocked 4563 + */ 4564 + if (dmi_check_system(bt_fwbug_list) && pci_dev_present(fwbug_cards_ids)) { 4565 + vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL, 4566 + FW_BUG "disable bluetooth subdriver for Intel cards\n"); 4567 + return 1; 4568 + } else 4569 + return 0; 4570 + } 4571 + 4504 4572 static int __init bluetooth_init(struct ibm_init_struct *iibm) 4505 4573 { 4506 4574 int res; ··· 4581 4513 4582 4514 /* bluetooth not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p, 4583 4515 G4x, R30, R31, R40e, R50e, T20-22, X20-21 */ 4584 - tp_features.bluetooth = hkey_handle && 4516 + tp_features.bluetooth = !have_bt_fwbug() && hkey_handle && 4585 4517 acpi_evalf(hkey_handle, &status, "GBDC", "qd"); 4586 4518 4587 4519 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL, ··· 5876 5808 return -EPERM; 5877 5809 if (!acpi_evalf(led_handle, NULL, NULL, "vdd", 5878 5810 (1 << led), led_sled_arg1[ledstatus])) 5879 - rc = -EIO; 5811 + return -EIO; 5880 5812 break; 5881 5813 case TPACPI_LED_OLD: 5882 5814 /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20 */ ··· 5900 5832 return -EPERM; 5901 5833 if (!acpi_evalf(led_handle, NULL, NULL, "vdd", 5902 5834 led, led_led_arg1[ledstatus])) 5903 - rc = -EIO; 5835 + return -EIO; 5904 5836 break; 5905 5837 default: 5906 - rc = -ENXIO; 5838 + return -ENXIO; 5907 5839 } 5908 5840 5909 5841 if (!rc) ··· 6317 6249 t = TP_EC_THERMAL_TMP8; 6318 6250 idx -= 8; 6319 6251 } 6320 - /* fallthrough */ 6321 6252 #endif 6253 + /* fallthrough */ 6322 6254 case TPACPI_THERMAL_TPEC_8: 6323 6255 if (idx <= 7) { 6324 6256 if (!acpi_ec_read(t + idx, &tmp)) ··· 9958 9890 return '\0'; 9959 9891 } 9960 9892 9893 + static void find_new_ec_fwstr(const struct dmi_header *dm, void *private) 9894 + { 9895 + char *ec_fw_string = (char *) private; 9896 + const char *dmi_data = (const char *)dm; 9897 + /* 9898 + * ThinkPad Embedded Controller Program Table on 
newer models 9899 + * 9900 + * Offset | Name | Width | Description 9901 + * ---------------------------------------------------- 9902 + * 0x00 | Type | BYTE | 0x8C 9903 + * 0x01 | Length | BYTE | 9904 + * 0x02 | Handle | WORD | Varies 9905 + * 0x04 | Signature | BYTEx6 | ASCII for "LENOVO" 9906 + * 0x0A | OEM struct offset | BYTE | 0x0B 9907 + * 0x0B | OEM struct number | BYTE | 0x07, for this structure 9908 + * 0x0C | OEM struct revision | BYTE | 0x01, for this format 9909 + * 0x0D | ECP version ID | STR ID | 9910 + * 0x0E | ECP release date | STR ID | 9911 + */ 9912 + 9913 + /* Return if data structure not match */ 9914 + if (dm->type != 140 || dm->length < 0x0F || 9915 + memcmp(dmi_data + 4, "LENOVO", 6) != 0 || 9916 + dmi_data[0x0A] != 0x0B || dmi_data[0x0B] != 0x07 || 9917 + dmi_data[0x0C] != 0x01) 9918 + return; 9919 + 9920 + /* fwstr is the first 8byte string */ 9921 + strncpy(ec_fw_string, dmi_data + 0x0F, 8); 9922 + } 9923 + 9961 9924 /* returns 0 - probe ok, or < 0 - probe error. 9962 9925 * Probe ok doesn't mean thinkpad found. 
9963 9926 * On error, kfree() cleanup on tp->* is not performed, caller must do it */ ··· 9996 9897 struct thinkpad_id_data *tp) 9997 9898 { 9998 9899 const struct dmi_device *dev = NULL; 9999 - char ec_fw_string[18]; 9900 + char ec_fw_string[18] = {0}; 10000 9901 char const *s; 10001 9902 char t; 10002 9903 ··· 10036 9937 ec_fw_string) == 1) { 10037 9938 ec_fw_string[sizeof(ec_fw_string) - 1] = 0; 10038 9939 ec_fw_string[strcspn(ec_fw_string, " ]")] = 0; 10039 - 10040 - tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL); 10041 - if (!tp->ec_version_str) 10042 - return -ENOMEM; 10043 - 10044 - t = tpacpi_parse_fw_id(ec_fw_string, 10045 - &tp->ec_model, &tp->ec_release); 10046 - if (t != 'H') { 10047 - pr_notice("ThinkPad firmware release %s doesn't match the known patterns\n", 10048 - ec_fw_string); 10049 - pr_notice("please report this to %s\n", 10050 - TPACPI_MAIL); 10051 - } 10052 9940 break; 9941 + } 9942 + } 9943 + 9944 + /* Newer ThinkPads have different EC program info table */ 9945 + if (!ec_fw_string[0]) 9946 + dmi_walk(find_new_ec_fwstr, &ec_fw_string); 9947 + 9948 + if (ec_fw_string[0]) { 9949 + tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL); 9950 + if (!tp->ec_version_str) 9951 + return -ENOMEM; 9952 + 9953 + t = tpacpi_parse_fw_id(ec_fw_string, 9954 + &tp->ec_model, &tp->ec_release); 9955 + if (t != 'H') { 9956 + pr_notice("ThinkPad firmware release %s doesn't match the known patterns\n", 9957 + ec_fw_string); 9958 + pr_notice("please report this to %s\n", TPACPI_MAIL); 10053 9959 } 10054 9960 } 10055 9961 ··· 10269 10165 10270 10166 module_param_named(volume_capabilities, volume_capabilities, uint, 0444); 10271 10167 MODULE_PARM_DESC(volume_capabilities, 10272 - "Selects the mixer capabilites: 0=auto, 1=volume and mute, 2=mute only"); 10168 + "Selects the mixer capabilities: 0=auto, 1=volume and mute, 2=mute only"); 10273 10169 10274 10170 module_param_named(volume_control, volume_control_allowed, bool, 0444); 10275 10171 
MODULE_PARM_DESC(volume_control,
+51
drivers/platform/x86/touchscreen_dmi.c
··· 249 249 .properties = jumper_ezpad_6_pro_props, 250 250 }; 251 251 252 + static const struct property_entry jumper_ezpad_6_pro_b_props[] = { 253 + PROPERTY_ENTRY_U32("touchscreen-size-x", 1980), 254 + PROPERTY_ENTRY_U32("touchscreen-size-y", 1500), 255 + PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro-b.fw"), 256 + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), 257 + PROPERTY_ENTRY_U32("silead,max-fingers", 10), 258 + PROPERTY_ENTRY_BOOL("silead,home-button"), 259 + { } 260 + }; 261 + 262 + static const struct ts_dmi_data jumper_ezpad_6_pro_b_data = { 263 + .acpi_name = "MSSL1680:00", 264 + .properties = jumper_ezpad_6_pro_b_props, 265 + }; 266 + 252 267 static const struct property_entry jumper_ezpad_mini3_props[] = { 253 268 PROPERTY_ENTRY_U32("touchscreen-min-x", 23), 254 269 PROPERTY_ENTRY_U32("touchscreen-min-y", 16), ··· 278 263 static const struct ts_dmi_data jumper_ezpad_mini3_data = { 279 264 .acpi_name = "MSSL1680:00", 280 265 .properties = jumper_ezpad_mini3_props, 266 + }; 267 + 268 + static const struct property_entry myria_my8307_props[] = { 269 + PROPERTY_ENTRY_U32("touchscreen-size-x", 1720), 270 + PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), 271 + PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), 272 + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), 273 + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), 274 + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-myria-my8307.fw"), 275 + PROPERTY_ENTRY_U32("silead,max-fingers", 10), 276 + PROPERTY_ENTRY_BOOL("silead,home-button"), 277 + { } 278 + }; 279 + 280 + static const struct ts_dmi_data myria_my8307_data = { 281 + .acpi_name = "MSSL1680:00", 282 + .properties = myria_my8307_props, 281 283 }; 282 284 283 285 static const struct property_entry onda_obook_20_plus_props[] = { ··· 706 674 }, 707 675 }, 708 676 { 677 + /* Jumper EZpad 6 Pro B */ 678 + .driver_data = (void *)&jumper_ezpad_6_pro_b_data, 679 + .matches = { 680 + DMI_MATCH(DMI_SYS_VENDOR, "Jumper"), 681 + 
DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"), 682 + DMI_MATCH(DMI_BIOS_VERSION, "5.12"), 683 + /* Above matches are too generic, add bios-date match */ 684 + DMI_MATCH(DMI_BIOS_DATE, "04/24/2018"), 685 + }, 686 + }, 687 + { 709 688 /* Jumper EZpad mini3 */ 710 689 .driver_data = (void *)&jumper_ezpad_mini3_data, 711 690 .matches = { ··· 731 688 .matches = { 732 689 DMI_MATCH(DMI_SYS_VENDOR, "MEDIACOM"), 733 690 DMI_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"), 691 + }, 692 + }, 693 + { 694 + /* Myria MY8307 */ 695 + .driver_data = (void *)&myria_my8307_data, 696 + .matches = { 697 + DMI_MATCH(DMI_SYS_VENDOR, "Complet Electro Serv"), 698 + DMI_MATCH(DMI_PRODUCT_NAME, "MY8307"), 734 699 }, 735 700 }, 736 701 {
+1
include/linux/platform_data/x86/asus-wmi.h
··· 67 67 /* Input */ 68 68 #define ASUS_WMI_DEVID_TOUCHPAD 0x00100011 69 69 #define ASUS_WMI_DEVID_TOUCHPAD_LED 0x00100012 70 + #define ASUS_WMI_DEVID_FNLOCK 0x00100023 70 71 71 72 /* Fan, Thermal */ 72 73 #define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011