// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <asm/page.h>
#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uacce.h>
#include <linux/uaccess.h>
#include <uapi/misc/uacce/hisi_qm.h>
#include <linux/hisi_acc_qm.h>
#include "qm_common.h"

/* eq/aeq irq enable */
#define QM_VF_AEQ_INT_SOURCE	0x0
#define QM_VF_AEQ_INT_MASK	0x4
#define QM_VF_EQ_INT_SOURCE	0x8
#define QM_VF_EQ_INT_MASK	0xc

#define QM_IRQ_VECTOR_MASK	GENMASK(15, 0)
#define QM_IRQ_TYPE_MASK	GENMASK(15, 0)
#define QM_IRQ_TYPE_SHIFT	16
#define QM_ABN_IRQ_TYPE_MASK	GENMASK(7, 0)

/* mailbox */
#define QM_MB_PING_ALL_VFS	0xffff
#define QM_MB_CMD_DATA_SHIFT	32
#define QM_MB_CMD_DATA_MASK	GENMASK(31, 0)
#define QM_MB_STATUS_MASK	GENMASK(12, 9)

/* sqc shift */
#define QM_SQ_HOP_NUM_SHIFT	0
#define QM_SQ_PAGE_SIZE_SHIFT	4
#define QM_SQ_BUF_SIZE_SHIFT	8
#define QM_SQ_SQE_SIZE_SHIFT	12
#define QM_SQ_PRIORITY_SHIFT	0
#define QM_SQ_ORDERS_SHIFT	4
#define QM_SQ_TYPE_SHIFT	8
#define QM_QC_PASID_ENABLE	0x1
#define QM_QC_PASID_ENABLE_SHIFT	7

#define QM_SQ_TYPE_MASK		GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc)	((le16_to_cpu((sqc)->w11) >> 6) & 0x1)

/* cqc shift */
#define QM_CQ_HOP_NUM_SHIFT	0
#define QM_CQ_PAGE_SIZE_SHIFT	4
#define QM_CQ_BUF_SIZE_SHIFT	8
#define QM_CQ_CQE_SIZE_SHIFT	12
#define QM_CQ_PHASE_SHIFT	0
#define QM_CQ_FLAG_SHIFT	1

#define QM_CQE_PHASE(cqe)	(le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE		4
#define QM_CQ_TAIL_IDX(cqc)	((le16_to_cpu((cqc)->w11) >> 6) & 0x1)

/* eqc shift */
#define QM_EQE_AEQE_SIZE	(2UL << 12)
#define QM_EQC_PHASE_SHIFT	16

#define QM_EQE_PHASE(eqe)	((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
#define QM_EQE_CQN_MASK		GENMASK(15, 0)

#define QM_AEQE_PHASE(aeqe)	((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT	17
#define QM_AEQE_CQN_MASK	GENMASK(15, 0)
#define QM_CQ_OVERFLOW		0
#define QM_EQ_OVERFLOW		1
#define QM_CQE_ERROR		2

#define QM_XQ_DEPTH_SHIFT	16
#define QM_XQ_DEPTH_MASK	GENMASK(15, 0)

#define QM_DOORBELL_CMD_SQ	0
#define QM_DOORBELL_CMD_CQ	1
#define QM_DOORBELL_CMD_EQ	2
#define QM_DOORBELL_CMD_AEQ	3

#define QM_DOORBELL_BASE_V1	0x340
#define QM_DB_CMD_SHIFT_V1	16
#define QM_DB_INDEX_SHIFT_V1	32
#define QM_DB_PRIORITY_SHIFT_V1	48
#define QM_PAGE_SIZE		0x0034
#define QM_QP_DB_INTERVAL	0x10000

#define QM_MEM_START_INIT	0x100040
#define QM_MEM_INIT_DONE	0x100044
#define QM_VFT_CFG_RDY		0x10006c
#define QM_VFT_CFG_OP_WR	0x100058
#define QM_VFT_CFG_TYPE		0x10005c
#define QM_VFT_CFG		0x100060
#define QM_VFT_CFG_OP_ENABLE	0x100054
#define QM_PM_CTRL		0x100148
#define QM_IDLE_DISABLE		BIT(9)

#define QM_VFT_CFG_DATA_L	0x100064
#define QM_VFT_CFG_DATA_H	0x100068
#define QM_SQC_VFT_BUF_SIZE	(7ULL << 8)
#define QM_SQC_VFT_SQC_SIZE	(5ULL << 12)
#define QM_SQC_VFT_INDEX_NUMBER	(1ULL << 16)
#define QM_SQC_VFT_START_SQN_SHIFT	28
#define QM_SQC_VFT_VALID	(1ULL << 44)
#define QM_SQC_VFT_SQN_SHIFT	45
#define QM_CQC_VFT_BUF_SIZE	(7ULL << 8)
#define QM_CQC_VFT_SQC_SIZE	(5ULL << 12)
#define QM_CQC_VFT_INDEX_NUMBER	(1ULL << 16)
#define QM_CQC_VFT_VALID	(1ULL << 28)

#define QM_SQC_VFT_BASE_SHIFT_V2	28
#define QM_SQC_VFT_BASE_MASK_V2		GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2		45
#define QM_SQC_VFT_NUM_MASK_V2		GENMASK(9, 0)

#define QM_ABNORMAL_INT_SOURCE	0x100000
#define QM_ABNORMAL_INT_MASK	0x100004
#define QM_ABNORMAL_INT_MASK_VALUE	0x7fff
#define QM_ABNORMAL_INT_STATUS	0x100008
#define QM_ABNORMAL_INT_SET	0x10000c
#define QM_ABNORMAL_INF00	0x100010
#define QM_FIFO_OVERFLOW_TYPE	0xc0
#define QM_FIFO_OVERFLOW_TYPE_SHIFT	6
#define QM_FIFO_OVERFLOW_VF	0x3f
#define QM_ABNORMAL_INF01	0x100014
#define QM_DB_TIMEOUT_TYPE	0xc0
#define QM_DB_TIMEOUT_TYPE_SHIFT	6
#define QM_DB_TIMEOUT_VF	0x3f
#define QM_RAS_CE_ENABLE	0x1000ec
#define QM_RAS_FE_ENABLE	0x1000f0
#define QM_RAS_NFE_ENABLE	0x1000f4
#define QM_RAS_CE_THRESHOLD	0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ	1
#define QM_OOO_SHUTDOWN_SEL	0x1040f8
#define QM_ECC_MBIT		BIT(2)
#define QM_DB_TIMEOUT		BIT(10)
#define QM_OF_FIFO_OF		BIT(11)

#define QM_RESET_WAIT_TIMEOUT	400
#define QM_PEH_VENDOR_ID	0x1000d8
#define ACC_VENDOR_ID_VALUE	0x5a5a
#define QM_PEH_DFX_INFO0	0x1000fc
#define QM_PEH_DFX_INFO1	0x100100
#define QM_PEH_DFX_MASK		(BIT(0) | BIT(2))
#define QM_PEH_MSI_FINISH_MASK	GENMASK(19, 16)
#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT	3
#define ACC_PEH_MSI_DISABLE	GENMASK(31, 0)
#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN	0x1
#define ACC_MASTER_TRANS_RETURN_RW	3
#define ACC_MASTER_TRANS_RETURN	0x300150
#define ACC_MASTER_GLOBAL_CTRL	0x300000
#define ACC_AM_CFG_PORT_WR_EN	0x30001c
#define QM_RAS_NFE_MBIT_DISABLE	~QM_ECC_MBIT
#define ACC_AM_ROB_ECC_INT_STS	0x300104
#define ACC_ROB_ECC_ERR_MULTPL	BIT(1)
#define QM_MSI_CAP_ENABLE	BIT(16)

/* interfunction communication */
#define QM_IFC_READY_STATUS	0x100128
#define QM_IFC_INT_SET_P	0x100130
#define QM_IFC_INT_CFG		0x100134
#define QM_IFC_INT_SOURCE_P	0x100138
#define QM_IFC_INT_SOURCE_V	0x0020
#define QM_IFC_INT_MASK		0x0024
#define QM_IFC_INT_STATUS	0x0028
#define QM_IFC_INT_SET_V	0x002C
#define QM_IFC_SEND_ALL_VFS	GENMASK(6, 0)
#define QM_IFC_INT_SOURCE_CLR	GENMASK(63, 0)
#define QM_IFC_INT_SOURCE_MASK	BIT(0)
#define QM_IFC_INT_DISABLE	BIT(0)
#define QM_IFC_INT_STATUS_MASK	BIT(0)
#define QM_IFC_INT_SET_MASK	BIT(0)
#define QM_WAIT_DST_ACK		10
#define QM_MAX_PF_WAIT_COUNT	10
#define QM_MAX_VF_WAIT_COUNT	40
#define QM_VF_RESET_WAIT_US	20000
#define QM_VF_RESET_WAIT_CNT	3000
#define QM_VF_RESET_WAIT_TIMEOUT_US	\
	(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
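/* 20000 us per wait * 3000 waits bounds the total VF reset wait at 60 seconds. */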

#define POLL_PERIOD		10
#define POLL_TIMEOUT		1000
#define WAIT_PERIOD_US_MAX	200
#define WAIT_PERIOD_US_MIN	100
#define MAX_WAIT_COUNTS		1000
#define QM_CACHE_WB_START	0x204
#define QM_CACHE_WB_DONE	0x208
#define QM_FUNC_CAPS_REG	0x3100
#define QM_CAPBILITY_VERSION	GENMASK(7, 0)

#define PCI_BAR_2		2
#define PCI_BAR_4		4
#define QMC_ALIGN(sz)		ALIGN(sz, 32)

#define QM_DBG_READ_LEN		256
#define QM_PCI_COMMAND_INVALID	~0
#define QM_RESET_STOP_TX_OFFSET	1
#define QM_RESET_STOP_RX_OFFSET	2

#define WAIT_PERIOD		20
#define REMOVE_WAIT_DELAY	10

#define QM_DRIVER_REMOVING	0
#define QM_RST_SCHED		1
#define QM_QOS_PARAM_NUM	2
#define QM_QOS_MAX_VAL		1000
#define QM_QOS_RATE		100
#define QM_QOS_EXPAND_RATE	1000
#define QM_SHAPER_CIR_B_MASK	GENMASK(7, 0)
#define QM_SHAPER_CIR_U_MASK	GENMASK(10, 8)
#define QM_SHAPER_CIR_S_MASK	GENMASK(14, 11)
#define QM_SHAPER_FACTOR_CIR_U_SHIFT	8
#define QM_SHAPER_FACTOR_CIR_S_SHIFT	11
#define QM_SHAPER_FACTOR_CBS_B_SHIFT	15
#define QM_SHAPER_FACTOR_CBS_S_SHIFT	19
#define QM_SHAPER_CBS_B		1
#define QM_SHAPER_VFT_OFFSET	6
#define QM_QOS_MIN_ERROR_RATE	5
#define QM_SHAPER_MIN_CBS_S	8
#define QM_QOS_TICK		0x300U
#define QM_QOS_DIVISOR_CLK	0x1f40U
#define QM_QOS_MAX_CIR_B	200
#define QM_QOS_MIN_CIR_B	100
#define QM_QOS_MAX_CIR_U	6
#define QM_AUTOSUSPEND_DELAY	3000

#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
	(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
	((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
	((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
	((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \
	((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_SQC_W13(priority, orders, alg_type) \
	(((priority) << QM_SQ_PRIORITY_SHIFT) | \
	((orders) << QM_SQ_ORDERS_SHIFT) | \
	(((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))

#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
	(((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \
	((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \
	((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
	((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

#define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
	((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
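
/*
 * Worked example of the V2 encoding above (values chosen for illustration
 * only): with sqe_sz = 128 and sq_depth = 1024,
 *
 *	QM_MK_SQC_DW3_V2(128, 1024) = (1024 - 1) | (ilog2(128) << 12)
 *	                            = 0x3ff | (7 << QM_SQ_SQE_SIZE_SHIFT)
 *	                            = 0x73ff
 *
 * i.e. the low bits carry "depth - 1" and bits 15:12 carry log2 of the
 * SQE size.
 */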

#define INIT_QC_COMMON(qc, base, pasid) do {			\
	(qc)->head = 0;						\
	(qc)->tail = 0;						\
	(qc)->base_l = cpu_to_le32(lower_32_bits(base));	\
	(qc)->base_h = cpu_to_le32(upper_32_bits(base));	\
	(qc)->dw3 = 0;						\
	(qc)->w8 = 0;						\
	(qc)->rsvd0 = 0;					\
	(qc)->pasid = cpu_to_le16(pasid);			\
	(qc)->w11 = 0;						\
	(qc)->rsvd1 = 0;					\
} while (0)

enum vft_type {
	SQC_VFT = 0,
	CQC_VFT,
	SHAPER_VFT,
};

enum acc_err_result {
	ACC_ERR_NONE,
	ACC_ERR_NEED_RESET,
	ACC_ERR_RECOVERED,
};

enum qm_alg_type {
	ALG_TYPE_0,
	ALG_TYPE_1,
};

enum qm_mb_cmd {
	QM_PF_FLR_PREPARE = 0x01,
	QM_PF_SRST_PREPARE,
	QM_PF_RESET_DONE,
	QM_VF_PREPARE_DONE,
	QM_VF_PREPARE_FAIL,
	QM_VF_START_DONE,
	QM_VF_START_FAIL,
	QM_PF_SET_QOS,
	QM_VF_GET_QOS,
};

enum qm_basic_type {
	QM_TOTAL_QP_NUM_CAP = 0x0,
	QM_FUNC_MAX_QP_CAP,
	QM_XEQ_DEPTH_CAP,
	QM_QP_DEPTH_CAP,
	QM_EQ_IRQ_TYPE_CAP,
	QM_AEQ_IRQ_TYPE_CAP,
	QM_ABN_IRQ_TYPE_CAP,
	QM_PF2VF_IRQ_TYPE_CAP,
	QM_PF_IRQ_NUM_CAP,
	QM_VF_IRQ_NUM_CAP,
};

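/*
 * Each row below follows the struct hisi_qm_cap_info layout from
 * <linux/hisi_acc_qm.h>, as consumed by hisi_qm_get_hw_info() further down
 * (field order inferred from that function):
 * { type, reg offset, shift, mask, v1_val, v2_val, v3_val }.
 * QM_HW_V1/V2 parts return the per-version fallback value; newer parts read
 * the register at "offset" and extract the field with "shift"/"mask".
 */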
static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
	{QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
	{QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
	{QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1},
	{QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
	{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
};

static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
	{QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1},
};

static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
	{QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0},
};

static const struct hisi_qm_cap_info qm_basic_info[] = {
	{QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400},
	{QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400},
	{QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(31, 0), 0x800, 0x4000800, 0x4000800},
	{QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
	{QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000},
	{QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001},
	{QM_ABN_IRQ_TYPE_CAP, 0x3114, 0, GENMASK(31, 0), 0x0, 0x10003, 0x10003},
	{QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002},
	{QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4},
	{QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
};

struct qm_mailbox {
	__le16 w0;
	__le16 queue_num;
	__le32 base_l;
	__le32 base_h;
	__le32 rsvd;
};

struct qm_doorbell {
	__le16 queue_num;
	__le16 cmd;
	__le16 index;
	__le16 priority;
};

struct hisi_qm_resource {
	struct hisi_qm *qm;
	int distance;
	struct list_head list;
};

/**
 * struct qm_hw_err - Structure describing the device errors
 * @list: hardware error list
 * @timestamp: timestamp when the error occurred
 */
struct qm_hw_err {
	struct list_head list;
	unsigned long long timestamp;
};

struct hisi_qm_hw_ops {
	int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
	void (*qm_db)(struct hisi_qm *qm, u16 qn,
		      u8 cmd, u16 index, u8 priority);
	int (*debug_init)(struct hisi_qm *qm);
	void (*hw_error_init)(struct hisi_qm *qm);
	void (*hw_error_uninit)(struct hisi_qm *qm);
	enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
	int (*set_msi)(struct hisi_qm *qm, bool set);
};

struct hisi_qm_hw_error {
	u32 int_msk;
	const char *msg;
};

static const struct hisi_qm_hw_error qm_hw_error[] = {
	{ .int_msk = BIT(0), .msg = "qm_axi_rresp" },
	{ .int_msk = BIT(1), .msg = "qm_axi_bresp" },
	{ .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
	{ .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
	{ .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
	{ .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
	{ .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
	{ .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
	{ .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
	{ .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
	{ .int_msk = BIT(10), .msg = "qm_db_timeout" },
	{ .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
	{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
	{ .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
	{ .int_msk = BIT(14), .msg = "qm_flr_timeout" },
	{ /* sentinel */ }
};

static const char * const qm_db_timeout[] = {
	"sq", "cq", "eq", "aeq",
};

static const char * const qm_fifo_overflow[] = {
	"cq", "eq", "aeq",
};

static const char * const qp_s[] = {
	"none", "init", "start", "stop", "close",
};

struct qm_typical_qos_table {
	u32 start;
	u32 end;
	u32 val;
};

/* the qos step is 100 */
static struct qm_typical_qos_table shaper_cir_s[] = {
	{100, 100, 4},
	{200, 200, 3},
	{300, 500, 2},
	{600, 1000, 1},
	{1100, 100000, 0},
};

static struct qm_typical_qos_table shaper_cbs_s[] = {
	{100, 200, 9},
	{300, 500, 11},
	{600, 1000, 12},
	{1100, 10000, 16},
	{10100, 25000, 17},
	{25100, 50000, 18},
	{50100, 100000, 19}
};

static void qm_irqs_unregister(struct hisi_qm *qm);

static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
{
	enum qm_state curr = atomic_read(&qm->status.flags);
	bool avail = false;

	switch (curr) {
	case QM_INIT:
		if (new == QM_START || new == QM_CLOSE)
			avail = true;
		break;
	case QM_START:
		if (new == QM_STOP)
			avail = true;
		break;
	case QM_STOP:
		if (new == QM_CLOSE || new == QM_START)
			avail = true;
		break;
	default:
		break;
	}

	dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
		qm_s[curr], qm_s[new]);

	if (!avail)
		dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
			 qm_s[curr], qm_s[new]);

	return avail;
}

static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
			      enum qp_state new)
{
	enum qm_state qm_curr = atomic_read(&qm->status.flags);
	enum qp_state qp_curr = 0;
	bool avail = false;

	if (qp)
		qp_curr = atomic_read(&qp->qp_status.flags);

	switch (new) {
	case QP_INIT:
		if (qm_curr == QM_START || qm_curr == QM_INIT)
			avail = true;
		break;
	case QP_START:
		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
		    (qm_curr == QM_START && qp_curr == QP_STOP))
			avail = true;
		break;
	case QP_STOP:
		if ((qm_curr == QM_START && qp_curr == QP_START) ||
		    (qp_curr == QP_INIT))
			avail = true;
		break;
	case QP_CLOSE:
		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
		    (qm_curr == QM_START && qp_curr == QP_STOP) ||
		    (qm_curr == QM_STOP && qp_curr == QP_STOP) ||
		    (qm_curr == QM_STOP && qp_curr == QP_INIT))
			avail = true;
		break;
	default:
		break;
	}

	dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
		qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);

	if (!avail)
		dev_warn(&qm->pdev->dev,
			 "Can not change qp state from %s to %s in QM %s\n",
			 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);

	return avail;
}

static u32 qm_get_hw_error_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
}

static u32 qm_get_dev_err_status(struct hisi_qm *qm)
{
	return qm->err_ini->get_dev_hw_err_status(qm);
}

/* Check if the error causes the master ooo block */
static bool qm_check_dev_error(struct hisi_qm *qm)
{
	u32 val, dev_val;

	if (qm->fun_type == QM_HW_VF)
		return false;

	val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
	dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;

	return val || dev_val;
}

static int qm_wait_reset_finish(struct hisi_qm *qm)
{
	int delay = 0;

	/* All reset requests need to be queued for processing */
	while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return -EBUSY;
	}

	return 0;
}

static int qm_reset_prepare_ready(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	/*
	 * On Kunpeng920, the PF and the VFs on the host do not support
	 * being reset at the same time.
	 */
	if (qm->ver < QM_HW_V3)
		return qm_wait_reset_finish(pf_qm);

	return qm_wait_reset_finish(qm);
}

static void qm_reset_bit_clear(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	if (qm->ver < QM_HW_V3)
		clear_bit(QM_RESETTING, &pf_qm->misc_ctl);

	clear_bit(QM_RESETTING, &qm->misc_ctl);
}

static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
			   u64 base, u16 queue, bool op)
{
	mailbox->w0 = cpu_to_le16((cmd) |
		((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
		(0x1 << QM_MB_BUSY_SHIFT));
	mailbox->queue_num = cpu_to_le16(queue);
	mailbox->base_l = cpu_to_le32(lower_32_bits(base));
	mailbox->base_h = cpu_to_le32(upper_32_bits(base));
	mailbox->rsvd = 0;
}

/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
					  val, !((val >> QM_MB_BUSY_SHIFT) &
					  0x1), POLL_PERIOD, POLL_TIMEOUT);
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);

/* All 128 bits must be written to the hardware in one go to trigger a mailbox */
static void qm_mb_write(struct hisi_qm *qm, const void *src)
{
	void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
	unsigned long tmp0 = 0, tmp1 = 0;

	if (!IS_ENABLED(CONFIG_ARM64)) {
		memcpy_toio(fun_base, src, 16);
		dma_wmb();
		return;
	}

	asm volatile("ldp %0, %1, %3\n"
		     "stp %0, %1, %2\n"
		     "dmb oshst\n"
		     : "=&r" (tmp0),
		       "=&r" (tmp1),
		       "+Q" (*((char __iomem *)fun_base))
		     : "Q" (*((char *)src))
		     : "memory");
}
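
/*
 * A rough C equivalent of the arm64 path above (illustrative only; the
 * inline asm is what guarantees a single 128-bit bus transaction, which
 * plain C stores would not):
 *
 *	u64 lo = ((const u64 *)src)[0];	// ldp loads both halves
 *	u64 hi = ((const u64 *)src)[1];
 *	// stp then writes lo and hi as one 16-byte store to the device,
 *	// and "dmb oshst" orders that store before later accesses.
 */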

static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
{
	int ret;
	u32 val;

	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
		ret = -EBUSY;
		goto mb_busy;
	}

	qm_mb_write(qm, mailbox);

	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
		ret = -ETIMEDOUT;
		goto mb_busy;
	}

	val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
	if (val & QM_MB_STATUS_MASK) {
		dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
		ret = -EIO;
		goto mb_busy;
	}

	return 0;

mb_busy:
	atomic64_inc(&qm->debug.dfx.mb_err_cnt);
	return ret;
}

int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
	       bool op)
{
	struct qm_mailbox mailbox;
	int ret;

	dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
		queue, cmd, (unsigned long long)dma_addr);

	qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);

	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	mutex_unlock(&qm->mailbox_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_mb);
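
/*
 * Typical usage (a minimal sketch based on helpers later in this file):
 * dumping a queue's SQC is a mailbox read, configuring one is a mailbox
 * write, with "op" selecting the direction:
 *
 *	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1); // dump/read
 *	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0); // configure
 *
 * See qm_dump_sqc_raw() and qm_sq_ctx_cfg() below.
 */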

static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	u64 doorbell;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V1) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);

	writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
}

static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	void __iomem *io_base = qm->io_base;
	u16 randata = 0;
	u64 doorbell;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		io_base = qm->db_io_base + (u64)qn * qm->db_interval +
			  QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, io_base);
}

static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
		qn, cmd, index);

	qm->ops->qm_db(qm, qn, cmd, index, priority);
}

static void qm_disable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	/* if qm enables clock gating in Kunpeng930, qos will be inaccurate. */
	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + QM_PM_CTRL);
	val |= QM_IDLE_DISABLE;
	writel(val, qm->io_base + QM_PM_CTRL);
}

static int qm_dev_mem_reset(struct hisi_qm *qm)
{
	u32 val;

	writel(0x1, qm->io_base + QM_MEM_START_INIT);
	return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
					  val & BIT(0), POLL_PERIOD,
					  POLL_TIMEOUT);
}

/**
 * hisi_qm_get_hw_info() - Get device information.
 * @qm: The qm from which to get the information.
 * @info_table: Array describing the device capabilities.
 * @index: Index of the capability in info_table.
 * @is_read: Whether the value can be read from a register; if not, the
 *	     per-version default from the table is returned.
 *
 * This function returns the device information the caller needs.
 */
u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
			const struct hisi_qm_cap_info *info_table,
			u32 index, bool is_read)
{
	u32 val;

	switch (qm->ver) {
	case QM_HW_V1:
		return info_table[index].v1_val;
	case QM_HW_V2:
		return info_table[index].v2_val;
	default:
		if (!is_read)
			return info_table[index].v3_val;

		val = readl(qm->io_base + info_table[index].offset);
		return (val >> info_table[index].shift) & info_table[index].mask;
	}
}
EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);
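
/*
 * Example (values taken from the qm_basic_info table above): on hardware
 * that supports register reads, QM_TOTAL_QP_NUM_CAP is fetched as
 *
 *	val = readl(qm->io_base + 0x100158);
 *	total_qp_num = (val >> 0) & GENMASK(10, 0);
 *
 * while a QM_HW_V1 part simply returns the 0x1000 fallback from the table.
 */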

static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
			     u16 *high_bits, enum qm_basic_type type)
{
	u32 depth;

	depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
	*low_bits = depth & QM_XQ_DEPTH_MASK;
	*high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
}
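
/*
 * Example: the QM_XEQ_DEPTH_CAP value 0x4000800 from qm_basic_info unpacks
 * as low 16 bits = 0x800 (2048 entries) and high 16 bits = 0x400 (1024
 * entries); the caller passes the two pointers for whichever queue pair
 * the capability describes.
 */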

static u32 qm_get_irq_num(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_PF)
		return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);

	return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver);
}

static int qm_pm_get_sync(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return 0;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "failed to get_sync(%d).\n", ret);
		return ret;
	}

	return 0;
}

static void qm_pm_put_sync(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

static void qm_cq_head_update(struct hisi_qp *qp)
{
	if (qp->qp_status.cq_head == qp->cq_depth - 1) {
		qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
		qp->qp_status.cq_head = 0;
	} else {
		qp->qp_status.cq_head++;
	}
}
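
/*
 * Completion detection relies on the CQE phase bit rather than on a
 * hardware head pointer: the device writes each CQE with the current
 * phase value, and software consumes entries while QM_CQE_PHASE(cqe)
 * matches qp_status.cqc_phase. When cq_head wraps from cq_depth - 1 back
 * to 0 above, cqc_phase is inverted, so stale entries left over from the
 * previous lap around the ring no longer compare equal and are ignored.
 */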

static void qm_poll_req_cb(struct hisi_qp *qp)
{
	struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
	struct hisi_qm *qm = qp->qm;

	while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
		dma_rmb();
		qp->req_cb(qp, qp->sqe + qm->sqe_size *
			   le16_to_cpu(cqe->sq_head));
		qm_cq_head_update(qp);
		cqe = qp->cqe + qp->qp_status.cq_head;
		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
		      qp->qp_status.cq_head, 0);
		atomic_dec(&qp->qp_status.used);
	}

	/* set c_flag */
	qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
}

static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
{
	struct hisi_qm *qm = poll_data->qm;
	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
	u16 eq_depth = qm->eq_depth;
	int eqe_num = 0;
	u16 cqn;

	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
		cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
		poll_data->qp_finish_id[eqe_num] = cqn;
		eqe_num++;

		if (qm->status.eq_head == eq_depth - 1) {
			qm->status.eqc_phase = !qm->status.eqc_phase;
			eqe = qm->eqe;
			qm->status.eq_head = 0;
		} else {
			eqe++;
			qm->status.eq_head++;
		}

		if (eqe_num == (eq_depth >> 1) - 1)
			break;
	}

	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

	return eqe_num;
}

static void qm_work_process(struct work_struct *work)
{
	struct hisi_qm_poll_data *poll_data =
		container_of(work, struct hisi_qm_poll_data, work);
	struct hisi_qm *qm = poll_data->qm;
	struct hisi_qp *qp;
	int eqe_num, i;

	/* Get qp id of completed tasks and re-enable the interrupt. */
	eqe_num = qm_get_complete_eqe_num(poll_data);
	for (i = eqe_num - 1; i >= 0; i--) {
		qp = &qm->qp_array[poll_data->qp_finish_id[i]];
		if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
			continue;

		if (qp->event_cb) {
			qp->event_cb(qp);
			continue;
		}

		if (likely(qp->req_cb))
			qm_poll_req_cb(qp);
	}
}

static bool do_qm_eq_irq(struct hisi_qm *qm)
{
	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
	struct hisi_qm_poll_data *poll_data;
	u16 cqn;

	if (!readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
		return false;

	if (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
		cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
		poll_data = &qm->poll_data[cqn];
		queue_work(qm->wq, &poll_data->work);

		return true;
	}

	return false;
}

static irqreturn_t qm_eq_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	bool ret;

	ret = do_qm_eq_irq(qm);
	if (ret)
		return IRQ_HANDLED;

	atomic64_inc(&qm->debug.dfx.err_irq_cnt);
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

	return IRQ_NONE;
}

static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_STATUS);
	val &= QM_IFC_INT_STATUS_MASK;
	if (!val)
		return IRQ_NONE;

	schedule_work(&qm->cmd_process);

	return IRQ_HANDLED;
}

static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
{
	u32 *addr;

	if (qp->is_in_kernel)
		return;

	addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset;
	*addr = 1;

	/* make sure setup is completed */
	smp_wmb();
}

static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
{
	struct hisi_qp *qp = &qm->qp_array[qp_id];

	qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET);
	hisi_qm_stop_qp(qp);
	qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET);
}

static void qm_reset_function(struct hisi_qm *qm)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
	struct device *dev = &qm->pdev->dev;
	int ret;

	if (qm_check_dev_error(pf_qm))
		return;

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		dev_err(dev, "reset function not ready\n");
		return;
	}

	ret = hisi_qm_stop(qm, QM_FLR);
	if (ret) {
		dev_err(dev, "failed to stop qm when reset function\n");
		goto clear_bit;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		dev_err(dev, "failed to start qm when reset function\n");

clear_bit:
	qm_reset_bit_clear(qm);
}

static irqreturn_t qm_aeq_thread(int irq, void *data)
{
	struct hisi_qm *qm = data;
	struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
	u16 aeq_depth = qm->aeq_depth;
	u32 type, qp_id;

	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
		type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
		qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;

		switch (type) {
		case QM_EQ_OVERFLOW:
			dev_err(&qm->pdev->dev, "eq overflow, reset function\n");
			qm_reset_function(qm);
			return IRQ_HANDLED;
		case QM_CQ_OVERFLOW:
			dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n",
				qp_id);
			fallthrough;
		case QM_CQE_ERROR:
			qm_disable_qp(qm, qp_id);
			break;
		default:
			dev_err(&qm->pdev->dev, "unknown error type %u\n",
				type);
			break;
		}

		if (qm->status.aeq_head == aeq_depth - 1) {
			qm->status.aeqc_phase = !qm->status.aeqc_phase;
			aeqe = qm->aeqe;
			qm->status.aeq_head = 0;
		} else {
			aeqe++;
			qm->status.aeq_head++;
		}
	}

	qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);

	return IRQ_HANDLED;
}

static irqreturn_t qm_aeq_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;

	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
	if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}

static void qm_init_qp_status(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;

	qp_status->sq_tail = 0;
	qp_status->cq_head = 0;
	qp_status->cqc_phase = true;
	atomic_set(&qp_status->used, 0);
}

static void qm_init_prefetch(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 page_type = 0x0;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	switch (PAGE_SIZE) {
	case SZ_4K:
		page_type = 0x0;
		break;
	case SZ_16K:
		page_type = 0x1;
		break;
	case SZ_64K:
		page_type = 0x2;
		break;
	default:
		dev_err(dev, "system page size is not supported: %lu, default set to 4KB",
			PAGE_SIZE);
	}

	writel(page_type, qm->io_base + QM_PAGE_SIZE);
}

/*
 * acc_shaper_para_calc() - Compute the shaper IR value from the rate
 * factors; the return value is the calculated rate.
 * The rate is expressed in Mbps: ir = 100 means 100 Mbps, ir = 10000
 * means 10 Gbps. In the formula below, 8000 is QM_QOS_DIVISOR_CLK and
 * Tick is QM_QOS_TICK:
 *
 *	        IR_b * (2 ^ IR_u) * 8000
 * IR(Mbps) = -------------------------
 *	          Tick * (2 ^ IR_s)
 */
static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
{
	return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) /
	       (QM_QOS_TICK * (1 << cir_s));
}

static u32 acc_shaper_calc_cbs_s(u32 ir)
{
	int table_size = ARRAY_SIZE(shaper_cbs_s);
	int i;

	for (i = 0; i < table_size; i++) {
		if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end)
			return shaper_cbs_s[i].val;
	}

	return QM_SHAPER_MIN_CBS_S;
}

static u32 acc_shaper_calc_cir_s(u32 ir)
{
	int table_size = ARRAY_SIZE(shaper_cir_s);
	int i;

	for (i = 0; i < table_size; i++) {
		if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end)
			return shaper_cir_s[i].val;
	}

	return 0;
}

static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
{
	u32 cir_b, cir_u, cir_s, ir_calc;
	u32 error_rate;

	factor->cbs_s = acc_shaper_calc_cbs_s(ir);
	cir_s = acc_shaper_calc_cir_s(ir);

	for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
		for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
			ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);

			error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
			if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
				factor->cir_b = cir_b;
				factor->cir_u = cir_u;
				factor->cir_s = cir_s;
				return 0;
			}
		}
	}

	return -EINVAL;
}
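
/*
 * Worked example (illustrative numbers): a func_qos value of 100 gives
 * ir = 100 * QM_QOS_RATE = 10000 Mbps (10 Gbps). The tables yield
 * cbs_s = 16 and cir_s = 0, and the search above first converges at
 * cir_b = 120, cir_u = 3:
 *
 *	ir_calc = (120 * 0x1f40 * (1 << 3)) / (0x300 * (1 << 0))
 *	        = (120 * 8000 * 8) / 768 = 10000
 *
 * an exact match, so the error-rate check passes on that iteration.
 */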

static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
			    u32 number, struct qm_shaper_factor *factor)
{
	u64 tmp = 0;

	if (number > 0) {
		switch (type) {
		case SQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_SQC_VFT_BUF_SIZE |
				      QM_SQC_VFT_SQC_SIZE |
				      QM_SQC_VFT_INDEX_NUMBER |
				      QM_SQC_VFT_VALID |
				      (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
			} else {
				tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
				      QM_SQC_VFT_VALID |
				      (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
			}
			break;
		case CQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_CQC_VFT_BUF_SIZE |
				      QM_CQC_VFT_SQC_SIZE |
				      QM_CQC_VFT_INDEX_NUMBER |
				      QM_CQC_VFT_VALID;
			} else {
				tmp = QM_CQC_VFT_VALID;
			}
			break;
		case SHAPER_VFT:
			if (factor) {
				tmp = factor->cir_b |
				      (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
				      (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
				      (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
				      (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
			}
			break;
		}
	}

	writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
	writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
}

static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
			     u32 fun_num, u32 base, u32 number)
{
	struct qm_shaper_factor *factor = NULL;
	unsigned int val;
	int ret;

	if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
		factor = &qm->factor[fun_num];

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), POLL_PERIOD,
					 POLL_TIMEOUT);
	if (ret)
		return ret;

	writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
	writel(type, qm->io_base + QM_VFT_CFG_TYPE);
	if (type == SHAPER_VFT)
		fun_num |= base << QM_SHAPER_VFT_OFFSET;

	writel(fun_num, qm->io_base + QM_VFT_CFG);

	qm_vft_data_cfg(qm, type, base, number, factor);

	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

	return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					  val & BIT(0), POLL_PERIOD,
					  POLL_TIMEOUT);
}
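
/*
 * The VFT programming above is a small ready/enable handshake with the
 * hardware, summarized here for reference:
 *
 *	poll  QM_VFT_CFG_RDY until BIT(0) is set    // engine idle
 *	write 0x0 to QM_VFT_CFG_OP_WR
 *	write QM_VFT_CFG_TYPE = SQC/CQC/SHAPER_VFT
 *	write QM_VFT_CFG = function number (plus base for the shaper)
 *	write QM_VFT_CFG_DATA_L/_H                  // via qm_vft_data_cfg()
 *	write QM_VFT_CFG_RDY = 0, QM_VFT_CFG_OP_ENABLE = 1
 *	poll  QM_VFT_CFG_RDY until BIT(0) is set    // commit complete
 */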

static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
{
	u32 qos = qm->factor[fun_num].func_qos;
	int ret, i;

	ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
	if (ret) {
		dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
		return ret;
	}
	writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
	for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
		/* The base number of queues is reused by the different alg types. */
		ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/* The config should be conducted after qm_dev_mem_reset() */
static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
			      u32 number)
{
	int ret, i;

	for (i = SQC_VFT; i <= CQC_VFT; i++) {
		ret = qm_set_vft_common(qm, i, fun_num, base, number);
		if (ret)
			return ret;
	}

	/* init default shaper qos val */
	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
		ret = qm_shaper_init_vft(qm, fun_num);
		if (ret)
			goto back_sqc_cqc;
	}

	return 0;
back_sqc_cqc:
	for (i = SQC_VFT; i <= CQC_VFT; i++)
		qm_set_vft_common(qm, i, fun_num, 0, 0);

	return ret;
}

static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
{
	u64 sqc_vft;
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	*number = (QM_SQC_VFT_NUM_MASK_V2 &
		   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return 0;
}

void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
			dma_addr_t *dma_addr)
{
	struct device *dev = &qm->pdev->dev;
	void *ctx_addr;

	ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
	if (!ctx_addr)
		return ERR_PTR(-ENOMEM);

	*dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_addr)) {
		dev_err(dev, "DMA mapping error!\n");
		kfree(ctx_addr);
		return ERR_PTR(-ENOMEM);
	}

	return ctx_addr;
}

void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
		      const void *ctx_addr, dma_addr_t *dma_addr)
{
	struct device *dev = &qm->pdev->dev;

	dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
	kfree(ctx_addr);
}

static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
{
	return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
}

static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
{
	return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
}

static void qm_hw_error_init_v1(struct hisi_qm *qm)
{
	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_cfg(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
	/* clear QM hw residual error source */
	writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);

	/* configure error type */
	writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
	writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
	writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
	writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
}

static void qm_hw_error_init_v2(struct hisi_qm *qm)
{
	u32 irq_unmask;

	qm_hw_error_cfg(qm);

	irq_unmask = ~qm->error_mask;
	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
{
	u32 irq_mask = qm->error_mask;

	irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_init_v3(struct hisi_qm *qm)
{
	u32 irq_unmask;

	qm_hw_error_cfg(qm);

	/* enable close master ooo when hardware error happened */
	writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);

	irq_unmask = ~qm->error_mask;
	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
{
	u32 irq_mask = qm->error_mask;

	irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);

	/* disable close master ooo when hardware error happened */
	writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
}

static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
{
	const struct hisi_qm_hw_error *err;
	struct device *dev = &qm->pdev->dev;
	u32 reg_val, type, vf_num;
	int i;

	for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
		err = &qm_hw_error[i];
		if (!(err->int_msk & error_status))
			continue;

		dev_err(dev, "%s [error status=0x%x] found\n",
			err->msg, err->int_msk);

		if (err->int_msk & QM_DB_TIMEOUT) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
			type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
			       QM_DB_TIMEOUT_TYPE_SHIFT;
			vf_num = reg_val & QM_DB_TIMEOUT_VF;
			dev_err(dev, "qm %s doorbell timeout in function %u\n",
				qm_db_timeout[type], vf_num);
		} else if (err->int_msk & QM_OF_FIFO_OF) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
			type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
			       QM_FIFO_OVERFLOW_TYPE_SHIFT;
			vf_num = reg_val & QM_FIFO_OVERFLOW_VF;

			if (type < ARRAY_SIZE(qm_fifo_overflow))
				dev_err(dev, "qm %s fifo overflow in function %u\n",
					qm_fifo_overflow[type], vf_num);
			else
				dev_err(dev, "unknown error type\n");
		}
	}
}

static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
	u32 error_status, tmp;

	/* read err sts */
	tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
	error_status = qm->error_mask & tmp;

	if (error_status) {
		if (error_status & QM_ECC_MBIT)
			qm->err_status.is_qm_ecc_mbit = true;

		qm_log_hw_error(qm, error_status);
		if (error_status & qm->err_info.qm_reset_mask)
			return ACC_ERR_NEED_RESET;

		writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
		writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
	}

	return ACC_ERR_RECOVERED;
}

static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
{
	struct qm_mailbox mailbox;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret)
		goto err_unlock;

	*msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
	       ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);

err_unlock:
	mutex_unlock(&qm->mailbox_lock);
	return ret;
}

static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
{
	u32 val;

	if (qm->fun_type == QM_HW_PF)
		writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);

	val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
	val |= QM_IFC_INT_SOURCE_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
}

static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
{
	struct device *dev = &qm->pdev->dev;
	u32 cmd;
	u64 msg;
	int ret;

	ret = qm_get_mb_cmd(qm, &msg, vf_id);
	if (ret) {
		dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
		return;
	}

	cmd = msg & QM_MB_CMD_DATA_MASK;
	switch (cmd) {
	case QM_VF_PREPARE_FAIL:
		dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
		break;
	case QM_VF_START_FAIL:
		dev_err(dev, "failed to start VF(%u)!\n", vf_id);
		break;
	case QM_VF_PREPARE_DONE:
	case QM_VF_START_DONE:
		break;
	default:
		dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
		break;
	}
}

static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 vfs_num = qm->vfs_num;
	int cnt = 0;
	int ret = 0;
	u64 val;
	u32 i;

	if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
		return 0;

	while (true) {
		val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
		/* All VFs have sent their command to the PF; stop waiting. */
		if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1))
			break;

		if (++cnt > QM_MAX_PF_WAIT_COUNT) {
			ret = -EBUSY;
			break;
		}

		msleep(QM_WAIT_DST_ACK);
	}

	/* PF check VFs msg */
	for (i = 1; i <= vfs_num; i++) {
		if (val & BIT(i))
			qm_handle_vf_msg(qm, i);
		else
			dev_err(dev, "VF(%u) not ping PF!\n", i);
	}

	/* PF clear interrupt to ack VFs */
	qm_clear_cmd_interrupt(qm, val);

	return ret;
}

static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
{
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_CFG);
	val &= ~QM_IFC_SEND_ALL_VFS;
	val |= fun_num;
	writel(val, qm->io_base + QM_IFC_INT_CFG);

	val = readl(qm->io_base + QM_IFC_INT_SET_P);
	val |= QM_IFC_INT_SET_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SET_P);
}

static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_SET_V);
	val |= QM_IFC_INT_SET_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SET_V);
}

static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_mailbox mailbox;
	int cnt = 0;
	u64 val;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret) {
		dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
		goto err_unlock;
	}

	qm_trigger_vf_interrupt(qm, fun_num);
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* if VF respond, PF notifies VF successfully. */
		if (!(val & BIT(fun_num)))
			goto err_unlock;

		if (++cnt > QM_MAX_PF_WAIT_COUNT) {
			dev_err(dev, "failed to get response from VF(%u)!\n", fun_num);
			ret = -ETIMEDOUT;
			break;
		}
	}

err_unlock:
	mutex_unlock(&qm->mailbox_lock);
	return ret;
}

static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
{
	struct device *dev = &qm->pdev->dev;
	u32 vfs_num = qm->vfs_num;
	struct qm_mailbox mailbox;
	u64 val = 0;
	int cnt = 0;
	int ret;
	u32 i;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
	mutex_lock(&qm->mailbox_lock);
	/* PF sends command to all VFs by mailbox */
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret) {
		dev_err(dev, "failed to send command to VFs!\n");
		mutex_unlock(&qm->mailbox_lock);
		return ret;
	}

	qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* If all VFs acked, PF notifies VFs successfully. */
		if (!(val & GENMASK(vfs_num, 1))) {
			mutex_unlock(&qm->mailbox_lock);
			return 0;
		}

		if (++cnt > QM_MAX_PF_WAIT_COUNT)
			break;
	}

	mutex_unlock(&qm->mailbox_lock);

	/* Check which VFs failed to respond in time. */
1672 for (i = 1; i <= vfs_num; i++) {
1673 if (val & BIT(i))
1674 dev_err(dev, "failed to get response from VF(%u)!\n", i);
1675 }
1676
1677 return -ETIMEDOUT;
1678}
1679
1680static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
1681{
1682 struct qm_mailbox mailbox;
1683 int cnt = 0;
1684 u32 val;
1685 int ret;
1686
1687 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
1688 mutex_lock(&qm->mailbox_lock);
1689 ret = qm_mb_nolock(qm, &mailbox);
1690 if (ret) {
1691 dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
1692 goto unlock;
1693 }
1694
1695 qm_trigger_pf_interrupt(qm);
1696 /* Waiting for PF response */
1697 while (true) {
1698 msleep(QM_WAIT_DST_ACK);
1699 val = readl(qm->io_base + QM_IFC_INT_SET_V);
1700 if (!(val & QM_IFC_INT_STATUS_MASK))
1701 break;
1702
1703 if (++cnt > QM_MAX_VF_WAIT_COUNT) {
1704 ret = -ETIMEDOUT;
1705 break;
1706 }
1707 }
1708
1709unlock:
1710 mutex_unlock(&qm->mailbox_lock);
1711 return ret;
1712}
1713
1714static int qm_stop_qp(struct hisi_qp *qp)
1715{
1716 return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
1717}
1718
1719static int qm_set_msi(struct hisi_qm *qm, bool set)
1720{
1721 struct pci_dev *pdev = qm->pdev;
1722
1723 if (set) {
1724 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
1725 0);
1726 } else {
1727 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
1728 ACC_PEH_MSI_DISABLE);
1729 if (qm->err_status.is_qm_ecc_mbit ||
1730 qm->err_status.is_dev_ecc_mbit)
1731 return 0;
1732
1733 mdelay(1);
1734 if (readl(qm->io_base + QM_PEH_DFX_INFO0))
1735 return -EFAULT;
1736 }
1737
1738 return 0;
1739}
1740
1741static void qm_wait_msi_finish(struct hisi_qm *qm)
1742{
1743 struct pci_dev *pdev = qm->pdev;
1744 u32 cmd = ~0;
1745 int cnt = 0;
1746 u32 val;
1747 int ret;
1748
1749 while (true) {
1750 pci_read_config_dword(pdev, pdev->msi_cap +
1751 PCI_MSI_PENDING_64, &cmd);
1752 if (!cmd)
1753 break;
1754
1755 if (++cnt > MAX_WAIT_COUNTS) {
1756 pci_warn(pdev, "failed to empty MSI PENDING!\n");
1757 break;
1758 }
1759
1760 udelay(1);
1761 }
1762
1763 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0,
1764 val, !(val & QM_PEH_DFX_MASK),
1765 POLL_PERIOD, POLL_TIMEOUT);
1766 if (ret)
1767 pci_warn(pdev, "failed to empty PEH MSI!\n");
1768
1769 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1,
1770 val, !(val & QM_PEH_MSI_FINISH_MASK),
1771 POLL_PERIOD, POLL_TIMEOUT);
1772 if (ret)
1773 pci_warn(pdev, "failed to finish MSI operation!\n");
1774}
1775
1776static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
1777{
1778 struct pci_dev *pdev = qm->pdev;
1779 int ret = -ETIMEDOUT;
1780 u32 cmd, i;
1781
1782 pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
1783 if (set)
1784 cmd |= QM_MSI_CAP_ENABLE;
1785 else
1786 cmd &= ~QM_MSI_CAP_ENABLE;
1787
1788 pci_write_config_dword(pdev, pdev->msi_cap, cmd);
1789 if (set) {
1790 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
1791 pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
1792 if (cmd & QM_MSI_CAP_ENABLE)
1793 return 0;
1794
1795 udelay(1);
1796 }
1797 } else {
1798 udelay(WAIT_PERIOD_US_MIN);
1799 qm_wait_msi_finish(qm);
1800 ret = 0;
1801 }
1802
1803 return ret;
1804}
1805
1806static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
1807 .qm_db = qm_db_v1,
1808 .hw_error_init = qm_hw_error_init_v1,
1809 .set_msi = qm_set_msi,
1810};
1811
1812static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
1813 .get_vft = qm_get_vft_v2,
1814 .qm_db = qm_db_v2,
1815 .hw_error_init = qm_hw_error_init_v2,
1816 .hw_error_uninit = qm_hw_error_uninit_v2,
1817 .hw_error_handle = qm_hw_error_handle_v2,
1818 .set_msi = qm_set_msi,
1819};
1820
1821static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
1822 .get_vft = qm_get_vft_v2,
1823 .qm_db = qm_db_v2,
1824 .hw_error_init = qm_hw_error_init_v3,
1825 .hw_error_uninit = qm_hw_error_uninit_v3,
1826 .hw_error_handle = qm_hw_error_handle_v2,
1827 .set_msi = qm_set_msi_v3,
1828};
1829
1830static void *qm_get_avail_sqe(struct hisi_qp *qp)
1831{
1832 struct hisi_qp_status *qp_status = &qp->qp_status;
1833 u16 sq_tail = qp_status->sq_tail;
1834
1835 if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1))
1836 return NULL;
1837
1838 return qp->sqe + sq_tail * qp->qm->sqe_size;
1839}
1840
1841static void hisi_qm_unset_hw_reset(struct hisi_qp *qp)
1842{
1843 u64 *addr;
1844
1845 /* Use last 64 bits of DUS to reset status. */
1846 addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET;
1847 *addr = 0;
1848}
1849
1850static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
1851{
1852 struct device *dev = &qm->pdev->dev;
1853 struct hisi_qp *qp;
1854 int qp_id;
1855
1856 if (!qm_qp_avail_state(qm, NULL, QP_INIT))
1857 return ERR_PTR(-EPERM);
1858
1859 if (qm->qp_in_used == qm->qp_num) {
1860 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
1861 qm->qp_num);
1862 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
1863 return ERR_PTR(-EBUSY);
1864 }
1865
1866 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
1867 if (qp_id < 0) {
1868 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
1869 qm->qp_num);
1870 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
1871 return ERR_PTR(-EBUSY);
1872 }
1873
1874 qp = &qm->qp_array[qp_id];
1875 hisi_qm_unset_hw_reset(qp);
1876 memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth);
1877
1878 qp->event_cb = NULL;
1879 qp->req_cb = NULL;
1880 qp->qp_id = qp_id;
1881 qp->alg_type = alg_type;
1882 qp->is_in_kernel = true;
1883 qm->qp_in_used++;
1884 atomic_set(&qp->qp_status.flags, QP_INIT);
1885
1886 return qp;
1887}
1888
1889/**
1890 * hisi_qm_create_qp() - Create a queue pair from qm.
1891 * @qm: The qm we create a qp from.
1892 * @alg_type: Accelerator specific algorithm type in sqc.
1893 *
1894 * Return created qp, negative error code if failed.
1895 */
1896static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
1897{
1898 struct hisi_qp *qp;
1899 int ret;
1900
1901 ret = qm_pm_get_sync(qm);
1902 if (ret)
1903 return ERR_PTR(ret);
1904
1905 down_write(&qm->qps_lock);
1906 qp = qm_create_qp_nolock(qm, alg_type);
1907 up_write(&qm->qps_lock);
1908
1909 if (IS_ERR(qp))
1910 qm_pm_put_sync(qm);
1911
1912 return qp;
1913}
1914
1915/**
1916 * hisi_qm_release_qp() - Release a qp back to its qm.
1917 * @qp: The qp we want to release.
1918 *
1919 * This function releases the resource of a qp.
1920 */
1921static void hisi_qm_release_qp(struct hisi_qp *qp)
1922{
1923 struct hisi_qm *qm = qp->qm;
1924
1925 down_write(&qm->qps_lock);
1926
1927 if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
1928 up_write(&qm->qps_lock);
1929 return;
1930 }
1931
1932 qm->qp_in_used--;
1933 idr_remove(&qm->qp_idr, qp->qp_id);
1934
1935 up_write(&qm->qps_lock);
1936
1937 qm_pm_put_sync(qm);
1938}
1939
1940static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
1941{
1942 struct hisi_qm *qm = qp->qm;
1943 struct device *dev = &qm->pdev->dev;
1944 enum qm_hw_ver ver = qm->ver;
1945 struct qm_sqc *sqc;
1946 dma_addr_t sqc_dma;
1947 int ret;
1948
1949 sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
1950 if (!sqc)
1951 return -ENOMEM;
1952
1953 INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
1954 if (ver == QM_HW_V1) {
1955 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
1956 sqc->w8 = cpu_to_le16(qp->sq_depth - 1);
1957 } else {
1958 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
1959 sqc->w8 = 0; /* rand_qc */
1960 }
1961 sqc->cq_num = cpu_to_le16(qp_id);
1962 sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
1963
1964 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
1965 sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
1966 QM_QC_PASID_ENABLE_SHIFT);
1967
1968 sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
1969 DMA_TO_DEVICE);
1970 if (dma_mapping_error(dev, sqc_dma)) {
1971 kfree(sqc);
1972 return -ENOMEM;
1973 }
1974
1975 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
1976 dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
1977 kfree(sqc);
1978
1979 return ret;
1980}
1981
1982static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
1983{
1984 struct hisi_qm *qm = qp->qm;
1985 struct device *dev = &qm->pdev->dev;
1986 enum qm_hw_ver ver = qm->ver;
1987 struct qm_cqc *cqc;
1988 dma_addr_t cqc_dma;
1989 int ret;
1990
1991 cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
1992 if (!cqc)
1993 return -ENOMEM;
1994
1995 INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
1996 if (ver == QM_HW_V1) {
1997 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
1998 QM_QC_CQE_SIZE));
1999 cqc->w8 = cpu_to_le16(qp->cq_depth - 1);
2000 } else {
2001 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
2002 cqc->w8 = 0; /* rand_qc */
2003 }
2004 cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
2005
2006 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
2007 cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE);
2008
2009 cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
2010 DMA_TO_DEVICE);
2011 if (dma_mapping_error(dev, cqc_dma)) {
2012 kfree(cqc);
2013 return -ENOMEM;
2014 }
2015
2016 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
2017 dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
2018 kfree(cqc);
2019
2020 return ret;
2021}
2022
2023static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
2024{
2025 int ret;
2026
2027 qm_init_qp_status(qp);
2028
2029 ret = qm_sq_ctx_cfg(qp, qp_id, pasid);
2030 if (ret)
2031 return ret;
2032
2033 return qm_cq_ctx_cfg(qp, qp_id, pasid);
2034}
2035
2036static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
2037{
2038 struct hisi_qm *qm = qp->qm;
2039 struct device *dev = &qm->pdev->dev;
2040 int qp_id = qp->qp_id;
2041 u32 pasid = arg;
2042 int ret;
2043
2044 if (!qm_qp_avail_state(qm, qp, QP_START))
2045 return -EPERM;
2046
2047 ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
2048 if (ret)
2049 return ret;
2050
2051 atomic_set(&qp->qp_status.flags, QP_START);
2052 dev_dbg(dev, "queue %d started\n", qp_id);
2053
2054 return 0;
2055}
2056
2057/**
2058 * hisi_qm_start_qp() - Start a qp running.
2059 * @qp: The qp we want to start to run.
2060 * @arg: Accelerator specific argument.
2061 *
2062 * After this function, the qp can receive requests from the user. Return 0
2063 * on success, or a negative error code on failure.
2064 */
2065int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
2066{
2067 struct hisi_qm *qm = qp->qm;
2068 int ret;
2069
2070 down_write(&qm->qps_lock);
2071 ret = qm_start_qp_nolock(qp, arg);
2072 up_write(&qm->qps_lock);
2073
2074 return ret;
2075}
2076EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
2077
2078/**
2079 * qp_stop_fail_cb() - Call the request callback for a qp that failed to stop.
2080 * @qp: The qp that failed to stop.
2081 *
2082 * The callback should be invoked whether or not the task completed.
2083 */
2084static void qp_stop_fail_cb(struct hisi_qp *qp)
2085{
2086 int qp_used = atomic_read(&qp->qp_status.used);
2087 u16 cur_tail = qp->qp_status.sq_tail;
2088 u16 sq_depth = qp->sq_depth;
2089 u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth;
2090 struct hisi_qm *qm = qp->qm;
2091 u16 pos;
2092 int i;
2093
2094 for (i = 0; i < qp_used; i++) {
2095 pos = (i + cur_head) % sq_depth;
2096 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
2097 atomic_dec(&qp->qp_status.used);
2098 }
2099}
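
/*
 * Worked example of the head calculation above (illustrative numbers):
 * with sq_depth = 1024, sq_tail = 3 and qp_used = 5, cur_head is
 * (3 + 1024 - 5) % 1024 = 1022, so the callback runs for ring slots
 * 1022, 1023, 0, 1 and 2 -- the five sqes still pending across the
 * wrap of the ring.
 */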
2100
2101/**
2102 * qm_drain_qp() - Drain a qp.
2103 * @qp: The qp we want to drain.
2104 *
2105 * Determine whether the queue has been drained by comparing the tail
2106 * pointers of the sq and cq.
2107 */
2108static int qm_drain_qp(struct hisi_qp *qp)
2109{
2110 size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
2111 struct hisi_qm *qm = qp->qm;
2112 struct device *dev = &qm->pdev->dev;
2113 struct qm_sqc *sqc;
2114 struct qm_cqc *cqc;
2115 dma_addr_t dma_addr;
2116 int ret = 0, i = 0;
2117 void *addr;
2118
2119	/* No need to drain if master OOO is blocked by a device error. */
2120 if (qm_check_dev_error(qm))
2121 return 0;
2122
2123	/* Kunpeng930 supports draining the qp by the device */
2124 if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
2125 ret = qm_stop_qp(qp);
2126 if (ret)
2127 dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
2128 return ret;
2129 }
2130
2131 addr = hisi_qm_ctx_alloc(qm, size, &dma_addr);
2132 if (IS_ERR(addr)) {
2133 dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
2134 return -ENOMEM;
2135 }
2136
2137 while (++i) {
2138 ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
2139 if (ret) {
2140 dev_err_ratelimited(dev, "Failed to dump sqc!\n");
2141 break;
2142 }
2143 sqc = addr;
2144
2145 ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
2146 qp->qp_id);
2147 if (ret) {
2148 dev_err_ratelimited(dev, "Failed to dump cqc!\n");
2149 break;
2150 }
2151 cqc = addr + sizeof(struct qm_sqc);
2152
2153 if ((sqc->tail == cqc->tail) &&
2154 (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
2155 break;
2156
2157 if (i == MAX_WAIT_COUNTS) {
2158			dev_err(dev, "Failed to empty queue %u!\n", qp->qp_id);
2159 ret = -EBUSY;
2160 break;
2161 }
2162
2163 usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
2164 }
2165
2166 hisi_qm_ctx_free(qm, size, addr, &dma_addr);
2167
2168 return ret;
2169}
2170
2171static int qm_stop_qp_nolock(struct hisi_qp *qp)
2172{
2173 struct device *dev = &qp->qm->pdev->dev;
2174 int ret;
2175
2176 /*
2177	 * A qp may be stopped and released during reset. If the qp was
2178	 * stopped by the reset but still needs to be released afterwards,
2179	 * clear the is_resetting flag so that this qp will not be restarted
2180	 * after the reset completes.
2181 */
2182 if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
2183 qp->is_resetting = false;
2184 return 0;
2185 }
2186
2187 if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
2188 return -EPERM;
2189
2190 atomic_set(&qp->qp_status.flags, QP_STOP);
2191
2192 ret = qm_drain_qp(qp);
2193 if (ret)
2194 dev_err(dev, "Failed to drain out data for stopping!\n");
2195
2197 flush_workqueue(qp->qm->wq);
2198 if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
2199 qp_stop_fail_cb(qp);
2200
2201	dev_dbg(dev, "stop queue %u!\n", qp->qp_id);
2202
2203 return 0;
2204}
2205
2206/**
2207 * hisi_qm_stop_qp() - Stop a qp in qm.
2208 * @qp: The qp we want to stop.
2209 *
2210 * This function is the reverse of hisi_qm_start_qp(). Return 0 on success.
2211 */
2212int hisi_qm_stop_qp(struct hisi_qp *qp)
2213{
2214 int ret;
2215
2216 down_write(&qp->qm->qps_lock);
2217 ret = qm_stop_qp_nolock(qp);
2218 up_write(&qp->qm->qps_lock);
2219
2220 return ret;
2221}
2222EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
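
/*
 * Typical start/stop pairing as seen from an accelerator driver (a sketch
 * only; the qp is assumed to have been obtained via hisi_qm_alloc_qps_node()
 * and error handling is trimmed):
 *
 *	ret = hisi_qm_start_qp(qp, 0);
 *	if (ret)
 *		return ret;
 *	...				(submit work with hisi_qp_send())
 *	hisi_qm_stop_qp(qp);
 */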
2223
2224/**
2225 * hisi_qp_send() - Queue up a task in the hardware queue.
2226 * @qp: The qp in which to put the message.
2227 * @msg: The message.
2228 *
2229 * This function returns -EBUSY if the qp is currently full, and -EAGAIN
2230 * if the qp's related qm is resetting.
2231 *
2232 * Note: This function may run concurrently with qm_irq_thread and ACC
2233 * reset. It has no race with qm_irq_thread. However, an ACC reset may
2234 * happen during hisi_qp_send; for performance reasons no lock is taken
2235 * here. The reset can make the current qm_db fail or the sent sqe get
2236 * lost. The QM sync/async receive functions should handle such error
2237 * sqes, and the ACC reset-done handler should clear used sqes to 0.
2238 */
2239int hisi_qp_send(struct hisi_qp *qp, const void *msg)
2240{
2241 struct hisi_qp_status *qp_status = &qp->qp_status;
2242 u16 sq_tail = qp_status->sq_tail;
2243 u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth;
2244 void *sqe = qm_get_avail_sqe(qp);
2245
2246 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
2247 atomic_read(&qp->qm->status.flags) == QM_STOP ||
2248 qp->is_resetting)) {
2249 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
2250 return -EAGAIN;
2251 }
2252
2253 if (!sqe)
2254 return -EBUSY;
2255
2256 memcpy(sqe, msg, qp->qm->sqe_size);
2257
2258 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
2259 atomic_inc(&qp->qp_status.used);
2260 qp_status->sq_tail = sq_tail_next;
2261
2262 return 0;
2263}
2264EXPORT_SYMBOL_GPL(hisi_qp_send);
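
/*
 * A sketch of a submit loop built on the return codes above (illustrative
 * only): -EBUSY means the ring is full and the caller may retry, while
 * -EAGAIN means the qp or qm is stopping/resetting and the request should
 * be failed upwards.
 *
 *	do {
 *		ret = hisi_qp_send(qp, sqe);
 *	} while (ret == -EBUSY);
 *	if (ret)
 *		return ret;
 */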
2265
2266static void hisi_qm_cache_wb(struct hisi_qm *qm)
2267{
2268 unsigned int val;
2269
2270 if (qm->ver == QM_HW_V1)
2271 return;
2272
2273 writel(0x1, qm->io_base + QM_CACHE_WB_START);
2274 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
2275 val, val & BIT(0), POLL_PERIOD,
2276 POLL_TIMEOUT))
2277		dev_err(&qm->pdev->dev, "QM sqc cache writeback failed!\n");
2278}
2279
2280static void qm_qp_event_notifier(struct hisi_qp *qp)
2281{
2282 wake_up_interruptible(&qp->uacce_q->wait);
2283}
2284
2285/* This function returns the number of free qps in the qm. */
2286static int hisi_qm_get_available_instances(struct uacce_device *uacce)
2287{
2288 struct hisi_qm *qm = uacce->priv;
2289 int ret;
2290
2291 down_read(&qm->qps_lock);
2292 ret = qm->qp_num - qm->qp_in_used;
2293 up_read(&qm->qps_lock);
2294
2295 return ret;
2296}
2297
2298static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset)
2299{
2300 int i;
2301
2302 for (i = 0; i < qm->qp_num; i++)
2303 qm_set_qp_disable(&qm->qp_array[i], offset);
2304}
2305
2306static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
2307 unsigned long arg,
2308 struct uacce_queue *q)
2309{
2310 struct hisi_qm *qm = uacce->priv;
2311 struct hisi_qp *qp;
2312 u8 alg_type = 0;
2313
2314 qp = hisi_qm_create_qp(qm, alg_type);
2315 if (IS_ERR(qp))
2316 return PTR_ERR(qp);
2317
2318 q->priv = qp;
2319 q->uacce = uacce;
2320 qp->uacce_q = q;
2321 qp->event_cb = qm_qp_event_notifier;
2322 qp->pasid = arg;
2323 qp->is_in_kernel = false;
2324
2325 return 0;
2326}
2327
2328static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
2329{
2330 struct hisi_qp *qp = q->priv;
2331
2332 hisi_qm_release_qp(qp);
2333}
2334
2335/* map sq/cq/doorbell to user space */
2336static int hisi_qm_uacce_mmap(struct uacce_queue *q,
2337 struct vm_area_struct *vma,
2338 struct uacce_qfile_region *qfr)
2339{
2340 struct hisi_qp *qp = q->priv;
2341 struct hisi_qm *qm = qp->qm;
2342 resource_size_t phys_base = qm->db_phys_base +
2343 qp->qp_id * qm->db_interval;
2344 size_t sz = vma->vm_end - vma->vm_start;
2345 struct pci_dev *pdev = qm->pdev;
2346 struct device *dev = &pdev->dev;
2347 unsigned long vm_pgoff;
2348 int ret;
2349
2350 switch (qfr->type) {
2351 case UACCE_QFRT_MMIO:
2352 if (qm->ver == QM_HW_V1) {
2353 if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
2354 return -EINVAL;
2355 } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
2356 if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
2357 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
2358 return -EINVAL;
2359 } else {
2360 if (sz > qm->db_interval)
2361 return -EINVAL;
2362 }
2363
2364 vm_flags_set(vma, VM_IO);
2365
2366 return remap_pfn_range(vma, vma->vm_start,
2367 phys_base >> PAGE_SHIFT,
2368 sz, pgprot_noncached(vma->vm_page_prot));
2369 case UACCE_QFRT_DUS:
2370 if (sz != qp->qdma.size)
2371 return -EINVAL;
2372
2373 /*
2374	 * dma_mmap_coherent() requires vm_pgoff to be 0;
2375	 * restore vm_pgoff to its initial value after mmap()
2376 */
2377 vm_pgoff = vma->vm_pgoff;
2378 vma->vm_pgoff = 0;
2379 ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
2380 qp->qdma.dma, sz);
2381 vma->vm_pgoff = vm_pgoff;
2382 return ret;
2383
2384 default:
2385 return -EINVAL;
2386 }
2387}
2388
2389static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
2390{
2391 struct hisi_qp *qp = q->priv;
2392
2393 return hisi_qm_start_qp(qp, qp->pasid);
2394}
2395
2396static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
2397{
2398 hisi_qm_stop_qp(q->priv);
2399}
2400
2401static int hisi_qm_is_q_updated(struct uacce_queue *q)
2402{
2403 struct hisi_qp *qp = q->priv;
2404 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
2405 int updated = 0;
2406
2407 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
2408 /* make sure to read data from memory */
2409 dma_rmb();
2410 qm_cq_head_update(qp);
2411 cqe = qp->cqe + qp->qp_status.cq_head;
2412 updated = 1;
2413 }
2414
2415 return updated;
2416}
2417
2418static void qm_set_sqctype(struct uacce_queue *q, u16 type)
2419{
2420 struct hisi_qm *qm = q->uacce->priv;
2421 struct hisi_qp *qp = q->priv;
2422
2423 down_write(&qm->qps_lock);
2424 qp->alg_type = type;
2425 up_write(&qm->qps_lock);
2426}
2427
2428static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
2429 unsigned long arg)
2430{
2431 struct hisi_qp *qp = q->priv;
2432 struct hisi_qp_info qp_info;
2433 struct hisi_qp_ctx qp_ctx;
2434
2435 if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
2436 if (copy_from_user(&qp_ctx, (void __user *)arg,
2437 sizeof(struct hisi_qp_ctx)))
2438 return -EFAULT;
2439
2440 if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
2441 return -EINVAL;
2442
2443 qm_set_sqctype(q, qp_ctx.qc_type);
2444 qp_ctx.id = qp->qp_id;
2445
2446 if (copy_to_user((void __user *)arg, &qp_ctx,
2447 sizeof(struct hisi_qp_ctx)))
2448 return -EFAULT;
2449
2450 return 0;
2451 } else if (cmd == UACCE_CMD_QM_SET_QP_INFO) {
2452 if (copy_from_user(&qp_info, (void __user *)arg,
2453 sizeof(struct hisi_qp_info)))
2454 return -EFAULT;
2455
2456 qp_info.sqe_size = qp->qm->sqe_size;
2457 qp_info.sq_depth = qp->sq_depth;
2458 qp_info.cq_depth = qp->cq_depth;
2459
2460 if (copy_to_user((void __user *)arg, &qp_info,
2461 sizeof(struct hisi_qp_info)))
2462 return -EFAULT;
2463
2464 return 0;
2465 }
2466
2467 return -EINVAL;
2468}
2469
2470/**
2471 * qm_hw_err_isolate() - Try to set the isolation status of the uacce device
2472 * according to the user's configured error threshold.
2473 * @qm: The qm whose uacce device may be isolated.
2474 */
2475static int qm_hw_err_isolate(struct hisi_qm *qm)
2476{
2477 struct qm_hw_err *err, *tmp, *hw_err;
2478 struct qm_err_isolate *isolate;
2479 u32 count = 0;
2480
2481 isolate = &qm->isolate_data;
2482
2483#define SECONDS_PER_HOUR 3600
2484
2485 /* All the hw errs are processed by PF driver */
2486 if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold)
2487 return 0;
2488
2489 hw_err = kzalloc(sizeof(*hw_err), GFP_KERNEL);
2490 if (!hw_err)
2491 return -ENOMEM;
2492
2493 /*
2494	 * Timestamp every slot AER error, then check the AER error log when the
2495	 * next device AER error occurs. If the device slot AER error count exceeds
2496	 * the configured error threshold within one hour, the isolate state is set
2497	 * to true, and AER error logs older than one hour are cleared.
2498 */
2499 mutex_lock(&isolate->isolate_lock);
2500 hw_err->timestamp = jiffies;
2501 list_for_each_entry_safe(err, tmp, &isolate->qm_hw_errs, list) {
2502 if ((hw_err->timestamp - err->timestamp) / HZ >
2503 SECONDS_PER_HOUR) {
2504 list_del(&err->list);
2505 kfree(err);
2506 } else {
2507 count++;
2508 }
2509 }
2510 list_add(&hw_err->list, &isolate->qm_hw_errs);
2511 mutex_unlock(&isolate->isolate_lock);
2512
2513 if (count >= isolate->err_threshold)
2514 isolate->is_isolate = true;
2515
2516 return 0;
2517}
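
/*
 * Example of the sliding one-hour window above (illustrative numbers):
 * with err_threshold = 2 and AER errors at t = 0s, 100s and 200s, the
 * check at t = 100s sees one logged error (count = 1 < 2) and only
 * records the new one; at t = 200s both earlier errors are still within
 * the hour, so count = 2 reaches the threshold and is_isolate is set.
 */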
2518
2519static void qm_hw_err_destroy(struct hisi_qm *qm)
2520{
2521 struct qm_hw_err *err, *tmp;
2522
2523 mutex_lock(&qm->isolate_data.isolate_lock);
2524 list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) {
2525 list_del(&err->list);
2526 kfree(err);
2527 }
2528 mutex_unlock(&qm->isolate_data.isolate_lock);
2529}
2530
2531static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce)
2532{
2533 struct hisi_qm *qm = uacce->priv;
2534 struct hisi_qm *pf_qm;
2535
2536 if (uacce->is_vf)
2537 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
2538 else
2539 pf_qm = qm;
2540
2541 return pf_qm->isolate_data.is_isolate ?
2542 UACCE_DEV_ISOLATE : UACCE_DEV_NORMAL;
2543}
2544
2545static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num)
2546{
2547 struct hisi_qm *qm = uacce->priv;
2548
2549 /* Must be set by PF */
2550 if (uacce->is_vf)
2551 return -EPERM;
2552
2553 if (qm->isolate_data.is_isolate)
2554 return -EPERM;
2555
2556 qm->isolate_data.err_threshold = num;
2557
2558	/* After the policy is updated, the hardware error list needs to be reset */
2559 qm_hw_err_destroy(qm);
2560
2561 return 0;
2562}
2563
2564static u32 hisi_qm_isolate_threshold_read(struct uacce_device *uacce)
2565{
2566 struct hisi_qm *qm = uacce->priv;
2567 struct hisi_qm *pf_qm;
2568
2569 if (uacce->is_vf) {
2570 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
2571 return pf_qm->isolate_data.err_threshold;
2572 }
2573
2574 return qm->isolate_data.err_threshold;
2575}
2576
2577static const struct uacce_ops uacce_qm_ops = {
2578 .get_available_instances = hisi_qm_get_available_instances,
2579 .get_queue = hisi_qm_uacce_get_queue,
2580 .put_queue = hisi_qm_uacce_put_queue,
2581 .start_queue = hisi_qm_uacce_start_queue,
2582 .stop_queue = hisi_qm_uacce_stop_queue,
2583 .mmap = hisi_qm_uacce_mmap,
2584 .ioctl = hisi_qm_uacce_ioctl,
2585 .is_q_updated = hisi_qm_is_q_updated,
2586 .get_isolate_state = hisi_qm_get_isolate_state,
2587 .isolate_err_threshold_write = hisi_qm_isolate_threshold_write,
2588 .isolate_err_threshold_read = hisi_qm_isolate_threshold_read,
2589};
2590
2591static void qm_remove_uacce(struct hisi_qm *qm)
2592{
2593 struct uacce_device *uacce = qm->uacce;
2594
2595 if (qm->use_sva) {
2596 qm_hw_err_destroy(qm);
2597 uacce_remove(uacce);
2598 qm->uacce = NULL;
2599 }
2600}
2601
2602static int qm_alloc_uacce(struct hisi_qm *qm)
2603{
2604 struct pci_dev *pdev = qm->pdev;
2605 struct uacce_device *uacce;
2606 unsigned long mmio_page_nr;
2607 unsigned long dus_page_nr;
2608 u16 sq_depth, cq_depth;
2609 struct uacce_interface interface = {
2610 .flags = UACCE_DEV_SVA,
2611 .ops = &uacce_qm_ops,
2612 };
2613 int ret;
2614
2615 ret = strscpy(interface.name, dev_driver_string(&pdev->dev),
2616 sizeof(interface.name));
2617 if (ret < 0)
2618 return -ENAMETOOLONG;
2619
2620 uacce = uacce_alloc(&pdev->dev, &interface);
2621 if (IS_ERR(uacce))
2622 return PTR_ERR(uacce);
2623
2624 if (uacce->flags & UACCE_DEV_SVA) {
2625 qm->use_sva = true;
2626 } else {
2627 /* only consider sva case */
2628 qm_remove_uacce(qm);
2629 return -EINVAL;
2630 }
2631
2632 uacce->is_vf = pdev->is_virtfn;
2633 uacce->priv = qm;
2634
2635 if (qm->ver == QM_HW_V1)
2636 uacce->api_ver = HISI_QM_API_VER_BASE;
2637 else if (qm->ver == QM_HW_V2)
2638 uacce->api_ver = HISI_QM_API_VER2_BASE;
2639 else
2640 uacce->api_ver = HISI_QM_API_VER3_BASE;
2641
2642 if (qm->ver == QM_HW_V1)
2643 mmio_page_nr = QM_DOORBELL_PAGE_NR;
2644 else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
2645 mmio_page_nr = QM_DOORBELL_PAGE_NR +
2646 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
2647 else
2648 mmio_page_nr = qm->db_interval / PAGE_SIZE;
2649
2650 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
2651
2652 /* Add one more page for device or qp status */
2653 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
2654 sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>
2655 PAGE_SHIFT;
2656
2657 uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
2658 uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
2659
2660 qm->uacce = uacce;
2661 INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs);
2662 mutex_init(&qm->isolate_data.isolate_lock);
2663
2664 return 0;
2665}
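
/*
 * Worked example of the dus_page_nr computation above (illustrative
 * numbers, assuming sizeof(struct qm_cqe) == 16 and PAGE_SIZE == 4096):
 * with sqe_size = 128 and sq_depth = cq_depth = 1024, the sqes take
 * 131072 bytes and the cqes 16384 bytes, i.e. 36 pages, and the extra
 * PAGE_SIZE term adds the status page, giving dus_page_nr = 37.
 */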
2666
2667/**
2668 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests.
2669 * If there are users on the QM, return failure without doing anything.
2670 * @qm: The qm to be frozen.
2671 *
2672 * This function freezes the QM so that SRIOV can then be disabled.
2673 */
2674static int qm_frozen(struct hisi_qm *qm)
2675{
2676 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
2677 return 0;
2678
2679 down_write(&qm->qps_lock);
2680
2681 if (!qm->qp_in_used) {
2682 qm->qp_in_used = qm->qp_num;
2683 up_write(&qm->qps_lock);
2684 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
2685 return 0;
2686 }
2687
2688 up_write(&qm->qps_lock);
2689
2690 return -EBUSY;
2691}
2692
2693static int qm_try_frozen_vfs(struct pci_dev *pdev,
2694 struct hisi_qm_list *qm_list)
2695{
2696 struct hisi_qm *qm, *vf_qm;
2697 struct pci_dev *dev;
2698 int ret = 0;
2699
2700 if (!qm_list || !pdev)
2701 return -EINVAL;
2702
2703	/* Try to freeze all the VFs before disabling SRIOV */
2704 mutex_lock(&qm_list->lock);
2705 list_for_each_entry(qm, &qm_list->list, list) {
2706 dev = qm->pdev;
2707 if (dev == pdev)
2708 continue;
2709 if (pci_physfn(dev) == pdev) {
2710 vf_qm = pci_get_drvdata(dev);
2711 ret = qm_frozen(vf_qm);
2712 if (ret)
2713 goto frozen_fail;
2714 }
2715 }
2716
2717frozen_fail:
2718 mutex_unlock(&qm_list->lock);
2719
2720 return ret;
2721}
2722
2723/**
2724 * hisi_qm_wait_task_finish() - Wait until the task is finished
2725 * when removing the driver.
2726 * @qm: The qm needed to wait for the task to finish.
2727 * @qm_list: The list of all available devices.
2728 */
2729void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
2730{
2731 while (qm_frozen(qm) ||
2732 ((qm->fun_type == QM_HW_PF) &&
2733 qm_try_frozen_vfs(qm->pdev, qm_list))) {
2734 msleep(WAIT_PERIOD);
2735 }
2736
2737 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) ||
2738 test_bit(QM_RESETTING, &qm->misc_ctl))
2739 msleep(WAIT_PERIOD);
2740
2741 udelay(REMOVE_WAIT_DELAY);
2742}
2743EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
2744
2745static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
2746{
2747 struct device *dev = &qm->pdev->dev;
2748 struct qm_dma *qdma;
2749 int i;
2750
2751 for (i = num - 1; i >= 0; i--) {
2752 qdma = &qm->qp_array[i].qdma;
2753 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
2754 kfree(qm->poll_data[i].qp_finish_id);
2755 }
2756
2757 kfree(qm->poll_data);
2758 kfree(qm->qp_array);
2759}
2760
2761static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
2762 u16 sq_depth, u16 cq_depth)
2763{
2764 struct device *dev = &qm->pdev->dev;
2765 size_t off = qm->sqe_size * sq_depth;
2766 struct hisi_qp *qp;
2767 int ret = -ENOMEM;
2768
2769 qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16),
2770 GFP_KERNEL);
2771 if (!qm->poll_data[id].qp_finish_id)
2772 return -ENOMEM;
2773
2774 qp = &qm->qp_array[id];
2775 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
2776 GFP_KERNEL);
2777 if (!qp->qdma.va)
2778 goto err_free_qp_finish_id;
2779
2780 qp->sqe = qp->qdma.va;
2781 qp->sqe_dma = qp->qdma.dma;
2782 qp->cqe = qp->qdma.va + off;
2783 qp->cqe_dma = qp->qdma.dma + off;
2784 qp->qdma.size = dma_size;
2785 qp->sq_depth = sq_depth;
2786 qp->cq_depth = cq_depth;
2787 qp->qm = qm;
2788 qp->qp_id = id;
2789
2790 return 0;
2791
2792err_free_qp_finish_id:
2793 kfree(qm->poll_data[id].qp_finish_id);
2794 return ret;
2795}
2796
2797static void hisi_qm_pre_init(struct hisi_qm *qm)
2798{
2799 struct pci_dev *pdev = qm->pdev;
2800
2801 if (qm->ver == QM_HW_V1)
2802 qm->ops = &qm_hw_ops_v1;
2803 else if (qm->ver == QM_HW_V2)
2804 qm->ops = &qm_hw_ops_v2;
2805 else
2806 qm->ops = &qm_hw_ops_v3;
2807
2808 pci_set_drvdata(pdev, qm);
2809 mutex_init(&qm->mailbox_lock);
2810 init_rwsem(&qm->qps_lock);
2811 qm->qp_in_used = 0;
2812 qm->misc_ctl = false;
2813 if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
2814 if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
2815			dev_info(&pdev->dev, "_PS0 and _PR0 are not defined\n");
2816 }
2817}
2818
2819static void qm_cmd_uninit(struct hisi_qm *qm)
2820{
2821 u32 val;
2822
2823 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
2824 return;
2825
2826 val = readl(qm->io_base + QM_IFC_INT_MASK);
2827 val |= QM_IFC_INT_DISABLE;
2828 writel(val, qm->io_base + QM_IFC_INT_MASK);
2829}
2830
2831static void qm_cmd_init(struct hisi_qm *qm)
2832{
2833 u32 val;
2834
2835 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
2836 return;
2837
2838 /* Clear communication interrupt source */
2839 qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR);
2840
2841 /* Enable pf to vf communication reg. */
2842 val = readl(qm->io_base + QM_IFC_INT_MASK);
2843 val &= ~QM_IFC_INT_DISABLE;
2844 writel(val, qm->io_base + QM_IFC_INT_MASK);
2845}
2846
2847static void qm_put_pci_res(struct hisi_qm *qm)
2848{
2849 struct pci_dev *pdev = qm->pdev;
2850
2851 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
2852 iounmap(qm->db_io_base);
2853
2854 iounmap(qm->io_base);
2855 pci_release_mem_regions(pdev);
2856}
2857
2858static void hisi_qm_pci_uninit(struct hisi_qm *qm)
2859{
2860 struct pci_dev *pdev = qm->pdev;
2861
2862 pci_free_irq_vectors(pdev);
2863 qm_put_pci_res(qm);
2864 pci_disable_device(pdev);
2865}
2866
2867static void hisi_qm_set_state(struct hisi_qm *qm, u8 state)
2868{
2869 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF)
2870 writel(state, qm->io_base + QM_VF_STATE);
2871}
2872
2873static void hisi_qm_uninit_work(struct hisi_qm *qm)
2874{
2875 destroy_workqueue(qm->wq);
2876}
2877
2878static void hisi_qm_memory_uninit(struct hisi_qm *qm)
2879{
2880 struct device *dev = &qm->pdev->dev;
2881
2882 hisi_qp_memory_uninit(qm, qm->qp_num);
2883 if (qm->qdma.va) {
2884 hisi_qm_cache_wb(qm);
2885 dma_free_coherent(dev, qm->qdma.size,
2886 qm->qdma.va, qm->qdma.dma);
2887 }
2888
2889 idr_destroy(&qm->qp_idr);
2890
2891 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
2892 kfree(qm->factor);
2893}
2894
2895/**
2896 * hisi_qm_uninit() - Uninitialize qm.
2897 * @qm: The qm to be uninitialized.
2898 *
2899 * This function uninits qm related device resources.
2900 */
2901void hisi_qm_uninit(struct hisi_qm *qm)
2902{
2903 qm_cmd_uninit(qm);
2904	hisi_qm_uninit_work(qm);
2905 down_write(&qm->qps_lock);
2906
2907 if (!qm_avail_state(qm, QM_CLOSE)) {
2908 up_write(&qm->qps_lock);
2909 return;
2910 }
2911
2912 hisi_qm_memory_uninit(qm);
2913 hisi_qm_set_state(qm, QM_NOT_READY);
2914 up_write(&qm->qps_lock);
2915
2916 qm_irqs_unregister(qm);
2917 hisi_qm_pci_uninit(qm);
2918 if (qm->use_sva) {
2919 uacce_remove(qm->uacce);
2920 qm->uacce = NULL;
2921 }
2922}
2923EXPORT_SYMBOL_GPL(hisi_qm_uninit);
2924
2925/**
2926 * hisi_qm_get_vft() - Get vft from a qm.
2927 * @qm: The qm we want to get its vft.
2928 * @base: The base number of queue in vft.
2929 * @number: The number of queues in vft.
2930 *
2931 * Multiple queues can be allocated to a qm by configuring the virtual
2932 * function table. This function reads that configuration. Normally it is
2933 * called in the VF driver to get the queue information.
2934 *
2935 * qm hw v1 does not support this interface.
2936 */
2937static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
2938{
2939 if (!base || !number)
2940 return -EINVAL;
2941
2942 if (!qm->ops->get_vft) {
2943 dev_err(&qm->pdev->dev, "Don't support vft read!\n");
2944 return -EINVAL;
2945 }
2946
2947 return qm->ops->get_vft(qm, base, number);
2948}
2949
2950/**
2951 * hisi_qm_set_vft() - Set vft to a qm.
2952 * @qm: The qm we want to set its vft.
2953 * @fun_num: The function number.
2954 * @base: The base number of queue in vft.
2955 * @number: The number of queues in vft.
2956 *
2957 * This function is always called in the PF driver; it is used to assign queues
2958 * among PF and VFs.
2959 *
2960 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
2961 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
2962 * (VF function number 0x2)
2963 */
2964static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
2965 u32 number)
2966{
2967 u32 max_q_num = qm->ctrl_qp_num;
2968
2969 if (base >= max_q_num || number > max_q_num ||
2970 (base + number) > max_q_num)
2971 return -EINVAL;
2972
2973 return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
2974}
2975
2976static void qm_init_eq_aeq_status(struct hisi_qm *qm)
2977{
2978 struct hisi_qm_status *status = &qm->status;
2979
2980 status->eq_head = 0;
2981 status->aeq_head = 0;
2982 status->eqc_phase = true;
2983 status->aeqc_phase = true;
2984}
2985
2986static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm)
2987{
2988 /* Clear eq/aeq interrupt source */
2989 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
2990 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
2991
2992 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
2993 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
2994}
2995
2996static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm)
2997{
2998 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
2999 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
3000}
3001
3002static int qm_eq_ctx_cfg(struct hisi_qm *qm)
3003{
3004 struct device *dev = &qm->pdev->dev;
3005 struct qm_eqc *eqc;
3006 dma_addr_t eqc_dma;
3007 int ret;
3008
3009 eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
3010 if (!eqc)
3011 return -ENOMEM;
3012
3013 eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
3014 eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
3015 if (qm->ver == QM_HW_V1)
3016 eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
3017 eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
3018
3019 eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
3020 DMA_TO_DEVICE);
3021 if (dma_mapping_error(dev, eqc_dma)) {
3022 kfree(eqc);
3023 return -ENOMEM;
3024 }
3025
3026 ret = hisi_qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
3027 dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
3028 kfree(eqc);
3029
3030 return ret;
3031}
3032
3033static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
3034{
3035 struct device *dev = &qm->pdev->dev;
3036 struct qm_aeqc *aeqc;
3037 dma_addr_t aeqc_dma;
3038 int ret;
3039
3040 aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
3041 if (!aeqc)
3042 return -ENOMEM;
3043
3044 aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
3045 aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
3046 aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
3047
3048 aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
3049 DMA_TO_DEVICE);
3050 if (dma_mapping_error(dev, aeqc_dma)) {
3051 kfree(aeqc);
3052 return -ENOMEM;
3053 }
3054
3055 ret = hisi_qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
3056 dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
3057 kfree(aeqc);
3058
3059 return ret;
3060}
3061
3062static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
3063{
3064 struct device *dev = &qm->pdev->dev;
3065 int ret;
3066
3067 qm_init_eq_aeq_status(qm);
3068
3069 ret = qm_eq_ctx_cfg(qm);
3070 if (ret) {
3071 dev_err(dev, "Set eqc failed!\n");
3072 return ret;
3073 }
3074
3075 return qm_aeq_ctx_cfg(qm);
3076}
3077
3078static int __hisi_qm_start(struct hisi_qm *qm)
3079{
3080 int ret;
3081
3082 WARN_ON(!qm->qdma.va);
3083
3084 if (qm->fun_type == QM_HW_PF) {
3085 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
3086 if (ret)
3087 return ret;
3088 }
3089
3090 ret = qm_eq_aeq_ctx_cfg(qm);
3091 if (ret)
3092 return ret;
3093
3094 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
3095 if (ret)
3096 return ret;
3097
3098 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
3099 if (ret)
3100 return ret;
3101
3102 qm_init_prefetch(qm);
3103 qm_enable_eq_aeq_interrupts(qm);
3104
3105 return 0;
3106}
3107
3108/**
3109 * hisi_qm_start() - start qm
3110 * @qm: The qm to be started.
3111 *
3112 * This function starts a qm, then we can allocate qp from this qm.
3113 */
3114int hisi_qm_start(struct hisi_qm *qm)
3115{
3116 struct device *dev = &qm->pdev->dev;
3117 int ret = 0;
3118
3119 down_write(&qm->qps_lock);
3120
3121 if (!qm_avail_state(qm, QM_START)) {
3122 up_write(&qm->qps_lock);
3123 return -EPERM;
3124 }
3125
3126 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);
3127
3128 if (!qm->qp_num) {
3129 dev_err(dev, "qp_num should not be 0\n");
3130 ret = -EINVAL;
3131 goto err_unlock;
3132 }
3133
3134 ret = __hisi_qm_start(qm);
3135 if (!ret)
3136 atomic_set(&qm->status.flags, QM_START);
3137
3138 hisi_qm_set_state(qm, QM_READY);
3139err_unlock:
3140 up_write(&qm->qps_lock);
3141 return ret;
3142}
3143EXPORT_SYMBOL_GPL(hisi_qm_start);
3144
3145static int qm_restart(struct hisi_qm *qm)
3146{
3147 struct device *dev = &qm->pdev->dev;
3148 struct hisi_qp *qp;
3149 int ret, i;
3150
3151 ret = hisi_qm_start(qm);
3152 if (ret < 0)
3153 return ret;
3154
3155 down_write(&qm->qps_lock);
3156 for (i = 0; i < qm->qp_num; i++) {
3157 qp = &qm->qp_array[i];
3158		if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
3159		    qp->is_resetting) {
3160 ret = qm_start_qp_nolock(qp, 0);
3161 if (ret < 0) {
3162 dev_err(dev, "Failed to start qp%d!\n", i);
3163
3164 up_write(&qm->qps_lock);
3165 return ret;
3166 }
3167 qp->is_resetting = false;
3168 }
3169 }
3170 up_write(&qm->qps_lock);
3171
3172 return 0;
3173}
3174
3175/* Stop started qps in reset flow */
3176static int qm_stop_started_qp(struct hisi_qm *qm)
3177{
3178 struct device *dev = &qm->pdev->dev;
3179 struct hisi_qp *qp;
3180 int i, ret;
3181
3182 for (i = 0; i < qm->qp_num; i++) {
3183 qp = &qm->qp_array[i];
3184 if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
3185 qp->is_resetting = true;
3186 ret = qm_stop_qp_nolock(qp);
3187 if (ret < 0) {
3188 dev_err(dev, "Failed to stop qp%d!\n", i);
3189 return ret;
3190 }
3191 }
3192 }
3193
3194 return 0;
3195}
3196
3197/**
3198 * qm_clear_queues() - Clear all queues memory in a qm.
3199 * @qm: The qm in which the queues will be cleared.
3200 *
3201 * This function clears the memory of all queues in a qm. An accelerator
3202 * reset can use this to clear the queues.
3203 */
3204static void qm_clear_queues(struct hisi_qm *qm)
3205{
3206 struct hisi_qp *qp;
3207 int i;
3208
3209 for (i = 0; i < qm->qp_num; i++) {
3210 qp = &qm->qp_array[i];
3211 if (qp->is_in_kernel && qp->is_resetting)
3212 memset(qp->qdma.va, 0, qp->qdma.size);
3213 }
3214
3215 memset(qm->qdma.va, 0, qm->qdma.size);
3216}
3217
3218/**
3219 * hisi_qm_stop() - Stop a qm.
3220 * @qm: The qm which will be stopped.
3221 * @r: The reason to stop qm.
3222 *
3223 * This function stops the qm and its qps; afterwards the qm cannot accept
3224 * requests. Related resources are not released in this state; hisi_qm_start
3225 * can be used to start the qm again.
3226 */
3227int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
3228{
3229 struct device *dev = &qm->pdev->dev;
3230 int ret = 0;
3231
3232 down_write(&qm->qps_lock);
3233
3234 qm->status.stop_reason = r;
3235 if (!qm_avail_state(qm, QM_STOP)) {
3236 ret = -EPERM;
3237 goto err_unlock;
3238 }
3239
3240 if (qm->status.stop_reason == QM_SOFT_RESET ||
3241 qm->status.stop_reason == QM_FLR) {
3242 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
3243 ret = qm_stop_started_qp(qm);
3244 if (ret < 0) {
3245 dev_err(dev, "Failed to stop started qp!\n");
3246 goto err_unlock;
3247 }
3248 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
3249 }
3250
3251 qm_disable_eq_aeq_interrupts(qm);
3252 if (qm->fun_type == QM_HW_PF) {
3253 ret = hisi_qm_set_vft(qm, 0, 0, 0);
3254 if (ret < 0) {
3255 dev_err(dev, "Failed to set vft!\n");
3256 ret = -EBUSY;
3257 goto err_unlock;
3258 }
3259 }
3260
3261 qm_clear_queues(qm);
3262 atomic_set(&qm->status.flags, QM_STOP);
3263
3264err_unlock:
3265 up_write(&qm->qps_lock);
3266 return ret;
3267}
3268EXPORT_SYMBOL_GPL(hisi_qm_stop);
3269
3270static void qm_hw_error_init(struct hisi_qm *qm)
3271{
3272 if (!qm->ops->hw_error_init) {
3273 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
3274 return;
3275 }
3276
3277 qm->ops->hw_error_init(qm);
3278}
3279
3280static void qm_hw_error_uninit(struct hisi_qm *qm)
3281{
3282 if (!qm->ops->hw_error_uninit) {
3283 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
3284 return;
3285 }
3286
3287 qm->ops->hw_error_uninit(qm);
3288}
3289
3290static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
3291{
3292 if (!qm->ops->hw_error_handle) {
3293 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
3294 return ACC_ERR_NONE;
3295 }
3296
3297 return qm->ops->hw_error_handle(qm);
3298}
3299
3300/**
3301 * hisi_qm_dev_err_init() - Initialize device error configuration.
3302 * @qm: The qm for which we want to do error initialization.
3303 *
3304 * Initialize QM and device error related configuration.
3305 */
3306void hisi_qm_dev_err_init(struct hisi_qm *qm)
3307{
3308 if (qm->fun_type == QM_HW_VF)
3309 return;
3310
3311 qm_hw_error_init(qm);
3312
3313 if (!qm->err_ini->hw_err_enable) {
3314 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
3315 return;
3316 }
3317 qm->err_ini->hw_err_enable(qm);
3318}
3319EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
3320
3321/**
3322 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
3323 * @qm: The qm for which we want to do error uninitialization.
3324 *
3325 * Uninitialize QM and device error related configuration.
3326 */
3327void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
3328{
3329 if (qm->fun_type == QM_HW_VF)
3330 return;
3331
3332 qm_hw_error_uninit(qm);
3333
3334 if (!qm->err_ini->hw_err_disable) {
3335 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
3336 return;
3337 }
3338 qm->err_ini->hw_err_disable(qm);
3339}
3340EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
3341
3342/**
3343 * hisi_qm_free_qps() - free multiple queue pairs.
3344 * @qps: The queue pairs need to be freed.
3345 * @qp_num: The number of queue pairs.
3346 */
3347void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
3348{
3349 int i;
3350
3351 if (!qps || qp_num <= 0)
3352 return;
3353
3354 for (i = qp_num - 1; i >= 0; i--)
3355 hisi_qm_release_qp(qps[i]);
3356}
3357EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
3358
3359static void free_list(struct list_head *head)
3360{
3361 struct hisi_qm_resource *res, *tmp;
3362
3363 list_for_each_entry_safe(res, tmp, head, list) {
3364 list_del(&res->list);
3365 kfree(res);
3366 }
3367}
3368
3369static int hisi_qm_sort_devices(int node, struct list_head *head,
3370 struct hisi_qm_list *qm_list)
3371{
3372 struct hisi_qm_resource *res, *tmp;
3373 struct hisi_qm *qm;
3374 struct list_head *n;
3375 struct device *dev;
3376 int dev_node;
3377
3378 list_for_each_entry(qm, &qm_list->list, list) {
3379 dev = &qm->pdev->dev;
3380
3381 dev_node = dev_to_node(dev);
3382 if (dev_node < 0)
3383 dev_node = 0;
3384
3385 res = kzalloc(sizeof(*res), GFP_KERNEL);
3386 if (!res)
3387 return -ENOMEM;
3388
3389 res->qm = qm;
3390 res->distance = node_distance(dev_node, node);
3391 n = head;
3392 list_for_each_entry(tmp, head, list) {
3393 if (res->distance < tmp->distance) {
3394 n = &tmp->list;
3395 break;
3396 }
3397 }
3398 list_add_tail(&res->list, n);
3399 }
3400
3401 return 0;
3402}
3403
3404/**
3405 * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
3406 * @qm_list: The list of all available devices.
3407 * @qp_num: The number of queue pairs to be created.
3408 * @alg_type: The algorithm type.
3409 * @node: The numa node.
3410 * @qps: Output array for the created queue pairs.
3411 *
3412 * This function sorts all available devices according to NUMA distance,
3413 * then tries to create all queue pairs on a single device. If no device
3414 * meets the requirements, an error is returned.
3415 */
3416int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
3417 u8 alg_type, int node, struct hisi_qp **qps)
3418{
3419 struct hisi_qm_resource *tmp;
3420 int ret = -ENODEV;
3421 LIST_HEAD(head);
3422 int i;
3423
3424 if (!qps || !qm_list || qp_num <= 0)
3425 return -EINVAL;
3426
3427 mutex_lock(&qm_list->lock);
3428 if (hisi_qm_sort_devices(node, &head, qm_list)) {
3429 mutex_unlock(&qm_list->lock);
3430 goto err;
3431 }
3432
3433 list_for_each_entry(tmp, &head, list) {
3434 for (i = 0; i < qp_num; i++) {
3435 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
3436 if (IS_ERR(qps[i])) {
3437 hisi_qm_free_qps(qps, i);
3438 break;
3439 }
3440 }
3441
3442 if (i == qp_num) {
3443 ret = 0;
3444 break;
3445 }
3446 }
3447
3448 mutex_unlock(&qm_list->lock);
3449 if (ret)
3450 pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n",
3451 node, alg_type, qp_num);
3452
3453err:
3454 free_list(&head);
3455 return ret;
3456}
3457EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
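
/*
 * A usage sketch (illustrative only): request two qps of alg type 0 close
 * to the caller's NUMA node and release them again.
 *
 *	struct hisi_qp *qps[2];
 *	int ret;
 *
 *	ret = hisi_qm_alloc_qps_node(qm_list, 2, 0, numa_node_id(), qps);
 *	if (ret)
 *		return ret;
 *	...
 *	hisi_qm_free_qps(qps, 2);
 */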
3458
3459static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
3460{
3461 u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j;
3462 u32 max_qp_num = qm->max_qp_num;
3463 u32 q_base = qm->qp_num;
3464 int ret;
3465
3466 if (!num_vfs)
3467 return -EINVAL;
3468
3469 vfs_q_num = qm->ctrl_qp_num - qm->qp_num;
3470
3471 /* If vfs_q_num is less than num_vfs, return error. */
3472 if (vfs_q_num < num_vfs)
3473 return -EINVAL;
3474
3475 q_num = vfs_q_num / num_vfs;
3476 remain_q_num = vfs_q_num % num_vfs;
3477
3478 for (i = num_vfs; i > 0; i--) {
3479 /*
3480		 * If q_num + remain_q_num exceeds max_qp_num for the last vf,
3481		 * hand the remaining queues out one per vf instead.
3482 */
3483 if (i == num_vfs && q_num + remain_q_num <= max_qp_num) {
3484 act_q_num = q_num + remain_q_num;
3485 remain_q_num = 0;
3486 } else if (remain_q_num > 0) {
3487 act_q_num = q_num + 1;
3488 remain_q_num--;
3489 } else {
3490 act_q_num = q_num;
3491 }
3492
3493 act_q_num = min(act_q_num, max_qp_num);
3494 ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
3495 if (ret) {
3496 for (j = num_vfs; j > i; j--)
3497 hisi_qm_set_vft(qm, j, 0, 0);
3498 return ret;
3499 }
3500 q_base += act_q_num;
3501 }
3502
3503 return 0;
3504}
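
/*
 * Worked example of the distribution above (illustrative numbers): with
 * ctrl_qp_num = 1024, qp_num = 256 for the PF and num_vfs = 3, vfs_q_num
 * is 768 and splits evenly into q_num = 256 with no remainder, so VF3
 * gets queues 256..511, VF2 512..767 and VF1 768..1023. A non-zero
 * remainder is first offered whole to the highest-numbered VF and is
 * otherwise handed out one queue at a time, capped at max_qp_num.
 */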
3505
3506static int qm_clear_vft_config(struct hisi_qm *qm)
3507{
3508 int ret;
3509 u32 i;
3510
3511 for (i = 1; i <= qm->vfs_num; i++) {
3512 ret = hisi_qm_set_vft(qm, i, 0, 0);
3513 if (ret)
3514 return ret;
3515 }
3516 qm->vfs_num = 0;
3517
3518 return 0;
3519}
3520
3521static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
3522{
3523 struct device *dev = &qm->pdev->dev;
3524 u32 ir = qos * QM_QOS_RATE;
3525 int ret, total_vfs, i;
3526
3527 total_vfs = pci_sriov_get_totalvfs(qm->pdev);
3528 if (fun_index > total_vfs)
3529 return -EINVAL;
3530
3531 qm->factor[fun_index].func_qos = qos;
3532
3533 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
3534 if (ret) {
3535 dev_err(dev, "failed to calculate shaper parameter!\n");
3536 return -EINVAL;
3537 }
3538
3539 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
3540		/* Different alg types reuse the same queue base number */
3541 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
3542 if (ret) {
3543 dev_err(dev, "type: %d, failed to set shaper vft!\n", i);
3544 return -EINVAL;
3545 }
3546 }
3547
3548 return 0;
3549}
3550
3551static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
3552{
3553 u64 cir_u = 0, cir_b = 0, cir_s = 0;
3554 u64 shaper_vft, ir_calc, ir;
3555 unsigned int val;
3556 u32 error_rate;
3557 int ret;
3558
3559 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
3560 val & BIT(0), POLL_PERIOD,
3561 POLL_TIMEOUT);
3562 if (ret)
3563 return 0;
3564
3565 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
3566 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE);
3567 writel(fun_index, qm->io_base + QM_VFT_CFG);
3568
3569 writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
3570 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
3571
3572 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
3573 val & BIT(0), POLL_PERIOD,
3574 POLL_TIMEOUT);
3575 if (ret)
3576 return 0;
3577
3578 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
3579 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32);
3580
3581 cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK;
3582 cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK;
3583 cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT;
3584
3585 cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK;
3586 cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT;
3587
3588 ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);
3589
3590 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE;
3591
3592 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
3593 if (error_rate > QM_QOS_MIN_ERROR_RATE) {
3594		pci_err(qm->pdev, "error_rate: %u, failed to get function qos!\n", error_rate);
3595 return 0;
3596 }
3597
3598 return ir;
3599}
3600
3601static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
3602{
3603 struct device *dev = &qm->pdev->dev;
3604 u64 mb_cmd;
3605 u32 qos;
3606 int ret;
3607
3608 qos = qm_get_shaper_vft_qos(qm, fun_num);
3609 if (!qos) {
3610 dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num);
3611 return;
3612 }
3613
3614 mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT;
3615 ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
3616 if (ret)
3617 dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num);
3618}
3619
3620static int qm_vf_read_qos(struct hisi_qm *qm)
3621{
3622 int cnt = 0;
3623 int ret = -EINVAL;
3624
3625 /* reset mailbox qos val */
3626 qm->mb_qos = 0;
3627
3628 /* vf ping pf to get function qos */
3629 ret = qm_ping_pf(qm, QM_VF_GET_QOS);
3630 if (ret) {
3631 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
3632 return ret;
3633 }
3634
3635 while (true) {
3636 msleep(QM_WAIT_DST_ACK);
3637 if (qm->mb_qos)
3638 break;
3639
3640 if (++cnt > QM_MAX_VF_WAIT_COUNT) {
3641 pci_err(qm->pdev, "PF ping VF timeout!\n");
3642 return -ETIMEDOUT;
3643 }
3644 }
3645
3646 return ret;
3647}
3648
3649static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
3650 size_t count, loff_t *pos)
3651{
3652 struct hisi_qm *qm = filp->private_data;
3653 char tbuf[QM_DBG_READ_LEN];
3654 u32 qos_val, ir;
3655 int ret;
3656
3657 ret = hisi_qm_get_dfx_access(qm);
3658 if (ret)
3659 return ret;
3660
3661 /* Mailbox and reset cannot be operated at the same time */
3662 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
3663 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n");
3664 ret = -EAGAIN;
3665 goto err_put_dfx_access;
3666 }
3667
3668 if (qm->fun_type == QM_HW_PF) {
3669 ir = qm_get_shaper_vft_qos(qm, 0);
3670 } else {
3671 ret = qm_vf_read_qos(qm);
3672 if (ret)
3673 goto err_get_status;
3674 ir = qm->mb_qos;
3675 }
3676
3677 qos_val = ir / QM_QOS_RATE;
3678 ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val);
3679
3680 ret = simple_read_from_buffer(buf, count, pos, tbuf, ret);
3681
3682err_get_status:
3683 clear_bit(QM_RESETTING, &qm->misc_ctl);
3684err_put_dfx_access:
3685 hisi_qm_put_dfx_access(qm);
3686 return ret;
3687}
3688
3689static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
3690 unsigned long *val,
3691 unsigned int *fun_index)
3692{
3693 const struct bus_type *bus_type = qm->pdev->dev.bus;
3694 char tbuf_bdf[QM_DBG_READ_LEN] = {0};
3695 char val_buf[QM_DBG_READ_LEN] = {0};
3696 struct pci_dev *pdev;
3697 struct device *dev;
3698 int ret;
3699
3700 ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf);
3701 if (ret != QM_QOS_PARAM_NUM)
3702 return -EINVAL;
3703
3704 ret = kstrtoul(val_buf, 10, val);
3705 if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) {
3706		pci_err(qm->pdev, "invalid qos value, please set a value from 1 to 1000!\n");
3707 return -EINVAL;
3708 }
3709
3710 dev = bus_find_device_by_name(bus_type, NULL, tbuf_bdf);
3711 if (!dev) {
3712		pci_err(qm->pdev, "invalid pci bdf number!\n");
3713 return -ENODEV;
3714 }
3715
3716 pdev = container_of(dev, struct pci_dev, dev);
3717
3718 *fun_index = pdev->devfn;
3719
3720 return 0;
3721}
3722
3723static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
3724 size_t count, loff_t *pos)
3725{
3726 struct hisi_qm *qm = filp->private_data;
3727 char tbuf[QM_DBG_READ_LEN];
3728 unsigned int fun_index;
3729 unsigned long val;
3730 int len, ret;
3731
3732 if (*pos != 0)
3733 return 0;
3734
3735 if (count >= QM_DBG_READ_LEN)
3736 return -ENOSPC;
3737
3738 len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count);
3739 if (len < 0)
3740 return len;
3741
3742 tbuf[len] = '\0';
3743 ret = qm_get_qos_value(qm, tbuf, &val, &fun_index);
3744 if (ret)
3745 return ret;
3746
3747 /* Mailbox and reset cannot be operated at the same time */
3748 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
3749 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n");
3750 return -EAGAIN;
3751 }
3752
3753 ret = qm_pm_get_sync(qm);
3754 if (ret) {
3755 ret = -EINVAL;
3756 goto err_get_status;
3757 }
3758
3759 ret = qm_func_shaper_enable(qm, fun_index, val);
3760 if (ret) {
3761 pci_err(qm->pdev, "failed to enable function shaper!\n");
3762 ret = -EINVAL;
3763 goto err_put_sync;
3764 }
3765
3766 pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n",
3767 fun_index, val);
3768 ret = count;
3769
3770err_put_sync:
3771 qm_pm_put_sync(qm);
3772err_get_status:
3773 clear_bit(QM_RESETTING, &qm->misc_ctl);
3774 return ret;
3775}
3776
3777static const struct file_operations qm_algqos_fops = {
3778 .owner = THIS_MODULE,
3779 .open = simple_open,
3780 .read = qm_algqos_read,
3781 .write = qm_algqos_write,
3782};
3783
3784/**
3785 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
3786 * @qm: The qm for which we want to add debugfs files.
3787 *
3788 * Create function qos debugfs files, VF ping PF to get function qos.
3789 */
3790void hisi_qm_set_algqos_init(struct hisi_qm *qm)
3791{
3792 if (qm->fun_type == QM_HW_PF)
3793 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
3794 qm, &qm_algqos_fops);
3795 else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
3796 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
3797 qm, &qm_algqos_fops);
3798}
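
/*
 * The alg_qos file accepts "<pci bdf> <qos>" on write, e.g. (the debugfs
 * path is illustrative and depends on the device's debug root):
 *
 *	echo "0000:81:00.0 500" > /sys/kernel/debug/<dev>/alg_qos
 *
 * qm_get_qos_value() resolves the bdf via bus_find_device_by_name() and
 * accepts qos values from 1 to QM_QOS_MAX_VAL.
 */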
3799
3800static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func)
3801{
3802 int i;
3803
3804 for (i = 1; i <= total_func; i++)
3805 qm->factor[i].func_qos = QM_QOS_MAX_VAL;
3806}
3807
3808/**
3809 * hisi_qm_sriov_enable() - enable virtual functions
3810 * @pdev: the PCIe device
3811 * @max_vfs: the number of virtual functions to enable
3812 *
3813 * Returns the number of enabled VFs. If VFs are already enabled, or
3814 * max_vfs exceeds the total number of VFs the device supports, returns
3815 * failure.
3816 */
3817int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
3818{
3819 struct hisi_qm *qm = pci_get_drvdata(pdev);
3820 int pre_existing_vfs, num_vfs, total_vfs, ret;
3821
3822 ret = qm_pm_get_sync(qm);
3823 if (ret)
3824 return ret;
3825
3826 total_vfs = pci_sriov_get_totalvfs(pdev);
3827 pre_existing_vfs = pci_num_vf(pdev);
3828 if (pre_existing_vfs) {
3829 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
3830 pre_existing_vfs);
3831 goto err_put_sync;
3832 }
3833
3834 if (max_vfs > total_vfs) {
3835 pci_err(pdev, "%d VFs is more than total VFs %d!\n", max_vfs, total_vfs);
3836 ret = -ERANGE;
3837 goto err_put_sync;
3838 }
3839
3840 num_vfs = max_vfs;
3841
3842 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
3843 hisi_qm_init_vf_qos(qm, num_vfs);
3844
3845 ret = qm_vf_q_assign(qm, num_vfs);
3846 if (ret) {
3847 pci_err(pdev, "Can't assign queues for VF!\n");
3848 goto err_put_sync;
3849 }
3850
3851 qm->vfs_num = num_vfs;
3852
3853 ret = pci_enable_sriov(pdev, num_vfs);
3854 if (ret) {
3855 pci_err(pdev, "Can't enable VF!\n");
3856 qm_clear_vft_config(qm);
3857 goto err_put_sync;
3858 }
3859
3860 pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
3861
3862 return num_vfs;
3863
3864err_put_sync:
3865 qm_pm_put_sync(qm);
3866 return ret;
3867}
3868EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
3869
3870/**
3871 * hisi_qm_sriov_disable - disable virtual functions
3872 * @pdev: the PCI device.
3873 * @is_frozen: true when all the VFs are frozen.
3874 *
3875 * Return failure if VFs are already assigned or a VF is in use.
3876 */
3877int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
3878{
3879 struct hisi_qm *qm = pci_get_drvdata(pdev);
3880 int ret;
3881
3882 if (pci_vfs_assigned(pdev)) {
3883 pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
3884 return -EPERM;
3885 }
3886
3887	/* While a VF is in use, SRIOV cannot be disabled. */
3888 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
3889 pci_err(pdev, "Task is using its VF!\n");
3890 return -EBUSY;
3891 }
3892
3893 pci_disable_sriov(pdev);
3894
3895 ret = qm_clear_vft_config(qm);
3896 if (ret)
3897 return ret;
3898
3899 qm_pm_put_sync(qm);
3900
3901 return 0;
3902}
3903EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
3904
3905/**
3906 * hisi_qm_sriov_configure - configure the number of VFs
3907 * @pdev: The PCI device
3908 * @num_vfs: The number of VFs to enable
3909 *
3910 * Enable SR-IOV according to num_vfs, 0 means disable.
3911 */
3912int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
3913{
3914 if (num_vfs == 0)
3915 return hisi_qm_sriov_disable(pdev, false);
3916 else
3917 return hisi_qm_sriov_enable(pdev, num_vfs);
3918}
3919EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
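
/*
 * Accelerator drivers typically wire this up as the PCI core's SR-IOV hook
 * (a sketch; the driver struct name is illustrative):
 *
 *	static struct pci_driver hisi_acc_pci_driver = {
 *		...
 *		.sriov_configure = hisi_qm_sriov_configure,
 *	};
 */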
3920
3921static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
3922{
3923 u32 err_sts;
3924
3925 if (!qm->err_ini->get_dev_hw_err_status) {
3926		dev_err(&qm->pdev->dev, "Device doesn't support getting hw error status!\n");
3927 return ACC_ERR_NONE;
3928 }
3929
3930 /* get device hardware error status */
3931 err_sts = qm->err_ini->get_dev_hw_err_status(qm);
3932 if (err_sts) {
3933 if (err_sts & qm->err_info.ecc_2bits_mask)
3934 qm->err_status.is_dev_ecc_mbit = true;
3935
3936 if (qm->err_ini->log_dev_hw_err)
3937 qm->err_ini->log_dev_hw_err(qm, err_sts);
3938
3939 if (err_sts & qm->err_info.dev_reset_mask)
3940 return ACC_ERR_NEED_RESET;
3941
3942 if (qm->err_ini->clear_dev_hw_err_status)
3943 qm->err_ini->clear_dev_hw_err_status(qm, err_sts);
3944 }
3945
3946 return ACC_ERR_RECOVERED;
3947}
3948
3949static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
3950{
3951 enum acc_err_result qm_ret, dev_ret;
3952
3953 /* log qm error */
3954 qm_ret = qm_hw_error_handle(qm);
3955
3956 /* log device error */
3957 dev_ret = qm_dev_err_handle(qm);
3958
3959 return (qm_ret == ACC_ERR_NEED_RESET ||
3960 dev_ret == ACC_ERR_NEED_RESET) ?
3961 ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
3962}
3963
3964/**
3965 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
3966 * @pdev: The PCI device which needs to report the error.
3967 * @state: The connectivity between CPU and device.
3968 *
3969 * We register this function with the PCIe AER handlers. It reports the
3970 * device or qm hardware error status when an error occurs.
3971 */
3972pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
3973 pci_channel_state_t state)
3974{
3975 struct hisi_qm *qm = pci_get_drvdata(pdev);
3976 enum acc_err_result ret;
3977
3978 if (pdev->is_virtfn)
3979 return PCI_ERS_RESULT_NONE;
3980
3981	pci_info(pdev, "PCI error detected, state(=%u)!\n", state);
3982 if (state == pci_channel_io_perm_failure)
3983 return PCI_ERS_RESULT_DISCONNECT;
3984
3985 ret = qm_process_dev_error(qm);
3986 if (ret == ACC_ERR_NEED_RESET)
3987 return PCI_ERS_RESULT_NEED_RESET;
3988
3989 return PCI_ERS_RESULT_RECOVERED;
3990}
3991EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
3992
3993static int qm_check_req_recv(struct hisi_qm *qm)
3994{
3995 struct pci_dev *pdev = qm->pdev;
3996 int ret;
3997 u32 val;
3998
3999 if (qm->ver >= QM_HW_V3)
4000 return 0;
4001
4002 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
4003 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
4004 (val == ACC_VENDOR_ID_VALUE),
4005 POLL_PERIOD, POLL_TIMEOUT);
4006 if (ret) {
4007		dev_err(&pdev->dev, "Failed to read QM reg!\n");
4008 return ret;
4009 }
4010
4011 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
4012 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
4013 (val == PCI_VENDOR_ID_HUAWEI),
4014 POLL_PERIOD, POLL_TIMEOUT);
4015 if (ret)
4016		dev_err(&pdev->dev, "Failed to read QM reg the second time!\n");
4017
4018 return ret;
4019}
4020
4021static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
4022{
4023 struct pci_dev *pdev = qm->pdev;
4024 u16 cmd;
4025 int i;
4026
4027 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
4028 if (set)
4029 cmd |= PCI_COMMAND_MEMORY;
4030 else
4031 cmd &= ~PCI_COMMAND_MEMORY;
4032
4033 pci_write_config_word(pdev, PCI_COMMAND, cmd);
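	/*
	 * Poll for the MSE state to match: PCI_COMMAND_MEMORY is bit 1, so
	 * the shift below yields 0 or 1 for direct comparison with 'set'.
	 */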
4034 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
4035 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
4036 if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1))
4037 return 0;
4038
4039 udelay(1);
4040 }
4041
4042 return -ETIMEDOUT;
4043}
4044
4045static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
4046{
4047 struct pci_dev *pdev = qm->pdev;
4048 u16 sriov_ctrl;
4049 int pos;
4050 int i;
4051
4052 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4053 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
4054 if (set)
4055 sriov_ctrl |= PCI_SRIOV_CTRL_MSE;
4056 else
4057 sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
4058 pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);
4059
4060 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
4061 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
4062 if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
4063 ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT)
4064 return 0;
4065
4066 udelay(1);
4067 }
4068
4069 return -ETIMEDOUT;
4070}
4071
4072static int qm_vf_reset_prepare(struct hisi_qm *qm,
4073 enum qm_stop_reason stop_reason)
4074{
4075 struct hisi_qm_list *qm_list = qm->qm_list;
4076 struct pci_dev *pdev = qm->pdev;
4077 struct pci_dev *virtfn;
4078 struct hisi_qm *vf_qm;
4079 int ret = 0;
4080
4081 mutex_lock(&qm_list->lock);
4082 list_for_each_entry(vf_qm, &qm_list->list, list) {
4083 virtfn = vf_qm->pdev;
4084 if (virtfn == pdev)
4085 continue;
4086
4087 if (pci_physfn(virtfn) == pdev) {
4088 /* save VFs PCIE BAR configuration */
4089 pci_save_state(virtfn);
4090
4091 ret = hisi_qm_stop(vf_qm, stop_reason);
4092 if (ret)
4093 goto stop_fail;
4094 }
4095 }
4096
4097stop_fail:
4098 mutex_unlock(&qm_list->lock);
4099 return ret;
4100}
4101
4102static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
4103 enum qm_stop_reason stop_reason)
4104{
4105 struct pci_dev *pdev = qm->pdev;
4106 int ret;
4107
4108 if (!qm->vfs_num)
4109 return 0;
4110
4111	/* Kunpeng930 supports notifying VFs to stop before PF reset */
4112 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
4113 ret = qm_ping_all_vfs(qm, cmd);
4114 if (ret)
4115 pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
4116 } else {
4117 ret = qm_vf_reset_prepare(qm, stop_reason);
4118 if (ret)
4119 pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret);
4120 }
4121
4122 return ret;
4123}
4124
4125static int qm_controller_reset_prepare(struct hisi_qm *qm)
4126{
4127 struct pci_dev *pdev = qm->pdev;
4128 int ret;
4129
4130 ret = qm_reset_prepare_ready(qm);
4131 if (ret) {
4132 pci_err(pdev, "Controller reset not ready!\n");
4133 return ret;
4134 }
4135
4136 /* PF obtains the information of VF by querying the register. */
4137 qm_cmd_uninit(qm);
4138
4139	/* Whether or not the VFs stop successfully, the soft reset continues. */
4140 ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET);
4141 if (ret)
4142 pci_err(pdev, "failed to stop vfs by pf in soft reset.\n");
4143
4144 ret = hisi_qm_stop(qm, QM_SOFT_RESET);
4145 if (ret) {
4146 pci_err(pdev, "Fails to stop QM!\n");
4147 qm_reset_bit_clear(qm);
4148 return ret;
4149 }
4150
4151 if (qm->use_sva) {
4152 ret = qm_hw_err_isolate(qm);
4153 if (ret)
4154 pci_err(pdev, "failed to isolate hw err!\n");
4155 }
4156
4157 ret = qm_wait_vf_prepare_finish(qm);
4158 if (ret)
4159 pci_err(pdev, "failed to stop by vfs in soft reset!\n");
4160
4161 clear_bit(QM_RST_SCHED, &qm->misc_ctl);
4162
4163 return 0;
4164}
4165
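/*
 * On QM hardware earlier than V3, handle a pending ECC multi-bit error
 * before the soft reset: close the AXI master OOO via the device
 * callback, or fall back to masking the mbit NFE report and raising the
 * error through the QM abnormal interrupt instead.
 */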
static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
{
	u32 nfe_enb = 0;

	/* Kunpeng930 hardware automatically closes the master OOO when an NFE occurs */
	if (qm->ver >= QM_HW_V3)
		return;

	if (!qm->err_status.is_dev_ecc_mbit &&
	    qm->err_status.is_qm_ecc_mbit &&
	    qm->err_ini->close_axi_master_ooo) {
		qm->err_ini->close_axi_master_ooo(qm);
	} else if (qm->err_status.is_dev_ecc_mbit &&
		   !qm->err_status.is_qm_ecc_mbit &&
		   !qm->err_ini->close_axi_master_ooo) {
		nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
		writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
		       qm->io_base + QM_RAS_NFE_ENABLE);
		writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
	}
}

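/*
 * Quiesce the device (disable VF MSE, PEH MSI and PF MSE, and shut down
 * the AXI master OOO), then trigger the controller reset through the
 * ACPI method named in qm->err_info.acpi_rst.
 */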
static int qm_soft_reset(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;
	u32 val;

	/* Ensure all doorbells and mailboxes have been received by the QM */
	ret = qm_check_req_recv(qm);
	if (ret)
		return ret;

	if (qm->vfs_num) {
		ret = qm_set_vf_mse(qm, false);
		if (ret) {
			pci_err(pdev, "Failed to disable vf MSE bit.\n");
			return ret;
		}
	}

	ret = qm->ops->set_msi(qm, false);
	if (ret) {
		pci_err(pdev, "Failed to disable PEH MSI bit.\n");
		return ret;
	}

	qm_dev_ecc_mbit_handle(qm);

	/* OOO register set and check */
	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);

	/* If the bus is locked, the whole system must be reset */
	ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
					 val,
					 (val == ACC_MASTER_TRANS_RETURN_RW),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		pci_emerg(pdev, "Bus lock! Please reset system.\n");
		return ret;
	}

	if (qm->err_ini->close_sva_prefetch)
		qm->err_ini->close_sva_prefetch(qm);

	ret = qm_set_pf_mse(qm, false);
	if (ret) {
		pci_err(pdev, "Failed to disable pf MSE bit.\n");
		return ret;
	}

	/* The reset-related sub-control registers are not in the PCI BAR */
	if (ACPI_HANDLE(&pdev->dev)) {
		unsigned long long value = 0;
		acpi_status s;

		s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
					  qm->err_info.acpi_rst,
					  NULL, &value);
		if (ACPI_FAILURE(s)) {
			pci_err(pdev, "No controller reset method!\n");
			return -EIO;
		}

		if (value) {
			pci_err(pdev, "Reset step %llu failed!\n", value);
			return -EIO;
		}
	} else {
		pci_err(pdev, "No reset method!\n");
		return -EINVAL;
	}

	return 0;
}

static int qm_vf_reset_done(struct hisi_qm *qm)
{
	struct hisi_qm_list *qm_list = qm->qm_list;
	struct pci_dev *pdev = qm->pdev;
	struct pci_dev *virtfn;
	struct hisi_qm *vf_qm;
	int ret = 0;

	mutex_lock(&qm_list->lock);
	list_for_each_entry(vf_qm, &qm_list->list, list) {
		virtfn = vf_qm->pdev;
		if (virtfn == pdev)
			continue;

		if (pci_physfn(virtfn) == pdev) {
			/* restore the VF's PCIe BAR configuration */
			pci_restore_state(virtfn);

			ret = qm_restart(vf_qm);
			if (ret)
				goto restart_fail;
		}
	}

restart_fail:
	mutex_unlock(&qm_list->lock);
	return ret;
}

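/*
 * Restart all VFs after a PF reset: reassign their queues, then either
 * notify the VF drivers via a mailbox command or restart them directly
 * from the PF.
 */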
static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	if (!qm->vfs_num)
		return 0;

	ret = qm_vf_q_assign(qm, qm->vfs_num);
	if (ret) {
		pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret);
		return ret;
	}

	/* Kunpeng930 supports notifying the VFs to start after a PF reset. */
	if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
		ret = qm_ping_all_vfs(qm, cmd);
		if (ret)
			pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
	} else {
		ret = qm_vf_reset_done(qm);
		if (ret)
			pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret);
	}

	return ret;
}

static int qm_dev_hw_init(struct hisi_qm *qm)
{
	return qm->err_ini->hw_init(qm);
}

static void qm_restart_prepare(struct hisi_qm *qm)
{
	u32 value;

	if (qm->err_ini->open_sva_prefetch)
		qm->err_ini->open_sva_prefetch(qm);

	if (qm->ver >= QM_HW_V3)
		return;

	if (!qm->err_status.is_qm_ecc_mbit &&
	    !qm->err_status.is_dev_ecc_mbit)
		return;

	/* temporarily close the OOO port used by the PEH to write out MSI */
	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
	writel(value & ~qm->err_info.msi_wr_port,
	       qm->io_base + ACC_AM_CFG_PORT_WR_EN);

	/* clear the device ECC 2-bit error source if present */
	value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
	if (value && qm->err_ini->clear_dev_hw_err_status)
		qm->err_ini->clear_dev_hw_err_status(qm, value);

	/* clear the QM ECC mbit error source */
	writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);

	/* clear the AM Reorder Buffer ECC mbit source */
	writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
}

static void qm_restart_done(struct hisi_qm *qm)
{
	u32 value;

	if (qm->ver >= QM_HW_V3)
		goto clear_flags;

	if (!qm->err_status.is_qm_ecc_mbit &&
	    !qm->err_status.is_dev_ecc_mbit)
		return;

	/* reopen the OOO port so the PEH can write out MSI */
	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
	value |= qm->err_info.msi_wr_port;
	writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);

clear_flags:
	qm->err_status.is_qm_ecc_mbit = false;
	qm->err_status.is_dev_ecc_mbit = false;
}

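/*
 * Bring the device back up after a successful soft reset: re-enable MSI
 * and MSE, reinitialize the hardware, and restart the PF and its VFs.
 */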
static int qm_controller_reset_done(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm->ops->set_msi(qm, true);
	if (ret) {
		pci_err(pdev, "Failed to enable PEH MSI bit!\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, true);
	if (ret) {
		pci_err(pdev, "Failed to enable pf MSE bit!\n");
		return ret;
	}

	if (qm->vfs_num) {
		ret = qm_set_vf_mse(qm, true);
		if (ret) {
			pci_err(pdev, "Failed to enable vf MSE bit!\n");
			return ret;
		}
	}

	ret = qm_dev_hw_init(qm);
	if (ret) {
		pci_err(pdev, "Failed to init device\n");
		return ret;
	}

	qm_restart_prepare(qm);
	hisi_qm_dev_err_init(qm);
	if (qm->err_ini->open_axi_master_ooo)
		qm->err_ini->open_axi_master_ooo(qm);

	ret = qm_dev_mem_reset(qm);
	if (ret) {
		pci_err(pdev, "failed to reset device memory\n");
		return ret;
	}

	ret = qm_restart(qm);
	if (ret) {
		pci_err(pdev, "Failed to start QM!\n");
		return ret;
	}

	ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
	if (ret)
		pci_err(pdev, "failed to start vfs by pf in soft reset.\n");

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to start by vfs in soft reset!\n");

	qm_cmd_init(qm);
	qm_restart_done(qm);

	qm_reset_bit_clear(qm);

	return 0;
}

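/*
 * Perform a full controller reset: prepare (stop the PF and VFs), soft
 * reset via ACPI, then restart everything. If the reset fails and SVA
 * is in use, the device is isolated.
 */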
static int qm_controller_reset(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	pci_info(pdev, "Controller resetting...\n");

	ret = qm_controller_reset_prepare(qm);
	if (ret) {
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
		clear_bit(QM_RST_SCHED, &qm->misc_ctl);
		return ret;
	}

	hisi_qm_show_last_dfx_regs(qm);
	if (qm->err_ini->show_last_dfx_regs)
		qm->err_ini->show_last_dfx_regs(qm);

	ret = qm_soft_reset(qm);
	if (ret)
		goto err_reset;

	ret = qm_controller_reset_done(qm);
	if (ret)
		goto err_reset;

	pci_info(pdev, "Controller reset complete\n");

	return 0;

err_reset:
	pci_err(pdev, "Controller reset failed (%d)\n", ret);
	qm_reset_bit_clear(qm);

	/* if resetting fails, isolate the device */
	if (qm->use_sva)
		qm->isolate_data.is_isolate = true;
	return ret;
}

/**
 * hisi_qm_dev_slot_reset() - slot reset
 * @pdev: the PCIe device
 *
 * This function offers the QM-related PCIe device reset interface. Drivers
 * which use the QM can use this function as the slot_reset callback in their
 * struct pci_error_handlers.
 */
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	if (pdev->is_virtfn)
		return PCI_ERS_RESULT_RECOVERED;

	/* reset the PCIe device controller */
	ret = qm_controller_reset(qm);
	if (ret) {
		pci_err(pdev, "Controller reset failed (%d)\n", ret);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);

void hisi_qm_reset_prepare(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	u32 delay = 0;
	int ret;

	hisi_qm_dev_err_uninit(pf_qm);

	/*
	 * Check whether there is an ECC mbit error. If there is one,
	 * wait for the soft reset to fix it.
	 */
	while (qm_check_dev_error(pf_qm)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return;
	}

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		pci_err(pdev, "FLR not ready!\n");
		return;
	}

	/* During reset, the PF obtains VF information by querying registers. */
	if (qm->fun_type == QM_HW_PF)
		qm_cmd_uninit(qm);

	ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_FLR);
	if (ret)
		pci_err(pdev, "failed to stop vfs by pf in FLR.\n");

	ret = hisi_qm_stop(qm, QM_FLR);
	if (ret) {
		pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
		return;
	}

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to stop by vfs in FLR!\n");

	pci_info(pdev, "FLR resetting...\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);

static bool qm_flr_reset_complete(struct pci_dev *pdev)
{
	struct pci_dev *pf_pdev = pci_physfn(pdev);
	struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
	u32 id;

	pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
	if (id == QM_PCI_COMMAND_INVALID) {
		pci_err(pdev, "Device cannot be used!\n");
		return false;
	}

	return true;
}

void hisi_qm_reset_done(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = qm_dev_hw_init(qm);
		if (ret) {
			pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
			goto flr_done;
		}
	}

	hisi_qm_dev_err_init(pf_qm);

	ret = qm_restart(qm);
	if (ret) {
		pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
		goto flr_done;
	}

	ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
	if (ret)
		pci_err(pdev, "failed to start vfs by pf in FLR.\n");

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to start by vfs in FLR!\n");

flr_done:
	if (qm->fun_type == QM_HW_PF)
		qm_cmd_init(qm);

	if (qm_flr_reset_complete(pdev))
		pci_info(pdev, "FLR reset complete\n");

	qm_reset_bit_clear(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);

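/*
 * Abnormal interrupt handler: count the event, process the device error
 * and, if a reset is required, schedule the controller reset work.
 */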
static irqreturn_t qm_abnormal_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	enum acc_err_result ret;

	atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
	ret = qm_process_dev_error(qm);
	if (ret == ACC_ERR_NEED_RESET &&
	    !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) &&
	    !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl))
		schedule_work(&qm->rst_work);

	return IRQ_HANDLED;
}

/**
 * hisi_qm_dev_shutdown() - Shutdown device.
 * @pdev: The device to be shut down.
 *
 * This function stops the qm when the OS shuts down or reboots.
 */
void hisi_qm_dev_shutdown(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	ret = hisi_qm_stop(qm, QM_NORMAL);
	if (ret)
		dev_err(&pdev->dev, "Failed to stop qm in shutdown!\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);

static void hisi_qm_controller_reset(struct work_struct *rst_work)
{
	struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
	int ret;

	ret = qm_pm_get_sync(qm);
	if (ret) {
		clear_bit(QM_RST_SCHED, &qm->misc_ctl);
		return;
	}

	/* reset the PCIe device controller */
	ret = qm_controller_reset(qm);
	if (ret)
		dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);

	qm_pm_put_sync(qm);
}

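/*
 * VF side of a PF-initiated reset: stop the VF's QM, save its PCI
 * state, and report the prepare result back to the PF by mailbox.
 */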
static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
				   enum qm_stop_reason stop_reason)
{
	enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE;
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		dev_err(&pdev->dev, "reset prepare not ready!\n");
		atomic_set(&qm->status.flags, QM_STOP);
		cmd = QM_VF_PREPARE_FAIL;
		goto err_prepare;
	}

	ret = hisi_qm_stop(qm, stop_reason);
	if (ret) {
		dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret);
		atomic_set(&qm->status.flags, QM_STOP);
		cmd = QM_VF_PREPARE_FAIL;
		goto err_prepare;
	}
	goto out;

err_prepare:
	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
out:
	pci_save_state(pdev);
	ret = qm_ping_pf(qm, cmd);
	if (ret)
		dev_warn(&pdev->dev, "PF response timed out in reset prepare!\n");
}

static void qm_pf_reset_vf_done(struct hisi_qm *qm)
{
	enum qm_mb_cmd cmd = QM_VF_START_DONE;
	struct pci_dev *pdev = qm->pdev;
	int ret;

	pci_restore_state(pdev);
	ret = hisi_qm_start(qm);
	if (ret) {
		dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret);
		cmd = QM_VF_START_FAIL;
	}

	qm_cmd_init(qm);
	ret = qm_ping_pf(qm, cmd);
	if (ret)
		dev_warn(&pdev->dev, "PF response timed out in reset done!\n");

	qm_reset_bit_clear(qm);
}

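/*
 * Poll until the PF signals that its reset has finished, then read and
 * validate the mailbox command the PF sent.
 */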
static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 val, cmd;
	u64 msg;
	int ret;

	/* Wait for the reset to finish */
	ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val,
					 val == BIT(0), QM_VF_RESET_WAIT_US,
					 QM_VF_RESET_WAIT_TIMEOUT_US);
	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(dev, "couldn't get reset done status from PF, timeout!\n");
		return -ETIMEDOUT;
	}

	/*
	 * Whether or not the message is received successfully,
	 * the VF must ack the PF by clearing the interrupt.
	 */
	ret = qm_get_mb_cmd(qm, &msg, 0);
	qm_clear_cmd_interrupt(qm, 0);
	if (ret) {
		dev_err(dev, "failed to get msg from PF in reset done!\n");
		return ret;
	}

	cmd = msg & QM_MB_CMD_DATA_MASK;
	if (cmd != QM_PF_RESET_DONE) {
		dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static void qm_pf_reset_vf_process(struct hisi_qm *qm,
				   enum qm_stop_reason stop_reason)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	dev_info(dev, "device reset start...\n");

	/* During reset, messages are obtained by querying registers. */
	qm_cmd_uninit(qm);
	qm_pf_reset_vf_prepare(qm, stop_reason);

	ret = qm_wait_pf_reset_finish(qm);
	if (ret)
		goto err_get_status;

	qm_pf_reset_vf_done(qm);

	dev_info(dev, "device reset done.\n");

	return;

err_get_status:
	qm_cmd_init(qm);
	qm_reset_bit_clear(qm);
}

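/*
 * Dispatch a mailbox command received from another function (the PF or
 * a VF) to the matching reset or QoS handler.
 */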
static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
{
	struct device *dev = &qm->pdev->dev;
	u64 msg;
	u32 cmd;
	int ret;

	/*
	 * Get the message from the source by mailbox. Whether or not it is
	 * received successfully, the destination must ack the source by
	 * clearing the interrupt.
	 */
	ret = qm_get_mb_cmd(qm, &msg, fun_num);
	qm_clear_cmd_interrupt(qm, BIT(fun_num));
	if (ret) {
		dev_err(dev, "failed to get msg from source!\n");
		return;
	}

	cmd = msg & QM_MB_CMD_DATA_MASK;
	switch (cmd) {
	case QM_PF_FLR_PREPARE:
		qm_pf_reset_vf_process(qm, QM_FLR);
		break;
	case QM_PF_SRST_PREPARE:
		qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
		break;
	case QM_VF_GET_QOS:
		qm_vf_get_qos(qm, fun_num);
		break;
	case QM_PF_SET_QOS:
		qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
		break;
	default:
		dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
		break;
	}
}

static void qm_cmd_process(struct work_struct *cmd_process)
{
	struct hisi_qm *qm = container_of(cmd_process,
					  struct hisi_qm, cmd_process);
	u32 vfs_num = qm->vfs_num;
	u64 val;
	u32 i;

	if (qm->fun_type == QM_HW_PF) {
		val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
		if (!val)
			return;

		for (i = 1; i <= vfs_num; i++) {
			if (val & BIT(i))
				qm_handle_cmd_msg(qm, i);
		}

		return;
	}

	qm_handle_cmd_msg(qm, 0);
}

/**
 * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
 * @qm: The qm to add.
 * @qm_list: The qm list.
 *
 * This function adds the qm to the qm list, and registers the algorithms
 * to crypto when the qm is the first one in the list.
 */
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	struct device *dev = &qm->pdev->dev;
	int flag = 0;
	int ret = 0;

	mutex_lock(&qm_list->lock);
	if (list_empty(&qm_list->list))
		flag = 1;
	list_add_tail(&qm->list, &qm_list->list);
	mutex_unlock(&qm_list->lock);

	if (qm->ver <= QM_HW_V2 && qm->use_sva) {
		dev_info(dev, "HW V2 cannot use uacce sva mode and hardware crypto algs together.\n");
		return 0;
	}

	if (flag) {
		ret = qm_list->register_to_crypto(qm);
		if (ret) {
			mutex_lock(&qm_list->lock);
			list_del(&qm->list);
			mutex_unlock(&qm_list->lock);
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_register);

/**
 * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from
 * qm list.
 * @qm: The qm to delete.
 * @qm_list: The qm list.
 *
 * This function deletes the qm from the qm list, and unregisters the
 * algorithms from crypto when the list becomes empty.
 */
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	mutex_lock(&qm_list->lock);
	list_del(&qm->list);
	mutex_unlock(&qm_list->lock);

	if (qm->ver <= QM_HW_V2 && qm->use_sva)
		return;

	if (list_empty(&qm_list->list))
		qm_list->unregister_from_crypto(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);

static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	if (qm->fun_type == QM_HW_VF)
		return;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
		return;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	free_irq(pci_irq_vector(pdev, irq_vector), qm);
}

static int qm_register_abnormal_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;
	int ret;

	if (qm->fun_type == QM_HW_VF)
		return 0;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret);

	return ret;
}

static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	free_irq(pci_irq_vector(pdev, irq_vector), qm);
}

static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;
	int ret;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm);
	if (ret)
		dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret);

	return ret;
}

static void qm_unregister_aeq_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	free_irq(pci_irq_vector(pdev, irq_vector), qm);
}

static int qm_register_aeq_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;
	int ret;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq,
				   qm_aeq_thread, 0, qm->dev_name, qm);
	if (ret)
		dev_err(&pdev->dev, "failed to request aeq irq, ret = %d", ret);

	return ret;
}

static void qm_unregister_eq_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	free_irq(pci_irq_vector(pdev, irq_vector), qm);
}

static int qm_register_eq_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;
	int ret;

	val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver);
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm);
	if (ret)
		dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);

	return ret;
}

static void qm_irqs_unregister(struct hisi_qm *qm)
{
	qm_unregister_mb_cmd_irq(qm);
	qm_unregister_abnormal_irq(qm);
	qm_unregister_aeq_irq(qm);
	qm_unregister_eq_irq(qm);
}

static int qm_irqs_register(struct hisi_qm *qm)
{
	int ret;

	ret = qm_register_eq_irq(qm);
	if (ret)
		return ret;

	ret = qm_register_aeq_irq(qm);
	if (ret)
		goto free_eq_irq;

	ret = qm_register_abnormal_irq(qm);
	if (ret)
		goto free_aeq_irq;

	ret = qm_register_mb_cmd_irq(qm);
	if (ret)
		goto free_abnormal_irq;

	return 0;

free_abnormal_irq:
	qm_unregister_abnormal_irq(qm);
free_aeq_irq:
	qm_unregister_aeq_irq(qm);
free_eq_irq:
	qm_unregister_eq_irq(qm);
	return ret;
}

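/*
 * Determine the function's queue pair resources: a VF reads its range
 * from the VFT via mailbox, while the PF reads its limits from the
 * capability registers and validates the configured qp_num against them.
 */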
static int qm_get_qp_num(struct hisi_qm *qm)
{
	bool is_db_isolation;

	/*
	 * The VF's qp_num is assigned by the PF in v2, and the VF can get
	 * its qp_num from the vft.
	 */
	if (qm->fun_type == QM_HW_VF) {
		if (qm->ver != QM_HW_V1)
			/* v2 and later support getting the vft by mailbox */
			return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);

		return 0;
	}

	is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
	qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true);
	qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
					     QM_FUNC_MAX_QP_CAP, is_db_isolation);

	/* check if the qp number is valid */
	if (qm->qp_num > qm->max_qp_num) {
		dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n",
			qm->qp_num, qm->max_qp_num);
		return -EINVAL;
	}

	return 0;
}

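/*
 * Read the hardware capability registers and cache the supported
 * features as bits in qm->caps.
 */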
static void qm_get_hw_caps(struct hisi_qm *qm)
{
	const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
						  qm_cap_info_pf : qm_cap_info_vf;
	u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) :
					      ARRAY_SIZE(qm_cap_info_vf);
	u32 val, i;

	/* The doorbell isolation register is an independent register. */
	val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true);
	if (val)
		set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);

	if (qm->ver >= QM_HW_V3) {
		val = readl(qm->io_base + QM_FUNC_CAPS_REG);
		qm->cap_ver = val & QM_CAPBILITY_VERSION;
	}

	/* Get the capabilities common to PF and VF */
	for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) {
		val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver);
		if (val)
			set_bit(qm_cap_info_comm[i].type, &qm->caps);
	}

	/* Get the PF- or VF-specific capabilities */
	for (i = 0; i < size; i++) {
		val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver);
		if (val)
			set_bit(cap_info[i].type, &qm->caps);
	}
}

static int qm_get_pci_res(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	ret = pci_request_mem_regions(pdev, qm->dev_name);
	if (ret < 0) {
		dev_err(dev, "Failed to request mem regions!\n");
		return ret;
	}

	qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
	qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2));
	if (!qm->io_base) {
		ret = -EIO;
		goto err_request_mem_regions;
	}

	qm_get_hw_caps(qm);
	if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
		qm->db_interval = QM_QP_DB_INTERVAL;
		qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
		qm->db_io_base = ioremap(qm->db_phys_base,
					 pci_resource_len(pdev, PCI_BAR_4));
		if (!qm->db_io_base) {
			ret = -EIO;
			goto err_ioremap;
		}
	} else {
		qm->db_phys_base = qm->phys_base;
		qm->db_io_base = qm->io_base;
		qm->db_interval = 0;
	}

	ret = qm_get_qp_num(qm);
	if (ret)
		goto err_db_ioremap;

	return 0;

err_db_ioremap:
	if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
		iounmap(qm->db_io_base);
err_ioremap:
	iounmap(qm->io_base);
err_request_mem_regions:
	pci_release_mem_regions(pdev);
	return ret;
}

static int hisi_qm_pci_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	unsigned int num_vec;
	int ret;

	ret = pci_enable_device_mem(pdev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable device mem!\n");
		return ret;
	}

	ret = qm_get_pci_res(qm);
	if (ret)
		goto err_disable_pcidev;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0)
		goto err_get_pci_res;
	pci_set_master(pdev);

	num_vec = qm_get_irq_num(qm);
	ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
	if (ret < 0) {
		dev_err(dev, "Failed to enable MSI vectors!\n");
		goto err_get_pci_res;
	}

	return 0;

err_get_pci_res:
	qm_put_pci_res(qm);
err_disable_pcidev:
	pci_disable_device(pdev);
	return ret;
}

static int hisi_qm_init_work(struct hisi_qm *qm)
{
	int i;

	for (i = 0; i < qm->qp_num; i++)
		INIT_WORK(&qm->poll_data[i].work, qm_work_process);

	if (qm->fun_type == QM_HW_PF)
		INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);

	if (qm->ver > QM_HW_V2)
		INIT_WORK(&qm->cmd_process, qm_cmd_process);

	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
				 WQ_UNBOUND, num_online_cpus(),
				 pci_name(qm->pdev));
	if (!qm->wq) {
		pci_err(qm->pdev, "failed to alloc workqueue!\n");
		return -ENOMEM;
	}

	return 0;
}

static int hisi_qp_alloc_memory(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u16 sq_depth, cq_depth;
	size_t qp_dma_size;
	int i, ret;

	qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
	if (!qm->qp_array)
		return -ENOMEM;

	qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL);
	if (!qm->poll_data) {
		kfree(qm->qp_array);
		return -ENOMEM;
	}

	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);

	/* one more page for device or qp statuses */
	qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth;
	qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
	for (i = 0; i < qm->qp_num; i++) {
		qm->poll_data[i].qm = qm;
		ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth);
		if (ret)
			goto err_init_qp_mem;

		dev_dbg(dev, "allocate qp dma buf size=%zx\n", qp_dma_size);
	}

	return 0;
err_init_qp_mem:
	hisi_qp_memory_uninit(qm, i);

	return ret;
}

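/*
 * Allocate the QM's DMA-coherent queue memory (EQE, AEQE, SQC and CQC
 * tables) in one block, carve it up with QM_INIT_BUF, then allocate the
 * per-qp memory.
 */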
static int hisi_qm_memory_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret, total_func;
	size_t off = 0;

	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
		total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
		qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
		if (!qm->factor)
			return -ENOMEM;

		/* Only the PF value needs to be initialized */
		qm->factor[0].func_qos = QM_QOS_MAX_VAL;
	}

#define QM_INIT_BUF(qm, type, num) do { \
	(qm)->type = ((qm)->qdma.va + (off)); \
	(qm)->type##_dma = (qm)->qdma.dma + (off); \
	off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
} while (0)

	idr_init(&qm->qp_idr);
	qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP);
	qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) +
			QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) +
			QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
			QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
	qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
					 GFP_ATOMIC);
	dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size);
	if (!qm->qdma.va) {
		ret = -ENOMEM;
		goto err_destroy_idr;
	}

	QM_INIT_BUF(qm, eqe, qm->eq_depth);
	QM_INIT_BUF(qm, aeqe, qm->aeq_depth);
	QM_INIT_BUF(qm, sqc, qm->qp_num);
	QM_INIT_BUF(qm, cqc, qm->qp_num);

	ret = hisi_qp_alloc_memory(qm);
	if (ret)
		goto err_alloc_qp_array;

	return 0;

err_alloc_qp_array:
	dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
err_destroy_idr:
	idr_destroy(&qm->qp_idr);
	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
		kfree(qm->factor);

	return ret;
}

/**
 * hisi_qm_init() - Initialize the qm's configuration.
 * @qm: The qm to initialize.
 *
 * This function initializes the qm; hisi_qm_start() can then be called to
 * put the qm to work.
 */
int hisi_qm_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	hisi_qm_pre_init(qm);

	ret = hisi_qm_pci_init(qm);
	if (ret)
		return ret;

	ret = qm_irqs_register(qm);
	if (ret)
		goto err_pci_init;

	if (qm->fun_type == QM_HW_PF) {
		qm_disable_clock_gate(qm);
		ret = qm_dev_mem_reset(qm);
		if (ret) {
			dev_err(dev, "failed to reset device memory\n");
			goto err_irq_register;
		}
	}

	if (qm->mode == UACCE_MODE_SVA) {
		ret = qm_alloc_uacce(qm);
		if (ret < 0)
			dev_warn(dev, "failed to alloc uacce (%d)\n", ret);
	}

	ret = hisi_qm_memory_init(qm);
	if (ret)
		goto err_alloc_uacce;

	ret = hisi_qm_init_work(qm);
	if (ret)
		goto err_free_qm_memory;

	qm_cmd_init(qm);
	atomic_set(&qm->status.flags, QM_INIT);

	return 0;

err_free_qm_memory:
	hisi_qm_memory_uninit(qm);
err_alloc_uacce:
	qm_remove_uacce(qm);
err_irq_register:
	qm_irqs_unregister(qm);
err_pci_init:
	hisi_qm_pci_uninit(qm);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_init);

/**
 * hisi_qm_get_dfx_access() - Try to get dfx access.
 * @qm: pointer to accelerator device.
 *
 * Try to get dfx access; on success the user can read debug information.
 *
 * If the device is suspended, return failure; otherwise bump up the
 * runtime PM usage counter.
 */
int hisi_qm_get_dfx_access(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (pm_runtime_suspended(dev)) {
		dev_info(dev, "cannot read/write - device is suspended.\n");
		return -EAGAIN;
	}

	return qm_pm_get_sync(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access);

/**
 * hisi_qm_put_dfx_access() - Put dfx access.
 * @qm: pointer to accelerator device.
 *
 * Put dfx access and drop the runtime PM usage counter.
 */
void hisi_qm_put_dfx_access(struct hisi_qm *qm)
{
	qm_pm_put_sync(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access);

/**
 * hisi_qm_pm_init() - Initialize qm runtime PM.
 * @qm: pointer to accelerator device.
 *
 * Function that initializes the qm's runtime PM.
 */
void hisi_qm_pm_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_put_noidle(dev);
}
EXPORT_SYMBOL_GPL(hisi_qm_pm_init);

/**
 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
 * @qm: pointer to accelerator device.
 *
 * Function that uninitializes the qm's runtime PM.
 */
void hisi_qm_pm_uninit(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_get_noresume(dev);
	pm_runtime_dont_use_autosuspend(dev);
}
EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit);

static int qm_prepare_for_suspend(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;
	u32 val;

	ret = qm->ops->set_msi(qm, false);
	if (ret) {
		pci_err(pdev, "failed to disable MSI before suspending!\n");
		return ret;
	}

	/* shut down the OOO register */
	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);

	ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
					 val,
					 (val == ACC_MASTER_TRANS_RETURN_RW),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		pci_emerg(pdev, "Bus lock! Please reset system.\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, false);
	if (ret)
		pci_err(pdev, "failed to disable MSE before suspending!\n");

	return ret;
}

static int qm_rebuild_for_resume(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_set_pf_mse(qm, true);
	if (ret) {
		pci_err(pdev, "failed to enable MSE after resuming!\n");
		return ret;
	}

	ret = qm->ops->set_msi(qm, true);
	if (ret) {
		pci_err(pdev, "failed to enable MSI after resuming!\n");
		return ret;
	}

	ret = qm_dev_hw_init(qm);
	if (ret) {
		pci_err(pdev, "failed to init device after resuming\n");
		return ret;
	}

	qm_cmd_init(qm);
	hisi_qm_dev_err_init(qm);
	qm_disable_clock_gate(qm);
	ret = qm_dev_mem_reset(qm);
	if (ret)
		pci_err(pdev, "failed to reset device memory\n");

	return ret;
}

/**
 * hisi_qm_suspend() - Runtime suspend of given device.
 * @dev: device to suspend.
 *
 * Function that suspends the device.
 */
int hisi_qm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	pci_info(pdev, "entering suspended state\n");

	ret = hisi_qm_stop(qm, QM_NORMAL);
	if (ret) {
		pci_err(pdev, "failed to stop qm(%d)\n", ret);
		return ret;
	}

	ret = qm_prepare_for_suspend(qm);
	if (ret)
		pci_err(pdev, "failed to prepare for suspend (%d)\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_suspend);

/**
 * hisi_qm_resume() - Runtime resume of given device.
 * @dev: device to resume.
 *
 * Function that resumes the device.
 */
int hisi_qm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	pci_info(pdev, "resuming from suspend state\n");

	ret = qm_rebuild_for_resume(qm);
	if (ret) {
		pci_err(pdev, "failed to rebuild for resume (%d)\n", ret);
		return ret;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		if (qm_check_dev_error(qm)) {
			pci_info(pdev, "failed to start qm due to device error, device will be reset!\n");
			return 0;
		}

		pci_err(pdev, "failed to start qm(%d)!\n", ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_resume);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");