Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
3
4#include <linux/acpi.h>
5#include <linux/clk.h>
6#include <linux/dmaengine.h>
7#include <linux/dma-mapping.h>
8#include <linux/dma/qcom-gpi-dma.h>
9#include <linux/err.h>
10#include <linux/i2c.h>
11#include <linux/interrupt.h>
12#include <linux/io.h>
13#include <linux/module.h>
14#include <linux/of.h>
15#include <linux/platform_device.h>
16#include <linux/pm_runtime.h>
17#include <linux/soc/qcom/geni-se.h>
18#include <linux/spinlock.h>
19#include <linux/units.h>
20
21#define SE_I2C_TX_TRANS_LEN 0x26c
22#define SE_I2C_RX_TRANS_LEN 0x270
23#define SE_I2C_SCL_COUNTERS 0x278
24
25#define SE_I2C_ERR (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |\
26 M_GP_IRQ_1_EN | M_GP_IRQ_3_EN | M_GP_IRQ_4_EN)
27#define SE_I2C_ABORT BIT(1)
28
29/* M_CMD OP codes for I2C */
30#define I2C_WRITE 0x1
31#define I2C_READ 0x2
32#define I2C_WRITE_READ 0x3
33#define I2C_ADDR_ONLY 0x4
34#define I2C_BUS_CLEAR 0x6
35#define I2C_STOP_ON_BUS 0x7
36/* M_CMD params for I2C */
37#define PRE_CMD_DELAY BIT(0)
38#define TIMESTAMP_BEFORE BIT(1)
39#define STOP_STRETCH BIT(2)
40#define TIMESTAMP_AFTER BIT(3)
41#define POST_COMMAND_DELAY BIT(4)
42#define IGNORE_ADD_NACK BIT(6)
43#define READ_FINISHED_WITH_ACK BIT(7)
44#define BYPASS_ADDR_PHASE BIT(8)
45#define SLV_ADDR_MSK GENMASK(15, 9)
46#define SLV_ADDR_SHFT 9
47/* I2C SCL COUNTER fields */
48#define HIGH_COUNTER_MSK GENMASK(29, 20)
49#define HIGH_COUNTER_SHFT 20
50#define LOW_COUNTER_MSK GENMASK(19, 10)
51#define LOW_COUNTER_SHFT 10
52#define CYCLE_COUNTER_MSK GENMASK(9, 0)
53
54#define I2C_PACK_TX BIT(0)
55#define I2C_PACK_RX BIT(1)
56
/*
 * Error codes for I2C transfers; values index the gi2c_log[] table.
 * GP_IRQ0..GP_IRQ5 correspond to the M_GP_IRQ_[0-5] status bits; the
 * IRQ handler maps GP IRQ 1/3/4 to NACK/BUS_PROTO/ARB_LOST.  The
 * GENI_* codes are command-engine conditions (overrun, illegal command,
 * abort completion) plus a software-detected transfer timeout.
 */
enum geni_i2c_err_code {
	GP_IRQ0,
	NACK,
	GP_IRQ2,
	BUS_PROTO,
	ARB_LOST,
	GP_IRQ5,
	GENI_OVERRUN,
	GENI_ILLEGAL_CMD,
	GENI_ABORT_DONE,
	GENI_TIMEOUT,
};
69
70#define DM_I2C_CB_ERR ((BIT(NACK) | BIT(BUS_PROTO) | BIT(ARB_LOST)) \
71 << 5)
72
73#define I2C_AUTO_SUSPEND_DELAY 250
74#define PACKING_BYTES_PW 4
75
76#define ABORT_TIMEOUT HZ
77#define XFER_TIMEOUT HZ
78#define RST_TIMEOUT HZ
79
80#define QCOM_I2C_MIN_NUM_OF_MSGS_MULTI_DESC 2
81
/**
 * struct geni_i2c_gpi_multi_desc_xfer - Structure for multi transfer support
 *
 * @msg_idx_cnt: Current message index being processed in the transfer
 * @unmap_msg_cnt: Number of messages that have been unmapped
 * @irq_cnt: Number of transfer completion interrupts received
 * @dma_buf: Array of virtual addresses for DMA-safe buffers
 * @dma_addr: Array of DMA addresses corresponding to the buffers
 *
 * @dma_buf and @dma_addr are allocated per batch in geni_i2c_gpi_xfer()
 * and freed by geni_i2c_gpi_multi_desc_unmap() once the final message
 * has been unmapped.
 */
struct geni_i2c_gpi_multi_desc_xfer {
	u32 msg_idx_cnt;
	u32 unmap_msg_cnt;
	u32 irq_cnt;
	void **dma_buf;
	dma_addr_t *dma_addr;
};
98
/* Per-controller driver state; one instance per GENI serial engine */
struct geni_i2c_dev {
	struct geni_se se;		/* common GENI serial-engine handle */
	u32 tx_wm;			/* TX FIFO watermark (fifo depth - 1) */
	int irq;
	int err;			/* first error latched for current xfer */
	struct i2c_adapter adap;
	struct completion done;		/* signalled by IRQ / DMA callbacks */
	struct i2c_msg *cur;		/* message currently on the wire */
	int cur_wr;			/* bytes of cur written to TX FIFO so far */
	int cur_rd;			/* bytes of cur drained from RX FIFO so far */
	spinlock_t lock;		/* serializes IRQ handler vs. abort path */
	struct clk *core_clk;
	u32 clk_freq_out;		/* requested SCL frequency in Hz */
	const struct geni_i2c_clk_fld *clk_fld;	/* divider/counter settings */
	int suspended;
	void *dma_buf;			/* DMA-safe bounce buffer, if in DMA mode */
	size_t xfer_len;
	dma_addr_t dma_addr;
	struct dma_chan *tx_c;		/* GPI DMA channels */
	struct dma_chan *rx_c;
	bool no_dma;			/* serial engine has no DMA support */
	bool gpi_mode;			/* true: GPI DMA; false: FIFO / SE-DMA */
	bool abort_done;		/* set when GENI_ABORT_DONE is seen */
	bool is_tx_multi_desc_xfer;	/* multi-descriptor write batch active */
	u32 num_msgs;
	struct geni_i2c_gpi_multi_desc_xfer i2c_multi_desc_config;
};
126
/* Per-compatible quirk data (e.g. the I2C Master Hub variant) */
struct geni_i2c_desc {
	bool has_core_clk;		/* variant needs an extra "core" clock */
	char *icc_ddr;			/* DDR interconnect path name, NULL if none */
	bool no_dma_support;		/* FIFO-only serial engine */
	unsigned int tx_fifo_depth;	/* fixed depth when HW cannot report it */
};
133
/* Maps a geni_i2c_err_code to its errno value and log message */
struct geni_i2c_err_log {
	int err;
	const char *msg;
};
138
/* Error table indexed by enum geni_i2c_err_code */
static const struct geni_i2c_err_log gi2c_log[] = {
	[GP_IRQ0] = {-EIO, "Unknown I2C err GP_IRQ0"},
	[NACK] = {-ENXIO, "NACK: slv unresponsive, check its power/reset-ln"},
	[GP_IRQ2] = {-EIO, "Unknown I2C err GP IRQ2"},
	[BUS_PROTO] = {-EPROTO, "Bus proto err, noisy/unexpected start/stop"},
	[ARB_LOST] = {-EAGAIN, "Bus arbitration lost, clock line undriveable"},
	[GP_IRQ5] = {-EIO, "Unknown I2C err GP IRQ5"},
	[GENI_OVERRUN] = {-EIO, "Cmd overrun, check GENI cmd-state machine"},
	[GENI_ILLEGAL_CMD] = {-EIO, "Illegal cmd, check GENI cmd-state machine"},
	[GENI_ABORT_DONE] = {-ETIMEDOUT, "Abort after timeout successful"},
	[GENI_TIMEOUT] = {-ETIMEDOUT, "I2C TXN timed out"},
};
151
/*
 * SCL timing parameters for one output frequency; see the formula in
 * the comment above the frequency tables below.
 */
struct geni_i2c_clk_fld {
	u32 clk_freq_out;	/* SCL frequency in Hz this entry produces */
	u8 clk_div;		/* serial clock divider */
	u8 t_high_cnt;		/* SCL high period count */
	u8 t_low_cnt;		/* SCL low period count */
	u8 t_cycle_cnt;		/* full SCL cycle count */
};
159
/*
 * Hardware uses the underlying formula to calculate time periods of
 * SCL clock cycle. Firmware uses some additional cycles excluded from the
 * below formula and it is confirmed that the time periods are within
 * specification limits.
 *
 * time of high period of SCL: t_high = (t_high_cnt * clk_div) / source_clock
 * time of low period of SCL: t_low = (t_low_cnt * clk_div) / source_clock
 * time of full period of SCL: t_cycle = (t_cycle_cnt * clk_div) / source_clock
 * clk_freq_out = 1 / t_cycle
 * source_clock = 19.2 MHz
 */
static const struct geni_i2c_clk_fld geni_i2c_clk_map_19p2mhz[] = {
	{ I2C_MAX_STANDARD_MODE_FREQ, 7, 10, 12, 26 },
	{ I2C_MAX_FAST_MODE_FREQ, 2, 5, 11, 22 },
	{ I2C_MAX_FAST_MODE_PLUS_FREQ, 1, 2, 8, 18 },
	{}	/* sentinel: clk_freq_out == 0 terminates the lookup */
};
178
/* Same formula as the table above; source_clock = 32 MHz */
static const struct geni_i2c_clk_fld geni_i2c_clk_map_32mhz[] = {
	{ I2C_MAX_STANDARD_MODE_FREQ, 8, 14, 18, 38 },
	{ I2C_MAX_FAST_MODE_FREQ, 4, 3, 9, 19 },
	{ I2C_MAX_FAST_MODE_PLUS_FREQ, 2, 3, 5, 15 },
	{}	/* sentinel: clk_freq_out == 0 terminates the lookup */
};
186
187static int geni_i2c_clk_map_idx(struct geni_i2c_dev *gi2c)
188{
189 const struct geni_i2c_clk_fld *itr;
190
191 if (clk_get_rate(gi2c->se.clk) == 32 * HZ_PER_MHZ)
192 itr = geni_i2c_clk_map_32mhz;
193 else
194 itr = geni_i2c_clk_map_19p2mhz;
195
196 while (itr->clk_freq_out != 0) {
197 if (itr->clk_freq_out == gi2c->clk_freq_out) {
198 gi2c->clk_fld = itr;
199 return 0;
200 }
201 itr++;
202 }
203 return -EINVAL;
204}
205
/* Program the serial clock divider and SCL high/low/cycle counters */
static void qcom_geni_i2c_conf(struct geni_i2c_dev *gi2c)
{
	const struct geni_i2c_clk_fld *itr = gi2c->clk_fld;
	u32 val;

	writel_relaxed(0, gi2c->se.base + SE_GENI_CLK_SEL);

	/* Enable the serial clock with the divider from the timing table */
	val = (itr->clk_div << CLK_DIV_SHFT) | SER_CLK_EN;
	writel_relaxed(val, gi2c->se.base + GENI_SER_M_CLK_CFG);

	/* Pack the high/low/cycle counts into the SCL counters register */
	val = itr->t_high_cnt << HIGH_COUNTER_SHFT;
	val |= itr->t_low_cnt << LOW_COUNTER_SHFT;
	val |= itr->t_cycle_cnt;
	writel_relaxed(val, gi2c->se.base + SE_I2C_SCL_COUNTERS);
}
221
/*
 * Dump controller state to aid debugging of unexpected errors: the
 * current command, M IRQ status, GENI status/IOS registers, and either
 * the DMA or the FIFO status depending on the active transfer mode.
 */
static void geni_i2c_err_misc(struct geni_i2c_dev *gi2c)
{
	u32 m_cmd = readl_relaxed(gi2c->se.base + SE_GENI_M_CMD0);
	u32 m_stat = readl_relaxed(gi2c->se.base + SE_GENI_M_IRQ_STATUS);
	u32 geni_s = readl_relaxed(gi2c->se.base + SE_GENI_STATUS);
	u32 geni_ios = readl_relaxed(gi2c->se.base + SE_GENI_IOS);
	u32 dma = readl_relaxed(gi2c->se.base + SE_GENI_DMA_MODE_EN);
	u32 rx_st, tx_st;

	if (dma) {
		rx_st = readl_relaxed(gi2c->se.base + SE_DMA_RX_IRQ_STAT);
		tx_st = readl_relaxed(gi2c->se.base + SE_DMA_TX_IRQ_STAT);
	} else {
		rx_st = readl_relaxed(gi2c->se.base + SE_GENI_RX_FIFO_STATUS);
		tx_st = readl_relaxed(gi2c->se.base + SE_GENI_TX_FIFO_STATUS);
	}
	dev_dbg(gi2c->se.dev, "DMA:%d tx_stat:0x%x, rx_stat:0x%x, irq-stat:0x%x\n",
		dma, tx_st, rx_st, m_stat);
	dev_dbg(gi2c->se.dev, "m_cmd:0x%x, geni_status:0x%x, geni_ios:0x%x\n",
		m_cmd, geni_s, geni_ios);
}
243
/*
 * Record an error for the in-flight transfer.  Only the first error is
 * latched into gi2c->err.  NACK and timeout are logged at debug level
 * since they are common on real buses; anything else gets dev_err()
 * plus a register dump via geni_i2c_err_misc().
 */
static void geni_i2c_err(struct geni_i2c_dev *gi2c, int err)
{
	if (!gi2c->err)
		gi2c->err = gi2c_log[err].err;
	if (gi2c->cur)
		dev_dbg(gi2c->se.dev, "len:%d, slv-addr:0x%x, RD/WR:%d\n",
			gi2c->cur->len, gi2c->cur->addr, gi2c->cur->flags);

	switch (err) {
	case GENI_ABORT_DONE:
		/* Lets the wait loop in geni_i2c_abort_xfer() terminate */
		gi2c->abort_done = true;
		break;
	case NACK:
	case GENI_TIMEOUT:
		dev_dbg(gi2c->se.dev, "%s\n", gi2c_log[err].msg);
		break;
	default:
		dev_err(gi2c->se.dev, "%s\n", gi2c_log[err].msg);
		geni_i2c_err_misc(gi2c);
		break;
	}
}
266
/*
 * Main interrupt handler, servicing both FIFO-mode and SE-DMA
 * transfers.  Latches errors, drains/refills the RX/TX FIFOs for the
 * in-flight message, clears the interrupt status, and completes
 * gi2c->done when a done/abort/reset condition is reported.
 */
static irqreturn_t geni_i2c_irq(int irq, void *dev)
{
	struct geni_i2c_dev *gi2c = dev;
	void __iomem *base = gi2c->se.base;
	int j, p;
	u32 m_stat;
	u32 rx_st;
	u32 dm_tx_st;
	u32 dm_rx_st;
	u32 dma;
	u32 val;
	struct i2c_msg *cur;

	spin_lock(&gi2c->lock);
	m_stat = readl_relaxed(base + SE_GENI_M_IRQ_STATUS);
	rx_st = readl_relaxed(base + SE_GENI_RX_FIFO_STATUS);
	dm_tx_st = readl_relaxed(base + SE_DMA_TX_IRQ_STAT);
	dm_rx_st = readl_relaxed(base + SE_DMA_RX_IRQ_STAT);
	dma = readl_relaxed(base + SE_GENI_DMA_MODE_EN);
	cur = gi2c->cur;

	/* Error path: no current message, command failure/abort, or DMA error */
	if (!cur ||
	    m_stat & (M_CMD_FAILURE_EN | M_CMD_ABORT_EN) ||
	    dm_rx_st & (DM_I2C_CB_ERR)) {
		/* Decode every asserted error bit into its error code */
		if (m_stat & M_GP_IRQ_1_EN)
			geni_i2c_err(gi2c, NACK);
		if (m_stat & M_GP_IRQ_3_EN)
			geni_i2c_err(gi2c, BUS_PROTO);
		if (m_stat & M_GP_IRQ_4_EN)
			geni_i2c_err(gi2c, ARB_LOST);
		if (m_stat & M_CMD_OVERRUN_EN)
			geni_i2c_err(gi2c, GENI_OVERRUN);
		if (m_stat & M_ILLEGAL_CMD_EN)
			geni_i2c_err(gi2c, GENI_ILLEGAL_CMD);
		if (m_stat & M_CMD_ABORT_EN)
			geni_i2c_err(gi2c, GENI_ABORT_DONE);
		if (m_stat & M_GP_IRQ_0_EN)
			geni_i2c_err(gi2c, GP_IRQ0);

		/* Disable the TX Watermark interrupt to stop TX */
		if (!dma)
			writel_relaxed(0, base + SE_GENI_TX_WATERMARK_REG);
	} else if (dma) {
		/* SE-DMA mode: the DMA engine moves the data, nothing per-word */
		dev_dbg(gi2c->se.dev, "i2c dma tx:0x%x, dma rx:0x%x\n",
			dm_tx_st, dm_rx_st);
	} else if (cur->flags & I2C_M_RD &&
		   m_stat & (M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN)) {
		u32 rxcnt = rx_st & RX_FIFO_WC_MSK;

		/* Drain up to rxcnt FIFO words; 4 bytes packed LSB-first per word */
		for (j = 0; j < rxcnt; j++) {
			p = 0;
			val = readl_relaxed(base + SE_GENI_RX_FIFOn);
			while (gi2c->cur_rd < cur->len && p < sizeof(val)) {
				cur->buf[gi2c->cur_rd++] = val & 0xff;
				val >>= 8;
				p++;
			}
			if (gi2c->cur_rd == cur->len)
				break;
		}
	} else if (!(cur->flags & I2C_M_RD) &&
		   m_stat & M_TX_FIFO_WATERMARK_EN) {
		/* Refill the TX FIFO; 4 bytes packed LSB-first per word */
		for (j = 0; j < gi2c->tx_wm; j++) {
			u32 temp;

			val = 0;
			p = 0;
			while (gi2c->cur_wr < cur->len && p < sizeof(val)) {
				temp = cur->buf[gi2c->cur_wr++];
				val |= temp << (p * 8);
				p++;
			}
			writel_relaxed(val, base + SE_GENI_TX_FIFOn);
			/* TX Complete, Disable the TX Watermark interrupt */
			if (gi2c->cur_wr == cur->len) {
				writel_relaxed(0, base + SE_GENI_TX_WATERMARK_REG);
				break;
			}
		}
	}

	if (m_stat)
		writel_relaxed(m_stat, base + SE_GENI_M_IRQ_CLEAR);

	if (dma && dm_tx_st)
		writel_relaxed(dm_tx_st, base + SE_DMA_TX_IRQ_CLR);
	if (dma && dm_rx_st)
		writel_relaxed(dm_rx_st, base + SE_DMA_RX_IRQ_CLR);

	/* if this is err with done-bit not set, handle that through timeout. */
	if (m_stat & M_CMD_DONE_EN || m_stat & M_CMD_ABORT_EN ||
	    dm_tx_st & TX_DMA_DONE || dm_tx_st & TX_RESET_DONE ||
	    dm_rx_st & RX_DMA_DONE || dm_rx_st & RX_RESET_DONE)
		complete(&gi2c->done);

	spin_unlock(&gi2c->lock);

	return IRQ_HANDLED;
}
366
/*
 * Abort the current master command after a transfer timeout.  The
 * abort is issued under the lock so the IRQ handler cannot race with
 * the state update; we then wait (re-arming the completion each time)
 * until the handler reports GENI_ABORT_DONE via gi2c->abort_done or
 * ABORT_TIMEOUT expires.
 */
static void geni_i2c_abort_xfer(struct geni_i2c_dev *gi2c)
{
	unsigned long time_left = ABORT_TIMEOUT;
	unsigned long flags;

	spin_lock_irqsave(&gi2c->lock, flags);
	geni_i2c_err(gi2c, GENI_TIMEOUT);
	gi2c->cur = NULL;
	gi2c->abort_done = false;
	geni_se_abort_m_cmd(&gi2c->se);
	spin_unlock_irqrestore(&gi2c->lock, flags);

	do {
		time_left = wait_for_completion_timeout(&gi2c->done, time_left);
	} while (!gi2c->abort_done && time_left);

	if (!time_left)
		dev_err(gi2c->se.dev, "Timeout abort_m_cmd\n");
}
386
/* Reset the SE-DMA RX state machine and wait until RX_RESET_DONE is set */
static void geni_i2c_rx_fsm_rst(struct geni_i2c_dev *gi2c)
{
	u32 val;
	unsigned long time_left = RST_TIMEOUT;

	writel_relaxed(1, gi2c->se.base + SE_DMA_RX_FSM_RST);
	do {
		/* The IRQ handler completes gi2c->done on RX_RESET_DONE */
		time_left = wait_for_completion_timeout(&gi2c->done, time_left);
		val = readl_relaxed(gi2c->se.base + SE_DMA_RX_IRQ_STAT);
	} while (!(val & RX_RESET_DONE) && time_left);

	if (!(val & RX_RESET_DONE))
		dev_err(gi2c->se.dev, "Timeout resetting RX_FSM\n");
}
401
/* Reset the SE-DMA TX state machine and wait until TX_RESET_DONE is set */
static void geni_i2c_tx_fsm_rst(struct geni_i2c_dev *gi2c)
{
	u32 val;
	unsigned long time_left = RST_TIMEOUT;

	writel_relaxed(1, gi2c->se.base + SE_DMA_TX_FSM_RST);
	do {
		/* The IRQ handler completes gi2c->done on TX_RESET_DONE */
		time_left = wait_for_completion_timeout(&gi2c->done, time_left);
		val = readl_relaxed(gi2c->se.base + SE_DMA_TX_IRQ_STAT);
	} while (!(val & TX_RESET_DONE) && time_left);

	if (!(val & TX_RESET_DONE))
		dev_err(gi2c->se.dev, "Timeout resetting TX_FSM\n");
}
416
417static void geni_i2c_rx_msg_cleanup(struct geni_i2c_dev *gi2c,
418 struct i2c_msg *cur)
419{
420 gi2c->cur_rd = 0;
421 if (gi2c->dma_buf) {
422 if (gi2c->err)
423 geni_i2c_rx_fsm_rst(gi2c);
424 geni_se_rx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
425 i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
426 }
427}
428
429static void geni_i2c_tx_msg_cleanup(struct geni_i2c_dev *gi2c,
430 struct i2c_msg *cur)
431{
432 gi2c->cur_wr = 0;
433 if (gi2c->dma_buf) {
434 if (gi2c->err)
435 geni_i2c_tx_fsm_rst(gi2c);
436 geni_se_tx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
437 i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
438 }
439}
440
/*
 * Issue a single I2C read command.  Uses SE DMA when a DMA-safe bounce
 * buffer is available (messages of at least 32 bytes) and DMA prep
 * succeeds; otherwise runs in FIFO mode.  Waits for completion and
 * aborts the command on timeout.
 *
 * Returns 0 on success or the negative error latched in gi2c->err.
 */
static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
			       u32 m_param)
{
	dma_addr_t rx_dma = 0;
	unsigned long time_left;
	void *dma_buf;
	struct geni_se *se = &gi2c->se;
	size_t len = msg->len;
	struct i2c_msg *cur;

	dma_buf = gi2c->no_dma ? NULL : i2c_get_dma_safe_msg_buf(msg, 32);
	if (dma_buf)
		geni_se_select_mode(se, GENI_SE_DMA);
	else
		geni_se_select_mode(se, GENI_SE_FIFO);

	writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN);
	geni_se_setup_m_cmd(se, I2C_READ, m_param);

	/* DMA prep failure: fall back to FIFO mode for this message */
	if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) {
		geni_se_select_mode(se, GENI_SE_FIFO);
		i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
		dma_buf = NULL;
	} else {
		gi2c->xfer_len = len;
		gi2c->dma_addr = rx_dma;
		gi2c->dma_buf = dma_buf;
	}

	/* gi2c->cur is cleared by the abort path; grab it before waiting */
	cur = gi2c->cur;
	time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
	if (!time_left)
		geni_i2c_abort_xfer(gi2c);

	geni_i2c_rx_msg_cleanup(gi2c, cur);

	return gi2c->err;
}
479
/*
 * Issue a single I2C write command.  Uses SE DMA when a DMA-safe bounce
 * buffer is available (messages of at least 32 bytes) and DMA prep
 * succeeds; otherwise runs in FIFO mode, arming the TX watermark
 * interrupt so the IRQ handler fills the FIFO.  Waits for completion
 * and aborts the command on timeout.
 *
 * Returns 0 on success or the negative error latched in gi2c->err.
 */
static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
			       u32 m_param)
{
	dma_addr_t tx_dma = 0;
	unsigned long time_left;
	void *dma_buf;
	struct geni_se *se = &gi2c->se;
	size_t len = msg->len;
	struct i2c_msg *cur;

	dma_buf = gi2c->no_dma ? NULL : i2c_get_dma_safe_msg_buf(msg, 32);
	if (dma_buf)
		geni_se_select_mode(se, GENI_SE_DMA);
	else
		geni_se_select_mode(se, GENI_SE_FIFO);

	writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN);
	geni_se_setup_m_cmd(se, I2C_WRITE, m_param);

	/* DMA prep failure: fall back to FIFO mode for this message */
	if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) {
		geni_se_select_mode(se, GENI_SE_FIFO);
		i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
		dma_buf = NULL;
	} else {
		gi2c->xfer_len = len;
		gi2c->dma_addr = tx_dma;
		gi2c->dma_buf = dma_buf;
	}

	if (!dma_buf) /* Get FIFO IRQ */
		writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);

	/* gi2c->cur is cleared by the abort path; grab it before waiting */
	cur = gi2c->cur;
	time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
	if (!time_left)
		geni_i2c_abort_xfer(gi2c);

	geni_i2c_tx_msg_cleanup(gi2c, cur);

	return gi2c->err;
}
521
522static void i2c_gpi_cb_result(void *cb, const struct dmaengine_result *result)
523{
524 struct geni_i2c_dev *gi2c = cb;
525 struct geni_i2c_gpi_multi_desc_xfer *tx_multi_xfer;
526
527 if (result->result != DMA_TRANS_NOERROR) {
528 dev_err(gi2c->se.dev, "DMA txn failed:%d\n", result->result);
529 gi2c->err = -EIO;
530 } else if (result->residue) {
531 dev_dbg(gi2c->se.dev, "DMA xfer has pending: %d\n", result->residue);
532 }
533
534 if (gi2c->is_tx_multi_desc_xfer) {
535 tx_multi_xfer = &gi2c->i2c_multi_desc_config;
536 tx_multi_xfer->irq_cnt++;
537 }
538
539 complete(&gi2c->done);
540}
541
/*
 * Undo the DMA mapping(s) of a single message and release the bounce
 * buffer(s); buffer contents are copied back to @msg only when the
 * transfer succeeded (!gi2c->err).
 */
static void geni_i2c_gpi_unmap(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
			       void *tx_buf, dma_addr_t tx_addr,
			       void *rx_buf, dma_addr_t rx_addr)
{
	if (tx_buf) {
		dma_unmap_single(gi2c->se.dev->parent, tx_addr, msg->len, DMA_TO_DEVICE);
		i2c_put_dma_safe_msg_buf(tx_buf, msg, !gi2c->err);
	}

	if (rx_buf) {
		dma_unmap_single(gi2c->se.dev->parent, rx_addr, msg->len, DMA_FROM_DEVICE);
		i2c_put_dma_safe_msg_buf(rx_buf, msg, !gi2c->err);
	}
}
556
/**
 * geni_i2c_gpi_multi_desc_unmap() - Unmaps DMA buffers post multi message TX transfers
 * @gi2c: I2C dev handle
 * @msgs: Array of I2C messages
 * @peripheral: Pointer to gpi_i2c_config
 */
static void geni_i2c_gpi_multi_desc_unmap(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[],
					  struct gpi_i2c_config *peripheral)
{
	u32 msg_xfer_cnt, wr_idx = 0;
	struct geni_i2c_gpi_multi_desc_xfer *tx_multi_xfer = &gi2c->i2c_multi_desc_config;

	/*
	 * On error unmap everything that was submitted (msg_idx_cnt),
	 * otherwise only what has completed so far (irq_cnt).
	 */
	msg_xfer_cnt = gi2c->err ? tx_multi_xfer->msg_idx_cnt : tx_multi_xfer->irq_cnt;

	/* Unmap the processed DMA buffers based on the received interrupt count */
	for (; tx_multi_xfer->unmap_msg_cnt < msg_xfer_cnt; tx_multi_xfer->unmap_msg_cnt++) {
		wr_idx = tx_multi_xfer->unmap_msg_cnt;
		geni_i2c_gpi_unmap(gi2c, &msgs[wr_idx],
				   tx_multi_xfer->dma_buf[wr_idx],
				   tx_multi_xfer->dma_addr[wr_idx],
				   NULL, 0);

		/* Last message unmapped: the per-batch arrays can be freed */
		if (tx_multi_xfer->unmap_msg_cnt == gi2c->num_msgs - 1) {
			kfree(tx_multi_xfer->dma_buf);
			kfree(tx_multi_xfer->dma_addr);
			break;
		}
	}
}
586
/**
 * geni_i2c_gpi_multi_xfer_timeout_handler() - Handles multi message transfer timeout
 * @dev: Pointer to the corresponding dev node
 * @multi_xfer: Pointer to the geni_i2c_gpi_multi_desc_xfer
 * @transfer_timeout_msecs: Timeout value in milliseconds
 * @transfer_comp: Completion object of the transfer
 *
 * This function waits for the completion of each processed transfer messages
 * based on the interrupts generated upon transfer completion.
 *
 * NOTE(review): the caller passes XFER_TIMEOUT (defined in jiffies, HZ)
 * for @transfer_timeout_msecs, and the value is fed straight into
 * wait_for_completion_timeout(), which takes jiffies — confirm the
 * parameter's real units and rename accordingly.
 *
 * Return: On success returns 0, -ETIMEDOUT on timeout.
 */
static int geni_i2c_gpi_multi_xfer_timeout_handler(struct device *dev,
						   struct geni_i2c_gpi_multi_desc_xfer *multi_xfer,
						   u32 transfer_timeout_msecs,
						   struct completion *transfer_comp)
{
	int i;
	u32 time_left;

	for (i = 0; i < multi_xfer->msg_idx_cnt - 1; i++) {
		reinit_completion(transfer_comp);

		/* Only wait while completions still lag behind submissions */
		if (multi_xfer->msg_idx_cnt != multi_xfer->irq_cnt) {
			time_left = wait_for_completion_timeout(transfer_comp,
								transfer_timeout_msecs);
			if (!time_left) {
				dev_err(dev, "%s: Transfer timeout\n", __func__);
				return -ETIMEDOUT;
			}
		}
	}
	return 0;
}
621
/*
 * Map, prepare and submit one GPI DMA descriptor for the message at
 * i2c_multi_desc_config.msg_idx_cnt.  Called once with op == I2C_WRITE
 * for every message, and additionally with op == I2C_READ for read
 * messages.  In multi-descriptor mode the buffer/address are stashed in
 * the per-batch arrays and completions may be reaped here; otherwise
 * they are returned through @buf / @dma_addr_p for the caller to unmap.
 *
 * Returns 0 on success or a negative error code (also latched in
 * gi2c->err).
 */
static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[],
			struct dma_slave_config *config, dma_addr_t *dma_addr_p,
			void **buf, unsigned int op, struct dma_chan *dma_chan)
{
	struct gpi_i2c_config *peripheral;
	unsigned int flags;
	void *dma_buf = NULL;
	dma_addr_t addr = 0;
	enum dma_data_direction map_dirn;
	enum dma_transfer_direction dma_dirn;
	struct dma_async_tx_descriptor *desc;
	int ret;
	struct geni_i2c_gpi_multi_desc_xfer *gi2c_gpi_xfer;
	dma_cookie_t cookie;
	u32 msg_idx;

	peripheral = config->peripheral_config;
	gi2c_gpi_xfer = &gi2c->i2c_multi_desc_config;
	msg_idx = gi2c_gpi_xfer->msg_idx_cnt;

	/*
	 * Skip TX DMA mapping for a read message (I2C_M_RD) to avoid
	 * programming an extra TX DMA TRE that would cause an unintended
	 * write cycle on the I2C bus before the actual read operation.
	 */
	if (op == I2C_WRITE && msgs[msg_idx].flags & I2C_M_RD) {
		peripheral->multi_msg = true;
		goto skip_tx_dma_map;
	}

	dma_buf = i2c_get_dma_safe_msg_buf(&msgs[msg_idx], 1);
	if (!dma_buf) {
		ret = -ENOMEM;
		goto out;
	}

	if (op == I2C_WRITE)
		map_dirn = DMA_TO_DEVICE;
	else
		map_dirn = DMA_FROM_DEVICE;

	addr = dma_map_single(gi2c->se.dev->parent, dma_buf,
			      msgs[msg_idx].len, map_dirn);
	if (dma_mapping_error(gi2c->se.dev->parent, addr)) {
		i2c_put_dma_safe_msg_buf(dma_buf, &msgs[msg_idx], false);
		ret = -ENOMEM;
		goto out;
	}

skip_tx_dma_map:
	if (gi2c->is_tx_multi_desc_xfer) {
		flags = DMA_CTRL_ACK;

		/* BEI bit to be cleared for last TRE */
		if (msg_idx == gi2c->num_msgs - 1)
			flags |= DMA_PREP_INTERRUPT;
	} else {
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	}

	/* set the length as message for rx txn */
	peripheral->rx_len = msgs[msg_idx].len;
	peripheral->op = op;

	ret = dmaengine_slave_config(dma_chan, config);
	if (ret) {
		dev_err(gi2c->se.dev, "dma config error: %d for op:%d\n", ret, op);
		goto err_config;
	}

	peripheral->set_config = 0;
	peripheral->multi_msg = true;

	if (op == I2C_WRITE)
		dma_dirn = DMA_MEM_TO_DEV;
	else
		dma_dirn = DMA_DEV_TO_MEM;

	desc = dmaengine_prep_slave_single(dma_chan, addr, msgs[msg_idx].len,
					   dma_dirn, flags);
	if (!desc && !(flags & DMA_PREP_INTERRUPT)) {
		/* Retry with interrupt if not enough TREs */
		flags |= DMA_PREP_INTERRUPT;
		desc = dmaengine_prep_slave_single(dma_chan, addr, msgs[msg_idx].len,
						   dma_dirn, flags);
	}

	if (!desc) {
		dev_err(gi2c->se.dev, "prep_slave_sg failed\n");
		ret = -EIO;
		goto err_config;
	}

	desc->callback_result = i2c_gpi_cb_result;
	desc->callback_param = gi2c;

	/* A read message advances the index on its I2C_READ pass only */
	if (!((msgs[msg_idx].flags & I2C_M_RD) && op == I2C_WRITE))
		gi2c_gpi_xfer->msg_idx_cnt++;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie)) {
		dev_err(gi2c->se.dev,
			"%s: dmaengine_submit failed (%d)\n", __func__, cookie);
		ret = -EINVAL;
		goto err_config;
	}

	if (gi2c->is_tx_multi_desc_xfer) {
		/* Remember the mapping so it can be unmapped after completion */
		gi2c_gpi_xfer->dma_buf[msg_idx] = dma_buf;
		gi2c_gpi_xfer->dma_addr[msg_idx] = addr;

		dma_async_issue_pending(gi2c->tx_c);

		/* Reap completions at the end of the batch or on an IRQ TRE */
		if ((msg_idx == (gi2c->num_msgs - 1)) || flags & DMA_PREP_INTERRUPT) {
			ret = geni_i2c_gpi_multi_xfer_timeout_handler(gi2c->se.dev, gi2c_gpi_xfer,
								      XFER_TIMEOUT, &gi2c->done);
			if (ret) {
				dev_err(gi2c->se.dev,
					"I2C multi write msg transfer timeout: %d\n",
					ret);
				gi2c->err = ret;
				return ret;
			}
		}
	} else {
		/* Non multi descriptor message transfer */
		*buf = dma_buf;
		*dma_addr_p = addr;
	}
	return 0;

err_config:
	/* Avoid DMA unmap as the write operation skipped DMA mapping */
	if (dma_buf) {
		dma_unmap_single(gi2c->se.dev->parent, addr,
				 msgs[msg_idx].len, map_dirn);
		i2c_put_dma_safe_msg_buf(dma_buf, &msgs[msg_idx], false);
	}

out:
	gi2c->err = ret;
	return ret;
}
765
766static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], int num)
767{
768 struct dma_slave_config config = {};
769 struct gpi_i2c_config peripheral = {};
770 int i, ret = 0;
771 unsigned long time_left;
772 dma_addr_t tx_addr, rx_addr;
773 void *tx_buf = NULL, *rx_buf = NULL;
774 struct geni_i2c_gpi_multi_desc_xfer *tx_multi_xfer;
775 const struct geni_i2c_clk_fld *itr = gi2c->clk_fld;
776
777 config.peripheral_config = &peripheral;
778 config.peripheral_size = sizeof(peripheral);
779
780 peripheral.pack_enable = I2C_PACK_TX | I2C_PACK_RX;
781 peripheral.cycle_count = itr->t_cycle_cnt;
782 peripheral.high_count = itr->t_high_cnt;
783 peripheral.low_count = itr->t_low_cnt;
784 peripheral.clk_div = itr->clk_div;
785 peripheral.set_config = 1;
786 peripheral.multi_msg = false;
787
788 gi2c->num_msgs = num;
789 gi2c->is_tx_multi_desc_xfer = false;
790
791 tx_multi_xfer = &gi2c->i2c_multi_desc_config;
792 memset(tx_multi_xfer, 0, sizeof(struct geni_i2c_gpi_multi_desc_xfer));
793
794 /*
795 * If number of write messages are two and higher then
796 * configure hardware for multi descriptor transfers with BEI.
797 */
798 if (num >= QCOM_I2C_MIN_NUM_OF_MSGS_MULTI_DESC) {
799 gi2c->is_tx_multi_desc_xfer = true;
800 for (i = 0; i < num; i++) {
801 if (msgs[i].flags & I2C_M_RD) {
802 /*
803 * Multi descriptor transfer with BEI
804 * support is enabled for write transfers.
805 * TODO: Add BEI optimization support for
806 * read transfers later.
807 */
808 gi2c->is_tx_multi_desc_xfer = false;
809 break;
810 }
811 }
812 }
813
814 if (gi2c->is_tx_multi_desc_xfer) {
815 tx_multi_xfer->dma_buf = kcalloc(num, sizeof(void *), GFP_KERNEL);
816 tx_multi_xfer->dma_addr = kzalloc_objs(dma_addr_t, num);
817 if (!tx_multi_xfer->dma_buf || !tx_multi_xfer->dma_addr) {
818 ret = -ENOMEM;
819 goto err;
820 }
821 }
822
823 for (i = 0; i < num; i++) {
824 gi2c->cur = &msgs[i];
825 gi2c->err = 0;
826 dev_dbg(gi2c->se.dev, "msg[%d].len:%d\n", i, gi2c->cur->len);
827
828 peripheral.stretch = 0;
829 if (i < num - 1)
830 peripheral.stretch = 1;
831
832 peripheral.addr = msgs[i].addr;
833 if (i > 0 && (!(msgs[i].flags & I2C_M_RD)))
834 peripheral.multi_msg = false;
835
836 ret = geni_i2c_gpi(gi2c, msgs, &config,
837 &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c);
838 if (ret)
839 goto err;
840
841 if (msgs[i].flags & I2C_M_RD) {
842 ret = geni_i2c_gpi(gi2c, msgs, &config,
843 &rx_addr, &rx_buf, I2C_READ, gi2c->rx_c);
844 if (ret)
845 goto err;
846
847 dma_async_issue_pending(gi2c->rx_c);
848 }
849
850 if (!gi2c->is_tx_multi_desc_xfer) {
851 dma_async_issue_pending(gi2c->tx_c);
852 time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
853 if (!time_left) {
854 dev_err(gi2c->se.dev, "%s:I2C timeout\n", __func__);
855 gi2c->err = -ETIMEDOUT;
856 }
857 }
858
859 if (gi2c->err) {
860 ret = gi2c->err;
861 goto err;
862 }
863
864 if (!gi2c->is_tx_multi_desc_xfer)
865 geni_i2c_gpi_unmap(gi2c, &msgs[i], tx_buf, tx_addr, rx_buf, rx_addr);
866 else if (tx_multi_xfer->unmap_msg_cnt != tx_multi_xfer->irq_cnt)
867 geni_i2c_gpi_multi_desc_unmap(gi2c, msgs, &peripheral);
868 }
869
870 return num;
871
872err:
873 dev_err(gi2c->se.dev, "GPI transfer failed: %d\n", ret);
874 dmaengine_terminate_sync(gi2c->rx_c);
875 dmaengine_terminate_sync(gi2c->tx_c);
876 if (gi2c->is_tx_multi_desc_xfer)
877 geni_i2c_gpi_multi_desc_unmap(gi2c, msgs, &peripheral);
878 else
879 geni_i2c_gpi_unmap(gi2c, &msgs[i], tx_buf, tx_addr, rx_buf, rx_addr);
880
881 return ret;
882}
883
884static int geni_i2c_fifo_xfer(struct geni_i2c_dev *gi2c,
885 struct i2c_msg msgs[], int num)
886{
887 int i, ret = 0;
888
889 for (i = 0; i < num; i++) {
890 u32 m_param = i < (num - 1) ? STOP_STRETCH : 0;
891
892 m_param |= ((msgs[i].addr << SLV_ADDR_SHFT) & SLV_ADDR_MSK);
893
894 gi2c->cur = &msgs[i];
895 if (msgs[i].flags & I2C_M_RD)
896 ret = geni_i2c_rx_one_msg(gi2c, &msgs[i], m_param);
897 else
898 ret = geni_i2c_tx_one_msg(gi2c, &msgs[i], m_param);
899
900 if (ret)
901 return ret;
902 }
903
904 return num;
905}
906
/*
 * i2c_algorithm .xfer hook.  Resumes the serial engine, programs SCL
 * timing, and dispatches the batch to the GPI DMA or FIFO/SE-DMA path.
 * Returns the number of messages transferred or a negative error.
 */
static int geni_i2c_xfer(struct i2c_adapter *adap,
			 struct i2c_msg msgs[],
			 int num)
{
	struct geni_i2c_dev *gi2c = i2c_get_adapdata(adap);
	int ret;

	gi2c->err = 0;
	reinit_completion(&gi2c->done);
	ret = pm_runtime_get_sync(gi2c->se.dev);
	if (ret < 0) {
		dev_err(gi2c->se.dev, "error turning SE resources:%d\n", ret);
		pm_runtime_put_noidle(gi2c->se.dev);
		/* Set device in suspended since resume failed */
		pm_runtime_set_suspended(gi2c->se.dev);
		return ret;
	}

	qcom_geni_i2c_conf(gi2c);

	if (gi2c->gpi_mode)
		ret = geni_i2c_gpi_xfer(gi2c, msgs, num);
	else
		ret = geni_i2c_fifo_xfer(gi2c, msgs, num);

	pm_runtime_put_autosuspend(gi2c->se.dev);
	gi2c->cur = NULL;
	gi2c->err = 0;
	return ret;
}
937
938static u32 geni_i2c_func(struct i2c_adapter *adap)
939{
940 return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
941}
942
/* I2C core callbacks for this adapter */
static const struct i2c_algorithm geni_i2c_algo = {
	.xfer = geni_i2c_xfer,
	.functionality = geni_i2c_func,
};
947
#ifdef CONFIG_ACPI
/* ACPI IDs for Qualcomm GENI I2C controllers */
static const struct acpi_device_id geni_i2c_acpi_match[] = {
	{ "QCOM0220"},
	{ "QCOM0411" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, geni_i2c_acpi_match);
#endif
956
/* Release the GPI DMA channels acquired by setup_gpi_dma(), if any */
static void release_gpi_dma(struct geni_i2c_dev *gi2c)
{
	if (gi2c->rx_c)
		dma_release_channel(gi2c->rx_c);

	if (gi2c->tx_c)
		dma_release_channel(gi2c->tx_c);
}
965
/*
 * Switch the serial engine to GPI DMA mode and request the "tx" and
 * "rx" DMA channels.  If the RX channel request fails, the already
 * acquired TX channel is released.  Returns 0 on success or a negative
 * error code.
 */
static int setup_gpi_dma(struct geni_i2c_dev *gi2c)
{
	int ret;

	geni_se_select_mode(&gi2c->se, GENI_GPI_DMA);
	gi2c->tx_c = dma_request_chan(gi2c->se.dev, "tx");
	if (IS_ERR(gi2c->tx_c)) {
		ret = dev_err_probe(gi2c->se.dev, PTR_ERR(gi2c->tx_c),
				    "Failed to get tx DMA ch\n");
		goto err_tx;
	}

	gi2c->rx_c = dma_request_chan(gi2c->se.dev, "rx");
	if (IS_ERR(gi2c->rx_c)) {
		ret = dev_err_probe(gi2c->se.dev, PTR_ERR(gi2c->rx_c),
				    "Failed to get rx DMA ch\n");
		goto err_rx;
	}

	dev_dbg(gi2c->se.dev, "Grabbed GPI dma channels\n");
	return 0;

err_rx:
	dma_release_channel(gi2c->tx_c);
err_tx:
	return ret;
}
993
994static int geni_i2c_probe(struct platform_device *pdev)
995{
996 struct geni_i2c_dev *gi2c;
997 u32 proto, tx_depth, fifo_disable;
998 int ret;
999 struct device *dev = &pdev->dev;
1000 const struct geni_i2c_desc *desc = NULL;
1001
1002 gi2c = devm_kzalloc(dev, sizeof(*gi2c), GFP_KERNEL);
1003 if (!gi2c)
1004 return -ENOMEM;
1005
1006 gi2c->se.dev = dev;
1007 gi2c->se.wrapper = dev_get_drvdata(dev->parent);
1008 gi2c->se.base = devm_platform_ioremap_resource(pdev, 0);
1009 if (IS_ERR(gi2c->se.base))
1010 return PTR_ERR(gi2c->se.base);
1011
1012 desc = device_get_match_data(&pdev->dev);
1013
1014 if (desc && desc->has_core_clk) {
1015 gi2c->core_clk = devm_clk_get(dev, "core");
1016 if (IS_ERR(gi2c->core_clk))
1017 return PTR_ERR(gi2c->core_clk);
1018 }
1019
1020 gi2c->se.clk = devm_clk_get(dev, "se");
1021 if (IS_ERR(gi2c->se.clk) && !has_acpi_companion(dev))
1022 return PTR_ERR(gi2c->se.clk);
1023
1024 ret = device_property_read_u32(dev, "clock-frequency",
1025 &gi2c->clk_freq_out);
1026 if (ret) {
1027 dev_info(dev, "Bus frequency not specified, default to 100kHz.\n");
1028 gi2c->clk_freq_out = I2C_MAX_STANDARD_MODE_FREQ;
1029 }
1030
1031 if (has_acpi_companion(dev))
1032 ACPI_COMPANION_SET(&gi2c->adap.dev, ACPI_COMPANION(dev));
1033
1034 gi2c->irq = platform_get_irq(pdev, 0);
1035 if (gi2c->irq < 0)
1036 return gi2c->irq;
1037
1038 ret = geni_i2c_clk_map_idx(gi2c);
1039 if (ret)
1040 return dev_err_probe(dev, ret, "Invalid clk frequency %d Hz\n",
1041 gi2c->clk_freq_out);
1042
1043 gi2c->adap.algo = &geni_i2c_algo;
1044 init_completion(&gi2c->done);
1045 spin_lock_init(&gi2c->lock);
1046 platform_set_drvdata(pdev, gi2c);
1047
1048 /* Keep interrupts disabled initially to allow for low-power modes */
1049 ret = devm_request_irq(dev, gi2c->irq, geni_i2c_irq, IRQF_NO_AUTOEN,
1050 dev_name(dev), gi2c);
1051 if (ret)
1052 return dev_err_probe(dev, ret,
1053 "Request_irq failed: %d\n", gi2c->irq);
1054
1055 i2c_set_adapdata(&gi2c->adap, gi2c);
1056 gi2c->adap.dev.parent = dev;
1057 gi2c->adap.dev.of_node = dev->of_node;
1058 strscpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
1059
1060 ret = geni_icc_get(&gi2c->se, desc ? desc->icc_ddr : "qup-memory");
1061 if (ret)
1062 return ret;
1063 /*
1064 * Set the bus quota for core and cpu to a reasonable value for
1065 * register access.
1066 * Set quota for DDR based on bus speed.
1067 */
1068 gi2c->se.icc_paths[GENI_TO_CORE].avg_bw = GENI_DEFAULT_BW;
1069 gi2c->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
1070 if (!desc || desc->icc_ddr)
1071 gi2c->se.icc_paths[GENI_TO_DDR].avg_bw = Bps_to_icc(gi2c->clk_freq_out);
1072
1073 ret = geni_icc_set_bw(&gi2c->se);
1074 if (ret)
1075 return ret;
1076
1077 ret = clk_prepare_enable(gi2c->core_clk);
1078 if (ret)
1079 return ret;
1080
1081 ret = geni_se_resources_on(&gi2c->se);
1082 if (ret) {
1083 dev_err_probe(dev, ret, "Error turning on resources\n");
1084 goto err_clk;
1085 }
1086 proto = geni_se_read_proto(&gi2c->se);
1087 if (proto == GENI_SE_INVALID_PROTO) {
1088 ret = geni_load_se_firmware(&gi2c->se, GENI_SE_I2C);
1089 if (ret) {
1090 dev_err_probe(dev, ret, "i2c firmware load failed ret: %d\n", ret);
1091 goto err_resources;
1092 }
1093 } else if (proto != GENI_SE_I2C) {
1094 ret = dev_err_probe(dev, -ENXIO, "Invalid proto %d\n", proto);
1095 goto err_resources;
1096 }
1097
1098 if (desc && desc->no_dma_support) {
1099 fifo_disable = false;
1100 gi2c->no_dma = true;
1101 } else {
1102 fifo_disable = readl_relaxed(gi2c->se.base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
1103 }
1104
1105 if (fifo_disable) {
1106 /* FIFO is disabled, so we can only use GPI DMA */
1107 gi2c->gpi_mode = true;
1108 ret = setup_gpi_dma(gi2c);
1109 if (ret)
1110 goto err_resources;
1111
1112 dev_dbg(dev, "Using GPI DMA mode for I2C\n");
1113 } else {
1114 gi2c->gpi_mode = false;
1115 tx_depth = geni_se_get_tx_fifo_depth(&gi2c->se);
1116
1117 /* I2C Master Hub Serial Elements doesn't have the HW_PARAM_0 register */
1118 if (!tx_depth && desc)
1119 tx_depth = desc->tx_fifo_depth;
1120
1121 if (!tx_depth) {
1122 ret = dev_err_probe(dev, -EINVAL,
1123 "Invalid TX FIFO depth\n");
1124 goto err_resources;
1125 }
1126
1127 gi2c->tx_wm = tx_depth - 1;
1128 geni_se_init(&gi2c->se, gi2c->tx_wm, tx_depth);
1129 geni_se_config_packing(&gi2c->se, BITS_PER_BYTE,
1130 PACKING_BYTES_PW, true, true, true);
1131
1132 dev_dbg(dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
1133 }
1134
1135 clk_disable_unprepare(gi2c->core_clk);
1136 ret = geni_se_resources_off(&gi2c->se);
1137 if (ret) {
1138 dev_err_probe(dev, ret, "Error turning off resources\n");
1139 goto err_dma;
1140 }
1141
1142 ret = geni_icc_disable(&gi2c->se);
1143 if (ret)
1144 goto err_dma;
1145
1146 gi2c->suspended = 1;
1147 pm_runtime_set_suspended(gi2c->se.dev);
1148 pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY);
1149 pm_runtime_use_autosuspend(gi2c->se.dev);
1150 pm_runtime_enable(gi2c->se.dev);
1151
1152 ret = i2c_add_adapter(&gi2c->adap);
1153 if (ret) {
1154 dev_err_probe(dev, ret, "Error adding i2c adapter\n");
1155 pm_runtime_disable(gi2c->se.dev);
1156 goto err_dma;
1157 }
1158
1159 dev_dbg(dev, "Geni-I2C adaptor successfully added\n");
1160
1161 return ret;
1162
1163err_resources:
1164 geni_se_resources_off(&gi2c->se);
1165err_clk:
1166 clk_disable_unprepare(gi2c->core_clk);
1167
1168 return ret;
1169
1170err_dma:
1171 release_gpi_dma(gi2c);
1172
1173 return ret;
1174}
1175
1176static void geni_i2c_remove(struct platform_device *pdev)
1177{
1178 struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
1179
1180 i2c_del_adapter(&gi2c->adap);
1181 release_gpi_dma(gi2c);
1182 pm_runtime_disable(gi2c->se.dev);
1183}
1184
1185static void geni_i2c_shutdown(struct platform_device *pdev)
1186{
1187 struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
1188
1189 /* Make client i2c transfers start failing */
1190 i2c_mark_adapter_suspended(&gi2c->adap);
1191}
1192
1193static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
1194{
1195 int ret;
1196 struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
1197
1198 disable_irq(gi2c->irq);
1199 ret = geni_se_resources_off(&gi2c->se);
1200 if (ret) {
1201 enable_irq(gi2c->irq);
1202 return ret;
1203
1204 } else {
1205 gi2c->suspended = 1;
1206 }
1207
1208 clk_disable_unprepare(gi2c->core_clk);
1209
1210 return geni_icc_disable(&gi2c->se);
1211}
1212
1213static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
1214{
1215 int ret;
1216 struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
1217
1218 ret = geni_icc_enable(&gi2c->se);
1219 if (ret)
1220 return ret;
1221
1222 ret = clk_prepare_enable(gi2c->core_clk);
1223 if (ret)
1224 goto out_icc_disable;
1225
1226 ret = geni_se_resources_on(&gi2c->se);
1227 if (ret)
1228 goto out_clk_disable;
1229
1230 enable_irq(gi2c->irq);
1231 gi2c->suspended = 0;
1232
1233 return 0;
1234
1235out_clk_disable:
1236 clk_disable_unprepare(gi2c->core_clk);
1237out_icc_disable:
1238 geni_icc_disable(&gi2c->se);
1239
1240 return ret;
1241}
1242
/*
 * System suspend (noirq phase): block further client transfers and make
 * sure the hardware ends up powered down even if runtime PM never got a
 * chance to suspend it.
 */
static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
{
	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);

	i2c_mark_adapter_suspended(&gi2c->adap);

	if (!gi2c->suspended) {
		/* Device is still runtime-active: power it down manually. */
		geni_i2c_runtime_suspend(dev);
		/*
		 * Resync the runtime PM core's view with the hardware state:
		 * the status can only be changed while runtime PM is disabled.
		 */
		pm_runtime_disable(dev);
		pm_runtime_set_suspended(dev);
		pm_runtime_enable(dev);
	}
	return 0;
}
1257
/*
 * System resume (noirq phase): allow client transfers again. Actual
 * power-up is deferred to the next runtime PM resume.
 */
static int __maybe_unused geni_i2c_resume_noirq(struct device *dev)
{
	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);

	i2c_mark_adapter_resumed(&gi2c->adap);
	return 0;
}
1265
/* System sleep uses the noirq phase; runtime PM has no idle callback. */
static const struct dev_pm_ops geni_i2c_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, geni_i2c_resume_noirq)
	SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume,
									NULL)
};
1271
/*
 * I2C Master Hub variant: has a separate core clock, no DDR interconnect
 * path, no DMA support, and a fixed TX FIFO depth (its serial engine
 * lacks the HW_PARAM_0 register the depth is normally read from).
 */
static const struct geni_i2c_desc i2c_master_hub = {
	.has_core_clk = true,
	.icc_ddr = NULL,
	.no_dma_support = true,
	.tx_fifo_depth = 16,
};
1278
static const struct of_device_id geni_i2c_dt_match[] = {
	{ .compatible = "qcom,geni-i2c" },
	/* Master Hub variant carries per-SoC quirks via .data. */
	{ .compatible = "qcom,geni-i2c-master-hub", .data = &i2c_master_hub },
	{}
};
MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
1285
static struct platform_driver geni_i2c_driver = {
	.probe  = geni_i2c_probe,
	.remove = geni_i2c_remove,
	.shutdown = geni_i2c_shutdown,
	.driver = {
		.name = "geni_i2c",
		.pm = &geni_i2c_pm_ops,
		.of_match_table = geni_i2c_dt_match,
		/* ACPI match table is compiled out when !CONFIG_ACPI. */
		.acpi_match_table = ACPI_PTR(geni_i2c_acpi_match),
	},
};

module_platform_driver(geni_i2c_driver);

MODULE_DESCRIPTION("I2C Controller Driver for GENI based QUP cores");
MODULE_LICENSE("GPL v2");