// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor (SCP)
 * and the Application Processors (AP). The Message Handling Unit (MHU)
 * provides a mechanism for inter-processor communication between the SCP's
 * Cortex M3 and the AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include "common.h"
#include "notify.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10, /* Protocol Error */
};

/* List of all SCMI devices active in the system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purposes */
static atomic_t transfer_last_id;

static DEFINE_IDR(scmi_requested_devices);
static DEFINE_MUTEX(scmi_requested_devices_mtx);

struct scmi_requested_dev {
	const struct scmi_device_id *id_table;
	struct list_head node;
};

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 * @max_msg: Maximum number of messages that can be pending
 * @free_xfers: A free list of xfers available for use. It is initialized with
 *		a number of xfers equal to the maximum allowed in-flight
 *		messages.
 * @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
 *		   currently in-flight messages.
 */
struct scmi_xfers_info {
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
	int max_msg;
	struct hlist_head free_xfers;
	DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
};

/**
 * struct scmi_protocol_instance - Describe an initialized protocol instance.
 * @handle: Reference to the SCMI handle associated to this protocol instance.
 * @proto: A reference to the protocol descriptor.
 * @gid: A reference for per-protocol devres management.
 * @users: A refcount to track effective users of this protocol.
 * @priv: Reference for optional protocol private data.
 * @ph: An embedded protocol handle that will be passed down to protocol
 *	initialization code to identify this instance.
 *
 * Each protocol is initialized independently once for each SCMI platform
 * instance in which it is defined by DT and implemented by the SCMI server
 * firmware.
 */
struct scmi_protocol_instance {
	const struct scmi_handle	*handle;
	const struct scmi_protocol	*proto;
	void				*gid;
	refcount_t			users;
	void				*priv;
	struct scmi_protocol_handle	ph;
};

#define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols: IDR for protocols' instance descriptors initialized for
 *	       this SCMI instance: populated on protocol's first attempted
 *	       usage.
 * @protocols_mtx: A mutex to protect protocols instances initialization.
 * @protocols_imp: List of protocols implemented, currently maximum of
 *		   MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @active_protocols: IDR storing device_nodes for protocols actually defined
 *		      in the DT and confirmed as implemented by fw.
 * @atomic_threshold: Optional system wide DT-configured threshold, expressed
 *		      in microseconds, for atomic operations.
 *		      Only SCMI synchronous commands reported by the platform
 *		      to have an execution latency less than or equal to the
 *		      threshold should be considered for atomic mode operation:
 *		      such a decision is finally left up to the SCMI drivers.
 * @notify_priv: Pointer to private data structure specific to notifications.
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct scmi_xfers_info rx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	struct idr protocols;
	/* Ensure mutual exclusive access to protocols instance array */
	struct mutex protocols_mtx;
	u8 *protocols_imp;
	struct idr active_protocols;
	unsigned int atomic_threshold;
	void *notify_priv;
	struct list_head node;
	int users;
};

#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)

static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	int err_idx = -errno;

	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
		return scmi_linux_errmap[err_idx];
	return -EIO;
}
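
/*
 * Editor's note - a minimal usage sketch (not part of the driver): protocol
 * code receives the raw SCMI status in xfer->hdr.status and converts it to a
 * Linux errno before handing it back to clients, exactly as do_xfer() does
 * further below:
 *
 *	ret = do_xfer(ph, xfer);
 *	if (!ret && xfer->hdr.status)
 *		ret = scmi_to_linux_errno(xfer->hdr.status);
 *
 * So a platform replying SCMI_ERR_SUPPORT (-1) surfaces as -EOPNOTSUPP.
 */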

void scmi_notification_instance_data_set(const struct scmi_handle *handle,
					 void *priv)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->notify_priv = priv;
	/* Ensure updated protocol private data is visible */
	smp_wmb();
}

void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Ensure protocols_private_data has been updated */
	smp_rmb();
	return info->notify_priv;
}

/**
 * scmi_xfer_token_set - Reserve and set new token for the xfer at hand
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 *
 * Pick the next unused monotonically increasing token and set it into
 * xfer->hdr.seq: picking a monotonically increasing value avoids immediate
 * reuse of freshly completed or timed-out xfers, thus mitigating the risk
 * of incorrect association of a late and expired xfer with a live in-flight
 * transaction, both happening to re-use the same token identifier.
 *
 * Since the platform is NOT required to answer our requests in order, we
 * should account for a few rare but possible scenarios:
 *
 * - exactly 'next_token' may NOT be available, so pick xfer_id >= next_token
 *   using find_next_zero_bit() starting from the candidate next_token bit
 *
 * - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) may be in use in-flight
 *   while there are plenty of free tokens at the start, so try a second pass
 *   using find_next_zero_bit() starting from 0.
 *
 * X = used in-flight
 *
 * Normal
 * ------
 *
 *		|- xfer_id picked
 *   -----------+----------------------------------------------------------
 *   | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
 *   ----------------------------------------------------------------------
 *		^
 *		|- next_token
 *
 * Out-of-order pending at start
 * -----------------------------
 *
 *	  |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
 *   ----------------------------------------------------------------------
 *    ^
 *    |- next_token
 *
 *
 * Out-of-order pending at end
 * ---------------------------
 *
 *	  |- xfer_id picked, last_token fixed
 *   -----+----------------------------------------------------------------
 *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
 *   ----------------------------------------------------------------------
 *								^
 *								|- next_token
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: 0 on Success or error
 */
static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
			       struct scmi_xfer *xfer)
{
	unsigned long xfer_id, next_token;

	/*
	 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
	 * using the pre-allocated transfer_id as a base.
	 * Note that the global transfer_id is shared across all message types
	 * so there could be holes in the allocated set of monotonic sequence
	 * numbers, but that is going to limit the effectiveness of the
	 * mitigation only in very rare limit conditions.
	 */
	next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));

	/* Pick the next available xfer_id >= next_token */
	xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
				     MSG_TOKEN_MAX, next_token);
	if (xfer_id == MSG_TOKEN_MAX) {
		/*
		 * After heavily out-of-order responses, there are no free
		 * tokens ahead, but only at start of xfer_alloc_table so
		 * try again from the beginning.
		 */
		xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
					     MSG_TOKEN_MAX, 0);
		/*
		 * Something is wrong if we got here since there can be a
		 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
		 * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
		 */
		if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
			return -ENOMEM;
	}

	/* Update +/- last_token accordingly if we skipped some hole */
	if (xfer_id != next_token)
		atomic_add((int)(xfer_id - next_token), &transfer_last_id);

	/* Set in-flight */
	set_bit(xfer_id, minfo->xfer_alloc_table);
	xfer->hdr.seq = (u16)xfer_id;

	return 0;
}
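
/*
 * Editor's note - a small worked example of the selection above (assumes a
 * toy MSG_TOKEN_MAX of 8 purely for illustration): with xfer_alloc_table =
 * 0b00001110 (tokens 1-3 in-flight) and a candidate next_token of 1, the
 * first find_next_zero_bit(..., 8, 1) skips the in-flight tokens and returns
 * 4; transfer_last_id is then advanced by (4 - 1) so that later candidates
 * keep increasing monotonically. Only if bits 1-7 were all set would the
 * second pass from bit 0 run.
 */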

/**
 * scmi_xfer_token_clear - Release the token
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: The xfer to act upon
 */
static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
					 struct scmi_xfer *xfer)
{
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @set_pending: If true a monotonic token is picked and the xfer is added to
 *		 the pending hash table.
 *
 * Helper function used by the various messaging functions exposed to clients
 * of this driver to allocate a message.
 *
 * Picks an xfer from the free list @free_xfers (if any available) and, if
 * required, sets a monotonically increasing token and stores the inflight xfer
 * into the @pending_xfers hashtable for later retrieval.
 *
 * The successfully initialized xfer is refcounted.
 *
 * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and
 *	    @free_xfers.
 *
 * Return: An initialized xfer if all went fine, else an ERR_PTR.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo,
				       bool set_pending)
{
	int ret;
	unsigned long flags;
	struct scmi_xfer *xfer;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (hlist_empty(&minfo->free_xfers)) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}

	/* grab an xfer from the free_list */
	xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
	hlist_del_init(&xfer->node);

	/*
	 * Allocate transfer_id early so that it can also be used as a base
	 * for monotonic sequence number generation if needed.
	 */
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	if (set_pending) {
		/* Pick and set monotonic token */
		ret = scmi_xfer_token_set(minfo, xfer);
		if (!ret) {
			hash_add(minfo->pending_xfers, &xfer->node,
				 xfer->hdr.seq);
			xfer->pending = true;
		} else {
			dev_err(handle->dev,
				"Failed to get monotonic token %d\n", ret);
			hlist_add_head(&xfer->node, &minfo->free_xfers);
			xfer = ERR_PTR(ret);
		}
	}

	if (!IS_ERR(xfer)) {
		refcount_set(&xfer->users, 1);
		atomic_set(&xfer->busy, SCMI_XFER_FREE);
	}
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	return xfer;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * After refcount check, possibly release an xfer, clearing the token slot,
 * removing xfer from @pending_xfers and putting it back into free_xfers.
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&minfo->xfer_lock, flags);
	if (refcount_dec_and_test(&xfer->users)) {
		if (xfer->pending) {
			scmi_xfer_token_clear(minfo, xfer);
			hash_del(&xfer->node);
			xfer->pending = false;
		}
		hlist_add_head(&xfer->node, &minfo->free_xfers);
	}
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

/**
 * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer_id: Token ID to lookup in @pending_xfers
 *
 * Refcounting is untouched.
 *
 * Context: Assumes to be called with @xfer_lock already acquired.
 *
 * Return: A valid xfer on Success or error otherwise
 */
static struct scmi_xfer *
scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
{
	struct scmi_xfer *xfer = NULL;

	if (test_bit(xfer_id, minfo->xfer_alloc_table))
		xfer = XFER_FIND(minfo->pending_xfers, xfer_id);

	return xfer ?: ERR_PTR(-EINVAL);
}

/**
 * scmi_msg_response_validate - Validate message type against state of related
 *	xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_type: Message type to check
 * @xfer: A reference to the xfer to validate against @msg_type
 *
 * This function checks if @msg_type is congruent with the current state of
 * a pending @xfer; if an asynchronous delayed response is received before the
 * related synchronous response (Out-of-Order Delayed Response), the missing
 * synchronous response is assumed to be OK and completed, carrying on with the
 * Delayed Response: this is done to address the case in which the underlying
 * SCMI transport can deliver such out-of-order responses.
 *
 * Context: Assumes to be called with xfer->lock already acquired.
 *
 * Return: 0 on Success, error otherwise
 */
static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
					     u8 msg_type,
					     struct scmi_xfer *xfer)
{
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
		dev_err(cinfo->dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer->hdr.seq);
		return -EINVAL;
	}

	switch (xfer->state) {
	case SCMI_XFER_SENT_OK:
		if (msg_type == MSG_TYPE_DELAYED_RESP) {
			/*
			 * Delayed Response expected but delivered earlier.
			 * Assume message RESPONSE was OK and skip state.
			 */
			xfer->hdr.status = SCMI_SUCCESS;
			xfer->state = SCMI_XFER_RESP_OK;
			complete(&xfer->done);
			dev_warn(cinfo->dev,
				 "Received valid OoO Delayed Response for %d\n",
				 xfer->hdr.seq);
		}
		break;
	case SCMI_XFER_RESP_OK:
		if (msg_type != MSG_TYPE_DELAYED_RESP)
			return -EINVAL;
		break;
	case SCMI_XFER_DRESP_OK:
		/* No further message expected once in SCMI_XFER_DRESP_OK */
		return -EINVAL;
	}

	return 0;
}
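
/*
 * Editor's note - the valid transitions enforced above, summarized (RESP =
 * MSG_TYPE_COMMAND response, DRESP = MSG_TYPE_DELAYED_RESP):
 *
 *	SENT_OK  --RESP-->   RESP_OK
 *	SENT_OK  --DRESP-->  RESP_OK synthesized here, then DRESP_OK (OoO case)
 *	RESP_OK  --DRESP-->  DRESP_OK
 *	RESP_OK  --RESP-->   -EINVAL (duplicate response)
 *	DRESP_OK --any-->    -EINVAL (transaction already concluded)
 */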

/**
 * scmi_xfer_state_update - Update xfer state
 *
 * @xfer: A reference to the xfer to update
 * @msg_type: Type of message being processed.
 *
 * Note that this message is assumed to have been already successfully
 * validated by @scmi_msg_response_validate(), so here we just update the
 * state.
 *
 * Context: Assumes to be called on an xfer exclusively acquired using the
 *	    busy flag.
 */
static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
{
	xfer->hdr.type = msg_type;

	/* Unknown command types were already discarded earlier */
	if (xfer->hdr.type == MSG_TYPE_COMMAND)
		xfer->state = SCMI_XFER_RESP_OK;
	else
		xfer->state = SCMI_XFER_DRESP_OK;
}

static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
{
	int ret;

	ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);

	return ret == SCMI_XFER_FREE;
}

/**
 * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
 *
 * @cinfo: A reference to the channel descriptor.
 * @msg_hdr: A message header to use as lookup key
 *
 * When a valid xfer is found for the sequence number embedded in the provided
 * msg_hdr, reference counting is properly updated and exclusive access to this
 * xfer is granted till released with @scmi_xfer_command_release.
 *
 * Return: A valid @xfer on Success or error otherwise.
 */
static inline struct scmi_xfer *
scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	int ret;
	unsigned long flags;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);

	/* Are we even expecting this? */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
	if (IS_ERR(xfer)) {
		dev_err(cinfo->dev,
			"Message for %d type %d is not expected!\n",
			xfer_id, msg_type);
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return xfer;
	}
	refcount_inc(&xfer->users);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	spin_lock_irqsave(&xfer->lock, flags);
	ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
	/*
	 * If a pending xfer was found which was also in a congruent state with
	 * the received message, acquire exclusive access to it setting the busy
	 * flag.
	 * Spins only on the rare limit condition of concurrent reception of
	 * RESP and DRESP for the same xfer.
	 */
	if (!ret) {
		spin_until_cond(scmi_xfer_acquired(xfer));
		scmi_xfer_state_update(xfer, msg_type);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);

	if (ret) {
		dev_err(cinfo->dev,
			"Invalid message type:%d for %d - HDR:0x%X state:%d\n",
			msg_type, xfer_id, msg_hdr, xfer->state);
		/* On error the refcount incremented above has to be dropped */
		__scmi_xfer_put(minfo, xfer);
		xfer = ERR_PTR(-EINVAL);
	}

	return xfer;
}

static inline void scmi_xfer_command_release(struct scmi_info *info,
					     struct scmi_xfer *xfer)
{
	atomic_set(&xfer->busy, SCMI_XFER_FREE);
	__scmi_xfer_put(&info->tx_minfo, xfer);
}

static inline void scmi_clear_channel(struct scmi_info *info,
				      struct scmi_chan_info *cinfo)
{
	if (info->desc->ops->clear_channel)
		info->desc->ops->clear_channel(cinfo);
}

static inline bool is_polling_required(struct scmi_chan_info *cinfo,
				       struct scmi_info *info)
{
	return cinfo->no_completion_irq || info->desc->force_polling;
}

static inline bool is_transport_polling_capable(struct scmi_info *info)
{
	return info->desc->ops->poll_done ||
	       info->desc->sync_cmds_completed_on_ret;
}

static inline bool is_polling_enabled(struct scmi_chan_info *cinfo,
				      struct scmi_info *info)
{
	return is_polling_required(cinfo, info) &&
	       is_transport_polling_capable(info);
}

static void scmi_handle_notification(struct scmi_chan_info *cinfo,
				     u32 msg_hdr, void *priv)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts;

	ts = ktime_get_boottime();
	xfer = scmi_xfer_get(cinfo->handle, minfo, false);
	if (IS_ERR(xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(xfer));
		scmi_clear_channel(info, cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	if (priv)
		/* Ensure order between xfer->priv store and following ops */
		smp_store_mb(xfer->priv, priv);
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);
	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	__scmi_xfer_put(minfo, xfer);

	scmi_clear_channel(info, cinfo);
}

static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u32 msg_hdr, void *priv)
{
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
	if (IS_ERR(xfer)) {
		if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
			scmi_clear_channel(info, cinfo);
		return;
	}

	/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
		xfer->rx.len = info->desc->max_msg_size;

	if (priv)
		/* Ensure order between xfer->priv store and following ops */
		smp_store_mb(xfer->priv, priv);
	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   xfer->hdr.type);

	if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
		scmi_clear_channel(info, cinfo);
		complete(xfer->async_done);
	} else {
		complete(&xfer->done);
	}

	scmi_xfer_command_release(info, xfer);
}

/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 * @priv: Transport specific private data.
 *
 * Processes one received message, matching it to the appropriate transfer
 * information, and signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence it should be
 * as fast as possible.
 */
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv)
{
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

	switch (msg_type) {
	case MSG_TYPE_NOTIFICATION:
		scmi_handle_notification(cinfo, msg_hdr, priv);
		break;
	case MSG_TYPE_COMMAND:
	case MSG_TYPE_DELAYED_RESP:
		scmi_handle_response(cinfo, msg_hdr, priv);
		break;
	default:
		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
		break;
	}
}
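
/*
 * Editor's note - a minimal sketch of how a transport is expected to feed
 * this entry point (names are illustrative, not a real transport): on Rx
 * interrupt the transport reads only the shared-memory message header and
 * hands it over, leaving all matching and validation to the core:
 *
 *	static irqreturn_t foo_transport_rx_irq(int irq, void *data)
 *	{
 *		struct scmi_chan_info *cinfo = data;
 *		u32 msg_hdr = foo_transport_read_hdr(cinfo);
 *
 *		scmi_rx_callback(cinfo, msg_hdr, NULL);
 *		return IRQ_HANDLED;
 *	}
 */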

/**
 * xfer_put() - Release a transmit message
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: message that was reserved by xfer_get_init
 */
static void xfer_put(const struct scmi_protocol_handle *ph,
		     struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}

static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	/*
	 * Poll also on xfer->done so that polling can be forcibly terminated
	 * in case of out-of-order receptions of delayed responses
	 */
	return info->desc->ops->poll_done(cinfo, xfer) ||
	       try_wait_for_completion(&xfer->done) ||
	       ktime_after(ktime_get(), stop);
}

/**
 * scmi_wait_for_message_response - A helper to group all the possible ways of
 * waiting for a synchronous message response.
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being waited for.
 *
 * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
 * configuration flags like xfer->hdr.poll_completion.
 *
 * Return: 0 on Success, error otherwise.
 */
static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
					  struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct device *dev = info->dev;
	int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms;

	trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
				      xfer->hdr.protocol_id, xfer->hdr.seq,
				      timeout_ms,
				      xfer->hdr.poll_completion);

	if (xfer->hdr.poll_completion) {
		/*
		 * Real polling is needed only if the transport has NOT
		 * declared itself to support synchronous command replies.
		 */
		if (!info->desc->sync_cmds_completed_on_ret) {
			/*
			 * Poll on xfer using transport provided .poll_done();
			 * assumes no completion interrupt was available.
			 */
			ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);

			spin_until_cond(scmi_xfer_done_no_timeout(cinfo,
								  xfer, stop));
			if (ktime_after(ktime_get(), stop)) {
				dev_err(dev,
					"timed out in resp(caller: %pS) - polling\n",
					(void *)_RET_IP_);
				ret = -ETIMEDOUT;
			}
		}

		if (!ret) {
			unsigned long flags;

			/*
			 * Do not fetch_response if an out-of-order delayed
			 * response is being processed.
			 */
			spin_lock_irqsave(&xfer->lock, flags);
			if (xfer->state == SCMI_XFER_SENT_OK) {
				info->desc->ops->fetch_response(cinfo, xfer);
				xfer->state = SCMI_XFER_RESP_OK;
			}
			spin_unlock_irqrestore(&xfer->lock, flags);
		}
	} else {
		/* And we wait for the response. */
		if (!wait_for_completion_timeout(&xfer->done,
						 msecs_to_jiffies(timeout_ms))) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	return ret;
}

/**
 * do_xfer() - Do one transfer
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	return corresponding error, else if all goes well,
 *	return 0.
 */
static int do_xfer(const struct scmi_protocol_handle *ph,
		   struct scmi_xfer *xfer)
{
	int ret;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	/* Check for polling request on custom command xfers at first */
	if (xfer->hdr.poll_completion && !is_transport_polling_capable(info)) {
		dev_warn_once(dev,
			      "Polling mode is not supported by transport.\n");
		return -EINVAL;
	}

	cinfo = idr_find(&info->tx_idr, pi->proto->id);
	if (unlikely(!cinfo))
		return -EINVAL;

	/* True ONLY if also supported by transport. */
	if (is_polling_enabled(cinfo, info))
		xfer->hdr.poll_completion = true;

	/*
	 * Initialise protocol id now from protocol handle to avoid it being
	 * overridden by mistake (or malice) by the protocol code mangling with
	 * the scmi_xfer structure prior to this.
	 */
	xfer->hdr.protocol_id = pi->proto->id;
	reinit_completion(&xfer->done);

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	xfer->state = SCMI_XFER_SENT_OK;
	/*
	 * Even though spinlocking is not needed here since no race is possible
	 * on xfer->state due to the monotonically increasing tokens allocation,
	 * we must anyway ensure xfer->state initialization is not re-ordered
	 * after the .send_message() to be sure that on the RX path an early
	 * ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
	 */
	smp_mb();

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		return ret;
	}

	ret = scmi_wait_for_message_response(cinfo, xfer);
	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret, xfer);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

	return ret;
}

static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
			      struct scmi_xfer *xfer)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	xfer->rx.len = info->desc->max_msg_size;
}

#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

/**
 * do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @ph: Pointer to SCMI protocol handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Using asynchronous commands in atomic/polling mode should be avoided since
 * it could cause long busy-waiting here, so ignore polling for the delayed
 * response and WARN if it was requested for this command transaction, since
 * upper layers should refrain from issuing such requests.
 *
 * The only other option would have been to refrain from using any asynchronous
 * command, even if made available, when an atomic transport is detected, and
 * instead forcibly use the synchronous version (something that can easily be
 * attained at the protocol layer), but this would also have led to longer
 * stalls of the channel for synchronous commands and possibly timeouts.
 * (In other words, there is usually a good reason if a platform provides an
 * asynchronous version of a command and we should prefer to use it...just not
 * when using atomic/polling mode.)
 *
 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
 *	return corresponding error, else if all goes well, return 0.
 */
static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
				 struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	/*
	 * Delayed responses should not be polled, so an async command should
	 * not have been used when requiring an atomic/poll context; WARN and
	 * perform instead a sleeping wait.
	 * (Note Async + IgnoreDelayedResponses are sent via do_xfer)
	 */
	WARN_ON_ONCE(xfer->hdr.poll_completion);

	ret = do_xfer(ph, xfer);
	if (!ret) {
		if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
			dev_err(ph->dev,
				"timed out in delayed resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		} else if (xfer->hdr.status) {
			ret = scmi_to_linux_errno(xfer->hdr.status);
		}
	}

	xfer->async_done = NULL;
	return ret;
}
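
/*
 * Editor's note - an illustrative sketch of the async flavour (the command
 * name and payload size are made up): protocol code opts in simply by calling
 * do_xfer_with_response() instead of do_xfer(); the delayed-response
 * completion is set up and waited on here, not by the caller:
 *
 *	ret = ph->xops->xfer_get_init(ph, FOO_ASYNC_CMD, sizeof(u32), 0, &t);
 *	if (!ret) {
 *		ret = ph->xops->do_xfer_with_response(ph, t);
 *		ph->xops->xfer_put(ph, t);
 *	}
 */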

/**
 * xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @ph: Pointer to SCMI protocol handle
 * @msg_id: Message identifier
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
static int xfer_get_init(const struct scmi_protocol_handle *ph,
			 u8 msg_id, size_t tx_size, size_t rx_size,
			 struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(pi->handle, minfo, true);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.type = MSG_TYPE_COMMAND;
	xfer->hdr.id = msg_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}

/**
 * version_get() - command to get the revision of the SCMI entity
 *
 * @ph: Pointer to SCMI protocol handle
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
	if (ret)
		return ret;

	ret = do_xfer(ph, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	xfer_put(ph, t);
	return ret;
}

/**
 * scmi_set_protocol_priv - Set protocol specific data at init time
 *
 * @ph: A reference to the protocol handle.
 * @priv: The private data to set.
 *
 * Return: 0 on Success
 */
static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
				  void *priv)
{
	struct scmi_protocol_instance *pi = ph_to_pi(ph);

	pi->priv = priv;

	return 0;
}

/**
 * scmi_get_protocol_priv - Get protocol specific data set at init time
 *
 * @ph: A reference to the protocol handle.
 *
 * Return: Protocol private data if any was set.
 */
static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->priv;
}

static const struct scmi_xfer_ops xfer_ops = {
	.version_get = version_get,
	.xfer_get_init = xfer_get_init,
	.reset_rx_to_maxsz = reset_rx_to_maxsz,
	.do_xfer = do_xfer,
	.do_xfer_with_response = do_xfer_with_response,
	.xfer_put = xfer_put,
};
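
/*
 * Editor's note - a minimal sketch of how protocol implementations consume
 * the ops above through their protocol handle; the command ID and payload
 * layout below are made up, but the get/do/put pattern mirrors version_get()
 * above:
 *
 *	static int scmi_foo_attributes_get(const struct scmi_protocol_handle *ph,
 *					   u32 *attrs)
 *	{
 *		int ret;
 *		struct scmi_xfer *t;
 *
 *		ret = ph->xops->xfer_get_init(ph, FOO_PROTOCOL_ATTRIBUTES,
 *					      0, sizeof(u32), &t);
 *		if (ret)
 *			return ret;
 *
 *		ret = ph->xops->do_xfer(ph, t);
 *		if (!ret)
 *			*attrs = get_unaligned_le32(t->rx.buf);
 *
 *		ph->xops->xfer_put(ph, t);
 *		return ret;
 *	}
 */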

/**
 * scmi_revision_area_get - Retrieve version memory area.
 *
 * @ph: A reference to the protocol handle.
 *
 * A helper to grab the version memory area reference during SCMI Base protocol
 * initialization.
 *
 * Return: A reference to the version memory area associated to the SCMI
 *	   instance underlying this protocol handle.
 */
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);

	return pi->handle->version;
}

/**
 * scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
 * instance descriptor.
 * @info: The reference to the related SCMI instance.
 * @proto: The protocol descriptor.
 *
 * Allocate a new protocol instance descriptor, using the provided @proto
 * description, against the specified SCMI instance @info, and initialize it;
 * all resource management is handled via a dedicated per-protocol devres
 * group.
 *
 * Context: Assumes to be called with @protocols_mtx already acquired.
 * Return: A reference to a freshly allocated and initialized protocol instance
 *	   or ERR_PTR on failure. On failure the @proto reference is at first
 *	   put using @scmi_protocol_put() before releasing all the devres group.
 */
static struct scmi_protocol_instance *
scmi_alloc_init_protocol_instance(struct scmi_info *info,
				  const struct scmi_protocol *proto)
{
	int ret = -ENOMEM;
	void *gid;
	struct scmi_protocol_instance *pi;
	const struct scmi_handle *handle = &info->handle;

	/* Protocol specific devres group */
	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
	if (!gid) {
		scmi_protocol_put(proto->id);
		goto out;
	}

	pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
	if (!pi)
		goto clean;

	pi->gid = gid;
	pi->proto = proto;
	pi->handle = handle;
	pi->ph.dev = handle->dev;
	pi->ph.xops = &xfer_ops;
	pi->ph.set_priv = scmi_set_protocol_priv;
	pi->ph.get_priv = scmi_get_protocol_priv;
	refcount_set(&pi->users, 1);
	/* proto->instance_init is assured NON-NULL by scmi_protocol_register */
	ret = pi->proto->instance_init(&pi->ph);
	if (ret)
		goto clean;

	ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
			GFP_KERNEL);
	if (ret != proto->id)
		goto clean;

	/*
	 * Warn but ignore events registration errors since we do not want
	 * to skip whole protocols if their notifications are messed up.
	 */
	if (pi->proto->events) {
		ret = scmi_register_protocol_events(handle, pi->proto->id,
						    &pi->ph,
						    pi->proto->events);
		if (ret)
			dev_warn(handle->dev,
				 "Protocol:%X - Events Registration Failed - err:%d\n",
				 pi->proto->id, ret);
	}

	devres_close_group(handle->dev, pi->gid);
	dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);

	return pi;

clean:
	/* Take care to put the protocol module's owner before releasing all */
	scmi_protocol_put(proto->id);
	devres_release_group(handle->dev, gid);
out:
	return ERR_PTR(ret);
}

/**
 * scmi_get_protocol_instance - Protocol initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * In case the required protocol has never been requested before for this
 * instance, allocate and initialize all the needed structures while handling
 * resource allocation with a dedicated per-protocol devres subgroup.
 *
 * Return: A reference to an initialized protocol instance or error on failure:
 *	   in particular returns -EPROBE_DEFER when the desired protocol could
 *	   NOT be found.
 */
static struct scmi_protocol_instance * __must_check
scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_protocol_instance *pi;
	struct scmi_info *info = handle_to_scmi_info(handle);

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, protocol_id);

	if (pi) {
		refcount_inc(&pi->users);
	} else {
		const struct scmi_protocol *proto;

		/* Fails if protocol not registered on bus */
		proto = scmi_protocol_get(protocol_id);
		if (proto)
			pi = scmi_alloc_init_protocol_instance(info, proto);
		else
			pi = ERR_PTR(-EPROBE_DEFER);
	}
	mutex_unlock(&info->protocols_mtx);

	return pi;
}

/**
 * scmi_protocol_acquire - Protocol acquire
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Register a new user for the requested protocol on the specified SCMI
 * platform instance, possibly triggering its initialization on first user.
 *
 * Return: 0 if protocol was acquired successfully.
 */
int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
{
	return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
}

/**
 * scmi_protocol_release - Protocol de-initialization helper.
 * @handle: A reference to the SCMI platform instance.
 * @protocol_id: The protocol being requested.
 *
 * Remove one user for the specified protocol and trigger de-initialization
 * and resource de-allocation once the last user has gone.
 */
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
{
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_protocol_instance *pi;

	mutex_lock(&info->protocols_mtx);
	pi = idr_find(&info->protocols, protocol_id);
	if (WARN_ON(!pi))
		goto out;

	if (refcount_dec_and_test(&pi->users)) {
		void *gid = pi->gid;

		if (pi->proto->events)
			scmi_deregister_protocol_events(handle, protocol_id);

		if (pi->proto->instance_deinit)
			pi->proto->instance_deinit(&pi->ph);

		idr_remove(&info->protocols, protocol_id);

		scmi_protocol_put(protocol_id);

		devres_release_group(handle->dev, gid);
		dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
			protocol_id);
	}

out:
	mutex_unlock(&info->protocols_mtx);
}

void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
				     u8 *prot_imp)
{
	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
	struct scmi_info *info = handle_to_scmi_info(pi->handle);

	info->protocols_imp = prot_imp;
}

static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;
	return false;
}

struct scmi_protocol_devres {
	const struct scmi_handle *handle;
	u8 protocol_id;
};

static void scmi_devm_release_protocol(struct device *dev, void *res)
{
	struct scmi_protocol_devres *dres = res;

	scmi_protocol_release(dres->handle, dres->protocol_id);
}

/**
 * scmi_devm_protocol_get - Devres managed get protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 * @ph: A pointer reference used to pass back the associated protocol handle.
 *
 * Get hold of a protocol accounting for its usage, possibly triggering its
 * initialization, and returning the protocol specific operations and related
 * protocol handle which will be used as first argument in most of the
 * protocols operations methods.
 * Being a devres based managed method, the protocol hold will be
 * automatically released, and possibly de-initialized on last user, once the
 * SCMI driver owning the scmi_device is unbound from it.
 *
 * Return: A reference to the requested protocol operations or error.
 *	   Must be checked for errors by caller.
 */
static const void __must_check *
scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
		       struct scmi_protocol_handle **ph)
{
	struct scmi_protocol_instance *pi;
	struct scmi_protocol_devres *dres;
	struct scmi_handle *handle = sdev->handle;

	if (!ph)
		return ERR_PTR(-EINVAL);

	dres = devres_alloc(scmi_devm_release_protocol,
			    sizeof(*dres), GFP_KERNEL);
	if (!dres)
		return ERR_PTR(-ENOMEM);

	pi = scmi_get_protocol_instance(handle, protocol_id);
	if (IS_ERR(pi)) {
		devres_free(dres);
		return pi;
	}

	dres->handle = handle;
	dres->protocol_id = protocol_id;
	devres_add(&sdev->dev, dres);

	*ph = &pi->ph;

	return pi->proto->ops;
}
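
/*
 * Editor's note - a minimal sketch of the client-side usage (the protocol ID
 * and ops type depend on the protocol actually requested; error handling
 * abbreviated): an SCMI driver typically grabs a protocol in its probe via
 * the handle, and the devres hook above releases it automatically on unbind:
 *
 *	static int foo_scmi_probe(struct scmi_device *sdev)
 *	{
 *		struct scmi_protocol_handle *ph;
 *		const struct scmi_clk_proto_ops *clk_ops;
 *
 *		clk_ops = sdev->handle->devm_protocol_get(sdev,
 *					SCMI_PROTOCOL_CLOCK, &ph);
 *		if (IS_ERR(clk_ops))
 *			return PTR_ERR(clk_ops);
 *		...
 *	}
 */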

static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
{
	struct scmi_protocol_devres *dres = res;

	if (WARN_ON(!dres || !data))
		return 0;

	return dres->protocol_id == *((u8 *)data);
}

/**
 * scmi_devm_protocol_put - Devres managed put protocol operations and handle
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @protocol_id: The protocol being requested.
 *
 * Explicitly release a protocol hold previously obtained by calling the above
 * @scmi_devm_protocol_get.
 */
static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
{
	int ret;

	ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
			     scmi_devm_protocol_match, &protocol_id);
	WARN_ON(ret);
}

/**
 * scmi_is_transport_atomic - Method to check if underlying transport for an
 * SCMI instance is configured as atomic.
 *
 * @handle: A reference to the SCMI platform instance.
 * @atomic_threshold: An optional return value for the system wide currently
 *		      configured threshold for atomic operations.
 *
 * Return: True if transport is configured as atomic
 */
static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
				     unsigned int *atomic_threshold)
{
	bool ret;
	struct scmi_info *info = handle_to_scmi_info(handle);

	ret = info->desc->atomic_enabled && is_transport_polling_capable(info);
	if (ret && atomic_threshold)
		*atomic_threshold = info->atomic_threshold;

	return ret;
}

static inline
struct scmi_handle *scmi_handle_get_from_info_unlocked(struct scmi_info *info)
{
	info->users++;
	return &info->handle;
}

/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: pointer to handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			handle = scmi_handle_get_from_info_unlocked(info);
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}

/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 if successfully released, -EINVAL if a NULL handle was passed.
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}
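
/*
 * Editor's note - a minimal sketch of the expected pairing (the device passed
 * in must be a child of the SCMI instance, as matched on dev->parent above;
 * error handling abbreviated):
 *
 *	struct scmi_handle *handle = scmi_handle_get(&sdev->dev);
 *
 *	if (handle) {
 *		... use handle ...
 *		scmi_handle_put(handle);
 *	}
 */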

static int __scmi_xfer_info_init(struct scmi_info *sinfo,
				 struct scmi_xfers_info *info)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
		dev_err(dev,
			"Invalid maximum messages %d, not in range [1 - %lu]\n",
			info->max_msg, MSG_TOKEN_MAX);
		return -EINVAL;
	}

	hash_init(info->pending_xfers);

	/* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/*
	 * Preallocate a number of xfers equal to max inflight messages,
	 * pre-initialize the buffer pointer to pre-allocated buffers and
	 * attach all of them to the free list
	 */
	INIT_HLIST_HEAD(&info->free_xfers);
	for (i = 0; i < info->max_msg; i++) {
		xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
		if (!xfer)
			return -ENOMEM;

		xfer->rx.buf = devm_kcalloc(dev, desc->max_msg_size, sizeof(u8),
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
		spin_lock_init(&xfer->lock);

		/* Add initialized xfer to the free list */
		hlist_add_head(&xfer->node, &info->free_xfers);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}

static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
{
	const struct scmi_desc *desc = sinfo->desc;

	if (!desc->ops->get_max_msg) {
		sinfo->tx_minfo.max_msg = desc->max_msg;
		sinfo->rx_minfo.max_msg = desc->max_msg;
	} else {
		struct scmi_chan_info *base_cinfo;

		base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
		if (!base_cinfo)
			return -EINVAL;
		sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);

		/* RX channel is optional so can be skipped */
		base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
		if (base_cinfo)
			sinfo->rx_minfo.max_msg =
				desc->ops->get_max_msg(base_cinfo);
	}

	return 0;
}

static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int ret;

	ret = scmi_channels_max_msg_configure(sinfo);
	if (ret)
		return ret;

	ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);

	return ret;
}

static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
			   int prot_id, bool tx)
{
	int ret, idx;
	struct scmi_chan_info *cinfo;
	struct idr *idr;

	/* Transmit channel is first entry i.e. index 0 */
	idx = tx ? 0 : 1;
	idr = tx ? &info->tx_idr : &info->rx_idr;

	/* check if already allocated, used for multiple device per protocol */
	cinfo = idr_find(idr, prot_id);
	if (cinfo)
		return 0;

	if (!info->desc->ops->chan_available(dev, idx)) {
		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
			return -EINVAL;
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
	if (ret)
		return ret;

	if (tx && is_polling_required(cinfo, info)) {
		if (is_transport_polling_capable(info))
			dev_info(dev,
				 "Enabled polling mode TX channel - prot_id:%d\n",
				 prot_id);
		else
			dev_warn(dev,
				 "Polling mode NOT supported by transport.\n");
	}

idr_alloc:
	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}

static inline int
scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret = scmi_chan_setup(info, dev, prot_id, true);

	if (!ret) /* Rx is optional, hence no error check */
		scmi_chan_setup(info, dev, prot_id, false);

	return ret;
}

/**
 * scmi_get_protocol_device - Helper to get/create an SCMI device.
 *
 * @np: A device node representing a valid active protocol for the referred
 * SCMI instance.
 * @info: The referred SCMI instance for which we are getting/creating this
 * device.
 * @prot_id: The protocol ID.
 * @name: The device name.
 *
 * Referring to the specific SCMI instance identified by @info, this helper
 * takes care to return a properly initialized device matching the requested
 * @prot_id and @name: if the device does not yet exist, it is created as a
 * child of the specified SCMI instance @info and its transport properly
 * initialized as usual.
 *
 * Return: A properly initialized scmi device, NULL otherwise.
 */
static inline struct scmi_device *
scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
			 int prot_id, const char *name)
{
	struct scmi_device *sdev;

	/* Already created for this parent SCMI instance ? */
	sdev = scmi_child_dev_find(info->dev, prot_id, name);
	if (sdev)
		return sdev;

	pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);

	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return NULL;
	}

	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		return NULL;
	}

	return sdev;
}

static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id, const char *name)
{
	struct scmi_device *sdev;

	sdev = scmi_get_protocol_device(np, info, prot_id, name);
	if (!sdev)
		return;

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}

/**
 * scmi_create_protocol_devices - Create devices for all pending requests for
 * this SCMI instance.
 *
 * @np: The device node describing the protocol
 * @info: The SCMI instance descriptor
 * @prot_id: The protocol ID
 *
 * All devices previously requested for this instance (if any) are found and
 * created by scanning the proper @&scmi_requested_devices entry.
 */
static void scmi_create_protocol_devices(struct device_node *np,
					 struct scmi_info *info, int prot_id)
{
	struct list_head *phead;

	mutex_lock(&scmi_requested_devices_mtx);
	phead = idr_find(&scmi_requested_devices, prot_id);
	if (phead) {
		struct scmi_requested_dev *rdev;

		list_for_each_entry(rdev, phead, node)
			scmi_create_protocol_device(np, info, prot_id,
						    rdev->id_table->name);
	}
	mutex_unlock(&scmi_requested_devices_mtx);
}
1746
1747/**
1748 * scmi_protocol_device_request - Helper to request a device
1749 *
1750 * @id_table: A protocol/name pair descriptor for the device to be created.
1751 *
1752 * This helper lets an SCMI driver request specific devices identified by the
1753 * @id_table to be created for each active SCMI instance.
1754 *
1755 * The requested device name MUST NOT already exist for any protocol;
1756 * first the freshly requested @id_table is annotated in the IDR table
1757 * @scmi_requested_devices, then a matching device is created for each
1758 * already active SCMI instance (if any).
1759 *
1760 * This way the requested device is created straight away for all the already
1761 * initialized (probed) SCMI instances (handles), and it also remains annotated
1762 * as pending creation in case the requesting SCMI driver was loaded before
1763 * some SCMI instance and its related transport were available: when such a
1764 * late instance is probed, its probe will take care to scan the list of
1765 * pending requested devices and create those on its own (see
1766 * scmi_create_protocol_devices() and its enclosing loop).
1767 *
1768 * Return: 0 on Success
1769 */
1770int scmi_protocol_device_request(const struct scmi_device_id *id_table)
1771{
1772 int ret = 0;
1773 unsigned int id = 0;
1774 struct list_head *head, *phead = NULL;
1775 struct scmi_requested_dev *rdev;
1776 struct scmi_info *info;
1777
1778 pr_debug("Requesting SCMI device (%s) for protocol %x\n",
1779 id_table->name, id_table->protocol_id);
1780
1781 /*
1782	 * Search for the matching protocol rdev list and then search it for
1783	 * any existing device with the same name; fail if a duplicate is found.
1784 */
1785 mutex_lock(&scmi_requested_devices_mtx);
1786 idr_for_each_entry(&scmi_requested_devices, head, id) {
1787 if (!phead) {
1788 /* A list found registered in the IDR is never empty */
1789 rdev = list_first_entry(head, struct scmi_requested_dev,
1790 node);
1791 if (rdev->id_table->protocol_id ==
1792 id_table->protocol_id)
1793 phead = head;
1794 }
1795 list_for_each_entry(rdev, head, node) {
1796 if (!strcmp(rdev->id_table->name, id_table->name)) {
1797 pr_err("Ignoring duplicate request [%d] %s\n",
1798 rdev->id_table->protocol_id,
1799 rdev->id_table->name);
1800 ret = -EINVAL;
1801 goto out;
1802 }
1803 }
1804 }
1805
1806 /*
1807 * No duplicate found for requested id_table, so let's create a new
1808 * requested device entry for this new valid request.
1809 */
1810 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
1811 if (!rdev) {
1812 ret = -ENOMEM;
1813 goto out;
1814 }
1815 rdev->id_table = id_table;
1816
1817 /*
1818 * Append the new requested device table descriptor to the head of the
1819	 * related protocol list, creating that list head first if it is not
1820	 * already there.
1821 */
1822 if (!phead) {
1823 phead = kzalloc(sizeof(*phead), GFP_KERNEL);
1824 if (!phead) {
1825 kfree(rdev);
1826 ret = -ENOMEM;
1827 goto out;
1828 }
1829 INIT_LIST_HEAD(phead);
1830
1831 ret = idr_alloc(&scmi_requested_devices, (void *)phead,
1832 id_table->protocol_id,
1833 id_table->protocol_id + 1, GFP_KERNEL);
1834 if (ret != id_table->protocol_id) {
1835 pr_err("Failed to save SCMI device - ret:%d\n", ret);
1836 kfree(rdev);
1837 kfree(phead);
1838 ret = -EINVAL;
1839 goto out;
1840 }
1841 ret = 0;
1842 }
1843 list_add(&rdev->node, phead);
1844
1845 /*
1846 * Now effectively create and initialize the requested device for every
1847 * already initialized SCMI instance which has registered the requested
1848 * protocol as a valid active one: i.e. defined in DT and supported by
1849 * current platform FW.
1850 */
1851 mutex_lock(&scmi_list_mutex);
1852 list_for_each_entry(info, &scmi_list, node) {
1853 struct device_node *child;
1854
1855 child = idr_find(&info->active_protocols,
1856 id_table->protocol_id);
1857 if (child) {
1858 struct scmi_device *sdev;
1859
1860 sdev = scmi_get_protocol_device(child, info,
1861 id_table->protocol_id,
1862 id_table->name);
1863 /* Set handle if not already set: device existed */
1864 if (sdev && !sdev->handle)
1865 sdev->handle =
1866 scmi_handle_get_from_info_unlocked(info);
1867 } else {
1868 dev_err(info->dev,
1869 "Failed. SCMI protocol %d not active.\n",
1870 id_table->protocol_id);
1871 }
1872 }
1873 mutex_unlock(&scmi_list_mutex);
1874
1875out:
1876 mutex_unlock(&scmi_requested_devices_mtx);
1877
1878 return ret;
1879}
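/*
 * Usage sketch (illustrative only): SCMI drivers do not normally call this
 * directly; registering an scmi_driver issues an equivalent request based on
 * the driver's id_table, e.g. for a hypothetical clock protocol user:
 *
 *	static const struct scmi_device_id my_id_table[] = {
 *		{ SCMI_PROTOCOL_CLOCK, "clocks" },
 *		{ },
 *	};
 *
 *	ret = scmi_protocol_device_request(my_id_table);
 */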
1880
1881/**
1882 * scmi_protocol_device_unrequest - Helper to unrequest a device
1883 *
1884 * @id_table: A protocol/name pair descriptor for the device to be unrequested.
1885 *
1886 * A helper to let an SCMI driver release its request for devices; note that
1887 * devices are created and initialized once the first SCMI driver requests
1888 * them, but they are destroyed only on SCMI core unloading/unbinding.
1889 *
1890 * The current SCMI transport layer uses such devices as internal references,
1891 * and as such they could share the same transport between multiple drivers,
1892 * so they cannot be safely destroyed until the whole SCMI stack is removed
1893 * (short of adding the further burden of refcounting).
1894 */
1895void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table)
1896{
1897 struct list_head *phead;
1898
1899 pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
1900 id_table->name, id_table->protocol_id);
1901
1902 mutex_lock(&scmi_requested_devices_mtx);
1903 phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
1904 if (phead) {
1905 struct scmi_requested_dev *victim, *tmp;
1906
1907 list_for_each_entry_safe(victim, tmp, phead, node) {
1908 if (!strcmp(victim->id_table->name, id_table->name)) {
1909 list_del(&victim->node);
1910 kfree(victim);
1911 break;
1912 }
1913 }
1914
1915 if (list_empty(phead)) {
1916 idr_remove(&scmi_requested_devices,
1917 id_table->protocol_id);
1918 kfree(phead);
1919 }
1920 }
1921 mutex_unlock(&scmi_requested_devices_mtx);
1922}
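/*
 * The matching release is symmetric (illustrative, reusing the hypothetical
 * table from the request sketch above):
 *
 *	scmi_protocol_device_unrequest(my_id_table);
 */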
1923
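/* Free every Tx and Rx channel via the transport's chan_free, then drop the IDRs */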
1924static int scmi_cleanup_txrx_channels(struct scmi_info *info)
1925{
1926 int ret;
1927 struct idr *idr = &info->tx_idr;
1928
1929 ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
1930 idr_destroy(&info->tx_idr);
1931
1932 idr = &info->rx_idr;
1933 ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
1934 idr_destroy(&info->rx_idr);
1935
1936 return ret;
1937}
1938
1939static int scmi_probe(struct platform_device *pdev)
1940{
1941 int ret;
1942 struct scmi_handle *handle;
1943 const struct scmi_desc *desc;
1944 struct scmi_info *info;
1945 struct device *dev = &pdev->dev;
1946 struct device_node *child, *np = dev->of_node;
1947
1948 desc = of_device_get_match_data(dev);
1949 if (!desc)
1950 return -EINVAL;
1951
1952 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
1953 if (!info)
1954 return -ENOMEM;
1955
1956 info->dev = dev;
1957 info->desc = desc;
1958 INIT_LIST_HEAD(&info->node);
1959 idr_init(&info->protocols);
1960 mutex_init(&info->protocols_mtx);
1961 idr_init(&info->active_protocols);
1962
1963 platform_set_drvdata(pdev, info);
1964 idr_init(&info->tx_idr);
1965 idr_init(&info->rx_idr);
1966
1967 handle = &info->handle;
1968 handle->dev = info->dev;
1969 handle->version = &info->version;
1970 handle->devm_protocol_get = scmi_devm_protocol_get;
1971 handle->devm_protocol_put = scmi_devm_protocol_put;
1972
1973	/* System-wide atomic threshold for atomic ops, if any */
1974 if (!of_property_read_u32(np, "atomic-threshold-us",
1975 &info->atomic_threshold))
1976 dev_info(dev,
1977 "SCMI System wide atomic threshold set to %d us\n",
1978 info->atomic_threshold);
1979 handle->is_transport_atomic = scmi_is_transport_atomic;
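	/*
	 * For reference, a sketch of the optional DT property parsed above
	 * (property name as read by the code; the value is illustrative):
	 *
	 *	scmi {
	 *		compatible = "arm,scmi";
	 *		atomic-threshold-us = <20>;
	 *		...
	 *	};
	 */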
1980
1981 if (desc->ops->link_supplier) {
1982 ret = desc->ops->link_supplier(dev);
1983 if (ret)
1984 return ret;
1985 }
1986
1987 ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
1988 if (ret)
1989 return ret;
1990
1991 ret = scmi_xfer_info_init(info);
1992 if (ret)
1993 goto clear_txrx_setup;
1994
1995 if (scmi_notification_init(handle))
1996 dev_err(dev, "SCMI Notifications NOT available.\n");
1997
1998 if (info->desc->atomic_enabled && !is_transport_polling_capable(info))
1999 dev_err(dev,
2000 "Transport is not polling capable. Atomic mode not supported.\n");
2001
2002 /*
2003 * Trigger SCMI Base protocol initialization.
2004	 * It's mandatory and won't ever be released/deinitialized until the
2005	 * SCMI stack is shut down/unloaded as a whole.
2006 */
2007 ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
2008 if (ret) {
2009 dev_err(dev, "unable to communicate with SCMI\n");
2010 goto notification_exit;
2011 }
2012
2013 mutex_lock(&scmi_list_mutex);
2014 list_add_tail(&info->node, &scmi_list);
2015 mutex_unlock(&scmi_list_mutex);
2016
2017 for_each_available_child_of_node(np, child) {
2018 u32 prot_id;
2019
2020 if (of_property_read_u32(child, "reg", &prot_id))
2021 continue;
2022
2023 if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
2024 dev_err(dev, "Out of range protocol %d\n", prot_id);
2025
2026 if (!scmi_is_protocol_implemented(handle, prot_id)) {
2027 dev_err(dev, "SCMI protocol %d not implemented\n",
2028 prot_id);
2029 continue;
2030 }
2031
2032 /*
2033 * Save this valid DT protocol descriptor amongst
2034	 * @active_protocols for this SCMI instance.
2035 */
2036 ret = idr_alloc(&info->active_protocols, child,
2037 prot_id, prot_id + 1, GFP_KERNEL);
2038 if (ret != prot_id) {
2039 dev_err(dev, "SCMI protocol %d already activated. Skip\n",
2040 prot_id);
2041 continue;
2042 }
2043
2044 of_node_get(child);
2045 scmi_create_protocol_devices(child, info, prot_id);
2046 }
2047
2048 return 0;
2049
2050notification_exit:
2051 scmi_notification_exit(&info->handle);
2052clear_txrx_setup:
2053 scmi_cleanup_txrx_channels(info);
2054 return ret;
2055}
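/*
 * For reference, a sketch (illustrative values) of the DT layout walked by
 * the probe loop above: each child node's "reg" holds the SCMI protocol ID.
 *
 *	scmi {
 *		compatible = "arm,scmi";
 *		...
 *		protocol@14 {
 *			reg = <0x14>;
 *		};
 *	};
 */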
2056
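/* Drop a freed channel's entry from the owning protocol IDR */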
2057void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
2058{
2059 idr_remove(idr, id);
2060}
2061
2062static int scmi_remove(struct platform_device *pdev)
2063{
2064 int ret = 0, id;
2065 struct scmi_info *info = platform_get_drvdata(pdev);
2066 struct device_node *child;
2067
2068 mutex_lock(&scmi_list_mutex);
2069 if (info->users)
2070 ret = -EBUSY;
2071 else
2072 list_del(&info->node);
2073 mutex_unlock(&scmi_list_mutex);
2074
2075 if (ret)
2076 return ret;
2077
2078 scmi_notification_exit(&info->handle);
2079
2080 mutex_lock(&info->protocols_mtx);
2081 idr_destroy(&info->protocols);
2082 mutex_unlock(&info->protocols_mtx);
2083
2084 idr_for_each_entry(&info->active_protocols, child, id)
2085 of_node_put(child);
2086 idr_destroy(&info->active_protocols);
2087
2088 /* Safe to free channels since no more users */
2089 return scmi_cleanup_txrx_channels(info);
2090}
2091
2092static ssize_t protocol_version_show(struct device *dev,
2093 struct device_attribute *attr, char *buf)
2094{
2095 struct scmi_info *info = dev_get_drvdata(dev);
2096
2097 return sprintf(buf, "%u.%u\n", info->version.major_ver,
2098 info->version.minor_ver);
2099}
2100static DEVICE_ATTR_RO(protocol_version);
2101
2102static ssize_t firmware_version_show(struct device *dev,
2103 struct device_attribute *attr, char *buf)
2104{
2105 struct scmi_info *info = dev_get_drvdata(dev);
2106
2107 return sprintf(buf, "0x%x\n", info->version.impl_ver);
2108}
2109static DEVICE_ATTR_RO(firmware_version);
2110
2111static ssize_t vendor_id_show(struct device *dev,
2112 struct device_attribute *attr, char *buf)
2113{
2114 struct scmi_info *info = dev_get_drvdata(dev);
2115
2116 return sprintf(buf, "%s\n", info->version.vendor_id);
2117}
2118static DEVICE_ATTR_RO(vendor_id);
2119
2120static ssize_t sub_vendor_id_show(struct device *dev,
2121 struct device_attribute *attr, char *buf)
2122{
2123 struct scmi_info *info = dev_get_drvdata(dev);
2124
2125 return sprintf(buf, "%s\n", info->version.sub_vendor_id);
2126}
2127static DEVICE_ATTR_RO(sub_vendor_id);
2128
2129static struct attribute *versions_attrs[] = {
2130 &dev_attr_firmware_version.attr,
2131 &dev_attr_protocol_version.attr,
2132 &dev_attr_vendor_id.attr,
2133 &dev_attr_sub_vendor_id.attr,
2134 NULL,
2135};
2136ATTRIBUTE_GROUPS(versions);
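/*
 * These version attributes surface in sysfs under the SCMI platform device,
 * e.g. (illustrative path and value, both platform dependent):
 *
 *	$ cat /sys/devices/platform/firmware:scmi/firmware_version
 *	0x10000
 */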
2137
2138/* Each compatible listed below must have a descriptor associated with it */
2139static const struct of_device_id scmi_of_match[] = {
2140#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
2141 { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
2142#endif
2143#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
2144 { .compatible = "linaro,scmi-optee", .data = &scmi_optee_desc },
2145#endif
2146#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC
2147 { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
2148#endif
2149#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
2150 { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc},
2151#endif
2152 { /* Sentinel */ },
2153};
2154
2155MODULE_DEVICE_TABLE(of, scmi_of_match);
2156
2157static struct platform_driver scmi_driver = {
2158 .driver = {
2159 .name = "arm-scmi",
2160 .of_match_table = scmi_of_match,
2161 .dev_groups = versions_groups,
2162 },
2163 .probe = scmi_probe,
2164 .remove = scmi_remove,
2165};
2166
2167/**
2168 * __scmi_transports_setup - Common helper to call transport-specific
2169 * .init/.exit code if provided.
2170 *
2171 * @init: A flag to distinguish between init and exit.
2172 *
2173 * Note that, if provided, we invoke .init/.exit functions for all the
2174 * transports currently compiled in.
2175 *
2176 * Return: 0 on Success.
2177 */
2178static inline int __scmi_transports_setup(bool init)
2179{
2180 int ret = 0;
2181 const struct of_device_id *trans;
2182
2183 for (trans = scmi_of_match; trans->data; trans++) {
2184 const struct scmi_desc *tdesc = trans->data;
2185
2186 if ((init && !tdesc->transport_init) ||
2187 (!init && !tdesc->transport_exit))
2188 continue;
2189
2190 if (init)
2191 ret = tdesc->transport_init();
2192 else
2193 tdesc->transport_exit();
2194
2195 if (ret) {
2196 pr_err("SCMI transport %s FAILED initialization!\n",
2197 trans->compatible);
2198 break;
2199 }
2200 }
2201
2202 return ret;
2203}
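/*
 * A transport opts in to these hooks simply by populating the relevant
 * members of its descriptor, e.g. (sketch with hypothetical names):
 *
 *	static const struct scmi_desc scmi_foo_desc = {
 *		.transport_init = foo_transport_init,
 *		.transport_exit = foo_transport_exit,
 *		...
 *	};
 */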
2204
2205static int __init scmi_transports_init(void)
2206{
2207 return __scmi_transports_setup(true);
2208}
2209
2210static void __exit scmi_transports_exit(void)
2211{
2212 __scmi_transports_setup(false);
2213}
2214
2215static int __init scmi_driver_init(void)
2216{
2217 int ret;
2218
2219 /* Bail out if no SCMI transport was configured */
2220 if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
2221 return -EINVAL;
2222
2223 scmi_bus_init();
2224
2225 /* Initialize any compiled-in transport which provided an init/exit */
2226 ret = scmi_transports_init();
2227 if (ret)
2228 return ret;
2229
2230 scmi_base_register();
2231
2232 scmi_clock_register();
2233 scmi_perf_register();
2234 scmi_power_register();
2235 scmi_reset_register();
2236 scmi_sensors_register();
2237 scmi_voltage_register();
2238 scmi_system_register();
2239
2240 return platform_driver_register(&scmi_driver);
2241}
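/*
 * Registered at subsys_initcall time so that SCMI-provided resources (clocks,
 * sensors, power domains, ...) stand a chance of being up before the ordinary
 * device drivers that consume them are probed.
 */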
2242subsys_initcall(scmi_driver_init);
2243
2244static void __exit scmi_driver_exit(void)
2245{
2246 scmi_base_unregister();
2247
2248 scmi_clock_unregister();
2249 scmi_perf_unregister();
2250 scmi_power_unregister();
2251 scmi_reset_unregister();
2252 scmi_sensors_unregister();
2253 scmi_voltage_unregister();
2254 scmi_system_unregister();
2255
2256 scmi_bus_exit();
2257
2258 scmi_transports_exit();
2259
2260 platform_driver_unregister(&scmi_driver);
2261}
2262module_exit(scmi_driver_exit);
2263
2264MODULE_ALIAS("platform:arm-scmi");
2265MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
2266MODULE_DESCRIPTION("ARM SCMI protocol driver");
2267MODULE_LICENSE("GPL v2");