// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
#include <linux/fwctl.h>
#include <linux/device.h>
#include <cxl/mailbox.h>
#include <cxl/features.h>
#include <uapi/fwctl/cxl.h>
#include "cxl.h"
#include "core.h"
#include "cxlmem.h"

/**
 * DOC: cxl features
 *
 * CXL Features:
 * A CXL device that includes a mailbox supports commands that allow
 * listing, getting, and setting of optionally defined features such
 * as memory sparing or post package sparing. Vendors may define custom
 * features for the device.
 */
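
/*
 * Example (illustrative sketch only): once devm_cxl_setup_features()
 * below has cached the device's feature list, a caller holding a valid
 * cxlds could walk the entries:
 *
 *	struct cxl_features_state *cxlfs = to_cxlfs(cxlds);
 *
 *	for (int i = 0; i < cxlfs->entries->num_features; i++) {
 *		struct cxl_feat_entry *feat = &cxlfs->entries->ent[i];
 *
 *		dev_dbg(cxlds->dev, "feature: %pUb\n", &feat->uuid);
 *	}
 */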

/* All the features below are exclusive to the kernel */
static const uuid_t cxl_exclusive_feats[] = {
	CXL_FEAT_PATROL_SCRUB_UUID,
	CXL_FEAT_ECS_UUID,
	CXL_FEAT_SPPR_UUID,
	CXL_FEAT_HPPR_UUID,
	CXL_FEAT_CACHELINE_SPARING_UUID,
	CXL_FEAT_ROW_SPARING_UUID,
	CXL_FEAT_BANK_SPARING_UUID,
	CXL_FEAT_RANK_SPARING_UUID,
};

static bool is_cxl_feature_exclusive_by_uuid(const uuid_t *uuid)
{
	for (int i = 0; i < ARRAY_SIZE(cxl_exclusive_feats); i++) {
		if (uuid_equal(uuid, &cxl_exclusive_feats[i]))
			return true;
	}

	return false;
}

static bool is_cxl_feature_exclusive(struct cxl_feat_entry *entry)
{
	return is_cxl_feature_exclusive_by_uuid(&entry->uuid);
}

struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
{
	return cxlds->cxlfs;
}
EXPORT_SYMBOL_NS_GPL(to_cxlfs, "CXL");

static int cxl_get_supported_features_count(struct cxl_mailbox *cxl_mbox)
{
	struct cxl_mbox_get_sup_feats_out mbox_out;
	struct cxl_mbox_get_sup_feats_in mbox_in;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	memset(&mbox_in, 0, sizeof(mbox_in));
	mbox_in.count = cpu_to_le32(sizeof(mbox_out));
	memset(&mbox_out, 0, sizeof(mbox_out));
	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
		.size_in = sizeof(mbox_in),
		.payload_in = &mbox_in,
		.size_out = sizeof(mbox_out),
		.payload_out = &mbox_out,
		.min_out = sizeof(mbox_out),
	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc < 0)
		return rc;

	return le16_to_cpu(mbox_out.supported_feats);
}

static struct cxl_feat_entries *
get_supported_features(struct cxl_features_state *cxlfs)
{
	int remain_feats, max_size, max_feats, start, rc, hdr_size;
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	int feat_size = sizeof(struct cxl_feat_entry);
	struct cxl_mbox_get_sup_feats_in mbox_in;
	struct cxl_feat_entry *entry;
	struct cxl_mbox_cmd mbox_cmd;
	int user_feats = 0;
	int count;

	count = cxl_get_supported_features_count(cxl_mbox);
	if (count <= 0)
		return NULL;

	struct cxl_feat_entries *entries __free(kvfree) =
		kvmalloc(struct_size(entries, ent, count), GFP_KERNEL);
	if (!entries)
		return NULL;

	struct cxl_mbox_get_sup_feats_out *mbox_out __free(kvfree) =
		kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!mbox_out)
		return NULL;

	hdr_size = struct_size(mbox_out, ents, 0);
	max_size = cxl_mbox->payload_size - hdr_size;
	/* Max number of feature entries that fit in the mailbox max payload size */
	max_feats = max_size / feat_size;
	entry = entries->ent;

	start = 0;
	remain_feats = count;
	do {
		int retrieved, alloc_size, copy_feats;
		int num_entries;

		if (remain_feats > max_feats) {
			alloc_size = struct_size(mbox_out, ents, max_feats);
			remain_feats = remain_feats - max_feats;
			copy_feats = max_feats;
		} else {
			alloc_size = struct_size(mbox_out, ents, remain_feats);
			copy_feats = remain_feats;
			remain_feats = 0;
		}

		memset(&mbox_in, 0, sizeof(mbox_in));
		mbox_in.count = cpu_to_le32(alloc_size);
		mbox_in.start_idx = cpu_to_le16(start);
		memset(mbox_out, 0, alloc_size);
		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
			.size_in = sizeof(mbox_in),
			.payload_in = &mbox_in,
			.size_out = alloc_size,
			.payload_out = mbox_out,
			.min_out = hdr_size,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0)
			return NULL;

		if (mbox_cmd.size_out <= hdr_size)
			return NULL;

		/*
		 * Make sure the size of the retrieved output buffer is a
		 * multiple of the feature entry size.
		 */
		retrieved = mbox_cmd.size_out - hdr_size;
		if (retrieved % feat_size)
			return NULL;

		num_entries = le16_to_cpu(mbox_out->num_entries);
		/*
		 * If the reported number of entries times the defined entry
		 * size does not match the number of retrieved output bytes,
		 * then the output payload is incorrect.
		 */
		if (num_entries * feat_size != retrieved)
			return NULL;

		memcpy(entry, mbox_out->ents, retrieved);
		for (int i = 0; i < num_entries; i++) {
			if (!is_cxl_feature_exclusive(entry + i))
				user_feats++;
		}
		entry += num_entries;
		/*
		 * If the number of returned entries is less than expected,
		 * carry the remainder over to the next batch.
		 */
		remain_feats += copy_feats - num_entries;
		start += num_entries;
	} while (remain_feats);

	entries->num_features = count;
	entries->num_user_features = user_feats;

	return no_free_ptr(entries);
}

static void free_cxlfs(void *_cxlfs)
{
	struct cxl_features_state *cxlfs = _cxlfs;
	struct cxl_dev_state *cxlds = cxlfs->cxlds;

	cxlds->cxlfs = NULL;
	kvfree(cxlfs->entries);
	kfree(cxlfs);
}

/**
 * devm_cxl_setup_features() - Allocate and initialize features context
 * @cxlds: CXL device context
 *
 * Return: 0 on success or -errno on failure.
 */
int devm_cxl_setup_features(struct cxl_dev_state *cxlds)
{
	struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;

	if (cxl_mbox->feat_cap < CXL_FEATURES_RO)
		return -ENODEV;

	struct cxl_features_state *cxlfs __free(kfree) =
		kzalloc(sizeof(*cxlfs), GFP_KERNEL);
	if (!cxlfs)
		return -ENOMEM;

	cxlfs->cxlds = cxlds;

	cxlfs->entries = get_supported_features(cxlfs);
	if (!cxlfs->entries)
		return -ENOMEM;

	cxlds->cxlfs = cxlfs;

	return devm_add_action_or_reset(cxlds->dev, free_cxlfs, no_free_ptr(cxlfs));
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_features, "CXL");
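
/*
 * Example (hypothetical probe-time usage; error handling abbreviated).
 * A failure here is not necessarily fatal, it may simply mean the device
 * offers no Features capability:
 *
 *	rc = devm_cxl_setup_features(cxlds);
 *	if (rc)
 *		dev_dbg(cxlds->dev, "No CXL Features discovered\n");
 */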
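
/**
 * cxl_get_feature() - Retrieve feature data via the Get Feature command
 * @cxl_mbox: CXL mailbox context
 * @feat_uuid: UUID of the feature to read
 * @selection: which instance of the data to return (current, default, or saved)
 * @feat_out: output buffer for the feature data
 * @feat_out_size: size of @feat_out in bytes
 * @offset: starting byte offset within the feature data
 * @return_code: optional output for the last mailbox return code
 *
 * Transfers larger than the mailbox payload size are split across multiple
 * Get Feature commands.
 *
 * Return: number of bytes received, or 0 on failure.
 */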
size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
		       enum cxl_get_feat_selection selection,
		       void *feat_out, size_t feat_out_size, u16 offset,
		       u16 *return_code)
{
	size_t data_to_rd_size, size_out;
	struct cxl_mbox_get_feat_in pi;
	struct cxl_mbox_cmd mbox_cmd;
	size_t data_rcvd_size = 0;
	int rc;

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_INPUT;

	if (!feat_out || !feat_out_size)
		return 0;

	size_out = min(feat_out_size, cxl_mbox->payload_size);
	uuid_copy(&pi.uuid, feat_uuid);
	pi.selection = selection;
	do {
		data_to_rd_size = min(feat_out_size - data_rcvd_size,
				      cxl_mbox->payload_size);
		pi.offset = cpu_to_le16(offset + data_rcvd_size);
		pi.count = cpu_to_le16(data_to_rd_size);

		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_FEATURE,
			.size_in = sizeof(pi),
			.payload_in = &pi,
			.size_out = size_out,
			.payload_out = feat_out + data_rcvd_size,
			.min_out = data_to_rd_size,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0 || !mbox_cmd.size_out) {
			if (return_code)
				*return_code = mbox_cmd.return_code;
			return 0;
		}
		data_rcvd_size += mbox_cmd.size_out;
	} while (data_rcvd_size < feat_out_size);

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_SUCCESS;

	return data_rcvd_size;
}

/*
 * FEAT_DATA_MIN_PAYLOAD_SIZE - minimum number of extra bytes that must be
 * available in the mailbox for storing the actual feature data so that
 * the feature data transfer works as expected.
 */
#define FEAT_DATA_MIN_PAYLOAD_SIZE 10
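
/**
 * cxl_set_feature() - Write feature data via the Set Feature command
 * @cxl_mbox: CXL mailbox context
 * @feat_uuid: UUID of the feature to modify
 * @feat_version: version of the feature data being written
 * @feat_data: buffer holding the feature data
 * @feat_data_size: size of @feat_data in bytes
 * @feat_flag: Set Feature flags; the data transfer bits are managed here
 * @offset: starting byte offset within the feature data
 * @return_code: optional output for the last mailbox return code
 *
 * A payload that fits within a single mailbox command is sent as a full
 * data transfer; larger payloads are chunked using the initiate, continue,
 * and finish data transfer flags.
 *
 * Return: 0 on success or -errno on failure.
 */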
int cxl_set_feature(struct cxl_mailbox *cxl_mbox,
		    const uuid_t *feat_uuid, u8 feat_version,
		    const void *feat_data, size_t feat_data_size,
		    u32 feat_flag, u16 offset, u16 *return_code)
{
	size_t data_in_size, data_sent_size = 0;
	struct cxl_mbox_cmd mbox_cmd;
	size_t hdr_size;

	if (return_code)
		*return_code = CXL_MBOX_CMD_RC_INPUT;

	struct cxl_mbox_set_feat_in *pi __free(kfree) =
		kzalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!pi)
		return -ENOMEM;

	uuid_copy(&pi->uuid, feat_uuid);
	pi->version = feat_version;
	feat_flag &= ~CXL_SET_FEAT_FLAG_DATA_TRANSFER_MASK;
	feat_flag |= CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET;
	hdr_size = sizeof(pi->hdr);
	/*
	 * Check that the minimum mailbox payload size is available for
	 * the feature data transfer.
	 */
	if (hdr_size + FEAT_DATA_MIN_PAYLOAD_SIZE > cxl_mbox->payload_size)
		return -ENOMEM;

	if (hdr_size + feat_data_size <= cxl_mbox->payload_size) {
		pi->flags = cpu_to_le32(feat_flag |
					CXL_SET_FEAT_FLAG_FULL_DATA_TRANSFER);
		data_in_size = feat_data_size;
	} else {
		pi->flags = cpu_to_le32(feat_flag |
					CXL_SET_FEAT_FLAG_INITIATE_DATA_TRANSFER);
		data_in_size = cxl_mbox->payload_size - hdr_size;
	}

	do {
		int rc;

		pi->offset = cpu_to_le16(offset + data_sent_size);
		memcpy(pi->feat_data, feat_data + data_sent_size, data_in_size);
		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_SET_FEATURE,
			.size_in = hdr_size + data_in_size,
			.payload_in = pi,
		};
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc < 0) {
			if (return_code)
				*return_code = mbox_cmd.return_code;
			return rc;
		}

		data_sent_size += data_in_size;
		if (data_sent_size >= feat_data_size) {
			if (return_code)
				*return_code = CXL_MBOX_CMD_RC_SUCCESS;
			return 0;
		}

		if ((feat_data_size - data_sent_size) <= (cxl_mbox->payload_size - hdr_size)) {
			data_in_size = feat_data_size - data_sent_size;
			pi->flags = cpu_to_le32(feat_flag |
						CXL_SET_FEAT_FLAG_FINISH_DATA_TRANSFER);
		} else {
			pi->flags = cpu_to_le32(feat_flag |
						CXL_SET_FEAT_FLAG_CONTINUE_DATA_TRANSFER);
		}
	} while (true);
}

/* FWCTL support */
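
/*
 * Sketch of the userspace path (illustrative only; the generic character
 * device and ioctl plumbing live in the fwctl core, not in this file).
 * Userspace submits an RPC whose CXL-specific payload is a struct
 * fwctl_rpc_cxl, for example:
 *
 *	struct fwctl_rpc_cxl rpc = {
 *		.opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES,
 *		.op_size = sizeof(rpc.get_sup_feats_in),
 *		.get_sup_feats_in = { .count = ..., .start_idx = ... },
 *	};
 *
 * The fwctl core dispatches to cxlctl_fw_rpc() below, and the reply is a
 * struct fwctl_rpc_cxl_out whose .retval carries the mailbox return code.
 */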

static inline struct cxl_memdev *fwctl_to_memdev(struct fwctl_device *fwctl_dev)
{
	return to_cxl_memdev(fwctl_dev->dev.parent);
}

static int cxlctl_open_uctx(struct fwctl_uctx *uctx)
{
	return 0;
}

static void cxlctl_close_uctx(struct fwctl_uctx *uctx)
{
}
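
/**
 * cxl_feature_info() - Look up a cached feature entry by UUID
 * @cxlfs: CXL features context
 * @uuid: UUID of the feature to find
 *
 * Return: the matching feature entry, or ERR_PTR(-EINVAL) if the device
 * does not advertise the feature.
 */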
struct cxl_feat_entry *
cxl_feature_info(struct cxl_features_state *cxlfs,
		 const uuid_t *uuid)
{
	struct cxl_feat_entry *feat;

	for (int i = 0; i < cxlfs->entries->num_features; i++) {
		feat = &cxlfs->entries->ent[i];
		if (uuid_equal(uuid, &feat->uuid))
			return feat;
	}

	return ERR_PTR(-EINVAL);
}

static void *cxlctl_get_supported_features(struct cxl_features_state *cxlfs,
					   const struct fwctl_rpc_cxl *rpc_in,
					   size_t *out_len)
{
	const struct cxl_mbox_get_sup_feats_in *feat_in;
	struct cxl_mbox_get_sup_feats_out *feat_out;
	struct cxl_feat_entry *pos;
	size_t out_size;
	int requested;
	u32 count;
	u16 start;
	int i;

	if (rpc_in->op_size != sizeof(*feat_in))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->get_sup_feats_in;
	count = le32_to_cpu(feat_in->count);
	start = le16_to_cpu(feat_in->start_idx);
	requested = count / sizeof(*pos);

	/*
	 * Make sure that the total requested number of entries is not greater
	 * than the total number of supported features allowed for userspace.
	 */
	if (start >= cxlfs->entries->num_features)
		return ERR_PTR(-EINVAL);

	requested = min_t(int, requested, cxlfs->entries->num_features - start);

	out_size = sizeof(struct fwctl_rpc_cxl_out) +
		   struct_size(feat_out, ents, requested);

	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	rpc_out->size = struct_size(feat_out, ents, requested);
	feat_out = &rpc_out->get_sup_feats_out;

	for (i = start, pos = &feat_out->ents[0];
	     i < cxlfs->entries->num_features; i++, pos++) {
		if (i - start == requested)
			break;

		memcpy(pos, &cxlfs->entries->ent[i], sizeof(*pos));
		/*
		 * If the feature is exclusive, set the set_feat_size to 0 to
		 * indicate that the feature is not changeable.
		 */
		if (is_cxl_feature_exclusive(pos)) {
			u32 flags;

			pos->set_feat_size = 0;
			flags = le32_to_cpu(pos->flags);
			flags &= ~CXL_FEATURE_F_CHANGEABLE;
			pos->flags = cpu_to_le32(flags);
		}
	}

	feat_out->num_entries = cpu_to_le16(requested);
	feat_out->supported_feats = cpu_to_le16(cxlfs->entries->num_features);
	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
	*out_len = out_size;

	return no_free_ptr(rpc_out);
}

static void *cxlctl_get_feature(struct cxl_features_state *cxlfs,
				const struct fwctl_rpc_cxl *rpc_in,
				size_t *out_len)
{
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	const struct cxl_mbox_get_feat_in *feat_in;
	u16 offset, count, return_code;
	size_t out_size = *out_len;

	if (rpc_in->op_size != sizeof(*feat_in))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->get_feat_in;
	offset = le16_to_cpu(feat_in->offset);
	count = le16_to_cpu(feat_in->count);

	if (!count)
		return ERR_PTR(-EINVAL);

	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	out_size = cxl_get_feature(cxl_mbox, &feat_in->uuid,
				   feat_in->selection, rpc_out->payload,
				   count, offset, &return_code);
	*out_len = sizeof(struct fwctl_rpc_cxl_out);
	if (!out_size) {
		rpc_out->size = 0;
		rpc_out->retval = return_code;
		return no_free_ptr(rpc_out);
	}

	rpc_out->size = out_size;
	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;
	*out_len += out_size;

	return no_free_ptr(rpc_out);
}

static void *cxlctl_set_feature(struct cxl_features_state *cxlfs,
				const struct fwctl_rpc_cxl *rpc_in,
				size_t *out_len)
{
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;
	const struct cxl_mbox_set_feat_in *feat_in;
	size_t out_size, data_size;
	u16 offset, return_code;
	u32 flags;
	int rc;

	if (rpc_in->op_size <= sizeof(feat_in->hdr))
		return ERR_PTR(-EINVAL);

	feat_in = &rpc_in->set_feat_in;

	if (is_cxl_feature_exclusive_by_uuid(&feat_in->uuid))
		return ERR_PTR(-EPERM);

	offset = le16_to_cpu(feat_in->offset);
	flags = le32_to_cpu(feat_in->flags);
	out_size = *out_len;

	struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) =
		kvzalloc(out_size, GFP_KERNEL);
	if (!rpc_out)
		return ERR_PTR(-ENOMEM);

	rpc_out->size = 0;

	data_size = rpc_in->op_size - sizeof(feat_in->hdr);
	rc = cxl_set_feature(cxl_mbox, &feat_in->uuid,
			     feat_in->version, feat_in->feat_data,
			     data_size, flags, offset, &return_code);
	*out_len = sizeof(*rpc_out);
	if (rc) {
		rpc_out->retval = return_code;
		return no_free_ptr(rpc_out);
	}

	rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS;

	return no_free_ptr(rpc_out);
}

static bool cxlctl_validate_set_features(struct cxl_features_state *cxlfs,
					 const struct fwctl_rpc_cxl *rpc_in,
					 enum fwctl_rpc_scope scope)
{
	u16 effects, imm_mask, reset_mask;
	struct cxl_feat_entry *feat;
	u32 flags;

	if (rpc_in->op_size < sizeof(uuid_t))
		return false;

	feat = cxl_feature_info(cxlfs, &rpc_in->set_feat_in.uuid);
	if (IS_ERR(feat))
		return false;

	/* Ensure that the attribute is changeable */
	flags = le32_to_cpu(feat->flags);
	if (!(flags & CXL_FEATURE_F_CHANGEABLE))
		return false;

	effects = le16_to_cpu(feat->effects);

	/*
	 * Reserved bits are set; reject the command since the effects
	 * are not comprehended by the driver.
	 */
	if (effects & CXL_CMD_EFFECTS_RESERVED) {
		dev_warn_once(cxlfs->cxlds->dev,
			      "Reserved bits set in the Feature effects field!\n");
		return false;
	}

	/* Currently no user background command support */
	if (effects & CXL_CMD_BACKGROUND)
		return false;

	/* Effects that cause an immediate change need the highest security scope */
	imm_mask = CXL_CMD_CONFIG_CHANGE_IMMEDIATE |
		   CXL_CMD_DATA_CHANGE_IMMEDIATE |
		   CXL_CMD_POLICY_CHANGE_IMMEDIATE |
		   CXL_CMD_LOG_CHANGE_IMMEDIATE;

	reset_mask = CXL_CMD_CONFIG_CHANGE_COLD_RESET |
		     CXL_CMD_CONFIG_CHANGE_CONV_RESET |
		     CXL_CMD_CONFIG_CHANGE_CXL_RESET;

	/* If neither an immediate nor a reset effect is set, the hardware has a bug */
	if (!(effects & imm_mask) && !(effects & reset_mask))
		return false;

	/*
	 * If the Feature setting causes an immediate configuration change,
	 * then we need the full write permission policy.
	 */
	if (effects & imm_mask && scope >= FWCTL_RPC_DEBUG_WRITE_FULL)
		return true;

	/*
	 * If the Feature setting only causes a configuration change
	 * after a reset, then the lesser level of write permission
	 * policy is ok.
	 */
	if (!(effects & imm_mask) && scope >= FWCTL_RPC_DEBUG_WRITE)
		return true;

	return false;
}

static bool cxlctl_validate_hw_command(struct cxl_features_state *cxlfs,
				       const struct fwctl_rpc_cxl *rpc_in,
				       enum fwctl_rpc_scope scope,
				       u16 opcode)
{
	struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox;

	switch (opcode) {
	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
	case CXL_MBOX_OP_GET_FEATURE:
		return cxl_mbox->feat_cap >= CXL_FEATURES_RO;
	case CXL_MBOX_OP_SET_FEATURE:
		if (cxl_mbox->feat_cap < CXL_FEATURES_RW)
			return false;
		return cxlctl_validate_set_features(cxlfs, rpc_in, scope);
	default:
		return false;
	}
}

static void *cxlctl_handle_commands(struct cxl_features_state *cxlfs,
				    const struct fwctl_rpc_cxl *rpc_in,
				    size_t *out_len, u16 opcode)
{
	switch (opcode) {
	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
		return cxlctl_get_supported_features(cxlfs, rpc_in, out_len);
	case CXL_MBOX_OP_GET_FEATURE:
		return cxlctl_get_feature(cxlfs, rpc_in, out_len);
	case CXL_MBOX_OP_SET_FEATURE:
		return cxlctl_set_feature(cxlfs, rpc_in, out_len);
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}
}

static void *cxlctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
			   void *in, size_t in_len, size_t *out_len)
{
	struct fwctl_device *fwctl_dev = uctx->fwctl;
	struct cxl_memdev *cxlmd = fwctl_to_memdev(fwctl_dev);
	struct cxl_features_state *cxlfs = to_cxlfs(cxlmd->cxlds);
	const struct fwctl_rpc_cxl *rpc_in = in;
	u16 opcode = rpc_in->opcode;

	if (!cxlctl_validate_hw_command(cxlfs, rpc_in, scope, opcode))
		return ERR_PTR(-EINVAL);

	return cxlctl_handle_commands(cxlfs, rpc_in, out_len, opcode);
}

static const struct fwctl_ops cxlctl_ops = {
	.device_type = FWCTL_DEVICE_TYPE_CXL,
	.uctx_size = sizeof(struct fwctl_uctx),
	.open_uctx = cxlctl_open_uctx,
	.close_uctx = cxlctl_close_uctx,
	.fw_rpc = cxlctl_fw_rpc,
};

DEFINE_FREE(free_fwctl_dev, struct fwctl_device *, if (_T) fwctl_put(_T))

static void free_memdev_fwctl(void *_fwctl_dev)
{
	struct fwctl_device *fwctl_dev = _fwctl_dev;

	fwctl_unregister(fwctl_dev);
	fwctl_put(fwctl_dev);
}
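
/**
 * devm_cxl_setup_fwctl() - Allocate and register a fwctl device for a memdev
 * @host: devres parent device that scopes the fwctl registration lifetime
 * @cxlmd: CXL memory device context
 *
 * Return: 0 on success or -errno on failure. Returns -ENODEV if no features
 * context exists or if no user-visible features are present.
 */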
int devm_cxl_setup_fwctl(struct device *host, struct cxl_memdev *cxlmd)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_features_state *cxlfs;
	int rc;

	cxlfs = to_cxlfs(cxlds);
	if (!cxlfs)
		return -ENODEV;

	/* No need to set up FWCTL if no user-allowed features were found */
	if (!cxlfs->entries->num_user_features)
		return -ENODEV;

	struct fwctl_device *fwctl_dev __free(free_fwctl_dev) =
		_fwctl_alloc_device(&cxlmd->dev, &cxlctl_ops, sizeof(*fwctl_dev));
	if (!fwctl_dev)
		return -ENOMEM;

	rc = fwctl_register(fwctl_dev);
	if (rc)
		return rc;

	return devm_add_action_or_reset(host, free_memdev_fwctl,
					no_free_ptr(fwctl_dev));
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fwctl, "CXL");

MODULE_IMPORT_NS("FWCTL");