/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef __CXL_CORE_H__
#define __CXL_CORE_H__

#include <cxl/mailbox.h>
#include <linux/rwsem.h>

extern const struct device_type cxl_nvdimm_bridge_type;
extern const struct device_type cxl_nvdimm_type;
extern const struct device_type cxl_pmu_type;

extern struct attribute_group cxl_base_attribute_group;

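/*
 * Modes for cxl_decoder_detach(). Inferred from the names rather than
 * spelled out here: DETACH_ONLY appears to just unhook the endpoint
 * decoder from its region, while DETACH_INVALIDATE appears to also
 * invalidate the decoder's cached mode/state; see cxl_decoder_detach()
 * for the authoritative behavior.
 */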
enum cxl_detach_mode {
	DETACH_ONLY,
	DETACH_INVALIDATE,
};

#ifdef CONFIG_CXL_REGION
extern struct device_attribute dev_attr_create_pmem_region;
extern struct device_attribute dev_attr_create_ram_region;
extern struct device_attribute dev_attr_delete_region;
extern struct device_attribute dev_attr_region;
extern const struct device_type cxl_pmem_region_type;
extern const struct device_type cxl_dax_region_type;
extern const struct device_type cxl_region_type;

int cxl_decoder_detach(struct cxl_region *cxlr,
		       struct cxl_endpoint_decoder *cxled, int pos,
		       enum cxl_detach_mode mode);

#define CXL_REGION_ATTR(x) (&dev_attr_##x.attr)
#define CXL_REGION_TYPE(x) (&cxl_region_type)
#define SET_CXL_REGION_ATTR(x) (&dev_attr_##x.attr),
#define CXL_PMEM_REGION_TYPE(x) (&cxl_pmem_region_type)
#define CXL_DAX_REGION_TYPE(x) (&cxl_dax_region_type)
int cxl_region_init(void);
void cxl_region_exit(void);
int cxl_get_poison_by_endpoint(struct cxl_port *port);
struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa);
u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
		   u64 dpa);

#else
static inline u64 cxl_dpa_to_hpa(struct cxl_region *cxlr,
				 const struct cxl_memdev *cxlmd, u64 dpa)
{
	return ULLONG_MAX;
}
static inline
struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
{
	return NULL;
}
static inline int cxl_get_poison_by_endpoint(struct cxl_port *port)
{
	return 0;
}
static inline int cxl_decoder_detach(struct cxl_region *cxlr,
				     struct cxl_endpoint_decoder *cxled,
				     int pos, enum cxl_detach_mode mode)
{
	return 0;
}
static inline int cxl_region_init(void)
{
	return 0;
}
static inline void cxl_region_exit(void)
{
}
#define CXL_REGION_ATTR(x) NULL
#define CXL_REGION_TYPE(x) NULL
#define SET_CXL_REGION_ATTR(x)
#define CXL_PMEM_REGION_TYPE(x) NULL
#define CXL_DAX_REGION_TYPE(x) NULL
#endif
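/*
 * Illustrative sketch, not part of this header: the CXL_REGION_*() and
 * SET_CXL_REGION_ATTR() macros let sysfs attribute tables reference the
 * region attributes unconditionally. With CONFIG_CXL_REGION disabled they
 * expand to NULL or to nothing, so a hypothetical table such as
 *
 *	static struct attribute *port_attrs[] = {
 *		SET_CXL_REGION_ATTR(create_pmem_region)
 *		SET_CXL_REGION_ATTR(delete_region)
 *		NULL,
 *	};
 *
 * simply drops those entries instead of failing to build. The table name
 * above is made up for illustration.
 */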

struct cxl_send_command;
struct cxl_mem_query_commands;
int cxl_query_cmd(struct cxl_mailbox *cxl_mbox,
		  struct cxl_mem_query_commands __user *q);
int cxl_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_send_command __user *s);
void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
				   resource_size_t length);

struct dentry *cxl_debugfs_create_dir(const char *dir);
int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
		     enum cxl_partition_mode mode);
int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size);
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
bool cxl_resource_contains_addr(const struct resource *res, const resource_size_t addr);

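/*
 * Selects which register block inside a Root Complex Register Block
 * (RCRB) to resolve; in CXL 1.1-era RCH topologies the RCRB carries both
 * a downstream and an upstream port component register block.
 */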
enum cxl_rcrb {
	CXL_RCRB_DOWNSTREAM,
	CXL_RCRB_UPSTREAM,
};
struct cxl_rcrb_info;
resource_size_t __rcrb_to_component(struct device *dev,
				    struct cxl_rcrb_info *ri,
				    enum cxl_rcrb which);
u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb);

#define PCI_RCRB_CAP_LIST_ID_MASK GENMASK(7, 0)
#define PCI_RCRB_CAP_HDR_ID_MASK GENMASK(7, 0)
#define PCI_RCRB_CAP_HDR_NEXT_MASK GENMASK(15, 8)
#define PCI_CAP_EXP_SIZEOF 0x3c
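/*
 * Rough sketch (assumed usage, the real walk lives in helpers such as
 * cxl_rcrb_to_aer()): a capability header read from the mapped RCRB is
 * decoded with FIELD_GET() against the masks above, e.g.:
 *
 *	u32 hdr = readl(rcrb_base + offset);
 *	u8 id = FIELD_GET(PCI_RCRB_CAP_HDR_ID_MASK, hdr);
 *	u8 next = FIELD_GET(PCI_RCRB_CAP_HDR_NEXT_MASK, hdr);
 *
 * where rcrb_base and offset stand in for whatever mapping and cursor the
 * caller maintains.
 */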

struct cxl_rwsem {
	/*
	 * All changes to HPA (interleave configuration) occur with this
	 * lock held for write.
	 */
	struct rw_semaphore region;
	/*
	 * All changes to a device DPA space occur with this lock held
	 * for write.
	 */
	struct rw_semaphore dpa;
};

extern struct cxl_rwsem cxl_rwsem;
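/*
 * Usage sketch (illustrative only): paths that read interleave or DPA
 * configuration take the matching semaphore for read, paths that change
 * it take it for write, e.g.:
 *
 *	down_read(&cxl_rwsem.region);
 *	... inspect HPA / region state ...
 *	up_read(&cxl_rwsem.region);
 *
 * Whether a caller also needs cxl_rwsem.dpa depends on the path; the
 * comments in struct cxl_rwsem above state what each lock protects.
 */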

int cxl_memdev_init(void);
void cxl_memdev_exit(void);
void cxl_mbox_init(void);

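/*
 * Flavors of poison trace events; presumably these tag whether a poison
 * record came from a poison-list walk, an inject, or a clear request.
 */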
enum cxl_poison_trace_type {
	CXL_POISON_TRACE_LIST,
	CXL_POISON_TRACE_INJECT,
	CXL_POISON_TRACE_CLEAR,
};

enum poison_cmd_enabled_bits;
bool cxl_memdev_has_poison_cmd(struct cxl_memdev *cxlmd,
			       enum poison_cmd_enabled_bits cmd);

long cxl_pci_get_latency(struct pci_dev *pdev);
int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c);
int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
					struct access_coordinate *c);

int cxl_ras_init(void);
void cxl_ras_exit(void);
int cxl_gpf_port_setup(struct cxl_dport *dport);

struct cxl_hdm;
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
			struct cxl_endpoint_dvsec_info *info);
int cxl_port_get_possible_dports(struct cxl_port *port);

#ifdef CONFIG_CXL_FEATURES
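/*
 * CXL Features support. These helpers appear to wrap the Get/Set Feature
 * mailbox commands; the offset and return_code parameters suggest support
 * for transferring feature data in chunks and surfacing the device's
 * completion code to the caller.
 */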
struct cxl_feat_entry *
cxl_feature_info(struct cxl_features_state *cxlfs, const uuid_t *uuid);
size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
		       enum cxl_get_feat_selection selection,
		       void *feat_out, size_t feat_out_size, u16 offset,
		       u16 *return_code);
int cxl_set_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
		    u8 feat_version, const void *feat_data,
		    size_t feat_data_size, u32 feat_flag, u16 offset,
		    u16 *return_code);
#endif

#endif /* __CXL_CORE_H__ */