Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
2/**************************************************************************
3 *
4 * Copyright © 2018 VMware, Inc., Palo Alto, CA., USA
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28#ifndef _VMWGFX_VALIDATION_H_
29#define _VMWGFX_VALIDATION_H_
30
31#include <drm/drm_hashtab.h>
32#include <linux/list.h>
33#include <linux/ww_mutex.h>
34#include <drm/ttm/ttm_execbuf_util.h>
35
36#define VMW_RES_DIRTY_NONE 0
37#define VMW_RES_DIRTY_SET BIT(0)
38#define VMW_RES_DIRTY_CLEAR BIT(1)
39
/**
 * struct vmw_validation_mem - Custom interface to provide memory reservations
 * for the validation code.
 * @reserve_mem: Callback to reserve memory. Returns an int; presumably 0 on
 * success and a negative error code on failure — confirm against implementers.
 * @unreserve_mem: Callback to unreserve memory previously obtained through
 * @reserve_mem
 * @gran: Reservation granularity. Contains a hint how much memory should
 * be reserved in each call to @reserve_mem(). A slow implementation may want
 * reservation to be done in large batches.
 */
struct vmw_validation_mem {
	int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
	void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
	size_t gran;
};
54
/**
 * struct vmw_validation_context - Per command submission validation context
 * @ht: Hash table used to find resource- or buffer object duplicates
 * @resource_list: List head for resource validation metadata
 * @resource_ctx_list: List head for resource validation metadata for
 * resources that need to be validated before those in @resource_list
 * @bo_list: List head for buffer objects
 * @page_list: List of pages used by the memory allocator
 * @ticket: Ticket used for ww mutex locking
 * @res_mutex: Pointer to mutex used for resource reserving
 * @merge_dups: Whether to merge metadata for duplicate resources or
 * buffer objects
 * @mem_size_left: Free memory left in the last page in @page_list
 * @page_address: Kernel virtual address of the last page in @page_list
 * @vm: A pointer to the memory reservation interface or NULL if no
 * memory reservation is needed.
 * @vm_size_left: Amount of reserved memory that so far has not been allocated.
 * @total_mem: Amount of reserved memory.
 */
struct vmw_validation_context {
	struct drm_open_hash *ht;
	struct list_head resource_list;
	struct list_head resource_ctx_list;
	struct list_head bo_list;
	struct list_head page_list;
	struct ww_acquire_ctx ticket;
	struct mutex *res_mutex;
	unsigned int merge_dups;
	unsigned int mem_size_left;
	u8 *page_address;
	struct vmw_validation_mem *vm;
	size_t vm_size_left;
	size_t total_mem;
};
89
90struct vmw_buffer_object;
91struct vmw_resource;
92struct vmw_fence_obj;
93
/**
 * DECLARE_VAL_CONTEXT - Declare a validation context with initialization
 * @_name: The name of the variable
 * @_ht: The hash table used to find dups or NULL if none
 * @_merge_dups: Whether to merge duplicate buffer object- or resource
 * entries. If set to true, ideally a hash table pointer should be supplied
 * as well unless the number of resources and buffer objects per validation
 * is known to be very small
 *
 * Members not named in the initializer list (@vm, @page_address, ...) are
 * zero-initialized by the C rules for partial designated initialization.
 */
#define DECLARE_VAL_CONTEXT(_name, _ht, _merge_dups)			\
	struct vmw_validation_context _name =				\
	{ .ht = _ht,							\
	  .resource_list = LIST_HEAD_INIT((_name).resource_list),	\
	  .resource_ctx_list = LIST_HEAD_INIT((_name).resource_ctx_list), \
	  .bo_list = LIST_HEAD_INIT((_name).bo_list),			\
	  .page_list = LIST_HEAD_INIT((_name).page_list),		\
	  .res_mutex = NULL,						\
	  .merge_dups = _merge_dups,					\
	  .mem_size_left = 0,						\
	}
116
117/**
118 * vmw_validation_has_bos - return whether the validation context has
119 * any buffer objects registered.
120 *
121 * @ctx: The validation context
122 * Returns: Whether any buffer objects are registered
123 */
124static inline bool
125vmw_validation_has_bos(struct vmw_validation_context *ctx)
126{
127 return !list_empty(&ctx->bo_list);
128}
129
130/**
131 * vmw_validation_set_val_mem - Register a validation mem object for
132 * validation memory reservation
133 * @ctx: The validation context
134 * @vm: Pointer to a struct vmw_validation_mem
135 *
136 * Must be set before the first attempt to allocate validation memory.
137 */
138static inline void
139vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
140 struct vmw_validation_mem *vm)
141{
142 ctx->vm = vm;
143}
144
145/**
146 * vmw_validation_set_ht - Register a hash table for duplicate finding
147 * @ctx: The validation context
148 * @ht: Pointer to a hash table to use for duplicate finding
149 * This function is intended to be used if the hash table wasn't
150 * available at validation context declaration time
151 */
152static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx,
153 struct drm_open_hash *ht)
154{
155 ctx->ht = ht;
156}
157
158/**
159 * vmw_validation_bo_reserve - Reserve buffer objects registered with a
160 * validation context
161 * @ctx: The validation context
162 * @intr: Perform waits interruptible
163 *
164 * Return: Zero on success, -ERESTARTSYS when interrupted, negative error
165 * code on failure
166 */
167static inline int
168vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
169 bool intr)
170{
171 return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
172 NULL);
173}
174
175/**
176 * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
177 * validation context
178 * @ctx: The validation context
179 *
180 * This function unreserves the buffer objects previously reserved using
181 * vmw_validation_bo_reserve. It's typically used as part of an error path
182 */
183static inline void
184vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
185{
186 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
187}
188
189/**
190 * vmw_validation_bo_fence - Unreserve and fence buffer objects registered
191 * with a validation context
192 * @ctx: The validation context
193 *
194 * This function unreserves the buffer objects previously reserved using
195 * vmw_validation_bo_reserve, and fences them with a fence object.
196 */
197static inline void
198vmw_validation_bo_fence(struct vmw_validation_context *ctx,
199 struct vmw_fence_obj *fence)
200{
201 ttm_eu_fence_buffer_objects(&ctx->ticket, &ctx->bo_list,
202 (void *) fence);
203}
204
205/**
206 * vmw_validation_context_init - Initialize a validation context
207 * @ctx: Pointer to the validation context to initialize
208 *
209 * This function initializes a validation context with @merge_dups set
210 * to false
211 */
212static inline void
213vmw_validation_context_init(struct vmw_validation_context *ctx)
214{
215 memset(ctx, 0, sizeof(*ctx));
216 INIT_LIST_HEAD(&ctx->resource_list);
217 INIT_LIST_HEAD(&ctx->resource_ctx_list);
218 INIT_LIST_HEAD(&ctx->bo_list);
219}
220
/**
 * vmw_validation_align - Align a validation memory allocation
 * @val: The size to be aligned
 *
 * Return: @val rounded up to the granularity used by the validation
 * memory allocator (the size of a long).
 */
static inline unsigned int vmw_validation_align(unsigned int val)
{
	const unsigned int mask = sizeof(long) - 1;

	/* Same result as ALIGN(val, sizeof(long)); sizeof(long) is a power of two. */
	return (val + mask) & ~mask;
}
232
233int vmw_validation_add_bo(struct vmw_validation_context *ctx,
234 struct vmw_buffer_object *vbo,
235 bool as_mob, bool cpu_blit);
236int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
237 bool interruptible,
238 bool validate_as_mob);
239int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr);
240void vmw_validation_unref_lists(struct vmw_validation_context *ctx);
241int vmw_validation_add_resource(struct vmw_validation_context *ctx,
242 struct vmw_resource *res,
243 size_t priv_size,
244 u32 dirty,
245 void **p_node,
246 bool *first_usage);
247void vmw_validation_drop_ht(struct vmw_validation_context *ctx);
248int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
249 bool intr);
250void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
251 bool backoff);
252void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
253 void *val_private,
254 struct vmw_buffer_object *vbo,
255 unsigned long backup_offset);
256int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr);
257
258int vmw_validation_prepare(struct vmw_validation_context *ctx,
259 struct mutex *mutex, bool intr);
260void vmw_validation_revert(struct vmw_validation_context *ctx);
261void vmw_validation_done(struct vmw_validation_context *ctx,
262 struct vmw_fence_obj *fence);
263
264void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
265 unsigned int size);
266int vmw_validation_preload_bo(struct vmw_validation_context *ctx);
267int vmw_validation_preload_res(struct vmw_validation_context *ctx,
268 unsigned int size);
269void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
270 void *val_private, u32 dirty);
271#endif