// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2015-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include "kfd_crat.h"
#include "kfd_priv.h"
#include "kfd_topology.h"
#include "kfd_iommu.h"
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"

/* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
 * GPU processor IDs are expressed with Bit[31]=1.
 * The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs
 * used in the CRAT.
 */
static uint32_t gpu_processor_id_low = 0x80001000;

/* Return the next available gpu_processor_id and increment it for the
 * next GPU
 * @total_cu_count - Total CUs present in the GPU including ones
 *		     masked off
 */
static inline unsigned int get_and_inc_gpu_processor_id(
				unsigned int total_cu_count)
{
	int current_id = gpu_processor_id_low;

	gpu_processor_id_low += total_cu_count;
	return current_id;
}
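
/* Example (for illustration, not from the original source): with the base
 * above, the first GPU enumerated is assigned processor ID 0x80001000. If
 * it reports 64 (0x40) total CUs, gpu_processor_id_low advances to
 * 0x80001040, where the next GPU's processor IDs will begin.
 */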

/* Static table to describe GPU Cache information */
struct kfd_gpu_cache_info {
	uint32_t	cache_size;
	uint32_t	cache_level;
	uint32_t	flags;
	/* Indicates how many Compute Units share this cache
	 * within a SA. Value = 1 indicates the cache is not shared
	 */
	uint32_t	num_cu_shared;
};

static struct kfd_gpu_cache_info kaveri_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache (in SQC module) per bank */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* Scalar L1 Data Cache (in SQC module) per bank */
		.cache_size = 8,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},

	/* TODO: Add L2 Cache information */
};


static struct kfd_gpu_cache_info carrizo_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache (in SQC module) per bank */
		.cache_size = 8,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 4,
	},
	{
		/* Scalar L1 Data Cache (in SQC module) per bank. */
		.cache_size = 4,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 4,
	},

	/* TODO: Add L2 Cache information */
};

#define hawaii_cache_info kaveri_cache_info
#define tonga_cache_info carrizo_cache_info
#define fiji_cache_info carrizo_cache_info
#define polaris10_cache_info carrizo_cache_info
#define polaris11_cache_info carrizo_cache_info
#define polaris12_cache_info carrizo_cache_info
#define vegam_cache_info carrizo_cache_info

/* NOTE: L1 cache information has been updated and L2/L3
 * cache information has been added for Vega10 and
 * newer ASICs. The unit for cache_size is KiB.
 * In the future, the cache details must be checked and
 * updated for every new ASIC.
 */
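
/* Example (for illustration): since cache_size is in KiB, the vega10 L2
 * entry below with .cache_size = 4096 describes a 4 MiB cache.
 */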

static struct kfd_gpu_cache_info vega10_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.cache_size = 32,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 3,
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 3,
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.cache_size = 4096,
		.cache_level = 2,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 16,
	},
};

static struct kfd_gpu_cache_info raven_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.cache_size = 32,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 3,
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 3,
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.cache_size = 1024,
		.cache_level = 2,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 11,
	},
};

static struct kfd_gpu_cache_info renoir_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.cache_size = 32,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 3,
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 3,
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.cache_size = 1024,
		.cache_level = 2,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 8,
	},
};

static struct kfd_gpu_cache_info vega12_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.cache_size = 32,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 3,
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 3,
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.cache_size = 2048,
		.cache_level = 2,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 5,
	},
};

static struct kfd_gpu_cache_info vega20_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.cache_size = 32,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 3,
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 3,
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.cache_size = 8192,
		.cache_level = 2,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 16,
	},
};

static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.cache_size = 32,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.cache_size = 8192,
		.cache_level = 2,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 14,
	},
};

static struct kfd_gpu_cache_info navi10_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.cache_size = 32,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* GL1 Data Cache per SA */
		.cache_size = 128,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 10,
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.cache_size = 4096,
		.cache_level = 2,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 10,
	},
};

static struct kfd_gpu_cache_info vangogh_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.cache_size = 32,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* GL1 Data Cache per SA */
		.cache_size = 128,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 8,
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.cache_size = 1024,
		.cache_level = 2,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 8,
	},
};

static struct kfd_gpu_cache_info navi14_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.cache_size = 32,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* GL1 Data Cache per SA */
		.cache_size = 128,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 12,
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.cache_size = 2048,
		.cache_level = 2,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 12,
	},
};

static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.cache_size = 32,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* GL1 Data Cache per SA */
		.cache_size = 128,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 10,
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.cache_size = 4096,
		.cache_level = 2,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 10,
	},
	{
		/* L3 Data Cache per GPU */
		.cache_size = 128*1024,
		.cache_level = 3,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 10,
	},
};

static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.cache_size = 32,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* GL1 Data Cache per SA */
		.cache_size = 128,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 10,
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.cache_size = 3072,
		.cache_level = 2,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 10,
	},
	{
		/* L3 Data Cache per GPU */
		.cache_size = 96*1024,
		.cache_level = 3,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 10,
	},
};

static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.cache_size = 32,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* GL1 Data Cache per SA */
		.cache_size = 128,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 8,
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.cache_size = 2048,
		.cache_level = 2,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 8,
	},
	{
		/* L3 Data Cache per GPU */
		.cache_size = 32*1024,
		.cache_level = 3,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 8,
	},
};

static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.cache_size = 32,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* GL1 Data Cache per SA */
		.cache_size = 128,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 8,
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.cache_size = 1024,
		.cache_level = 2,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 8,
	},
	{
		/* L3 Data Cache per GPU */
		.cache_size = 16*1024,
		.cache_level = 3,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 8,
	},
};

static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.cache_size = 32,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* GL1 Data Cache per SA */
		.cache_size = 128,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 6,
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.cache_size = 2048,
		.cache_level = 2,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 6,
	},
};

static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
		struct crat_subtype_computeunit *cu)
{
	dev->node_props.cpu_cores_count = cu->num_cpu_cores;
	dev->node_props.cpu_core_id_base = cu->processor_id_low;
	if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
		dev->node_props.capability |= HSA_CAP_ATS_PRESENT;

	pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
			cu->processor_id_low);
}

static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
		struct crat_subtype_computeunit *cu)
{
	dev->node_props.simd_id_base = cu->processor_id_low;
	dev->node_props.simd_count = cu->num_simd_cores;
	dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
	dev->node_props.max_waves_per_simd = cu->max_waves_simd;
	dev->node_props.wave_front_size = cu->wave_front_size;
	dev->node_props.array_count = cu->array_count;
	dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
	dev->node_props.simd_per_cu = cu->num_simd_per_cu;
	dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
	if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
		dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
	pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
}

/* kfd_parse_subtype_cu - parse compute unit subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
				struct list_head *device_list)
{
	struct kfd_topology_device *dev;

	pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
			cu->proximity_domain, cu->hsa_capability);
	list_for_each_entry(dev, device_list, list) {
		if (cu->proximity_domain == dev->proximity_domain) {
			if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
				kfd_populated_cu_info_cpu(dev, cu);

			if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
				kfd_populated_cu_info_gpu(dev, cu);
			break;
		}
	}

	return 0;
}

static struct kfd_mem_properties *
find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
		struct kfd_topology_device *dev)
{
	struct kfd_mem_properties *props;

	list_for_each_entry(props, &dev->mem_props, list) {
		if (props->heap_type == heap_type
				&& props->flags == flags
				&& props->width == width)
			return props;
	}

	return NULL;
}

/* kfd_parse_subtype_mem - parse memory subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
				struct list_head *device_list)
{
	struct kfd_mem_properties *props;
	struct kfd_topology_device *dev;
	uint32_t heap_type;
	uint64_t size_in_bytes;
	uint32_t flags = 0;
	uint32_t width;

	pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
			mem->proximity_domain);
	list_for_each_entry(dev, device_list, list) {
		if (mem->proximity_domain == dev->proximity_domain) {
			/* We're on GPU node */
			if (dev->node_props.cpu_cores_count == 0) {
				/* APU */
				if (mem->visibility_type == 0)
					heap_type =
						HSA_MEM_HEAP_TYPE_FB_PRIVATE;
				/* dGPU */
				else
					heap_type = mem->visibility_type;
			} else
				heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;

			if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
				flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
			if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
				flags |= HSA_MEM_FLAGS_NON_VOLATILE;

			size_in_bytes =
				((uint64_t)mem->length_high << 32) +
				mem->length_low;
			width = mem->width;

			/* Multiple banks of the same type are aggregated into
			 * one. User mode doesn't care about multiple physical
			 * memory segments. It's managed as a single virtual
			 * heap for user mode.
			 */
			props = find_subtype_mem(heap_type, flags, width, dev);
			if (props) {
				props->size_in_bytes += size_in_bytes;
				break;
			}

			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->heap_type = heap_type;
			props->flags = flags;
			props->size_in_bytes = size_in_bytes;
			props->width = width;

			dev->node_props.mem_banks_count++;
			list_add_tail(&props->list, &dev->mem_props);

			break;
		}
	}

	return 0;
}
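
/* Example (for illustration): a bank reported with length_high = 0x1 and
 * length_low = 0x80000000 yields size_in_bytes = (0x1ULL << 32) +
 * 0x80000000 = 6 GiB; a second bank with the same heap_type, flags and
 * width is simply added onto that aggregate size.
 */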

/* kfd_parse_subtype_cache - parse cache subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
				struct list_head *device_list)
{
	struct kfd_cache_properties *props;
	struct kfd_topology_device *dev;
	uint32_t id;
	uint32_t total_num_of_cu;

	id = cache->processor_id_low;

	pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
	list_for_each_entry(dev, device_list, list) {
		total_num_of_cu = (dev->node_props.array_count *
					dev->node_props.cu_per_simd_array);

		/* Cache information in CRAT doesn't have proximity_domain
		 * information as it is associated with a CPU core or GPU
		 * Compute Unit. So map the cache using CPU core Id or SIMD
		 * (GPU) ID.
		 * TODO: This works because currently we can safely assume that
		 * Compute Units are parsed before caches are parsed. In the
		 * future, remove this dependency.
		 */
		if ((id >= dev->node_props.cpu_core_id_base &&
			id <= dev->node_props.cpu_core_id_base +
				dev->node_props.cpu_cores_count) ||
			(id >= dev->node_props.simd_id_base &&
			id < dev->node_props.simd_id_base +
				total_num_of_cu)) {
			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->processor_id_low = id;
			props->cache_level = cache->cache_level;
			props->cache_size = cache->cache_size;
			props->cacheline_size = cache->cache_line_size;
			props->cachelines_per_tag = cache->lines_per_tag;
			props->cache_assoc = cache->associativity;
			props->cache_latency = cache->cache_latency;
			memcpy(props->sibling_map, cache->sibling_map,
					sizeof(props->sibling_map));

			if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_DATA;
			if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
			if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_CPU;
			if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_HSACU;

			dev->cache_count++;
			dev->node_props.caches_count++;
			list_add_tail(&props->list, &dev->cache_props);

			break;
		}
	}

	return 0;
}
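
/* Example (for illustration): with simd_id_base = 0x80001000 and
 * total_num_of_cu = 64, a cache entry whose processor_id_low is 0x80001020
 * falls inside [0x80001000, 0x80001040) and is therefore attached to that
 * GPU node.
 */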

/* kfd_parse_subtype_iolink - parse iolink subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
					struct list_head *device_list)
{
	struct kfd_iolink_properties *props = NULL, *props2;
	struct kfd_topology_device *dev, *to_dev;
	uint32_t id_from;
	uint32_t id_to;

	id_from = iolink->proximity_domain_from;
	id_to = iolink->proximity_domain_to;

	pr_debug("Found IO link entry in CRAT table with id_from=%d, id_to %d\n",
			id_from, id_to);
	list_for_each_entry(dev, device_list, list) {
		if (id_from == dev->proximity_domain) {
			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->node_from = id_from;
			props->node_to = id_to;
			props->ver_maj = iolink->version_major;
			props->ver_min = iolink->version_minor;
			props->iolink_type = iolink->io_interface_type;

			if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
				props->weight = 20;
			else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
				props->weight = 15 * iolink->num_hops_xgmi;
			else
				props->weight = node_distance(id_from, id_to);

			props->min_latency = iolink->minimum_latency;
			props->max_latency = iolink->maximum_latency;
			props->min_bandwidth = iolink->minimum_bandwidth_mbs;
			props->max_bandwidth = iolink->maximum_bandwidth_mbs;
			props->rec_transfer_size =
					iolink->recommended_transfer_size;

			dev->io_link_count++;
			dev->node_props.io_links_count++;
			list_add_tail(&props->list, &dev->io_link_props);
			break;
		}
	}

	/* CPU topology is created before GPUs are detected, so CPU->GPU
	 * links are not built at that time. If a PCIe type is discovered, it
	 * means a GPU is detected and we are adding GPU->CPU to the topology.
	 * At this time, also add the corresponding CPU->GPU link if the GPU
	 * has a large BAR.
	 * For xGMI, the CRAT table only contains the link in one direction,
	 * so add the corresponding reversed-direction link now.
	 */
	if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) {
		to_dev = kfd_topology_device_by_proximity_domain(id_to);
		if (!to_dev)
			return -ENODEV;
		/* same everything but the other direction */
		props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
		if (!props2)
			return -ENOMEM;

		props2->node_from = id_to;
		props2->node_to = id_from;
		props2->kobj = NULL;
		to_dev->io_link_count++;
		to_dev->node_props.io_links_count++;
		list_add_tail(&props2->list, &to_dev->io_link_props);
	}

	return 0;
}

/* kfd_parse_subtype - parse subtypes and attach them to the correct topology
 * device present in the device_list
 * @sub_type_hdr - subtype section of crat_image
 * @device_list - list of topology devices present in this crat_image
 */
static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
				struct list_head *device_list)
{
	struct crat_subtype_computeunit *cu;
	struct crat_subtype_memory *mem;
	struct crat_subtype_cache *cache;
	struct crat_subtype_iolink *iolink;
	int ret = 0;

	switch (sub_type_hdr->type) {
	case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
		cu = (struct crat_subtype_computeunit *)sub_type_hdr;
		ret = kfd_parse_subtype_cu(cu, device_list);
		break;
	case CRAT_SUBTYPE_MEMORY_AFFINITY:
		mem = (struct crat_subtype_memory *)sub_type_hdr;
		ret = kfd_parse_subtype_mem(mem, device_list);
		break;
	case CRAT_SUBTYPE_CACHE_AFFINITY:
		cache = (struct crat_subtype_cache *)sub_type_hdr;
		ret = kfd_parse_subtype_cache(cache, device_list);
		break;
	case CRAT_SUBTYPE_TLB_AFFINITY:
		/*
		 * For now, nothing to do here
		 */
		pr_debug("Found TLB entry in CRAT table (not processing)\n");
		break;
	case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
		/*
		 * For now, nothing to do here
		 */
		pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
		break;
	case CRAT_SUBTYPE_IOLINK_AFFINITY:
		iolink = (struct crat_subtype_iolink *)sub_type_hdr;
		ret = kfd_parse_subtype_iolink(iolink, device_list);
		break;
	default:
		pr_warn("Unknown subtype %d in CRAT\n",
				sub_type_hdr->type);
	}

	return ret;
}

/* kfd_parse_crat_table - parse CRAT table. For each node present in CRAT,
 * create a kfd_topology_device and add it to device_list. Also parse
 * CRAT subtypes and attach them to the appropriate kfd_topology_device
 * @crat_image - input image containing CRAT
 * @device_list - [OUT] list of kfd_topology_device generated after
 *		  parsing crat_image
 * @proximity_domain - Proximity domain of the first device in the table
 *
 * Return - 0 if successful else -ve value
 */
int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
			 uint32_t proximity_domain)
{
	struct kfd_topology_device *top_dev = NULL;
	struct crat_subtype_generic *sub_type_hdr;
	uint16_t node_id;
	int ret = 0;
	struct crat_header *crat_table = (struct crat_header *)crat_image;
	uint16_t num_nodes;
	uint32_t image_len;

	if (!crat_image)
		return -EINVAL;

	if (!list_empty(device_list)) {
		pr_warn("Error device list should be empty\n");
		return -EINVAL;
	}

	num_nodes = crat_table->num_domains;
	image_len = crat_table->length;

	pr_debug("Parsing CRAT table with %d nodes\n", num_nodes);

	for (node_id = 0; node_id < num_nodes; node_id++) {
		top_dev = kfd_create_topology_device(device_list);
		if (!top_dev)
			break;
		top_dev->proximity_domain = proximity_domain++;
	}

	if (!top_dev) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(top_dev->oem_id, crat_table->oem_id, CRAT_OEMID_LENGTH);
	memcpy(top_dev->oem_table_id, crat_table->oem_table_id,
			CRAT_OEMTABLEID_LENGTH);
	top_dev->oem_revision = crat_table->oem_revision;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
	while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
			((char *)crat_image) + image_len) {
		if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
			ret = kfd_parse_subtype(sub_type_hdr, device_list);
			if (ret)
				break;
		}

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
				sub_type_hdr->length);
	}

err:
	if (ret)
		kfd_release_topology_device_list(device_list);

	return ret;
}
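
/* Example (for illustration): a CRAT image with num_domains = 2, parsed
 * with proximity_domain = 0, produces two topology devices with proximity
 * domains 0 and 1; each enabled subtype entry is then attached to the
 * device whose proximity domain it names.
 */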

/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
static int fill_in_l1_pcache(struct crat_subtype_cache *pcache,
				struct kfd_gpu_cache_info *pcache_info,
				struct kfd_cu_info *cu_info,
				int mem_available,
				int cu_bitmask,
				int cache_type, unsigned int cu_processor_id,
				int cu_block)
{
	unsigned int cu_sibling_map_mask;
	int first_active_cu;

	/* First check if enough memory is available */
	if (sizeof(struct crat_subtype_cache) > mem_available)
		return -ENOMEM;

	cu_sibling_map_mask = cu_bitmask;
	cu_sibling_map_mask >>= cu_block;
	cu_sibling_map_mask &=
		((1 << pcache_info[cache_type].num_cu_shared) - 1);
	first_active_cu = ffs(cu_sibling_map_mask);

	/* A CU can be inactive. For a shared cache, find the first active
	 * CU; for a non-shared cache, check whether the CU is inactive and,
	 * if so, skip it.
	 */
	if (first_active_cu) {
		memset(pcache, 0, sizeof(struct crat_subtype_cache));
		pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
		pcache->length = sizeof(struct crat_subtype_cache);
		pcache->flags = pcache_info[cache_type].flags;
		pcache->processor_id_low = cu_processor_id
					 + (first_active_cu - 1);
		pcache->cache_level = pcache_info[cache_type].cache_level;
		pcache->cache_size = pcache_info[cache_type].cache_size;

		/* Sibling map is w.r.t processor_id_low, so shift out
		 * inactive CU
		 */
		cu_sibling_map_mask =
			cu_sibling_map_mask >> (first_active_cu - 1);

		pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
		pcache->sibling_map[1] =
				(uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
		pcache->sibling_map[2] =
				(uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
		pcache->sibling_map[3] =
				(uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
		return 0;
	}
	return 1;
}
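
/* Example (for illustration): with num_cu_shared = 2, cu_block = 2 and
 * cu_bitmask = 0b1100, the mask is shifted to 0b11 and ANDed with
 * (1 << 2) - 1 = 0b11, so ffs() returns 1: the first CU of the block is
 * active and processor_id_low becomes cu_processor_id + 0.
 */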

/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
static int fill_in_l2_l3_pcache(struct crat_subtype_cache *pcache,
				struct kfd_gpu_cache_info *pcache_info,
				struct kfd_cu_info *cu_info,
				int mem_available,
				int cache_type, unsigned int cu_processor_id)
{
	unsigned int cu_sibling_map_mask;
	int first_active_cu;
	int i, j, k;

	/* First check if enough memory is available */
	if (sizeof(struct crat_subtype_cache) > mem_available)
		return -ENOMEM;

	cu_sibling_map_mask = cu_info->cu_bitmap[0][0];
	cu_sibling_map_mask &=
		((1 << pcache_info[cache_type].num_cu_shared) - 1);
	first_active_cu = ffs(cu_sibling_map_mask);

	/* A CU can be inactive. For a shared cache, find the first active
	 * CU; for a non-shared cache, check whether the CU is inactive and,
	 * if so, skip it.
	 */
	if (first_active_cu) {
		memset(pcache, 0, sizeof(struct crat_subtype_cache));
		pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
		pcache->length = sizeof(struct crat_subtype_cache);
		pcache->flags = pcache_info[cache_type].flags;
		pcache->processor_id_low = cu_processor_id
					 + (first_active_cu - 1);
		pcache->cache_level = pcache_info[cache_type].cache_level;
		pcache->cache_size = pcache_info[cache_type].cache_size;

		/* Sibling map is w.r.t processor_id_low, so shift out
		 * inactive CU
		 */
		cu_sibling_map_mask =
			cu_sibling_map_mask >> (first_active_cu - 1);
		k = 0;
		for (i = 0; i < cu_info->num_shader_engines; i++) {
			for (j = 0; j < cu_info->num_shader_arrays_per_engine;
				j++) {
				pcache->sibling_map[k] =
				 (uint8_t)(cu_sibling_map_mask & 0xFF);
				pcache->sibling_map[k+1] =
				 (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
				pcache->sibling_map[k+2] =
				 (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
				pcache->sibling_map[k+3] =
				 (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
				k += 4;
				cu_sibling_map_mask =
					cu_info->cu_bitmap[i % 4][j + i / 4];
				cu_sibling_map_mask &= (
				 (1 << pcache_info[cache_type].num_cu_shared)
				 - 1);
			}
		}
		return 0;
	}
	return 1;
}
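
/* Note (for illustration): the sibling map is filled four bytes per shader
 * array, so with e.g. 2 shader engines and 2 shader arrays per engine the
 * loop above writes sibling_map[0..15], one 32-bit CU mask per SA.
 */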

/* kfd_fill_gpu_cache_info - Fill GPU cache info using kfd_gpu_cache_info
 * tables
 *
 * @kdev - [IN] GPU device
 * @gpu_processor_id - [IN] GPU processor ID with which these caches
 *		       are associated
 * @available_size - [IN] Amount of memory available in pcache
 * @cu_info - [IN] Compute Unit info obtained from KGD
 * @pcache - [OUT] memory into which cache data is to be filled in.
 * @size_filled - [OUT] amount of data used up in pcache.
 * @num_of_entries - [OUT] number of caches added
 */
static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
			int gpu_processor_id,
			int available_size,
			struct kfd_cu_info *cu_info,
			struct crat_subtype_cache *pcache,
			int *size_filled,
			int *num_of_entries)
{
	struct kfd_gpu_cache_info *pcache_info;
	int num_of_cache_types = 0;
	int i, j, k;
	int ct = 0;
	int mem_available = available_size;
	unsigned int cu_processor_id;
	int ret;
	unsigned int num_cu_shared;

	switch (kdev->adev->asic_type) {
	case CHIP_KAVERI:
		pcache_info = kaveri_cache_info;
		num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
		break;
	case CHIP_HAWAII:
		pcache_info = hawaii_cache_info;
		num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
		break;
	case CHIP_CARRIZO:
		pcache_info = carrizo_cache_info;
		num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
		break;
	case CHIP_TONGA:
		pcache_info = tonga_cache_info;
		num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
		break;
	case CHIP_FIJI:
		pcache_info = fiji_cache_info;
		num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
		break;
	case CHIP_POLARIS10:
		pcache_info = polaris10_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
		break;
	case CHIP_POLARIS11:
		pcache_info = polaris11_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
		break;
	case CHIP_POLARIS12:
		pcache_info = polaris12_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
		break;
	case CHIP_VEGAM:
		pcache_info = vegam_cache_info;
		num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
		break;
	default:
		switch (KFD_GC_VERSION(kdev)) {
		case IP_VERSION(9, 0, 1):
			pcache_info = vega10_cache_info;
			num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
			break;
		case IP_VERSION(9, 2, 1):
			pcache_info = vega12_cache_info;
			num_of_cache_types = ARRAY_SIZE(vega12_cache_info);
			break;
		case IP_VERSION(9, 4, 0):
		case IP_VERSION(9, 4, 1):
			pcache_info = vega20_cache_info;
			num_of_cache_types = ARRAY_SIZE(vega20_cache_info);
			break;
		case IP_VERSION(9, 4, 2):
			pcache_info = aldebaran_cache_info;
			num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info);
			break;
		case IP_VERSION(9, 1, 0):
		case IP_VERSION(9, 2, 2):
			pcache_info = raven_cache_info;
			num_of_cache_types = ARRAY_SIZE(raven_cache_info);
			break;
		case IP_VERSION(9, 3, 0):
			pcache_info = renoir_cache_info;
			num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
			break;
		case IP_VERSION(10, 1, 10):
		case IP_VERSION(10, 1, 2):
		case IP_VERSION(10, 1, 3):
		case IP_VERSION(10, 1, 4):
			pcache_info = navi10_cache_info;
			num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
			break;
		case IP_VERSION(10, 1, 1):
			pcache_info = navi14_cache_info;
			num_of_cache_types = ARRAY_SIZE(navi14_cache_info);
			break;
		case IP_VERSION(10, 3, 0):
			pcache_info = sienna_cichlid_cache_info;
			num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info);
			break;
		case IP_VERSION(10, 3, 2):
			pcache_info = navy_flounder_cache_info;
			num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info);
			break;
		case IP_VERSION(10, 3, 4):
			pcache_info = dimgrey_cavefish_cache_info;
			num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info);
			break;
		case IP_VERSION(10, 3, 1):
			pcache_info = vangogh_cache_info;
			num_of_cache_types = ARRAY_SIZE(vangogh_cache_info);
			break;
		case IP_VERSION(10, 3, 5):
			pcache_info = beige_goby_cache_info;
			num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
			break;
		case IP_VERSION(10, 3, 3):
			pcache_info = yellow_carp_cache_info;
			num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
			break;
		default:
			return -EINVAL;
		}
	}

	*size_filled = 0;
	*num_of_entries = 0;

	/* For each type of cache listed in the kfd_gpu_cache_info table,
	 * go through all available Compute Units.
	 * The [i, j, k] loop:
	 *	if kfd_gpu_cache_info.num_cu_shared = 1
	 *		will parse through all available CUs
	 *	if kfd_gpu_cache_info.num_cu_shared != 1
	 *		will consider only one CU from each shared unit
	 */

	for (ct = 0; ct < num_of_cache_types; ct++) {
		cu_processor_id = gpu_processor_id;
		if (pcache_info[ct].cache_level == 1) {
			for (i = 0; i < cu_info->num_shader_engines; i++) {
				for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
					for (k = 0; k < cu_info->num_cu_per_sh;
						k += pcache_info[ct].num_cu_shared) {
						ret = fill_in_l1_pcache(pcache,
								pcache_info,
								cu_info,
								mem_available,
								cu_info->cu_bitmap[i % 4][j + i / 4],
								ct,
								cu_processor_id,
								k);

						if (ret < 0)
							break;

						if (!ret) {
							pcache++;
							(*num_of_entries)++;
							mem_available -= sizeof(*pcache);
							(*size_filled) += sizeof(*pcache);
						}

						/* Move to next CU block */
						num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
							cu_info->num_cu_per_sh) ?
							pcache_info[ct].num_cu_shared :
							(cu_info->num_cu_per_sh - k);
						cu_processor_id += num_cu_shared;
					}
				}
			}
		} else {
			ret = fill_in_l2_l3_pcache(pcache,
					pcache_info,
					cu_info,
					mem_available,
					ct,
					cu_processor_id);

			if (ret < 0)
				break;

			if (!ret) {
				pcache++;
				(*num_of_entries)++;
				mem_available -= sizeof(*pcache);
				(*size_filled) += sizeof(*pcache);
			}
		}
	}

	pr_debug("Added [%d] GPU cache entries\n", *num_of_entries);

	return 0;
}

static bool kfd_ignore_crat(void)
{
	bool ret;

	if (ignore_crat)
		return true;

#ifndef KFD_SUPPORT_IOMMU_V2
	ret = true;
#else
	ret = false;
#endif

	return ret;
}

/*
 * kfd_create_crat_image_acpi - Allocates memory for CRAT image and
 * copies CRAT from ACPI (if available).
 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
 *
 * @crat_image: CRAT read from ACPI. If no CRAT in ACPI then
 *	       crat_image will be NULL
 * @size: [OUT] size of crat_image
 *
 * Return 0 if successful else return error code
 */
int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
{
	struct acpi_table_header *crat_table;
	acpi_status status;
	void *pcrat_image;
	int rc = 0;

	if (!crat_image)
		return -EINVAL;

	*crat_image = NULL;

	if (kfd_ignore_crat()) {
		pr_info("CRAT table disabled by module option\n");
		return -ENODATA;
	}

	/* Fetch the CRAT table from ACPI */
	status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
	if (status == AE_NOT_FOUND) {
		pr_info("CRAT table not found\n");
		return -ENODATA;
	} else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);

		pr_err("CRAT table error: %s\n", err);
		return -EINVAL;
	}

	pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL);
	if (!pcrat_image) {
		rc = -ENOMEM;
		goto out;
	}

	memcpy(pcrat_image, crat_table, crat_table->length);
	*crat_image = pcrat_image;
	*size = crat_table->length;
out:
	acpi_put_table(crat_table);
	return rc;
}
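
/* Typical usage (sketch for illustration; error handling elided):
 *
 *	void *crat_image = NULL;
 *	size_t image_size = 0;
 *	LIST_HEAD(device_list);
 *
 *	if (!kfd_create_crat_image_acpi(&crat_image, &image_size)) {
 *		kfd_parse_crat_table(crat_image, &device_list, 0);
 *		kfd_destroy_crat_image(crat_image);
 *	}
 */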

/* Memory required to create Virtual CRAT.
 * Since there is no easy way to predict the amount of memory required, the
 * following amount is allocated for the GPU Virtual CRAT. This is
 * expected to cover all known conditions. But to be safe, an additional
 * check is put in the code to ensure we don't overwrite.
 */
#define VCRAT_SIZE_FOR_GPU	(4 * PAGE_SIZE)

/* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
 *
 * @numa_node_id: CPU NUMA node id
 * @avail_size: Available size in the memory
 * @sub_type_hdr: Memory into which compute info will be filled in
 *
 * Return 0 if successful else return -ve value
 */
static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
				int proximity_domain,
				struct crat_subtype_computeunit *sub_type_hdr)
{
	const struct cpumask *cpumask;

	*avail_size -= sizeof(struct crat_subtype_computeunit);
	if (*avail_size < 0)
		return -ENOMEM;

	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	cpumask = cpumask_of_node(numa_node_id);

	/* Fill in CU data */
	sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
	sub_type_hdr->proximity_domain = proximity_domain;
	sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
	if (sub_type_hdr->processor_id_low == -1)
		return -EINVAL;

	sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);

	return 0;
}

/* kfd_fill_mem_info_for_cpu - Fill in Memory info for the given CPU NUMA node
 *
 * @numa_node_id: CPU NUMA node id
 * @avail_size: Available size in the memory
 * @sub_type_hdr: Memory into which memory info will be filled in
 *
 * Return 0 if successful else return -ve value
 */
static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
			int proximity_domain,
			struct crat_subtype_memory *sub_type_hdr)
{
	uint64_t mem_in_bytes = 0;
	pg_data_t *pgdat;
	int zone_type;

	*avail_size -= sizeof(struct crat_subtype_memory);
	if (*avail_size < 0)
		return -ENOMEM;

	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_memory);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	/* Fill in Memory Subunit data */

	/* Unlike si_meminfo, si_meminfo_node is not exported. So
	 * the following lines are duplicated from si_meminfo_node
	 * function
	 */
	pgdat = NODE_DATA(numa_node_id);
	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
		mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
	mem_in_bytes <<= PAGE_SHIFT;

	sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
	sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
	sub_type_hdr->proximity_domain = proximity_domain;

	return 0;
}
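
/* Example (for illustration): for a node with 16 GiB of managed memory,
 * mem_in_bytes = 0x400000000, so length_low is set to 0x00000000 and
 * length_high to 0x4.
 */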

#ifdef CONFIG_X86_64
static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
				uint32_t *num_entries,
				struct crat_subtype_iolink *sub_type_hdr)
{
	int nid;
	struct cpuinfo_x86 *c = &cpu_data(0);
	uint8_t link_type;

	if (c->x86_vendor == X86_VENDOR_AMD)
		link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
	else
		link_type = CRAT_IOLINK_TYPE_QPI_1_1;

	*num_entries = 0;

	/* Create IO links from this node to other CPU nodes */
	for_each_online_node(nid) {
		if (nid == numa_node_id) /* node itself */
			continue;

		*avail_size -= sizeof(struct crat_subtype_iolink);
		if (*avail_size < 0)
			return -ENOMEM;

		memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

		/* Fill in subtype header data */
		sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
		sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
		sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

		/* Fill in IO link data */
		sub_type_hdr->proximity_domain_from = numa_node_id;
		sub_type_hdr->proximity_domain_to = nid;
		sub_type_hdr->io_interface_type = link_type;

		(*num_entries)++;
		sub_type_hdr++;
	}

	return 0;
}
#endif

/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
 *
 * @pcrat_image: Fill in VCRAT for CPU
 * @size:	[IN] allocated size of crat_image.
 *		[OUT] actual size of data filled in crat_image
 */
static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
{
	struct crat_header *crat_table = (struct crat_header *)pcrat_image;
	struct acpi_table_header *acpi_table;
	acpi_status status;
	struct crat_subtype_generic *sub_type_hdr;
	int avail_size = *size;
	int numa_node_id;
#ifdef CONFIG_X86_64
	uint32_t entries = 0;
#endif
	int ret = 0;

	if (!pcrat_image)
		return -EINVAL;

	/* Fill in CRAT Header.
	 * Modify length and total_entries as subunits are added.
	 */
	avail_size -= sizeof(struct crat_header);
	if (avail_size < 0)
		return -ENOMEM;

	memset(crat_table, 0, sizeof(struct crat_header));
	memcpy(&crat_table->signature, CRAT_SIGNATURE,
			sizeof(crat_table->signature));
	crat_table->length = sizeof(struct crat_header);

	status = acpi_get_table("DSDT", 0, &acpi_table);
	if (status != AE_OK)
		pr_warn("DSDT table not found for OEM information\n");
	else {
		crat_table->oem_revision = acpi_table->revision;
		memcpy(crat_table->oem_id, acpi_table->oem_id,
				CRAT_OEMID_LENGTH);
		memcpy(crat_table->oem_table_id, acpi_table->oem_table_id,
				CRAT_OEMTABLEID_LENGTH);
		acpi_put_table(acpi_table);
	}
	crat_table->total_entries = 0;
	crat_table->num_domains = 0;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);

	for_each_online_node(numa_node_id) {
		if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
			continue;

		/* Fill in Subtype: Compute Unit */
		ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
			crat_table->num_domains,
			(struct crat_subtype_computeunit *)sub_type_hdr);
		if (ret < 0)
			return ret;
		crat_table->length += sub_type_hdr->length;
		crat_table->total_entries++;

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
				sub_type_hdr->length);

		/* Fill in Subtype: Memory */
		ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
			crat_table->num_domains,
			(struct crat_subtype_memory *)sub_type_hdr);
		if (ret < 0)
			return ret;
		crat_table->length += sub_type_hdr->length;
		crat_table->total_entries++;

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
				sub_type_hdr->length);

		/* Fill in Subtype: IO Link */
#ifdef CONFIG_X86_64
		ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
				&entries,
				(struct crat_subtype_iolink *)sub_type_hdr);
		if (ret < 0)
			return ret;

		if (entries) {
			crat_table->length += (sub_type_hdr->length * entries);
			crat_table->total_entries += entries;

			sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
					sub_type_hdr->length * entries);
		}
#else
		pr_info("IO link not available for non x86 platforms\n");
#endif

		crat_table->num_domains++;
	}

	/* TODO: Add cache Subtype for CPU.
	 * Currently, CPU cache information is available in function
	 * detect_cache_attributes(cpu) defined in the file
	 * ./arch/x86/kernel/cpu/intel_cacheinfo.c. This function is not
	 * exported and to get the same information the code needs to be
	 * duplicated.
	 */

	*size = crat_table->length;
	pr_info("Virtual CRAT table created for CPU\n");

	return 0;
}
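
/* Note (for illustration): crat_table->length starts at
 * sizeof(struct crat_header) and grows by each subtype's length as subunits
 * are appended, so the *size returned above covers the header plus every
 * CU, memory and (on x86-64) IO link entry emitted.
 */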

static int kfd_fill_gpu_memory_affinity(int *avail_size,
		struct kfd_dev *kdev, uint8_t type, uint64_t size,
		struct crat_subtype_memory *sub_type_hdr,
		uint32_t proximity_domain,
		const struct kfd_local_mem_info *local_mem_info)
{
	*avail_size -= sizeof(struct crat_subtype_memory);
	if (*avail_size < 0)
		return -ENOMEM;

	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
	sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_memory);
	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;

	sub_type_hdr->proximity_domain = proximity_domain;

	pr_debug("Fill gpu memory affinity - type 0x%x size 0x%llx\n",
			type, size);

	sub_type_hdr->length_low = lower_32_bits(size);
	sub_type_hdr->length_high = upper_32_bits(size);

	sub_type_hdr->width = local_mem_info->vram_width;
	sub_type_hdr->visibility_type = type;

	return 0;
}
1874
1875#ifdef CONFIG_ACPI_NUMA
1876static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev)
1877{
1878 struct acpi_table_header *table_header = NULL;
1879 struct acpi_subtable_header *sub_header = NULL;
1880 unsigned long table_end, subtable_len;
1881 u32 pci_id = pci_domain_nr(kdev->pdev->bus) << 16 |
1882 pci_dev_id(kdev->pdev);
1883 u32 bdf;
1884 acpi_status status;
1885 struct acpi_srat_cpu_affinity *cpu;
1886 struct acpi_srat_generic_affinity *gpu;
1887 int pxm = 0, max_pxm = 0;
1888 int numa_node = NUMA_NO_NODE;
1889 bool found = false;
1890
1891 /* Fetch the SRAT table from ACPI */
1892 status = acpi_get_table(ACPI_SIG_SRAT, 0, &table_header);
1893 if (status == AE_NOT_FOUND) {
1894 pr_warn("SRAT table not found\n");
1895 return;
1896 } else if (ACPI_FAILURE(status)) {
1897 const char *err = acpi_format_exception(status);
1898 pr_err("SRAT table error: %s\n", err);
1899 return;
1900 }
1901
1902 table_end = (unsigned long)table_header + table_header->length;
1903
1904 /* Parse all entries looking for a match. */
1905 sub_header = (struct acpi_subtable_header *)
1906 ((unsigned long)table_header +
1907 sizeof(struct acpi_table_srat));
1908 subtable_len = sub_header->length;
1909
1910 while (((unsigned long)sub_header) + subtable_len < table_end) {
1911 /*
1912 * If the length is 0, break out of this loop to
1913 * avoid an infinite loop.
1914 */
1915 if (subtable_len == 0) {
1916 pr_err("SRAT invalid zero length\n");
1917 break;
1918 }
1919
1920 switch (sub_header->type) {
1921 case ACPI_SRAT_TYPE_CPU_AFFINITY:
1922 cpu = (struct acpi_srat_cpu_affinity *)sub_header;
1923 pxm = *((u32 *)cpu->proximity_domain_hi) << 8 |
1924 cpu->proximity_domain_lo;
1925 if (pxm > max_pxm)
1926 max_pxm = pxm;
1927 break;
1928 case ACPI_SRAT_TYPE_GENERIC_AFFINITY:
1929 gpu = (struct acpi_srat_generic_affinity *)sub_header;
1930 bdf = *((u16 *)(&gpu->device_handle[0])) << 16 |
1931 *((u16 *)(&gpu->device_handle[2]));
1932 if (bdf == pci_id) {
1933 found = true;
1934 numa_node = pxm_to_node(gpu->proximity_domain);
1935 }
1936 break;
1937 default:
1938 break;
1939 }
1940
1941 if (found)
1942 break;
1943
1944 sub_header = (struct acpi_subtable_header *)
1945 ((unsigned long)sub_header + subtable_len);
1946 subtable_len = sub_header->length;
1947 }
1948
1949 acpi_put_table(table_header);
1950
1951 /* Work around a bad CPU-GPU binding case */
1952 if (found && (numa_node < 0 ||
1953 numa_node > pxm_to_node(max_pxm)))
1954 numa_node = 0;
1955
1956 if (numa_node != NUMA_NO_NODE)
1957 set_dev_node(&kdev->pdev->dev, numa_node);
1958}
1959#endif
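
/* The match key used above packs the PCI location the same way the
 * code assumes the SRAT Generic Affinity device handle does: segment
 * (domain) in the upper 16 bits, bus/device/function in the lower 16:
 *
 *	pci_id = domain << 16 | bus << 8 | devfn;
 *
 * e.g. device 0000:03:00.0 yields pci_id == 0x00000300 (a worked
 * example for illustration; actual values are platform-specific).
 */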
1960
1961/* kfd_fill_gpu_direct_io_link_to_cpu - Fill in the direct io link
1962 * from a GPU to its NUMA node
1963 * @avail_size: Available size in the CRAT image buffer
1964 * @kdev: [IN] GPU device
1965 * @sub_type_hdr: Memory into which the io link info will be filled in
1966 * @proximity_domain: Proximity domain of the GPU node
1967 *
1968 * Return 0 if successful else return a negative errno value
1969 */
1970static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
1971 struct kfd_dev *kdev,
1972 struct crat_subtype_iolink *sub_type_hdr,
1973 uint32_t proximity_domain)
1974{
1975 *avail_size -= sizeof(struct crat_subtype_iolink);
1976 if (*avail_size < 0)
1977 return -ENOMEM;
1978
1979 memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
1980
1981 /* Fill in subtype header data */
1982 sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
1983 sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
1984 sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
1985 if (kfd_dev_is_large_bar(kdev))
1986 sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
1987
1988 /* Fill in IOLINK subtype.
1989 * TODO: Fill-in other fields of iolink subtype
1990 */
1991 if (kdev->adev->gmc.xgmi.connected_to_cpu) {
1992 /*
1993 * With a host-GPU xGMI link, the host can access GPU memory
1994 * whether or not the PCIe BAR is large, so always create a
1995 * bidirectional io link.
1996 */
1997 sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
1998 sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
1999 sub_type_hdr->num_hops_xgmi = 1;
2000 if (KFD_GC_VERSION(kdev) == IP_VERSION(9, 4, 2)) {
2001 sub_type_hdr->minimum_bandwidth_mbs =
2002 amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(
2003 kdev->adev, NULL, true);
2004 sub_type_hdr->maximum_bandwidth_mbs =
2005 sub_type_hdr->minimum_bandwidth_mbs;
2006 }
2007 } else {
2008 sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
2009 sub_type_hdr->minimum_bandwidth_mbs =
2010 amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, true);
2011 sub_type_hdr->maximum_bandwidth_mbs =
2012 amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, false);
2013 }
2014
2015 sub_type_hdr->proximity_domain_from = proximity_domain;
2016
2017#ifdef CONFIG_ACPI_NUMA
2018 if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
2019 kfd_find_numa_node_in_srat(kdev);
2020#endif
2021#ifdef CONFIG_NUMA
2022 if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
2023 sub_type_hdr->proximity_domain_to = 0;
2024 else
2025 sub_type_hdr->proximity_domain_to = kdev->pdev->dev.numa_node;
2026#else
2027 sub_type_hdr->proximity_domain_to = 0;
2028#endif
2029 return 0;
2030}
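
/* Quick reference for the link properties chosen above (summarizing
 * the code, not adding new behavior):
 *
 *	XGMI to CPU:  io_interface_type = CRAT_IOLINK_TYPE_XGMI,
 *	              always marked CRAT_IOLINK_FLAGS_BI_DIRECTIONAL.
 *	PCIe:         io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS,
 *	              bidirectional only when kfd_dev_is_large_bar()
 *	              reports a large BAR.
 */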
2031
2032static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
2033 struct kfd_dev *kdev,
2034 struct kfd_dev *peer_kdev,
2035 struct crat_subtype_iolink *sub_type_hdr,
2036 uint32_t proximity_domain_from,
2037 uint32_t proximity_domain_to)
2038{
2039 *avail_size -= sizeof(struct crat_subtype_iolink);
2040 if (*avail_size < 0)
2041 return -ENOMEM;
2042
2043 memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
2044
2045 sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
2046 sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
2047 sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED |
2048 CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
2049
2050 sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
2051 sub_type_hdr->proximity_domain_from = proximity_domain_from;
2052 sub_type_hdr->proximity_domain_to = proximity_domain_to;
2053 sub_type_hdr->num_hops_xgmi =
2054 amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev);
2055 sub_type_hdr->maximum_bandwidth_mbs =
2056 amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, peer_kdev->adev, false);
2057 sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ?
2058 amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0;
2059
2060 return 0;
2061}
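
/* Example of how the XGMI fields combine (illustrative values only):
 * for two GPUs one hop apart with a per-link peak of 25000 MB/s, the
 * subtype would carry num_hops_xgmi = 1 and
 * maximum_bandwidth_mbs = 25000, with minimum_bandwidth_mbs taken
 * from the single-link minimum reported by
 * amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true).
 */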
2062
2063/* kfd_create_vcrat_image_gpu - Create Virtual CRAT for a GPU
2064 *
2065 * @pcrat_image: Image buffer to fill with the GPU VCRAT
2066 * @size: [IN] allocated size of crat_image.
2067 * [OUT] actual size of data filled in crat_image
2068 */
2069static int kfd_create_vcrat_image_gpu(void *pcrat_image,
2070 size_t *size, struct kfd_dev *kdev,
2071 uint32_t proximity_domain)
2072{
2073 struct crat_header *crat_table = (struct crat_header *)pcrat_image;
2074 struct crat_subtype_generic *sub_type_hdr;
2075 struct kfd_local_mem_info local_mem_info;
2076 struct kfd_topology_device *peer_dev;
2077 struct crat_subtype_computeunit *cu;
2078 struct kfd_cu_info cu_info;
2079 int avail_size = *size;
2080 uint32_t total_num_of_cu;
2081 int num_of_cache_entries = 0;
2082 int cache_mem_filled = 0;
2083 uint32_t nid = 0;
2084 int ret = 0;
2085
2086 if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
2087 return -EINVAL;
2088
2089 /* Fill in the CRAT header.
2090 * Modify length and total_entries as subtypes are added.
2091 */
2092 avail_size -= sizeof(struct crat_header);
2093 if (avail_size < 0)
2094 return -ENOMEM;
2095
2096 memset(crat_table, 0, sizeof(struct crat_header));
2097
2098 memcpy(&crat_table->signature, CRAT_SIGNATURE,
2099 sizeof(crat_table->signature));
2100 /* Change length as we add more subtypes */
2101 crat_table->length = sizeof(struct crat_header);
2102 crat_table->num_domains = 1;
2103 crat_table->total_entries = 0;
2104
2105 /* Fill in Subtype: Compute Unit
2106 * First fill in the sub type header and then sub type data
2107 */
2108 avail_size -= sizeof(struct crat_subtype_computeunit);
2109 if (avail_size < 0)
2110 return -ENOMEM;
2111
2112 sub_type_hdr = (struct crat_subtype_generic *)(crat_table + 1);
2113 memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
2114
2115 sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
2116 sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
2117 sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
2118
2119 /* Fill CU subtype data */
2120 cu = (struct crat_subtype_computeunit *)sub_type_hdr;
2121 cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
2122 cu->proximity_domain = proximity_domain;
2123
2124 amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info);
2125 cu->num_simd_per_cu = cu_info.simd_per_cu;
2126 cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
2127 cu->max_waves_simd = cu_info.max_waves_per_simd;
2128
2129 cu->wave_front_size = cu_info.wave_front_size;
2130 cu->array_count = cu_info.num_shader_arrays_per_engine *
2131 cu_info.num_shader_engines;
2132 total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
2133 cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
2134 cu->num_cu_per_array = cu_info.num_cu_per_sh;
2135 cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
2136 cu->num_banks = cu_info.num_shader_engines;
2137 cu->lds_size_in_kb = cu_info.lds_size;
2138
2139 cu->hsa_capability = 0;
2140
2141 /* Check if this node supports IOMMU. During parsing this flag will
2142 * translate to HSA_CAP_ATS_PRESENT
2143 */
2144 if (!kfd_iommu_check_device(kdev))
2145 cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;
2146
2147 crat_table->length += sub_type_hdr->length;
2148 crat_table->total_entries++;
2149
2150 /* Fill in Subtype: Memory. Only on systems with large BAR (no
2151 * private FB), report memory as public. On other systems
2152 * report the total FB size (public+private) as a single
2153 * private heap.
2154 */
2155 amdgpu_amdkfd_get_local_mem_info(kdev->adev, &local_mem_info);
2156 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
2157 sub_type_hdr->length);
2158
2159 if (debug_largebar)
2160 local_mem_info.local_mem_size_private = 0;
2161
2162 if (local_mem_info.local_mem_size_private == 0)
2163 ret = kfd_fill_gpu_memory_affinity(&avail_size,
2164 kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC,
2165 local_mem_info.local_mem_size_public,
2166 (struct crat_subtype_memory *)sub_type_hdr,
2167 proximity_domain,
2168 &local_mem_info);
2169 else
2170 ret = kfd_fill_gpu_memory_affinity(&avail_size,
2171 kdev, HSA_MEM_HEAP_TYPE_FB_PRIVATE,
2172 local_mem_info.local_mem_size_public +
2173 local_mem_info.local_mem_size_private,
2174 (struct crat_subtype_memory *)sub_type_hdr,
2175 proximity_domain,
2176 &local_mem_info);
2177 if (ret < 0)
2178 return ret;
2179
2180 crat_table->length += sizeof(struct crat_subtype_memory);
2181 crat_table->total_entries++;
2182
2183 /* TODO: Fill in cache information. This information is NOT readily
2184 * available in KGD
2185 */
2186 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
2187 sub_type_hdr->length);
2188 ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low,
2189 avail_size,
2190 &cu_info,
2191 (struct crat_subtype_cache *)sub_type_hdr,
2192 &cache_mem_filled,
2193 &num_of_cache_entries);
2194
2195 if (ret < 0)
2196 return ret;
2197
2198 crat_table->length += cache_mem_filled;
2199 crat_table->total_entries += num_of_cache_entries;
2200 avail_size -= cache_mem_filled;
2201
2202 /* Fill in Subtype: IO_LINKS
2203 * Only direct links are added here, i.e. the link from the GPU
2204 * to its NUMA node. Indirect links are added by userspace.
2205 */
2206 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
2207 cache_mem_filled);
2208 ret = kfd_fill_gpu_direct_io_link_to_cpu(&avail_size, kdev,
2209 (struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);
2210
2211 if (ret < 0)
2212 return ret;
2213
2214 crat_table->length += sub_type_hdr->length;
2215 crat_table->total_entries++;
2216
2218 /* Fill in Subtype: IO_LINKS
2219 * Direct links from this GPU to other GPUs through xGMI.
2220 * We loop over the GPUs that have already been processed (those
2221 * with a lower proximity_domain value) and add a link for each
2222 * GPU that shares this GPU's hive id (from this GPU to the other
2223 * GPU). The reverse iolink (from the other GPU to this GPU) is
2224 * added in kfd_parse_subtype_iolink.
2225 */
2226 if (kdev->hive_id) {
2227 for (nid = 0; nid < proximity_domain; ++nid) {
2228 peer_dev = kfd_topology_device_by_proximity_domain(nid);
2229 if (!peer_dev || !peer_dev->gpu)
2230 continue;
2231 if (peer_dev->gpu->hive_id != kdev->hive_id)
2232 continue;
2233 sub_type_hdr = (typeof(sub_type_hdr))(
2234 (char *)sub_type_hdr +
2235 sizeof(struct crat_subtype_iolink));
2236 ret = kfd_fill_gpu_xgmi_link_to_gpu(
2237 &avail_size, kdev, peer_dev->gpu,
2238 (struct crat_subtype_iolink *)sub_type_hdr,
2239 proximity_domain, nid);
2240 if (ret < 0)
2241 return ret;
2242 crat_table->length += sub_type_hdr->length;
2243 crat_table->total_entries++;
2244 }
2245 }
2246 *size = crat_table->length;
2247 pr_info("Virtual CRAT table created for GPU\n");
2248
2249 return ret;
2250}
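
/* Resulting layout of the GPU VCRAT built above (in order of
 * creation; entry counts summarize the code above):
 *
 *	crat_header
 *	crat_subtype_computeunit   1 entry
 *	crat_subtype_memory        1 entry
 *	crat_subtype_cache         num_of_cache_entries entries
 *	crat_subtype_iolink        1 GPU-to-CPU link, plus one
 *	                           GPU-to-GPU link per xGMI peer
 */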
2251
2252/* kfd_create_crat_image_virtual - Allocates memory for CRAT image and
2253 * creates a Virtual CRAT (VCRAT) image
2254 *
2255 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
2256 *
2257 * @crat_image: VCRAT image created because ACPI does not have a
2258 * CRAT for this device
2259 * @size: [OUT] size of virtual crat_image
2260 * @flags: COMPUTE_UNIT_CPU - Create VCRAT for CPU device
2261 * COMPUTE_UNIT_GPU - Create VCRAT for GPU
2262 * (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
2263 * -- this option is not currently implemented.
2264 * The assumption is that all AMD APUs will have CRAT
2265 * @kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU
2266 *
2267 * Return 0 if successful else return a negative errno value
2268 */
2269int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
2270 int flags, struct kfd_dev *kdev,
2271 uint32_t proximity_domain)
2272{
2273 void *pcrat_image = NULL;
2274 int ret = 0, num_nodes;
2275 size_t dyn_size;
2276
2277 if (!crat_image)
2278 return -EINVAL;
2279
2280 *crat_image = NULL;
2281
2282 /* Size the CPU Virtual CRAT based on the number of online NUMA
2283 * nodes. Allocate VCRAT_SIZE_FOR_GPU for the GPU virtual CRAT
2284 * image. This should cover all current configurations; a check is
2285 * in place so the GPU image is never written beyond its allocation.
2286 */
2287 switch (flags) {
2288 case COMPUTE_UNIT_CPU:
2289 num_nodes = num_online_nodes();
2290 dyn_size = sizeof(struct crat_header) +
2291 num_nodes * (sizeof(struct crat_subtype_computeunit) +
2292 sizeof(struct crat_subtype_memory) +
2293 (num_nodes - 1) * sizeof(struct crat_subtype_iolink));
2294 pcrat_image = kvmalloc(dyn_size, GFP_KERNEL);
2295 if (!pcrat_image)
2296 return -ENOMEM;
2297 *size = dyn_size;
2298 pr_debug("CRAT size is %zu\n", dyn_size);
2299 ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
2300 break;
2301 case COMPUTE_UNIT_GPU:
2302 if (!kdev)
2303 return -EINVAL;
2304 pcrat_image = kvmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
2305 if (!pcrat_image)
2306 return -ENOMEM;
2307 *size = VCRAT_SIZE_FOR_GPU;
2308 ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
2309 proximity_domain);
2310 break;
2311 case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
2312 /* TODO: */
2313 ret = -EINVAL;
2314 pr_err("VCRAT not implemented for APU\n");
2315 break;
2316 default:
2317 ret = -EINVAL;
2318 }
2319
2320 if (!ret)
2321 *crat_image = pcrat_image;
2322 else
2323 kvfree(pcrat_image);
2324
2325 return ret;
2326}
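
/* Typical lifecycle, as a minimal sketch (error handling trimmed; the
 * real call sites live in kfd_topology.c):
 *
 *	void *crat_image = NULL;
 *	size_t image_size = 0;
 *	int ret;
 *
 *	ret = kfd_create_crat_image_virtual(&crat_image, &image_size,
 *					    COMPUTE_UNIT_GPU, kdev,
 *					    proximity_domain);
 *	if (!ret) {
 *		... parse crat_image into the topology ...
 *		kfd_destroy_crat_image(crat_image);
 *	}
 */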
2327
2329/* kfd_destroy_crat_image
2330 *
2331 * @crat_image: [IN] - crat_image from kfd_create_crat_image_xxx(..)
2332 *
2333 */
2334void kfd_destroy_crat_image(void *crat_image)
2335{
2336 kvfree(crat_image);
2337}