Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.5-rc7 (2111 lines, 57 kB)
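The listing below is drivers/acpi/nfit.c, the ACPI NFIT (NVDIMM Firmware Interface Table) driver: it parses the static NFIT table (or a dynamic _FIT), registers an nvdimm bus plus dimm and region devices with libnvdimm, and routes management commands to ACPI _DSM methods.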
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

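/*
 * acpi_nfit_ctl() bridges libnvdimm commands to ACPI _DSM calls: the
 * input payload in 'buf' is wrapped in a single-element ACPI package,
 * the _DSM is evaluated (revision 1, function index == cmd), and the
 * returned buffer is unpacked field-by-field back into 'buf'.
 */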
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	const struct nd_cmd_desc *desc = NULL;
	union acpi_object in_obj, in_buf, *out_obj;
	struct device *dev = acpi_desc->dev;
	const char *cmd_name, *dimm_name;
	unsigned long dsm_mask;
	acpi_handle handle;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_DIMM);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		dsm_mask = nd_desc->dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__,
				dimm_name, cmd_name, in_buf.buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, in_buf.buffer.pointer, min_t(u32, 128,
					in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}
	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else
		rc = 0;

 out:
	ACPI_FREE(out_obj);

	return rc;
}

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",

	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

static int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}

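/*
 * The add_* helpers below dedupe against the tables from the previous
 * scan: a table that is unchanged since last time is moved back onto
 * the active list, otherwise a new devm-allocated wrapper is added.
 */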
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, length) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	nfit_spa->spa = spa;
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	nfit_memdev->memdev = memdev;
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	nfit_dcr->dcr = dcr;
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	nfit_bdw->bdw = bdw;
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	list_for_each_entry(nfit_idt, &prev->idts, list)
		if (memcmp(nfit_idt->idt, idt, length) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	nfit_idt->idt = idt;
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	list_for_each_entry(nfit_flush, &prev->flushes, list)
		if (memcmp(nfit_flush->flush, flush, length) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	nfit_flush->flush = flush;
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}

static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_flush *nfit_flush;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			if (nfit_flush->flush->device_handle !=
					nfit_memdev->memdev->device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			break;
		}
		break;
	}
}

static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm. For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s). From each MEMDEV find the
	 * corresponding DCR. Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR. Throw it all into an nfit_mem object. Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
};

const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};
EXPORT_SYMBOL_GPL(acpi_nfit_attribute_groups);

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->revision_id);
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->code);
}
static DEVICE_ATTR_RO(format);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->serial_number);
}
static DEVICE_ATTR_RO(serial);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_format.attr,
	&dev_attr_serial.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_flags.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (to_nfit_dcr(dev))
		return a->mode;
	else
		return 0;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
	int i;

	nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}

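/*
 * Register an nvdimm device for each unique device_handle. A dimm
 * that fronts both a pmem range and a block-data-window is flagged
 * NDD_ALIASING so libnvdimm can arbitrate the overlapping capacity.
 */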
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct nvdimm *nvdimm;
		unsigned long flags = 0;
		u32 device_handle;
		u16 mem_flags;
		int rc;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, &nfit_mem->dsm_mask);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
			nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
			mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");

	}

	return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
}

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_ARS_STATUS; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->dsm_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}

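/*
 * Translate a register offset in "interleave space" into an offset in
 * this dimm's mmio range: resolve the offset to a line number plus a
 * byte offset within that line, then use line_offset[] from the
 * interleave table and the count of complete table traversals to find
 * where the line actually lives.
 */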
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}

static void wmb_blk(struct nfit_blk *nfit_blk)
{

	if (nfit_blk->nvdimm_flush) {
		/*
		 * The first wmb() is needed to 'sfence' all previous writes
		 * such that they are architecturally visible for the platform
		 * buffer flush. Note that we've already arranged for pmem
		 * writes to avoid the cache via arch_memcpy_to_pmem(). The
		 * final wmb() ensures ordering for the NVDIMM flush write.
		 */
		wmb();
		writeq(1, nfit_blk->nvdimm_flush);
		wmb();
	} else
		wmb_pmem();
}

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset);
}

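/*
 * Block command window encoding: bits 0..47 carry the dimm-physical
 * address in cache-line units, bits 48..55 the transfer length in
 * cache lines, and bit 56 the write flag.
 */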
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	wmb_blk(nfit_blk);

	if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}

static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_to_pmem(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy_from_pmem(iobuf + copied,
					mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		wmb_blk(nfit_blk);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}

static void nfit_spa_mapping_release(struct kref *kref)
{
	struct nfit_spa_mapping *spa_map = to_spa_map(kref);
	struct acpi_nfit_system_address *spa = spa_map->spa;
	struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
	if (spa_map->type == SPA_MAP_APERTURE)
		memunmap((void __force *)spa_map->addr.aperture);
	else
		iounmap(spa_map->addr.base);
	release_mem_region(spa->address, spa->length);
	list_del(&spa_map->list);
	kfree(spa_map);
}

static struct nfit_spa_mapping *find_spa_mapping(
		struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
		if (spa_map->spa == spa)
			return spa_map;

	return NULL;
}

static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	mutex_lock(&acpi_desc->spa_map_mutex);
	spa_map = find_spa_mapping(acpi_desc, spa);

	if (spa_map)
		kref_put(&spa_map->kref, nfit_spa_mapping_release);
	mutex_unlock(&acpi_desc->spa_map_mutex);
}

static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	resource_size_t start = spa->address;
	resource_size_t n = spa->length;
	struct nfit_spa_mapping *spa_map;
	struct resource *res;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));

	spa_map = find_spa_mapping(acpi_desc, spa);
	if (spa_map) {
		kref_get(&spa_map->kref);
		return spa_map->addr.base;
	}

	spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
	if (!spa_map)
		return NULL;

	INIT_LIST_HEAD(&spa_map->list);
	spa_map->spa = spa;
	kref_init(&spa_map->kref);
	spa_map->acpi_desc = acpi_desc;

	res = request_mem_region(start, n, dev_name(acpi_desc->dev));
	if (!res)
		goto err_mem;

	spa_map->type = type;
	if (type == SPA_MAP_APERTURE)
		spa_map->addr.aperture = (void __pmem *)memremap(start, n,
							ARCH_MEMREMAP_PMEM);
	else
		spa_map->addr.base = ioremap_nocache(start, n);

	if (!spa_map->addr.base)
		goto err_map;

	list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
	return spa_map->addr.base;

 err_map:
	release_mem_region(start, n);
 err_mem:
	kfree(spa_map);
	return NULL;
}

/**
 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
 * @nvdimm_bus: NFIT-bus that provided the spa table entry
 * @nfit_spa: spa table to map
 * @type: aperture or control region
 *
 * In the case where block-data-window apertures and
 * dimm-control-regions are interleaved they will end up sharing a
 * single request_mem_region() + ioremap() for the address range. In
 * the style of devm nfit_spa_map() mappings are automatically dropped
 * when all region devices referencing the same mapping are disabled /
 * unbound.
 */
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	void __iomem *iomem;

	mutex_lock(&acpi_desc->spa_map_mutex);
	iomem = __nfit_spa_map(acpi_desc, spa, type);
	mutex_unlock(&acpi_desc->spa_map_mutex);

	return iomem;
}

static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}

static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags));

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = ND_BLK_DCR_LATCH | ND_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}

static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_flush *nfit_flush;
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
"" : " bdw"); 1365 return -ENXIO; 1366 } 1367 1368 nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); 1369 if (!nfit_blk) 1370 return -ENOMEM; 1371 nd_blk_region_set_provider_data(ndbr, nfit_blk); 1372 nfit_blk->nd_region = to_nd_region(dev); 1373 1374 /* map block aperture memory */ 1375 nfit_blk->bdw_offset = nfit_mem->bdw->offset; 1376 mmio = &nfit_blk->mmio[BDW]; 1377 mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw, 1378 SPA_MAP_APERTURE); 1379 if (!mmio->addr.base) { 1380 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__, 1381 nvdimm_name(nvdimm)); 1382 return -ENOMEM; 1383 } 1384 mmio->size = nfit_mem->bdw->size; 1385 mmio->base_offset = nfit_mem->memdev_bdw->region_offset; 1386 mmio->idt = nfit_mem->idt_bdw; 1387 mmio->spa = nfit_mem->spa_bdw; 1388 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, 1389 nfit_mem->memdev_bdw->interleave_ways); 1390 if (rc) { 1391 dev_dbg(dev, "%s: %s failed to init bdw interleave\n", 1392 __func__, nvdimm_name(nvdimm)); 1393 return rc; 1394 } 1395 1396 /* map block control memory */ 1397 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; 1398 nfit_blk->stat_offset = nfit_mem->dcr->status_offset; 1399 mmio = &nfit_blk->mmio[DCR]; 1400 mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr, 1401 SPA_MAP_CONTROL); 1402 if (!mmio->addr.base) { 1403 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__, 1404 nvdimm_name(nvdimm)); 1405 return -ENOMEM; 1406 } 1407 mmio->size = nfit_mem->dcr->window_size; 1408 mmio->base_offset = nfit_mem->memdev_dcr->region_offset; 1409 mmio->idt = nfit_mem->idt_dcr; 1410 mmio->spa = nfit_mem->spa_dcr; 1411 rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, 1412 nfit_mem->memdev_dcr->interleave_ways); 1413 if (rc) { 1414 dev_dbg(dev, "%s: %s failed to init dcr interleave\n", 1415 __func__, nvdimm_name(nvdimm)); 1416 return rc; 1417 } 1418 1419 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); 1420 if (rc < 0) { 1421 dev_dbg(dev, "%s: %s failed get DIMM flags\n", 1422 __func__, nvdimm_name(nvdimm)); 1423 return rc; 1424 } 1425 1426 nfit_flush = nfit_mem->nfit_flush; 1427 if (nfit_flush && nfit_flush->flush->hint_count != 0) { 1428 nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev, 1429 nfit_flush->flush->hint_address[0], 8); 1430 if (!nfit_blk->nvdimm_flush) 1431 return -ENOMEM; 1432 } 1433 1434 if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush) 1435 dev_warn(dev, "unable to guarantee persistence of writes\n"); 1436 1437 if (mmio->line_size == 0) 1438 return 0; 1439 1440 if ((u32) nfit_blk->cmd_offset % mmio->line_size 1441 + 8 > mmio->line_size) { 1442 dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); 1443 return -ENXIO; 1444 } else if ((u32) nfit_blk->stat_offset % mmio->line_size 1445 + 8 > mmio->line_size) { 1446 dev_dbg(dev, "stat_offset crosses interleave boundary\n"); 1447 return -ENXIO; 1448 } 1449 1450 return 0; 1451} 1452 1453static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus, 1454 struct device *dev) 1455{ 1456 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1457 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1458 struct nd_blk_region *ndbr = to_nd_blk_region(dev); 1459 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); 1460 int i; 1461 1462 if (!nfit_blk) 1463 return; /* never enabled */ 1464 1465 /* auto-free BLK spa mappings */ 1466 for (i = 0; i < 2; i++) { 1467 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i]; 1468 1469 if (mmio->addr.base) 1470 nfit_spa_unmap(acpi_desc, mmio->spa); 1471 } 1472 
static int ars_get_cap(struct nvdimm_bus_descriptor *nd_desc,
		struct nd_cmd_ars_cap *cmd, u64 addr, u64 length)
{
	cmd->address = addr;
	cmd->length = length;

	return nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd));
}

static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
		struct nd_cmd_ars_start *cmd, u64 addr, u64 length)
{
	int rc;

	cmd->address = addr;
	cmd->length = length;
	cmd->type = ND_ARS_PERSISTENT;

	while (1) {
		rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, cmd,
				sizeof(*cmd));
		if (rc)
			return rc;
		switch (cmd->status) {
		case 0:
			return 0;
		case 1:
			/* ARS unsupported, but we should never get here */
			return 0;
		case 6:
			/* ARS is in progress */
			msleep(1000);
			break;
		default:
			return -ENXIO;
		}
	}
}

static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
		struct nd_cmd_ars_status *cmd, u32 size)
{
	int rc;

	while (1) {
		rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd,
			size);
		if (rc || cmd->status & 0xffff)
			return -ENXIO;

		/* Check extended status (Upper two bytes) */
		switch (cmd->status >> 16) {
		case 0:
			return 0;
		case 1:
			/* ARS is in progress */
			msleep(1000);
			break;
		case 2:
			/* No ARS performed for the current boot */
			return 0;
		case 3:
			/* TODO: error list overflow support */
		default:
			return -ENXIO;
		}
	}
}

static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
		struct nd_cmd_ars_status *ars_status, u64 start)
{
	int rc;
	u32 i;

	/*
	 * The address field returned by ars_status should be either
	 * less than or equal to the address we last started ARS for.
	 * The (start, length) returned by ars_status should also have
	 * non-zero overlap with the range we started ARS for.
	 * If this is not the case, bail.
	 */
	if (ars_status->address > start ||
			(ars_status->address + ars_status->length < start))
		return -ENXIO;

	for (i = 0; i < ars_status->num_records; i++) {
		rc = nvdimm_bus_add_poison(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);
		if (rc)
			return rc;
	}

	return 0;
}

static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
	struct nd_cmd_ars_status *ars_status = NULL;
	struct nd_cmd_ars_start *ars_start = NULL;
	struct nd_cmd_ars_cap *ars_cap = NULL;
	u64 start, len, cur, remaining;
	u32 ars_status_size;
	int rc;

	ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL);
	if (!ars_cap)
		return -ENOMEM;

	start = ndr_desc->res->start;
	len = ndr_desc->res->end - ndr_desc->res->start + 1;

	/*
	 * If ARS is unimplemented, unsupported, or if the 'Persistent Memory
	 * Scrub' flag in extended status is not set, skip this but continue
	 * initialization
	 */
	rc = ars_get_cap(nd_desc, ars_cap, start, len);
	if (rc == -ENOTTY) {
		dev_dbg(acpi_desc->dev,
			"Address Range Scrub is not implemented, won't create an error list\n");
		rc = 0;
		goto out;
	}
	if (rc)
		goto out;

	if ((ars_cap->status & 0xffff) ||
		!(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) {
		dev_warn(acpi_desc->dev,
			"ARS unsupported (status: 0x%x), won't create an error list\n",
			ars_cap->status);
		goto out;
	}

	/*
	 * Check if a full-range ARS has been run. If so, use those results
	 * without having to start a new ARS.
	 */
	ars_status_size = ars_cap->max_ars_out;
	ars_status = kzalloc(ars_status_size, GFP_KERNEL);
	if (!ars_status) {
		rc = -ENOMEM;
		goto out;
	}

	rc = ars_get_status(nd_desc, ars_status, ars_status_size);
	if (rc)
		goto out;

	if (ars_status->address <= start &&
		(ars_status->address + ars_status->length >= start + len)) {
		rc = ars_status_process_records(nvdimm_bus, ars_status, start);
		goto out;
	}

	/*
	 * ARS_STATUS can overflow if the number of poison entries found is
	 * greater than the maximum buffer size (ars_cap->max_ars_out)
	 * To detect overflow, check if the length field of ars_status
	 * is less than the length we supplied. If so, process the
	 * error entries we got, adjust the start point, and start again
	 */
	ars_start = kzalloc(sizeof(*ars_start), GFP_KERNEL);
	if (!ars_start)
		return -ENOMEM;

	cur = start;
	remaining = len;
	do {
		u64 done, end;

		rc = ars_do_start(nd_desc, ars_start, cur, remaining);
		if (rc)
			goto out;

		rc = ars_get_status(nd_desc, ars_status, ars_status_size);
		if (rc)
			goto out;

		rc = ars_status_process_records(nvdimm_bus, ars_status, cur);
		if (rc)
			goto out;

		end = min(cur + remaining,
			ars_status->address + ars_status->length);
		done = end - cur;
		cur += done;
		remaining -= done;
	} while (remaining);

 out:
	kfree(ars_cap);
	kfree(ars_start);
	kfree(ars_status);
	return rc;
}

static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct acpi_nfit_system_address *spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int blk_valid = 0;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	nd_mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		nd_mapping->start = memdev->address;
		nd_mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
		} else {
			nd_mapping->size = nfit_mem->bdw->capacity;
			nd_mapping->start = nfit_mem->bdw->start_address;
			ndr_desc->num_lanes = nfit_mem->bdw->windows;
			blk_valid = 1;
		}

		ndr_desc->nd_mapping = nd_mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->disable = acpi_nfit_blk_region_disable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		if (!nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc))
			return -ENOMEM;
		break;
	}

	return 0;
}

static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->is_registered)
		return 0;

	if (spa->range_index == 0) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&nd_mappings, 0, sizeof(nd_mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
						spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping *nd_mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		nd_mapping = &nd_mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
				memdev, spa);
		if (rc)
			return rc;
	}

	ndr_desc->nd_mapping = nd_mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		return rc;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_find_poison(acpi_desc, ndr_desc);
		if (rc) {
			dev_err(acpi_desc->dev,
				"error while performing ARS to find poison: %d\n",
				rc);
			return rc;
		}
		if (!nvdimm_pmem_region_create(nvdimm_bus, ndr_desc))
			return -ENOMEM;
	} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
		if (!nvdimm_volatile_region_create(nvdimm_bus, ndr_desc))
			return -ENOMEM;
	}

	nfit_spa->is_registered = 1;
	return 0;
}

static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc = acpi_nfit_register_region(acpi_desc, nfit_spa);

		if (rc)
			return rc;
	}
	return 0;
}

static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}

int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	u8 *data;
	int rc;

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	data = (u8 *) acpi_desc->nfit;
	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

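	/* whatever is left on the prev lists was deleted by the updated nfit */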
	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	if (nfit_mem_init(acpi_desc) != 0) {
		rc = -ENOMEM;
		goto out_unlock;
	}

	acpi_nfit_init_dsms(acpi_desc);

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);

static struct acpi_nfit_desc *acpi_nfit_desc_init(struct acpi_device *adev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return ERR_PTR(-ENOMEM);

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, nd_desc);
	if (!acpi_desc->nvdimm_bus) {
		devm_kfree(dev, acpi_desc);
		return ERR_PTR(-ENXIO);
	}

	INIT_LIST_HEAD(&acpi_desc->spa_maps);
	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	mutex_init(&acpi_desc->spa_map_mutex);
	mutex_init(&acpi_desc->init_mutex);

	return acpi_desc;
}

static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc;

	status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	acpi_desc = acpi_nfit_desc_init(adev);
	if (IS_ERR(acpi_desc)) {
		dev_err(dev, "%s: error initializing acpi_desc: %ld\n",
				__func__, PTR_ERR(acpi_desc));
		return PTR_ERR(acpi_desc);
	}

	/*
	 * Save the acpi header for later and then skip it,
	 * making nfit point to the first nfit table header.
	 */
	acpi_desc->acpi_header = *tbl;
	acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
	sz -= sizeof(struct acpi_table_nfit);

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj;
		/*
		 * Adjust for the acpi_object header of the _FIT
		 */
		obj = buf.pointer;
		if (obj->type == ACPI_TYPE_BUFFER) {
			acpi_desc->nfit =
				(struct acpi_nfit_header *)obj->buffer.pointer;
			sz = obj->buffer.length;
		} else
			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
				__func__, (int) obj->type);
	}

	rc = acpi_nfit_init(acpi_desc, sz);
	if (rc) {
		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
		return rc;
	}
	return 0;
}

static int acpi_nfit_remove(struct acpi_device *adev)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	return 0;
}

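/*
 * _FIT change notification: re-evaluate _FIT and merge the new table
 * set into the existing description. acpi_nfit_check_deletions()
 * rejects a _FIT that drops entries, so a merge only ever grows the
 * configuration.
 */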
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_header *nfit_saved;
	union acpi_object *obj;
	struct device *dev = &adev->dev;
	acpi_status status;
	int ret;

	dev_dbg(dev, "%s: event: %d\n", __func__, event);

	device_lock(dev);
	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		goto out_unlock;
	}

	if (!acpi_desc) {
		acpi_desc = acpi_nfit_desc_init(adev);
		if (IS_ERR(acpi_desc)) {
			dev_err(dev, "%s: error initializing acpi_desc: %ld\n",
				__func__, PTR_ERR(acpi_desc));
			goto out_unlock;
		}
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		goto out_unlock;
	}

	nfit_saved = acpi_desc->nfit;
	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		acpi_desc->nfit =
			(struct acpi_nfit_header *)obj->buffer.pointer;
		ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
		if (ret) {
			/* Merge failed, restore old nfit, and exit */
			acpi_desc->nfit = nfit_saved;
			dev_err(dev, "failed to merge updated NFIT\n");
		}
	} else {
		/* Bad _FIT, restore old nfit */
		dev_err(dev, "Invalid _FIT\n");
	}
	kfree(buf.pointer);

 out_unlock:
	device_unlock(dev);
}

static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};

static __init int nfit_init(void)
{
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);

	return acpi_bus_register_driver(&acpi_nfit_driver);
}

static __exit void nfit_exit(void)
{
	acpi_bus_unregister_driver(&acpi_nfit_driver);
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
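For orientation, the "nfit" sysfs attribute groups registered above can be inspected from user space; a minimal sketch, assuming the first nvdimm bus and dimm enumerate as ndbus0 and nmem0 (the device names and paths are illustrative and vary by system). The force_enable_dimms parameter declared at the top of the file is likewise reachable at /sys/module/nfit/parameters/force_enable_dimms.

#include <stdio.h>

/* read one sysfs attribute and echo it to stdout */
static int show(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	fclose(f);
	return 0;
}

int main(void)
{
	/* bus-level NFIT revision (revision_show() above) */
	show("/sys/bus/nd/devices/ndbus0/nfit/revision");
	/* per-dimm handle and health flags (handle_show()/flags_show()) */
	show("/sys/bus/nd/devices/nmem0/nfit/handle");
	show("/sys/bus/nd/devices/nmem0/nfit/flags");
	return 0;
}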