/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright Pantelis Antoniou 2006
 * Copyright (C) IBM Corporation 2006
 *
 * Authors: Pantelis Antoniou <pantelis@embeddedalley.com>
 *	    Hollis Blanchard <hollisb@us.ibm.com>
 *	    Mark A. Greer <mgreer@mvista.com>
 *	    Paul Mackerras <paulus@samba.org>
 */

#include <string.h>
#include <stddef.h>
#include "flatdevtree.h"
#include "flatdevtree_env.h"

#define _ALIGN(x, al)	(((x) + (al) - 1) & ~((al) - 1))

static char *ft_root_node(struct ft_cxt *cxt)
{
	return cxt->rgn[FT_STRUCT].start;
}

/* Routines for keeping node ptrs returned by ft_find_device current */
/* First entry not used b/c it would return 0 and be taken as NULL/error */
static void *ft_get_phandle(struct ft_cxt *cxt, char *node)
{
	unsigned int i;

	if (!node)
		return NULL;

	for (i = 1; i < cxt->nodes_used; i++)	/* already there? */
		if (cxt->node_tbl[i] == node)
			return (void *)i;

	if (cxt->nodes_used < cxt->node_max) {
		cxt->node_tbl[cxt->nodes_used] = node;
		return (void *)cxt->nodes_used++;
	}

	return NULL;
}

static char *ft_node_ph2node(struct ft_cxt *cxt, const void *phandle)
{
	unsigned int i = (unsigned int)phandle;

	if (i < cxt->nodes_used)
		return cxt->node_tbl[i];
	return NULL;
}

static void ft_node_update_before(struct ft_cxt *cxt, char *addr, int shift)
{
	unsigned int i;

	if (shift == 0)
		return;

	for (i = 1; i < cxt->nodes_used; i++)
		if (cxt->node_tbl[i] < addr)
			cxt->node_tbl[i] += shift;
}

static void ft_node_update_after(struct ft_cxt *cxt, char *addr, int shift)
{
	unsigned int i;

	if (shift == 0)
		return;

	for (i = 1; i < cxt->nodes_used; i++)
		if (cxt->node_tbl[i] >= addr)
			cxt->node_tbl[i] += shift;
}

/* Struct used to return info from ft_next() */
struct ft_atom {
	u32 tag;
	const char *name;
	void *data;
	u32 size;
};

/* Set ptrs to current one's info; return addr of next one */
static char *ft_next(struct ft_cxt *cxt, char *p, struct ft_atom *ret)
{
	u32 sz;

	if (p >= cxt->rgn[FT_STRUCT].start + cxt->rgn[FT_STRUCT].size)
		return NULL;

	ret->tag = be32_to_cpu(*(u32 *) p);
	p += 4;

	switch (ret->tag) {	/* Tag */
	case OF_DT_BEGIN_NODE:
		ret->name = p;
		ret->data = (void *)(p - 4);	/* start of node */
		p += _ALIGN(strlen(p) + 1, 4);
		break;
	case OF_DT_PROP:
		ret->size = sz = be32_to_cpu(*(u32 *) p);
		ret->name = cxt->str_anchor + be32_to_cpu(*(u32 *) (p + 4));
		ret->data = (void *)(p + 8);
		p += 8 + _ALIGN(sz, 4);
		break;
	case OF_DT_END_NODE:
	case OF_DT_NOP:
		break;
	case OF_DT_END:
	default:
		p = NULL;
		break;
	}

	return p;
}

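/*
 * Illustrative sketch, not part of the original file: how the ft_next()
 * walker above is typically used.  Each call decodes one atom (node start,
 * property, node end) and returns the address of the following atom, so a
 * simple loop visits the whole struct region.  The function name and the
 * node counter are assumptions invented for this example; it is guarded
 * out so it does not affect the build.
 */
#if 0
static int example_count_nodes(struct ft_cxt *cxt)
{
	struct ft_atom atom;
	char *p = ft_root_node(cxt);
	int count = 0;

	/* walk every atom; stop at OF_DT_END or the end of the region */
	while ((p = ft_next(cxt, p, &atom)) != NULL)
		if (atom.tag == OF_DT_BEGIN_NODE)
			++count;	/* one per node in the tree */

	return count;
}
#endif
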
#define HDR_SIZE	_ALIGN(sizeof(struct boot_param_header), 8)
#define EXPAND_INCR	1024	/* alloc this much extra when expanding */

/* Copy the tree to a newly-allocated region and put things in order */
static int ft_reorder(struct ft_cxt *cxt, int nextra)
{
	unsigned long tot;
	enum ft_rgn_id r;
	char *p, *pend;
	int stroff;

	tot = HDR_SIZE + EXPAND_INCR;
	for (r = FT_RSVMAP; r <= FT_STRINGS; ++r)
		tot += cxt->rgn[r].size;
	if (nextra > 0)
		tot += nextra;
	tot = _ALIGN(tot, 8);

	if (!cxt->realloc)
		return 0;
	p = cxt->realloc(NULL, tot);
	if (!p)
		return 0;

	memcpy(p, cxt->bph, sizeof(struct boot_param_header));
	/* offsets get fixed up later */

	cxt->bph = (struct boot_param_header *)p;
	cxt->max_size = tot;
	pend = p + tot;
	p += HDR_SIZE;

	memcpy(p, cxt->rgn[FT_RSVMAP].start, cxt->rgn[FT_RSVMAP].size);
	cxt->rgn[FT_RSVMAP].start = p;
	p += cxt->rgn[FT_RSVMAP].size;

	memcpy(p, cxt->rgn[FT_STRUCT].start, cxt->rgn[FT_STRUCT].size);
	ft_node_update_after(cxt, cxt->rgn[FT_STRUCT].start,
			     p - cxt->rgn[FT_STRUCT].start);
	cxt->p += p - cxt->rgn[FT_STRUCT].start;
	cxt->rgn[FT_STRUCT].start = p;

	p = pend - cxt->rgn[FT_STRINGS].size;
	memcpy(p, cxt->rgn[FT_STRINGS].start, cxt->rgn[FT_STRINGS].size);
	stroff = cxt->str_anchor - cxt->rgn[FT_STRINGS].start;
	cxt->rgn[FT_STRINGS].start = p;
	cxt->str_anchor = p + stroff;

	cxt->isordered = 1;
	return 1;
}

static inline char *prev_end(struct ft_cxt *cxt, enum ft_rgn_id r)
{
	if (r > FT_RSVMAP)
		return cxt->rgn[r - 1].start + cxt->rgn[r - 1].size;
	return (char *)cxt->bph + HDR_SIZE;
}

static inline char *next_start(struct ft_cxt *cxt, enum ft_rgn_id r)
{
	if (r < FT_STRINGS)
		return cxt->rgn[r + 1].start;
	return (char *)cxt->bph + cxt->max_size;
}

/*
 * See if we can expand region rgn by nextra bytes by using up
 * free space after or before the region.
 */
static int ft_shuffle(struct ft_cxt *cxt, char **pp, enum ft_rgn_id rgn,
		      int nextra)
{
	char *p = *pp;
	char *rgn_start, *rgn_end;

	rgn_start = cxt->rgn[rgn].start;
	rgn_end = rgn_start + cxt->rgn[rgn].size;
	if (nextra <= 0 || rgn_end + nextra <= next_start(cxt, rgn)) {
		/* move following stuff */
		if (p < rgn_end) {
			if (nextra < 0)
				memmove(p, p - nextra, rgn_end - p + nextra);
			else
				memmove(p + nextra, p, rgn_end - p);
			if (rgn == FT_STRUCT)
				ft_node_update_after(cxt, p, nextra);
		}
		cxt->rgn[rgn].size += nextra;
		if (rgn == FT_STRINGS)
			/* assumes strings only added at beginning */
			cxt->str_anchor += nextra;
		return 1;
	}
	if (prev_end(cxt, rgn) <= rgn_start - nextra) {
		/* move preceding stuff */
		if (p > rgn_start) {
			memmove(rgn_start - nextra, rgn_start, p - rgn_start);
			if (rgn == FT_STRUCT)
				ft_node_update_before(cxt, p, -nextra);
		}
		*pp -= nextra;
		cxt->rgn[rgn].start -= nextra;
		cxt->rgn[rgn].size += nextra;
		return 1;
	}
	return 0;
}

static int ft_make_space(struct ft_cxt *cxt, char **pp, enum ft_rgn_id rgn,
			 int nextra)
{
	unsigned long size, ssize, tot;
	char *str, *next;
	enum ft_rgn_id r;

	if (!cxt->isordered) {
		unsigned long rgn_off = *pp - cxt->rgn[rgn].start;

		if (!ft_reorder(cxt, nextra))
			return 0;

		*pp = cxt->rgn[rgn].start + rgn_off;
	}
	if (ft_shuffle(cxt, pp, rgn, nextra))
		return 1;

	/* See if there is space after the strings section */
	ssize = cxt->rgn[FT_STRINGS].size;
	if (cxt->rgn[FT_STRINGS].start + ssize
	    < (char *)cxt->bph + cxt->max_size) {
		/* move strings up as far as possible */
		str = (char *)cxt->bph + cxt->max_size - ssize;
		cxt->str_anchor += str - cxt->rgn[FT_STRINGS].start;
		memmove(str, cxt->rgn[FT_STRINGS].start, ssize);
		cxt->rgn[FT_STRINGS].start = str;
		/* enough space now? */
		if (rgn >= FT_STRUCT && ft_shuffle(cxt, pp, rgn, nextra))
			return 1;
	}

	/* how much total free space is there following this region? */
	tot = 0;
	for (r = rgn; r < FT_STRINGS; ++r) {
		char *r_end = cxt->rgn[r].start + cxt->rgn[r].size;
		tot += next_start(cxt, rgn) - r_end;
	}

	/* cast is to shut gcc up; we know nextra >= 0 */
	if (tot < (unsigned int)nextra) {
		/* have to reallocate */
		char *newp, *new_start;
		int shift;

		if (!cxt->realloc)
			return 0;
		size = _ALIGN(cxt->max_size + (nextra - tot) + EXPAND_INCR, 8);
		newp = cxt->realloc(cxt->bph, size);
		if (!newp)
			return 0;
		cxt->max_size = size;
		shift = newp - (char *)cxt->bph;

		if (shift) { /* realloc can return same addr */
			cxt->bph = (struct boot_param_header *)newp;
			ft_node_update_after(cxt, cxt->rgn[FT_STRUCT].start,
					     shift);
			for (r = FT_RSVMAP; r <= FT_STRINGS; ++r) {
				new_start = cxt->rgn[r].start + shift;
				cxt->rgn[r].start = new_start;
			}
			*pp += shift;
			cxt->str_anchor += shift;
		}

		/* move strings up to the end */
		str = newp + size - ssize;
		cxt->str_anchor += str - cxt->rgn[FT_STRINGS].start;
		memmove(str, cxt->rgn[FT_STRINGS].start, ssize);
		cxt->rgn[FT_STRINGS].start = str;

		if (ft_shuffle(cxt, pp, rgn, nextra))
			return 1;
	}

	/* must be FT_RSVMAP and we need to move FT_STRUCT up */
	if (rgn == FT_RSVMAP) {
		next = cxt->rgn[FT_RSVMAP].start + cxt->rgn[FT_RSVMAP].size
			+ nextra;
		ssize = cxt->rgn[FT_STRUCT].size;
		if (next + ssize >= cxt->rgn[FT_STRINGS].start)
			return 0;	/* "can't happen" */
		memmove(next, cxt->rgn[FT_STRUCT].start, ssize);
		ft_node_update_after(cxt, cxt->rgn[FT_STRUCT].start, nextra);
		cxt->rgn[FT_STRUCT].start = next;

		if (ft_shuffle(cxt, pp, rgn, nextra))
			return 1;
	}

	return 0;		/* "can't happen" */
}

static void ft_put_word(struct ft_cxt *cxt, u32 v)
{
	*(u32 *) cxt->p = cpu_to_be32(v);
	cxt->p += 4;
}

static void ft_put_bin(struct ft_cxt *cxt, const void *data, unsigned int sz)
{
	unsigned long sza = _ALIGN(sz, 4);

	/* zero out the alignment gap if necessary */
	if (sz < sza)
		*(u32 *) (cxt->p + sza - 4) = 0;

	/* copy in the data */
	memcpy(cxt->p, data, sz);

	cxt->p += sza;
}

char *ft_begin_node(struct ft_cxt *cxt, const char *name)
{
	unsigned long nlen = strlen(name) + 1;
	unsigned long len = 8 + _ALIGN(nlen, 4);
	char *ret;

	if (!ft_make_space(cxt, &cxt->p, FT_STRUCT, len))
		return NULL;

	ret = cxt->p;

	ft_put_word(cxt, OF_DT_BEGIN_NODE);
	ft_put_bin(cxt, name, strlen(name) + 1);

	return ret;
}

void ft_end_node(struct ft_cxt *cxt)
{
	ft_put_word(cxt, OF_DT_END_NODE);
}

void ft_nop(struct ft_cxt *cxt)
{
	if (ft_make_space(cxt, &cxt->p, FT_STRUCT, 4))
		ft_put_word(cxt, OF_DT_NOP);
}

#define NO_STRING	0x7fffffff

static int lookup_string(struct ft_cxt *cxt, const char *name)
{
	char *p, *end;

	p = cxt->rgn[FT_STRINGS].start;
	end = p + cxt->rgn[FT_STRINGS].size;
	while (p < end) {
		if (strcmp(p, (char *)name) == 0)
			return p - cxt->str_anchor;
		p += strlen(p) + 1;
	}

	return NO_STRING;
}

/* lookup string and insert if not found */
static int map_string(struct ft_cxt *cxt, const char *name)
{
	int off;
	char *p;

	off = lookup_string(cxt, name);
	if (off != NO_STRING)
		return off;
	p = cxt->rgn[FT_STRINGS].start;
	if (!ft_make_space(cxt, &p, FT_STRINGS, strlen(name) + 1))
		return NO_STRING;
	strcpy(p, name);
	return p - cxt->str_anchor;
}

int ft_prop(struct ft_cxt *cxt, const char *name, const void *data,
	    unsigned int sz)
{
	int off, len;

	off = map_string(cxt, name);
	if (off == NO_STRING)
		return -1;

	len = 12 + _ALIGN(sz, 4);
	if (!ft_make_space(cxt, &cxt->p, FT_STRUCT, len))
		return -1;

	ft_put_word(cxt, OF_DT_PROP);
	ft_put_word(cxt, sz);
	ft_put_word(cxt, off);
	ft_put_bin(cxt, data, sz);
	return 0;
}

int ft_prop_str(struct ft_cxt *cxt, const char *name, const char *str)
{
	return ft_prop(cxt, name, str, strlen(str) + 1);
}

int ft_prop_int(struct ft_cxt *cxt, const char *name, unsigned int val)
{
	u32 v = cpu_to_be32((u32) val);

	return ft_prop(cxt, name, &v, 4);
}

/* Calculate the size of the reserved map */
static unsigned long rsvmap_size(struct ft_cxt *cxt)
{
	struct ft_reserve *res;

	res = (struct ft_reserve *)cxt->rgn[FT_RSVMAP].start;
	while (res->start || res->len)
		++res;
	return (char *)(res + 1) - cxt->rgn[FT_RSVMAP].start;
}

/* Calculate the size of the struct region by stepping through it */
static unsigned long struct_size(struct ft_cxt *cxt)
{
	char *p = cxt->rgn[FT_STRUCT].start;
	char *next;
	struct ft_atom atom;

	/* make check in ft_next happy */
	if (cxt->rgn[FT_STRUCT].size == 0)
		cxt->rgn[FT_STRUCT].size = 0xfffffffful - (unsigned long)p;

	while ((next = ft_next(cxt, p, &atom)) != NULL)
		p = next;
	return p + 4 - cxt->rgn[FT_STRUCT].start;
}

/* add `adj' on to all string offset values in the struct area */
static void adjust_string_offsets(struct ft_cxt *cxt, int adj)
{
	char *p = cxt->rgn[FT_STRUCT].start;
	char *next;
	struct ft_atom atom;
	int off;

	while ((next = ft_next(cxt, p, &atom)) != NULL) {
		if (atom.tag == OF_DT_PROP) {
			off = be32_to_cpu(*(u32 *) (p + 8));
			*(u32 *) (p + 8) = cpu_to_be32(off + adj);
		}
		p = next;
	}
}

/* start construction of the flat OF tree from scratch */
void ft_begin(struct ft_cxt *cxt, void *blob, unsigned int max_size,
	      void *(*realloc_fn) (void *, unsigned long))
{
	struct boot_param_header *bph = blob;
	char *p;
	struct ft_reserve *pres;

	/* clear the cxt */
	memset(cxt, 0, sizeof(*cxt));

	cxt->bph = bph;
	cxt->max_size = max_size;
	cxt->realloc = realloc_fn;
	cxt->isordered = 1;

	/* zero everything in the header area */
	memset(bph, 0, sizeof(*bph));

	bph->magic = cpu_to_be32(OF_DT_HEADER);
	bph->version = cpu_to_be32(0x10);
	bph->last_comp_version = cpu_to_be32(0x10);

	/* start pointers */
	cxt->rgn[FT_RSVMAP].start = p = blob + HDR_SIZE;
	cxt->rgn[FT_RSVMAP].size = sizeof(struct ft_reserve);
	pres = (struct ft_reserve *)p;
	cxt->rgn[FT_STRUCT].start = p += sizeof(struct ft_reserve);
	cxt->rgn[FT_STRUCT].size = 4;
	cxt->rgn[FT_STRINGS].start = blob + max_size;
	cxt->rgn[FT_STRINGS].size = 0;

	/* init rsvmap and struct */
	pres->start = 0;
	pres->len = 0;
	*(u32 *) p = cpu_to_be32(OF_DT_END);

	cxt->str_anchor = blob;
}

/* open up an existing blob to be examined or modified */
int ft_open(struct ft_cxt *cxt, void *blob, unsigned int max_size,
	    unsigned int max_find_device,
	    void *(*realloc_fn) (void *, unsigned long))
{
	struct boot_param_header *bph = blob;

	/* can't cope with version < 16 */
	if (be32_to_cpu(bph->version) < 16)
		return -1;

	/* clear the cxt */
	memset(cxt, 0, sizeof(*cxt));

	/* alloc node_tbl to track node ptrs returned by ft_find_device */
	++max_find_device;
	cxt->node_tbl = realloc_fn(NULL, max_find_device * sizeof(char *));
	if (!cxt->node_tbl)
		return -1;
	memset(cxt->node_tbl, 0, max_find_device * sizeof(char *));
	cxt->node_max = max_find_device;
	cxt->nodes_used = 1;	/* don't use idx 0 b/c looks like NULL */

	cxt->bph = bph;
	cxt->max_size = max_size;
	cxt->realloc = realloc_fn;

	cxt->rgn[FT_RSVMAP].start = blob + be32_to_cpu(bph->off_mem_rsvmap);
	cxt->rgn[FT_RSVMAP].size = rsvmap_size(cxt);
	cxt->rgn[FT_STRUCT].start = blob + be32_to_cpu(bph->off_dt_struct);
	cxt->rgn[FT_STRUCT].size = struct_size(cxt);
	cxt->rgn[FT_STRINGS].start = blob + be32_to_cpu(bph->off_dt_strings);
	cxt->rgn[FT_STRINGS].size = be32_to_cpu(bph->dt_strings_size);

	cxt->p = cxt->rgn[FT_STRUCT].start;
	cxt->str_anchor = cxt->rgn[FT_STRINGS].start;

	return 0;
}

/* add a reserved physical area to the rsvmap */
int ft_add_rsvmap(struct ft_cxt *cxt, u64 physaddr, u64 size)
{
	char *p;
	struct ft_reserve *pres;

	p = cxt->rgn[FT_RSVMAP].start + cxt->rgn[FT_RSVMAP].size
		- sizeof(struct ft_reserve);
	if (!ft_make_space(cxt, &p, FT_RSVMAP, sizeof(struct ft_reserve)))
		return -1;

	pres = (struct ft_reserve *)p;
	pres->start = cpu_to_be64(physaddr);
	pres->len = cpu_to_be64(size);

	return 0;
}

void ft_begin_tree(struct ft_cxt *cxt)
{
	cxt->p = ft_root_node(cxt);
}

void ft_end_tree(struct ft_cxt *cxt)
{
	struct boot_param_header *bph = cxt->bph;
	char *p, *oldstr, *str, *endp;
	unsigned long ssize;
	int adj;

	if (!cxt->isordered)
		return;		/* we haven't touched anything */

	/* adjust string offsets */
	oldstr = cxt->rgn[FT_STRINGS].start;
	adj = cxt->str_anchor - oldstr;
	if (adj)
		adjust_string_offsets(cxt, adj);

	/* make strings end on 8-byte boundary */
	ssize = cxt->rgn[FT_STRINGS].size;
	endp = (char *)_ALIGN((unsigned long)cxt->rgn[FT_STRUCT].start
			+ cxt->rgn[FT_STRUCT].size + ssize, 8);
	str = endp - ssize;

	/* move strings down to end of structs */
	memmove(str, oldstr, ssize);
	cxt->str_anchor = str;
	cxt->rgn[FT_STRINGS].start = str;

	/* fill in header fields */
	p = (char *)bph;
	bph->totalsize = cpu_to_be32(endp - p);
	bph->off_mem_rsvmap = cpu_to_be32(cxt->rgn[FT_RSVMAP].start - p);
	bph->off_dt_struct = cpu_to_be32(cxt->rgn[FT_STRUCT].start - p);
	bph->off_dt_strings = cpu_to_be32(cxt->rgn[FT_STRINGS].start - p);
	bph->dt_strings_size = cpu_to_be32(ssize);
}

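/*
 * Illustrative sketch, not part of the original file: building a minimal
 * blob from scratch with the construction API above (ft_begin, ft_add_rsvmap,
 * ft_begin_tree, ft_begin_node/ft_prop*/ /* ... /ft_end_node, ft_end_tree).
 * The buffer, its size, the realloc callback and all node/property names and
 * values are assumptions invented for this example; it is guarded out so it
 * does not affect the build.
 */
#if 0
static void example_build_blob(void *buf, unsigned int bufsize,
			       void *(*realloc_fn)(void *, unsigned long))
{
	struct ft_cxt cxt;

	ft_begin(&cxt, buf, bufsize, realloc_fn);	/* empty header + regions */
	ft_add_rsvmap(&cxt, 0x1000000, 0x4000);		/* reserve a 16kB area */
	ft_begin_tree(&cxt);				/* position at the struct region */

	ft_begin_node(&cxt, "");			/* root node has an empty name */
	ft_prop_str(&cxt, "model", "example,board");
	ft_begin_node(&cxt, "chosen");
	ft_prop_str(&cxt, "bootargs", "console=ttyS0");
	ft_prop_int(&cxt, "linux,initrd-start", 0x2000000);
	ft_end_node(&cxt);				/* /chosen */
	ft_end_node(&cxt);				/* root */

	ft_end_tree(&cxt);	/* packs the strings and fills in the header */
}
#endif
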
void *ft_find_device(struct ft_cxt *cxt, const void *top, const char *srch_path)
{
	char *node;

	if (top) {
		node = ft_node_ph2node(cxt, top);
		if (node == NULL)
			return NULL;
	} else {
		node = ft_root_node(cxt);
	}

	node = ft_find_descendent(cxt, node, srch_path);
	return ft_get_phandle(cxt, node);
}

void *ft_find_descendent(struct ft_cxt *cxt, void *top, const char *srch_path)
{
	struct ft_atom atom;
	char *p;
	const char *cp, *q;
	int cl;
	int depth = -1;
	int dmatch = 0;
	const char *path_comp[FT_MAX_DEPTH];

	cp = srch_path;
	cl = 0;
	p = top;

	while ((p = ft_next(cxt, p, &atom)) != NULL) {
		switch (atom.tag) {
		case OF_DT_BEGIN_NODE:
			++depth;
			if (depth != dmatch)
				break;
			cxt->genealogy[depth] = atom.data;
			cxt->genealogy[depth + 1] = NULL;
			if (depth && !(strncmp(atom.name, cp, cl) == 0
				       && (atom.name[cl] == '/'
					   || atom.name[cl] == '\0'
					   || atom.name[cl] == '@')))
				break;
			path_comp[dmatch] = cp;
			/* it matches so far, advance to next path component */
			cp += cl;
			/* skip slashes */
			while (*cp == '/')
				++cp;
			/* we're done if this is the end of the string */
			if (*cp == 0)
				return atom.data;
			/* look for end of this component */
			q = strchr(cp, '/');
			if (q)
				cl = q - cp;
			else
				cl = strlen(cp);
			++dmatch;
			break;
		case OF_DT_END_NODE:
			if (depth == 0)
				return NULL;
			if (dmatch > depth) {
				--dmatch;
				cl = cp - path_comp[dmatch] - 1;
				cp = path_comp[dmatch];
				while (cl > 0 && cp[cl - 1] == '/')
					--cl;
			}
			--depth;
			break;
		}
	}
	return NULL;
}

void *__ft_get_parent(struct ft_cxt *cxt, void *node)
{
	int d;
	struct ft_atom atom;
	char *p;

	for (d = 0; cxt->genealogy[d] != NULL; ++d)
		if (cxt->genealogy[d] == node)
			return d > 0 ? cxt->genealogy[d - 1] : NULL;

	/* have to do it the hard way... */
	p = ft_root_node(cxt);
	d = 0;
	while ((p = ft_next(cxt, p, &atom)) != NULL) {
		switch (atom.tag) {
		case OF_DT_BEGIN_NODE:
			cxt->genealogy[d] = atom.data;
			if (node == atom.data) {
				/* found it */
				cxt->genealogy[d + 1] = NULL;
				return d > 0 ? cxt->genealogy[d - 1] : NULL;
			}
			++d;
			break;
		case OF_DT_END_NODE:
			--d;
			break;
		}
	}
	return NULL;
}

void *ft_get_parent(struct ft_cxt *cxt, const void *phandle)
{
	void *node = ft_node_ph2node(cxt, phandle);
	if (node == NULL)
		return NULL;

	node = __ft_get_parent(cxt, node);
	return ft_get_phandle(cxt, node);
}

static const void *__ft_get_prop(struct ft_cxt *cxt, void *node,
				 const char *propname, unsigned int *len)
{
	struct ft_atom atom;
	int depth = 0;

	while ((node = ft_next(cxt, node, &atom)) != NULL) {
		switch (atom.tag) {
		case OF_DT_BEGIN_NODE:
			++depth;
			break;

		case OF_DT_PROP:
			if (depth != 1 || strcmp(atom.name, propname))
				break;

			if (len)
				*len = atom.size;

			return atom.data;

		case OF_DT_END_NODE:
			if (--depth <= 0)
				return NULL;
		}
	}

	return NULL;
}

int ft_get_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
		void *buf, const unsigned int buflen)
{
	const void *data;
	unsigned int size;

	void *node = ft_node_ph2node(cxt, phandle);
	if (!node)
		return -1;

	data = __ft_get_prop(cxt, node, propname, &size);
	if (data) {
		unsigned int clipped_size = min(size, buflen);
		memcpy(buf, data, clipped_size);
		return size;
	}

	return -1;
}

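/*
 * Illustrative sketch, not part of the original file: opening an existing
 * blob and reading a property through the phandle-based API above
 * (ft_open, ft_find_device, ft_get_prop).  The path, the property name,
 * the phandle-table size of 16 and the output buffer are assumptions for
 * this example; it is guarded out so it does not affect the build.
 */
#if 0
static int example_read_bootargs(void *blob, unsigned int size,
				 void *(*realloc_fn)(void *, unsigned long),
				 char *out, unsigned int outlen)
{
	struct ft_cxt cxt;
	void *chosen;

	if (ft_open(&cxt, blob, size, 16, realloc_fn) < 0)
		return -1;		/* bad or too-old blob */

	/* NULL "top" means the search starts at the root node */
	chosen = ft_find_device(&cxt, NULL, "/chosen");
	if (!chosen)
		return -1;

	/* returns the full property size, or -1 if it does not exist */
	return ft_get_prop(&cxt, chosen, "bootargs", out, outlen);
}
#endif
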
void *__ft_find_node_by_prop_value(struct ft_cxt *cxt, void *prev,
				   const char *propname, const char *propval,
				   unsigned int proplen)
{
	struct ft_atom atom;
	char *p = ft_root_node(cxt);
	char *next;
	int past_prev = prev ? 0 : 1;
	int depth = -1;

	while ((next = ft_next(cxt, p, &atom)) != NULL) {
		const void *data;
		unsigned int size;

		switch (atom.tag) {
		case OF_DT_BEGIN_NODE:
			depth++;

			if (prev == p) {
				past_prev = 1;
				break;
			}

			if (!past_prev || depth < 1)
				break;

			data = __ft_get_prop(cxt, p, propname, &size);
			if (!data || size != proplen)
				break;
			if (memcmp(data, propval, size))
				break;

			return p;

		case OF_DT_END_NODE:
			if (depth-- == 0)
				return NULL;

			break;
		}

		p = next;
	}

	return NULL;
}

void *ft_find_node_by_prop_value(struct ft_cxt *cxt, const void *prev,
				 const char *propname, const char *propval,
				 int proplen)
{
	void *node = NULL;

	if (prev) {
		node = ft_node_ph2node(cxt, prev);

		if (!node)
			return NULL;
	}

	node = __ft_find_node_by_prop_value(cxt, node, propname,
					    propval, proplen);
	return ft_get_phandle(cxt, node);
}

int ft_set_prop(struct ft_cxt *cxt, const void *phandle, const char *propname,
		const void *buf, const unsigned int buflen)
{
	struct ft_atom atom;
	void *node;
	char *p, *next;
	int nextra;

	node = ft_node_ph2node(cxt, phandle);
	if (node == NULL)
		return -1;

	next = ft_next(cxt, node, &atom);
	if (atom.tag != OF_DT_BEGIN_NODE)
		/* phandle didn't point to a node */
		return -1;
	p = next;

	while ((next = ft_next(cxt, p, &atom)) != NULL) {
		switch (atom.tag) {
		case OF_DT_BEGIN_NODE: /* properties must go before subnodes */
		case OF_DT_END_NODE:
			/* haven't found the property, insert here */
			cxt->p = p;
			return ft_prop(cxt, propname, buf, buflen);
		case OF_DT_PROP:
			if (strcmp(atom.name, propname))
				break;
			/* found an existing property, overwrite it */
			nextra = _ALIGN(buflen, 4) - _ALIGN(atom.size, 4);
			cxt->p = atom.data;
			if (nextra && !ft_make_space(cxt, &cxt->p, FT_STRUCT,
						     nextra))
				return -1;
			*(u32 *) (cxt->p - 8) = cpu_to_be32(buflen);
			ft_put_bin(cxt, buf, buflen);
			return 0;
		}
		p = next;
	}
	return -1;
}

int ft_del_prop(struct ft_cxt *cxt, const void *phandle, const char *propname)
{
	struct ft_atom atom;
	void *node;
	char *p, *next;
	int size;

	node = ft_node_ph2node(cxt, phandle);
	if (node == NULL)
		return -1;

	p = node;
	while ((next = ft_next(cxt, p, &atom)) != NULL) {
		switch (atom.tag) {
		case OF_DT_BEGIN_NODE:
		case OF_DT_END_NODE:
			return -1;
		case OF_DT_PROP:
			if (strcmp(atom.name, propname))
				break;
			/* found the property, remove it */
			size = 12 + _ALIGN(atom.size, 4);
			cxt->p = p;
			if (!ft_make_space(cxt, &cxt->p, FT_STRUCT, -size))
				return -1;
			return 0;
		}
		p = next;
	}
	return -1;
}

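/*
 * Illustrative sketch, not part of the original file: updating a blob in
 * place with ft_set_prop()/ft_del_prop() above.  ft_set_prop() overwrites
 * an existing property or inserts a new one before the node's first
 * subnode.  The node path, property names and value are assumptions for
 * this example; it is guarded out so it does not affect the build.
 */
#if 0
static int example_update_chosen(struct ft_cxt *cxt)
{
	const char *args = "console=ttyS0 root=/dev/nfs";
	void *chosen;

	chosen = ft_find_device(cxt, NULL, "/chosen");
	if (!chosen)
		return -1;

	/* replace (or create) the bootargs string, including its NUL */
	if (ft_set_prop(cxt, chosen, "bootargs", args, strlen(args) + 1) < 0)
		return -1;

	/* remove a property we no longer want; ignore it if it is absent */
	ft_del_prop(cxt, chosen, "linux,initrd-start");
	return 0;
}
#endif
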
void *ft_create_node(struct ft_cxt *cxt, const void *parent, const char *name)
{
	struct ft_atom atom;
	char *p, *next, *ret;
	int depth = 0;

	if (parent) {
		p = ft_node_ph2node(cxt, parent);
		if (!p)
			return NULL;
	} else {
		p = ft_root_node(cxt);
	}

	while ((next = ft_next(cxt, p, &atom)) != NULL) {
		switch (atom.tag) {
		case OF_DT_BEGIN_NODE:
			++depth;
			if (depth == 1 && strcmp(atom.name, name) == 0)
				/* duplicate node name, return error */
				return NULL;
			break;
		case OF_DT_END_NODE:
			--depth;
			if (depth > 0)
				break;
			/* end of node, insert here */
			cxt->p = p;
			ret = ft_begin_node(cxt, name);
			ft_end_node(cxt);
			return ft_get_phandle(cxt, ret);
		}
		p = next;
	}
	return NULL;
}

/* Returns the start of the path within the provided buffer, or NULL on
 * error.
 */
char *ft_get_path(struct ft_cxt *cxt, const void *phandle,
		  char *buf, int len)
{
	const char *path_comp[FT_MAX_DEPTH];
	struct ft_atom atom;
	char *p, *next, *pos;
	int depth = 0, i;
	void *node;

	node = ft_node_ph2node(cxt, phandle);
	if (node == NULL)
		return NULL;

	p = ft_root_node(cxt);

	while ((next = ft_next(cxt, p, &atom)) != NULL) {
		switch (atom.tag) {
		case OF_DT_BEGIN_NODE:
			path_comp[depth++] = atom.name;
			if (p == node)
				goto found;

			break;

		case OF_DT_END_NODE:
			if (--depth == 0)
				return NULL;
		}

		p = next;
	}

found:
	pos = buf;
	for (i = 1; i < depth; i++) {
		int this_len;

		if (len <= 1)
			return NULL;

		*pos++ = '/';
		len--;

		strncpy(pos, path_comp[i], len);

		if (pos[len - 1] != 0)
			return NULL;

		this_len = strlen(pos);
		len -= this_len;
		pos += this_len;
	}

	return buf;
}
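
/*
 * Illustrative sketch, not part of the original file: adding a node and
 * reading back its full path with ft_create_node()/ft_get_path() above.
 * The node name, property value and buffer size are assumptions for this
 * example; it is guarded out so it does not affect the build.
 */
#if 0
static void *example_add_node(struct ft_cxt *cxt)
{
	char path[64];
	void *node;

	/* NULL parent means "create directly under the root node" */
	node = ft_create_node(cxt, NULL, "nvram");
	if (!node)
		return NULL;		/* duplicate name or out of space */

	if (ft_set_prop(cxt, node, "device_type", "nvram", 6) < 0)
		return NULL;

	/* should yield "/nvram" in path[] */
	if (!ft_get_path(cxt, node, path, sizeof(path)))
		return NULL;

	return node;
}
#endif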