/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright Pantelis Antoniou 2006
 * Copyright (C) IBM Corporation 2006
 *
 * Authors: Pantelis Antoniou <pantelis@embeddedalley.com>
 *	    Hollis Blanchard <hollisb@us.ibm.com>
 *	    Mark A. Greer <mgreer@mvista.com>
 *	    Paul Mackerras <paulus@samba.org>
 */
27 #include "flatdevtree.h"
28 #include "flatdevtree_env.h"
30 #define _ALIGN(x, al) (((x) + (al) - 1) & ~((al) - 1))
32 static char *ft_root_node(struct ft_cxt
*cxt
)
34 return cxt
->rgn
[FT_STRUCT
].start
;
37 /* Routines for keeping node ptrs returned by ft_find_device current */
38 /* First entry not used b/c it would return 0 and be taken as NULL/error */
39 static void *ft_get_phandle(struct ft_cxt
*cxt
, char *node
)
46 for (i
= 1; i
< cxt
->nodes_used
; i
++) /* already there? */
47 if (cxt
->node_tbl
[i
] == node
)
50 if (cxt
->nodes_used
< cxt
->node_max
) {
51 cxt
->node_tbl
[cxt
->nodes_used
] = node
;
52 return (void *)cxt
->nodes_used
++;
58 static char *ft_node_ph2node(struct ft_cxt
*cxt
, const void *phandle
)
60 unsigned int i
= (unsigned int)phandle
;
62 if (i
< cxt
->nodes_used
)
63 return cxt
->node_tbl
[i
];
67 static void ft_node_update_before(struct ft_cxt
*cxt
, char *addr
, int shift
)
74 for (i
= 1; i
< cxt
->nodes_used
; i
++)
75 if (cxt
->node_tbl
[i
] < addr
)
76 cxt
->node_tbl
[i
] += shift
;
79 static void ft_node_update_after(struct ft_cxt
*cxt
, char *addr
, int shift
)
86 for (i
= 1; i
< cxt
->nodes_used
; i
++)
87 if (cxt
->node_tbl
[i
] >= addr
)
88 cxt
->node_tbl
[i
] += shift
;
91 /* Struct used to return info from ft_next() */
99 /* Set ptrs to current one's info; return addr of next one */
100 static char *ft_next(struct ft_cxt
*cxt
, char *p
, struct ft_atom
*ret
)
104 if (p
>= cxt
->rgn
[FT_STRUCT
].start
+ cxt
->rgn
[FT_STRUCT
].size
)
107 ret
->tag
= be32_to_cpu(*(u32
*) p
);
110 switch (ret
->tag
) { /* Tag */
111 case OF_DT_BEGIN_NODE
:
113 ret
->data
= (void *)(p
- 4); /* start of node */
114 p
+= _ALIGN(strlen(p
) + 1, 4);
117 ret
->size
= sz
= be32_to_cpu(*(u32
*) p
);
118 ret
->name
= cxt
->str_anchor
+ be32_to_cpu(*(u32
*) (p
+ 4));
119 ret
->data
= (void *)(p
+ 8);
120 p
+= 8 + _ALIGN(sz
, 4);
134 #define HDR_SIZE _ALIGN(sizeof(struct boot_param_header), 8)
135 #define EXPAND_INCR 1024 /* alloc this much extra when expanding */
137 /* Copy the tree to a newly-allocated region and put things in order */
138 static int ft_reorder(struct ft_cxt
*cxt
, int nextra
)
145 tot
= HDR_SIZE
+ EXPAND_INCR
;
146 for (r
= FT_RSVMAP
; r
<= FT_STRINGS
; ++r
)
147 tot
+= cxt
->rgn
[r
].size
;
150 tot
= _ALIGN(tot
, 8);
154 p
= cxt
->realloc(NULL
, tot
);
158 memcpy(p
, cxt
->bph
, sizeof(struct boot_param_header
));
159 /* offsets get fixed up later */
161 cxt
->bph
= (struct boot_param_header
*)p
;
166 memcpy(p
, cxt
->rgn
[FT_RSVMAP
].start
, cxt
->rgn
[FT_RSVMAP
].size
);
167 cxt
->rgn
[FT_RSVMAP
].start
= p
;
168 p
+= cxt
->rgn
[FT_RSVMAP
].size
;
170 memcpy(p
, cxt
->rgn
[FT_STRUCT
].start
, cxt
->rgn
[FT_STRUCT
].size
);
171 ft_node_update_after(cxt
, cxt
->rgn
[FT_STRUCT
].start
,
172 p
- cxt
->rgn
[FT_STRUCT
].start
);
173 cxt
->p
+= p
- cxt
->rgn
[FT_STRUCT
].start
;
174 cxt
->rgn
[FT_STRUCT
].start
= p
;
176 p
= pend
- cxt
->rgn
[FT_STRINGS
].size
;
177 memcpy(p
, cxt
->rgn
[FT_STRINGS
].start
, cxt
->rgn
[FT_STRINGS
].size
);
178 stroff
= cxt
->str_anchor
- cxt
->rgn
[FT_STRINGS
].start
;
179 cxt
->rgn
[FT_STRINGS
].start
= p
;
180 cxt
->str_anchor
= p
+ stroff
;
186 static inline char *prev_end(struct ft_cxt
*cxt
, enum ft_rgn_id r
)
189 return cxt
->rgn
[r
- 1].start
+ cxt
->rgn
[r
- 1].size
;
190 return (char *)cxt
->bph
+ HDR_SIZE
;
193 static inline char *next_start(struct ft_cxt
*cxt
, enum ft_rgn_id r
)
196 return cxt
->rgn
[r
+ 1].start
;
197 return (char *)cxt
->bph
+ cxt
->max_size
;
201 * See if we can expand region rgn by nextra bytes by using up
202 * free space after or before the region.
204 static int ft_shuffle(struct ft_cxt
*cxt
, char **pp
, enum ft_rgn_id rgn
,
208 char *rgn_start
, *rgn_end
;
210 rgn_start
= cxt
->rgn
[rgn
].start
;
211 rgn_end
= rgn_start
+ cxt
->rgn
[rgn
].size
;
212 if (nextra
<= 0 || rgn_end
+ nextra
<= next_start(cxt
, rgn
)) {
213 /* move following stuff */
216 memmove(p
, p
- nextra
, rgn_end
- p
+ nextra
);
218 memmove(p
+ nextra
, p
, rgn_end
- p
);
219 if (rgn
== FT_STRUCT
)
220 ft_node_update_after(cxt
, p
, nextra
);
222 cxt
->rgn
[rgn
].size
+= nextra
;
223 if (rgn
== FT_STRINGS
)
224 /* assumes strings only added at beginning */
225 cxt
->str_anchor
+= nextra
;
228 if (prev_end(cxt
, rgn
) <= rgn_start
- nextra
) {
229 /* move preceding stuff */
231 memmove(rgn_start
- nextra
, rgn_start
, p
- rgn_start
);
232 if (rgn
== FT_STRUCT
)
233 ft_node_update_before(cxt
, p
, -nextra
);
236 cxt
->rgn
[rgn
].start
-= nextra
;
237 cxt
->rgn
[rgn
].size
+= nextra
;
243 static int ft_make_space(struct ft_cxt
*cxt
, char **pp
, enum ft_rgn_id rgn
,
246 unsigned long size
, ssize
, tot
;
250 if (!cxt
->isordered
) {
251 unsigned long rgn_off
= *pp
- cxt
->rgn
[rgn
].start
;
253 if (!ft_reorder(cxt
, nextra
))
256 *pp
= cxt
->rgn
[rgn
].start
+ rgn_off
;
258 if (ft_shuffle(cxt
, pp
, rgn
, nextra
))
261 /* See if there is space after the strings section */
262 ssize
= cxt
->rgn
[FT_STRINGS
].size
;
263 if (cxt
->rgn
[FT_STRINGS
].start
+ ssize
264 < (char *)cxt
->bph
+ cxt
->max_size
) {
265 /* move strings up as far as possible */
266 str
= (char *)cxt
->bph
+ cxt
->max_size
- ssize
;
267 cxt
->str_anchor
+= str
- cxt
->rgn
[FT_STRINGS
].start
;
268 memmove(str
, cxt
->rgn
[FT_STRINGS
].start
, ssize
);
269 cxt
->rgn
[FT_STRINGS
].start
= str
;
270 /* enough space now? */
271 if (rgn
>= FT_STRUCT
&& ft_shuffle(cxt
, pp
, rgn
, nextra
))
275 /* how much total free space is there following this region? */
277 for (r
= rgn
; r
< FT_STRINGS
; ++r
) {
278 char *r_end
= cxt
->rgn
[r
].start
+ cxt
->rgn
[r
].size
;
279 tot
+= next_start(cxt
, rgn
) - r_end
;
282 /* cast is to shut gcc up; we know nextra >= 0 */
283 if (tot
< (unsigned int)nextra
) {
284 /* have to reallocate */
285 char *newp
, *new_start
;
290 size
= _ALIGN(cxt
->max_size
+ (nextra
- tot
) + EXPAND_INCR
, 8);
291 newp
= cxt
->realloc(cxt
->bph
, size
);
294 cxt
->max_size
= size
;
295 shift
= newp
- (char *)cxt
->bph
;
297 if (shift
) { /* realloc can return same addr */
298 cxt
->bph
= (struct boot_param_header
*)newp
;
299 ft_node_update_after(cxt
, cxt
->rgn
[FT_STRUCT
].start
,
301 for (r
= FT_RSVMAP
; r
<= FT_STRINGS
; ++r
) {
302 new_start
= cxt
->rgn
[r
].start
+ shift
;
303 cxt
->rgn
[r
].start
= new_start
;
306 cxt
->str_anchor
+= shift
;
309 /* move strings up to the end */
310 str
= newp
+ size
- ssize
;
311 cxt
->str_anchor
+= str
- cxt
->rgn
[FT_STRINGS
].start
;
312 memmove(str
, cxt
->rgn
[FT_STRINGS
].start
, ssize
);
313 cxt
->rgn
[FT_STRINGS
].start
= str
;
315 if (ft_shuffle(cxt
, pp
, rgn
, nextra
))
319 /* must be FT_RSVMAP and we need to move FT_STRUCT up */
320 if (rgn
== FT_RSVMAP
) {
321 next
= cxt
->rgn
[FT_RSVMAP
].start
+ cxt
->rgn
[FT_RSVMAP
].size
323 ssize
= cxt
->rgn
[FT_STRUCT
].size
;
324 if (next
+ ssize
>= cxt
->rgn
[FT_STRINGS
].start
)
325 return 0; /* "can't happen" */
326 memmove(next
, cxt
->rgn
[FT_STRUCT
].start
, ssize
);
327 ft_node_update_after(cxt
, cxt
->rgn
[FT_STRUCT
].start
, nextra
);
328 cxt
->rgn
[FT_STRUCT
].start
= next
;
330 if (ft_shuffle(cxt
, pp
, rgn
, nextra
))
334 return 0; /* "can't happen" */
337 static void ft_put_word(struct ft_cxt
*cxt
, u32 v
)
339 *(u32
*) cxt
->p
= cpu_to_be32(v
);
343 static void ft_put_bin(struct ft_cxt
*cxt
, const void *data
, unsigned int sz
)
345 unsigned long sza
= _ALIGN(sz
, 4);
347 /* zero out the alignment gap if necessary */
349 *(u32
*) (cxt
->p
+ sza
- 4) = 0;
351 /* copy in the data */
352 memcpy(cxt
->p
, data
, sz
);
357 int ft_begin_node(struct ft_cxt
*cxt
, const char *name
)
359 unsigned long nlen
= strlen(name
) + 1;
360 unsigned long len
= 8 + _ALIGN(nlen
, 4);
362 if (!ft_make_space(cxt
, &cxt
->p
, FT_STRUCT
, len
))
364 ft_put_word(cxt
, OF_DT_BEGIN_NODE
);
365 ft_put_bin(cxt
, name
, strlen(name
) + 1);
369 void ft_end_node(struct ft_cxt
*cxt
)
371 ft_put_word(cxt
, OF_DT_END_NODE
);
374 void ft_nop(struct ft_cxt
*cxt
)
376 if (ft_make_space(cxt
, &cxt
->p
, FT_STRUCT
, 4))
377 ft_put_word(cxt
, OF_DT_NOP
);
380 #define NO_STRING 0x7fffffff
382 static int lookup_string(struct ft_cxt
*cxt
, const char *name
)
386 p
= cxt
->rgn
[FT_STRINGS
].start
;
387 end
= p
+ cxt
->rgn
[FT_STRINGS
].size
;
389 if (strcmp(p
, (char *)name
) == 0)
390 return p
- cxt
->str_anchor
;
397 /* lookup string and insert if not found */
398 static int map_string(struct ft_cxt
*cxt
, const char *name
)
403 off
= lookup_string(cxt
, name
);
404 if (off
!= NO_STRING
)
406 p
= cxt
->rgn
[FT_STRINGS
].start
;
407 if (!ft_make_space(cxt
, &p
, FT_STRINGS
, strlen(name
) + 1))
410 return p
- cxt
->str_anchor
;
413 int ft_prop(struct ft_cxt
*cxt
, const char *name
, const void *data
,
418 off
= map_string(cxt
, name
);
419 if (off
== NO_STRING
)
422 len
= 12 + _ALIGN(sz
, 4);
423 if (!ft_make_space(cxt
, &cxt
->p
, FT_STRUCT
, len
))
426 ft_put_word(cxt
, OF_DT_PROP
);
427 ft_put_word(cxt
, sz
);
428 ft_put_word(cxt
, off
);
429 ft_put_bin(cxt
, data
, sz
);
/* Convenience wrapper: add a string-valued property (value includes the
 * terminating NUL).
 */
int ft_prop_str(struct ft_cxt *cxt, const char *name, const char *str)
{
	return ft_prop(cxt, name, str, strlen(str) + 1);
}
438 int ft_prop_int(struct ft_cxt
*cxt
, const char *name
, unsigned int val
)
440 u32 v
= cpu_to_be32((u32
) val
);
442 return ft_prop(cxt
, name
, &v
, 4);
445 /* Calculate the size of the reserved map */
446 static unsigned long rsvmap_size(struct ft_cxt
*cxt
)
448 struct ft_reserve
*res
;
450 res
= (struct ft_reserve
*)cxt
->rgn
[FT_RSVMAP
].start
;
451 while (res
->start
|| res
->len
)
453 return (char *)(res
+ 1) - cxt
->rgn
[FT_RSVMAP
].start
;
456 /* Calculate the size of the struct region by stepping through it */
457 static unsigned long struct_size(struct ft_cxt
*cxt
)
459 char *p
= cxt
->rgn
[FT_STRUCT
].start
;
463 /* make check in ft_next happy */
464 if (cxt
->rgn
[FT_STRUCT
].size
== 0)
465 cxt
->rgn
[FT_STRUCT
].size
= 0xfffffffful
- (unsigned long)p
;
467 while ((next
= ft_next(cxt
, p
, &atom
)) != NULL
)
469 return p
+ 4 - cxt
->rgn
[FT_STRUCT
].start
;
472 /* add `adj' on to all string offset values in the struct area */
473 static void adjust_string_offsets(struct ft_cxt
*cxt
, int adj
)
475 char *p
= cxt
->rgn
[FT_STRUCT
].start
;
480 while ((next
= ft_next(cxt
, p
, &atom
)) != NULL
) {
481 if (atom
.tag
== OF_DT_PROP
) {
482 off
= be32_to_cpu(*(u32
*) (p
+ 8));
483 *(u32
*) (p
+ 8) = cpu_to_be32(off
+ adj
);
489 /* start construction of the flat OF tree from scratch */
490 void ft_begin(struct ft_cxt
*cxt
, void *blob
, unsigned int max_size
,
491 void *(*realloc_fn
) (void *, unsigned long))
493 struct boot_param_header
*bph
= blob
;
495 struct ft_reserve
*pres
;
498 memset(cxt
, 0, sizeof(*cxt
));
501 cxt
->max_size
= max_size
;
502 cxt
->realloc
= realloc_fn
;
505 /* zero everything in the header area */
506 memset(bph
, 0, sizeof(*bph
));
508 bph
->magic
= cpu_to_be32(OF_DT_HEADER
);
509 bph
->version
= cpu_to_be32(0x10);
510 bph
->last_comp_version
= cpu_to_be32(0x10);
513 cxt
->rgn
[FT_RSVMAP
].start
= p
= blob
+ HDR_SIZE
;
514 cxt
->rgn
[FT_RSVMAP
].size
= sizeof(struct ft_reserve
);
515 pres
= (struct ft_reserve
*)p
;
516 cxt
->rgn
[FT_STRUCT
].start
= p
+= sizeof(struct ft_reserve
);
517 cxt
->rgn
[FT_STRUCT
].size
= 4;
518 cxt
->rgn
[FT_STRINGS
].start
= blob
+ max_size
;
519 cxt
->rgn
[FT_STRINGS
].size
= 0;
521 /* init rsvmap and struct */
524 *(u32
*) p
= cpu_to_be32(OF_DT_END
);
526 cxt
->str_anchor
= blob
;
529 /* open up an existing blob to be examined or modified */
530 int ft_open(struct ft_cxt
*cxt
, void *blob
, unsigned int max_size
,
531 unsigned int max_find_device
,
532 void *(*realloc_fn
) (void *, unsigned long))
534 struct boot_param_header
*bph
= blob
;
536 /* can't cope with version < 16 */
537 if (be32_to_cpu(bph
->version
) < 16)
541 memset(cxt
, 0, sizeof(*cxt
));
543 /* alloc node_tbl to track node ptrs returned by ft_find_device */
545 cxt
->node_tbl
= realloc_fn(NULL
, max_find_device
* sizeof(char *));
548 memset(cxt
->node_tbl
, 0, max_find_device
* sizeof(char *));
549 cxt
->node_max
= max_find_device
;
550 cxt
->nodes_used
= 1; /* don't use idx 0 b/c looks like NULL */
553 cxt
->max_size
= max_size
;
554 cxt
->realloc
= realloc_fn
;
556 cxt
->rgn
[FT_RSVMAP
].start
= blob
+ be32_to_cpu(bph
->off_mem_rsvmap
);
557 cxt
->rgn
[FT_RSVMAP
].size
= rsvmap_size(cxt
);
558 cxt
->rgn
[FT_STRUCT
].start
= blob
+ be32_to_cpu(bph
->off_dt_struct
);
559 cxt
->rgn
[FT_STRUCT
].size
= struct_size(cxt
);
560 cxt
->rgn
[FT_STRINGS
].start
= blob
+ be32_to_cpu(bph
->off_dt_strings
);
561 cxt
->rgn
[FT_STRINGS
].size
= be32_to_cpu(bph
->dt_strings_size
);
563 cxt
->p
= cxt
->rgn
[FT_STRUCT
].start
;
564 cxt
->str_anchor
= cxt
->rgn
[FT_STRINGS
].start
;
569 /* add a reserver physical area to the rsvmap */
570 int ft_add_rsvmap(struct ft_cxt
*cxt
, u64 physaddr
, u64 size
)
573 struct ft_reserve
*pres
;
575 p
= cxt
->rgn
[FT_RSVMAP
].start
+ cxt
->rgn
[FT_RSVMAP
].size
576 - sizeof(struct ft_reserve
);
577 if (!ft_make_space(cxt
, &p
, FT_RSVMAP
, sizeof(struct ft_reserve
)))
580 pres
= (struct ft_reserve
*)p
;
581 pres
->start
= cpu_to_be64(physaddr
);
582 pres
->len
= cpu_to_be64(size
);
587 void ft_begin_tree(struct ft_cxt
*cxt
)
589 cxt
->p
= ft_root_node(cxt
);
592 void ft_end_tree(struct ft_cxt
*cxt
)
594 struct boot_param_header
*bph
= cxt
->bph
;
595 char *p
, *oldstr
, *str
, *endp
;
600 return; /* we haven't touched anything */
602 /* adjust string offsets */
603 oldstr
= cxt
->rgn
[FT_STRINGS
].start
;
604 adj
= cxt
->str_anchor
- oldstr
;
606 adjust_string_offsets(cxt
, adj
);
608 /* make strings end on 8-byte boundary */
609 ssize
= cxt
->rgn
[FT_STRINGS
].size
;
610 endp
= (char *)_ALIGN((unsigned long)cxt
->rgn
[FT_STRUCT
].start
611 + cxt
->rgn
[FT_STRUCT
].size
+ ssize
, 8);
614 /* move strings down to end of structs */
615 memmove(str
, oldstr
, ssize
);
616 cxt
->str_anchor
= str
;
617 cxt
->rgn
[FT_STRINGS
].start
= str
;
619 /* fill in header fields */
621 bph
->totalsize
= cpu_to_be32(endp
- p
);
622 bph
->off_mem_rsvmap
= cpu_to_be32(cxt
->rgn
[FT_RSVMAP
].start
- p
);
623 bph
->off_dt_struct
= cpu_to_be32(cxt
->rgn
[FT_STRUCT
].start
- p
);
624 bph
->off_dt_strings
= cpu_to_be32(cxt
->rgn
[FT_STRINGS
].start
- p
);
625 bph
->dt_strings_size
= cpu_to_be32(ssize
);
628 void *ft_find_device(struct ft_cxt
*cxt
, const char *srch_path
)
632 /* require absolute path */
633 if (srch_path
[0] != '/')
635 node
= ft_find_descendent(cxt
, ft_root_node(cxt
), srch_path
);
636 return ft_get_phandle(cxt
, node
);
639 void *ft_find_device_rel(struct ft_cxt
*cxt
, const void *top
,
640 const char *srch_path
)
644 node
= ft_node_ph2node(cxt
, top
);
648 node
= ft_find_descendent(cxt
, node
, srch_path
);
649 return ft_get_phandle(cxt
, node
);
652 void *ft_find_descendent(struct ft_cxt
*cxt
, void *top
, const char *srch_path
)
660 const char *path_comp
[FT_MAX_DEPTH
];
666 while ((p
= ft_next(cxt
, p
, &atom
)) != NULL
) {
668 case OF_DT_BEGIN_NODE
:
672 cxt
->genealogy
[depth
] = atom
.data
;
673 cxt
->genealogy
[depth
+ 1] = NULL
;
674 if (depth
&& !(strncmp(atom
.name
, cp
, cl
) == 0
675 && (atom
.name
[cl
] == '/'
676 || atom
.name
[cl
] == '\0'
677 || atom
.name
[cl
] == '@')))
679 path_comp
[dmatch
] = cp
;
680 /* it matches so far, advance to next path component */
685 /* we're done if this is the end of the string */
688 /* look for end of this component */
699 if (dmatch
> depth
) {
701 cl
= cp
- path_comp
[dmatch
] - 1;
702 cp
= path_comp
[dmatch
];
703 while (cl
> 0 && cp
[cl
- 1] == '/')
713 void *__ft_get_parent(struct ft_cxt
*cxt
, void *node
)
719 for (d
= 0; cxt
->genealogy
[d
] != NULL
; ++d
)
720 if (cxt
->genealogy
[d
] == node
)
721 return d
> 0 ? cxt
->genealogy
[d
- 1] : NULL
;
723 /* have to do it the hard way... */
724 p
= ft_root_node(cxt
);
726 while ((p
= ft_next(cxt
, p
, &atom
)) != NULL
) {
728 case OF_DT_BEGIN_NODE
:
729 cxt
->genealogy
[d
] = atom
.data
;
730 if (node
== atom
.data
) {
732 cxt
->genealogy
[d
+ 1] = NULL
;
733 return d
> 0 ? cxt
->genealogy
[d
- 1] : NULL
;
745 void *ft_get_parent(struct ft_cxt
*cxt
, const void *phandle
)
747 void *node
= ft_node_ph2node(cxt
, phandle
);
751 node
= __ft_get_parent(cxt
, node
);
752 return ft_get_phandle(cxt
, node
);
755 static const void *__ft_get_prop(struct ft_cxt
*cxt
, void *node
,
756 const char *propname
, unsigned int *len
)
761 while ((node
= ft_next(cxt
, node
, &atom
)) != NULL
) {
763 case OF_DT_BEGIN_NODE
:
768 if (depth
!= 1 || strcmp(atom
.name
, propname
))
785 int ft_get_prop(struct ft_cxt
*cxt
, const void *phandle
, const char *propname
,
786 void *buf
, const unsigned int buflen
)
791 void *node
= ft_node_ph2node(cxt
, phandle
);
795 data
= __ft_get_prop(cxt
, node
, propname
, &size
);
797 unsigned int clipped_size
= min(size
, buflen
);
798 memcpy(buf
, data
, clipped_size
);
805 void *__ft_find_node_by_prop_value(struct ft_cxt
*cxt
, void *prev
,
806 const char *propname
, const char *propval
,
807 unsigned int proplen
)
810 char *p
= ft_root_node(cxt
);
812 int past_prev
= prev
? 0 : 1;
815 while ((next
= ft_next(cxt
, p
, &atom
)) != NULL
) {
820 case OF_DT_BEGIN_NODE
:
828 if (!past_prev
|| depth
< 1)
831 data
= __ft_get_prop(cxt
, p
, propname
, &size
);
832 if (!data
|| size
!= proplen
)
834 if (memcmp(data
, propval
, size
))
852 void *ft_find_node_by_prop_value(struct ft_cxt
*cxt
, const void *prev
,
853 const char *propname
, const char *propval
,
859 node
= ft_node_ph2node(cxt
, prev
);
865 node
= __ft_find_node_by_prop_value(cxt
, node
, propname
,
867 return ft_get_phandle(cxt
, node
);
870 int ft_set_prop(struct ft_cxt
*cxt
, const void *phandle
, const char *propname
,
871 const void *buf
, const unsigned int buflen
)
878 node
= ft_node_ph2node(cxt
, phandle
);
882 next
= ft_next(cxt
, node
, &atom
);
883 if (atom
.tag
!= OF_DT_BEGIN_NODE
)
884 /* phandle didn't point to a node */
888 while ((next
= ft_next(cxt
, p
, &atom
)) != NULL
) {
890 case OF_DT_BEGIN_NODE
: /* properties must go before subnodes */
892 /* haven't found the property, insert here */
894 return ft_prop(cxt
, propname
, buf
, buflen
);
896 if (strcmp(atom
.name
, propname
))
898 /* found an existing property, overwrite it */
899 nextra
= _ALIGN(buflen
, 4) - _ALIGN(atom
.size
, 4);
901 if (nextra
&& !ft_make_space(cxt
, &cxt
->p
, FT_STRUCT
,
904 *(u32
*) (cxt
->p
- 8) = cpu_to_be32(buflen
);
905 ft_put_bin(cxt
, buf
, buflen
);
913 int ft_del_prop(struct ft_cxt
*cxt
, const void *phandle
, const char *propname
)
920 node
= ft_node_ph2node(cxt
, phandle
);
925 while ((next
= ft_next(cxt
, p
, &atom
)) != NULL
) {
927 case OF_DT_BEGIN_NODE
:
931 if (strcmp(atom
.name
, propname
))
933 /* found the property, remove it */
934 size
= 12 + -_ALIGN(atom
.size
, 4);
936 if (!ft_make_space(cxt
, &cxt
->p
, FT_STRUCT
, -size
))
945 void *ft_create_node(struct ft_cxt
*cxt
, const void *parent
, const char *name
)
952 p
= ft_node_ph2node(cxt
, parent
);
956 p
= ft_root_node(cxt
);
959 while ((next
= ft_next(cxt
, p
, &atom
)) != NULL
) {
961 case OF_DT_BEGIN_NODE
:
963 if (depth
== 1 && strcmp(atom
.name
, name
) == 0)
964 /* duplicate node name, return error */
971 /* end of node, insert here */
973 ft_begin_node(cxt
, name
);