/*++

Copyright (c) 2004 - 2008, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

Module Name:

  PeCoffLoaderEx.c

Abstract:

  IA32, X64 and IPF Specific relocation fixups

--*/
#include <Common/UefiBaseTypes.h>
#include <IndustryStandard/PeImage.h>

#include "PeCoffLib.h"
//
// EXT_IMM64: extract a Size-bit field located at bit InstPos of the 32-bit
// instruction word at Address and OR it into Value at bit ValPos.  Used to
// gather the pieces of the 64-bit immediate scattered through an IPF movl
// instruction bundle.
//
#define EXT_IMM64(Value, Address, Size, InstPos, ValPos)  \
    Value |= (((UINT64)((*(Address) >> InstPos) & (((UINT64)1 << Size) - 1))) << ValPos)

//
// INS_IMM64: take Size bits of Value starting at bit ValPos and store them
// into the 32-bit instruction word at Address at bit InstPos, preserving all
// other bits of the word.
//
#define INS_IMM64(Value, Address, Size, InstPos, ValPos)  \
    *(UINT32*)Address = (*(UINT32*)Address & ~(((1 << Size) - 1) << InstPos)) | \
          ((UINT32)((((UINT64)Value >> ValPos) & (((UINT64)1 << Size) - 1))) << InstPos)

//
// Field descriptors for the IMM64 immediate of the IPF movl instruction:
//   *_INST_WORD_X     - which 32-bit word of the 16-byte bundle holds the field
//   *_SIZE_X          - width of the field in bits
//   *_INST_WORD_POS_X - bit position of the field within that word
//   *_VAL_POS_X       - bit position of the field within the 64-bit immediate
//
#define IMM64_IMM7B_INST_WORD_X         3
#define IMM64_IMM7B_SIZE_X              7
#define IMM64_IMM7B_INST_WORD_POS_X     4
#define IMM64_IMM7B_VAL_POS_X           0

#define IMM64_IMM9D_INST_WORD_X         3
#define IMM64_IMM9D_SIZE_X              9
#define IMM64_IMM9D_INST_WORD_POS_X     18
#define IMM64_IMM9D_VAL_POS_X           7

#define IMM64_IMM5C_INST_WORD_X         3
#define IMM64_IMM5C_SIZE_X              5
#define IMM64_IMM5C_INST_WORD_POS_X     13
#define IMM64_IMM5C_VAL_POS_X           16

#define IMM64_IC_INST_WORD_X            3
#define IMM64_IC_SIZE_X                 1
#define IMM64_IC_INST_WORD_POS_X        12
#define IMM64_IC_VAL_POS_X              21

#define IMM64_IMM41a_INST_WORD_X        1
#define IMM64_IMM41a_SIZE_X             10
#define IMM64_IMM41a_INST_WORD_POS_X    14
#define IMM64_IMM41a_VAL_POS_X          22

#define IMM64_IMM41b_INST_WORD_X        1
#define IMM64_IMM41b_SIZE_X             8
#define IMM64_IMM41b_INST_WORD_POS_X    24
#define IMM64_IMM41b_VAL_POS_X          32

#define IMM64_IMM41c_INST_WORD_X        2
#define IMM64_IMM41c_SIZE_X             23
#define IMM64_IMM41c_INST_WORD_POS_X    0
#define IMM64_IMM41c_VAL_POS_X          40

#define IMM64_SIGN_INST_WORD_X          3
#define IMM64_SIGN_SIZE_X               1
#define IMM64_SIGN_INST_WORD_POS_X      27
#define IMM64_SIGN_VAL_POS_X            63
76 PeCoffLoaderRelocateIa32Image (
79 IN OUT CHAR8
**FixupData
,
86 Performs an IA-32 specific relocation fixup
90 Reloc - Pointer to the relocation record
92 Fixup - Pointer to the address to fix up
94 FixupData - Pointer to a buffer to log the fixups
96 Adjust - The offset to adjust the fixup
100 EFI_UNSUPPORTED - Unsupported now
104 return RETURN_UNSUPPORTED
;
108 PeCoffLoaderRelocateIpfImage (
111 IN OUT CHAR8
**FixupData
,
118 Performs an Itanium-based specific relocation fixup
122 Reloc - Pointer to the relocation record
124 Fixup - Pointer to the address to fix up
126 FixupData - Pointer to a buffer to log the fixups
128 Adjust - The offset to adjust the fixup
139 switch ((*Reloc
) >> 12) {
141 case EFI_IMAGE_REL_BASED_DIR64
:
142 F64
= (UINT64
*) Fixup
;
143 *F64
= *F64
+ (UINT64
) Adjust
;
144 if (*FixupData
!= NULL
) {
145 *FixupData
= ALIGN_POINTER(*FixupData
, sizeof(UINT64
));
146 *(UINT64
*)(*FixupData
) = *F64
;
147 *FixupData
= *FixupData
+ sizeof(UINT64
);
151 case EFI_IMAGE_REL_BASED_IA64_IMM64
:
154 // Align it to bundle address before fixing up the
155 // 64-bit immediate value of the movl instruction.
158 Fixup
= (CHAR8
*)((UINTN
) Fixup
& (UINTN
) ~(15));
159 FixupVal
= (UINT64
)0;
162 // Extract the lower 32 bits of IMM64 from bundle
165 (UINT32
*)Fixup
+ IMM64_IMM7B_INST_WORD_X
,
167 IMM64_IMM7B_INST_WORD_POS_X
,
168 IMM64_IMM7B_VAL_POS_X
172 (UINT32
*)Fixup
+ IMM64_IMM9D_INST_WORD_X
,
174 IMM64_IMM9D_INST_WORD_POS_X
,
175 IMM64_IMM9D_VAL_POS_X
179 (UINT32
*)Fixup
+ IMM64_IMM5C_INST_WORD_X
,
181 IMM64_IMM5C_INST_WORD_POS_X
,
182 IMM64_IMM5C_VAL_POS_X
186 (UINT32
*)Fixup
+ IMM64_IC_INST_WORD_X
,
188 IMM64_IC_INST_WORD_POS_X
,
193 (UINT32
*)Fixup
+ IMM64_IMM41a_INST_WORD_X
,
195 IMM64_IMM41a_INST_WORD_POS_X
,
196 IMM64_IMM41a_VAL_POS_X
200 // Update 64-bit address
205 // Insert IMM64 into bundle
208 ((UINT32
*)Fixup
+ IMM64_IMM7B_INST_WORD_X
),
210 IMM64_IMM7B_INST_WORD_POS_X
,
211 IMM64_IMM7B_VAL_POS_X
215 ((UINT32
*)Fixup
+ IMM64_IMM9D_INST_WORD_X
),
217 IMM64_IMM9D_INST_WORD_POS_X
,
218 IMM64_IMM9D_VAL_POS_X
222 ((UINT32
*)Fixup
+ IMM64_IMM5C_INST_WORD_X
),
224 IMM64_IMM5C_INST_WORD_POS_X
,
225 IMM64_IMM5C_VAL_POS_X
229 ((UINT32
*)Fixup
+ IMM64_IC_INST_WORD_X
),
231 IMM64_IC_INST_WORD_POS_X
,
236 ((UINT32
*)Fixup
+ IMM64_IMM41a_INST_WORD_X
),
238 IMM64_IMM41a_INST_WORD_POS_X
,
239 IMM64_IMM41a_VAL_POS_X
243 ((UINT32
*)Fixup
+ IMM64_IMM41b_INST_WORD_X
),
245 IMM64_IMM41b_INST_WORD_POS_X
,
246 IMM64_IMM41b_VAL_POS_X
250 ((UINT32
*)Fixup
+ IMM64_IMM41c_INST_WORD_X
),
252 IMM64_IMM41c_INST_WORD_POS_X
,
253 IMM64_IMM41c_VAL_POS_X
257 ((UINT32
*)Fixup
+ IMM64_SIGN_INST_WORD_X
),
259 IMM64_SIGN_INST_WORD_POS_X
,
263 F64
= (UINT64
*) Fixup
;
264 if (*FixupData
!= NULL
) {
265 *FixupData
= ALIGN_POINTER(*FixupData
, sizeof(UINT64
));
266 *(UINT64
*)(*FixupData
) = *F64
;
267 *FixupData
= *FixupData
+ sizeof(UINT64
);
272 return RETURN_UNSUPPORTED
;
275 return RETURN_SUCCESS
;
279 PeCoffLoaderRelocateX64Image (
282 IN OUT CHAR8
**FixupData
,
286 Performs an x64 specific relocation fixup
288 @param Reloc Pointer to the relocation record
289 @param Fixup Pointer to the address to fix up
290 @param FixupData Pointer to a buffer to log the fixups
291 @param Adjust The offset to adjust the fixup
293 @retval RETURN_SUCCESS Success to perform relocation
294 @retval RETURN_UNSUPPORTED Unsupported.
299 switch ((*Reloc
) >> 12) {
301 case EFI_IMAGE_REL_BASED_DIR64
:
302 F64
= (UINT64
*) Fixup
;
303 *F64
= *F64
+ (UINT64
) Adjust
;
304 if (*FixupData
!= NULL
) {
305 *FixupData
= ALIGN_POINTER(*FixupData
, sizeof(UINT64
));
306 *(UINT64
*)(*FixupData
) = *F64
;
307 *FixupData
= *FixupData
+ sizeof(UINT64
);
312 return RETURN_UNSUPPORTED
;
315 return RETURN_SUCCESS
;
319 Pass in a pointer to an ARM MOVT or MOVW immediate instruciton and
320 return the immediate data encoded in the instruction
322 @param Instruction Pointer to ARM MOVT or MOVW immediate instruction
324 @return Immediate address encoded in the instruction
328 ThumbMovtImmediateAddress (
329 IN UINT16
*Instruction
335 // Thumb2 is two 16-bit instructions working together. Not a single 32-bit instruction
336 // Example MOVT R0, #0 is 0x0000f2c0 or 0xf2c0 0x0000
337 Movt
= (*Instruction
<< 16) | (*(Instruction
+ 1));
339 // imm16 = imm4:i:imm3:imm8
340 // imm4 -> Bit19:Bit16
342 // imm3 -> Bit14:Bit12
344 Address
= (UINT16
)(Movt
& 0x000000ff); // imm8
345 Address
|= (UINT16
)((Movt
>> 4) & 0x0000f700); // imm4 imm3
346 Address
|= (((Movt
& BIT26
) != 0) ? BIT11
: 0); // i
352 Update an ARM MOVT or MOVW immediate instruction immediate data.
354 @param Instruction Pointer to ARM MOVT or MOVW immediate instruction
355 @param Address New addres to patch into the instruction
358 ThumbMovtImmediatePatch (
359 IN OUT UINT16
*Instruction
,
365 // First 16-bit chunk of instruciton
366 Patch
= ((Address
>> 12) & 0x000f); // imm4
367 Patch
|= (((Address
& BIT11
) != 0) ? BIT10
: 0); // i
368 *Instruction
= (*Instruction
& ~0x040f) | Patch
;
370 // Second 16-bit chunk of instruction
371 Patch
= Address
& 0x000000ff; // imm8
372 Patch
|= ((Address
<< 4) & 0x00007000); // imm3
374 *Instruction
= (*Instruction
& ~0x70ff) | Patch
;
378 Performs an ARM-based specific relocation fixup and is a no-op on other
381 @param Reloc Pointer to the relocation record.
382 @param Fixup Pointer to the address to fix up.
383 @param FixupData Pointer to a buffer to log the fixups.
384 @param Adjust The offset to adjust the fixup.
390 PeCoffLoaderRelocateArmImage (
393 IN OUT CHAR8
**FixupData
,
401 Fixup16
= (UINT16
*) Fixup
;
403 switch ((**Reloc
) >> 12) {
404 case EFI_IMAGE_REL_BASED_ARM_THUMB_MOVW
:
405 FixupVal
= ThumbMovtImmediateAddress (Fixup16
) + (UINT16
)Adjust
;
406 ThumbMovtImmediatePatch (Fixup16
, FixupVal
);
408 if (*FixupData
!= NULL
) {
409 *FixupData
= ALIGN_POINTER (*FixupData
, sizeof (UINT16
));
410 *(UINT16
*)*FixupData
= *Fixup16
;
411 *FixupData
= *FixupData
+ sizeof (UINT16
);
415 case EFI_IMAGE_REL_BASED_ARM_THUMB_MOVT
:
416 // For MOVT you need to know the lower 16-bits do do the math
417 // So this relocation entry is really two entries.
420 FixupVal
= (UINT16
)(((ThumbMovtImmediateAddress (Fixup16
) << 16) + Adjust
+ *Addend
) >> 16);
421 ThumbMovtImmediatePatch (Fixup16
, FixupVal
);
423 if (*FixupData
!= NULL
) {
424 *FixupData
= ALIGN_POINTER (*FixupData
, sizeof (UINT16
));
425 *(UINT16
*)*FixupData
= *Fixup16
;
426 *FixupData
= *FixupData
+ sizeof (UINT16
);
431 return RETURN_UNSUPPORTED
;
434 return RETURN_SUCCESS
;