/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
24 #include <subdev/mc.h>
25 #include <subdev/timer.h>
28 managed_falcons_names
[] = {
29 [NVKM_SECBOOT_FALCON_PMU
] = "PMU",
30 [NVKM_SECBOOT_FALCON_RESERVED
] = "<reserved>",
31 [NVKM_SECBOOT_FALCON_FECS
] = "FECS",
32 [NVKM_SECBOOT_FALCON_GPCCS
] = "GPCCS",
33 [NVKM_SECBOOT_FALCON_END
] = "<invalid>",
37 * Helper falcon functions
41 falcon_clear_halt_interrupt(struct nvkm_device
*device
, u32 base
)
45 /* clear halt interrupt */
46 nvkm_mask(device
, base
+ 0x004, 0x10, 0x10);
47 /* wait until halt interrupt is cleared */
48 ret
= nvkm_wait_msec(device
, 10, base
+ 0x008, 0x10, 0x0);
56 falcon_wait_idle(struct nvkm_device
*device
, u32 base
)
60 ret
= nvkm_wait_msec(device
, 10, base
+ 0x04c, 0xffff, 0x0);
68 nvkm_secboot_falcon_enable(struct nvkm_secboot
*sb
)
70 struct nvkm_device
*device
= sb
->subdev
.device
;
74 nvkm_mc_enable(device
, sb
->devidx
);
75 ret
= nvkm_wait_msec(device
, 10, sb
->base
+ 0x10c, 0x6, 0x0);
77 nvkm_error(&sb
->subdev
, "Falcon mem scrubbing timeout\n");
78 nvkm_mc_disable(device
, sb
->devidx
);
82 ret
= falcon_wait_idle(device
, sb
->base
);
87 nvkm_wr32(device
, sb
->base
+ 0x010, 0xff);
88 nvkm_mc_intr_mask(device
, sb
->devidx
, true);
94 nvkm_secboot_falcon_disable(struct nvkm_secboot
*sb
)
96 struct nvkm_device
*device
= sb
->subdev
.device
;
98 /* disable IRQs and wait for any previous code to complete */
99 nvkm_mc_intr_mask(device
, sb
->devidx
, false);
100 nvkm_wr32(device
, sb
->base
+ 0x014, 0xff);
102 falcon_wait_idle(device
, sb
->base
);
105 nvkm_mc_disable(device
, sb
->devidx
);
/**
 * nvkm_secboot_falcon_reset() - reset the falcon performing secure boot
 * @sb: secboot instance
 *
 * Performs a full disable/enable cycle of the secure-boot falcon.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int
nvkm_secboot_falcon_reset(struct nvkm_secboot *sb)
{
	int ret;

	ret = nvkm_secboot_falcon_disable(sb);
	if (ret)
		return ret;

	ret = nvkm_secboot_falcon_enable(sb);
	if (ret)
		return ret;

	return 0;
}
127 * nvkm_secboot_falcon_run - run the falcon that will perform secure boot
129 * This function is to be called after all chip-specific preparations have
130 * been completed. It will start the falcon to perform secure boot, wait for
131 * it to halt, and report if an error occurred.
134 nvkm_secboot_falcon_run(struct nvkm_secboot
*sb
)
136 struct nvkm_device
*device
= sb
->subdev
.device
;
140 nvkm_wr32(device
, sb
->base
+ 0x100, 0x2);
142 /* Wait for falcon halt */
143 ret
= nvkm_wait_msec(device
, 100, sb
->base
+ 0x100, 0x10, 0x10);
147 /* If mailbox register contains an error code, then ACR has failed */
148 ret
= nvkm_rd32(device
, sb
->base
+ 0x040);
150 nvkm_error(&sb
->subdev
, "ACR boot failed, ret 0x%08x", ret
);
151 falcon_clear_halt_interrupt(device
, sb
->base
);
160 * nvkm_secboot_reset() - reset specified falcon
163 nvkm_secboot_reset(struct nvkm_secboot
*sb
, u32 falcon
)
165 /* Unmanaged falcon? */
166 if (!(BIT(falcon
) & sb
->func
->managed_falcons
)) {
167 nvkm_error(&sb
->subdev
, "cannot reset unmanaged falcon!\n");
171 return sb
->func
->reset(sb
, falcon
);
175 * nvkm_secboot_start() - start specified falcon
178 nvkm_secboot_start(struct nvkm_secboot
*sb
, u32 falcon
)
180 /* Unmanaged falcon? */
181 if (!(BIT(falcon
) & sb
->func
->managed_falcons
)) {
182 nvkm_error(&sb
->subdev
, "cannot start unmanaged falcon!\n");
186 return sb
->func
->start(sb
, falcon
);
190 * nvkm_secboot_is_managed() - check whether a given falcon is securely-managed
193 nvkm_secboot_is_managed(struct nvkm_secboot
*secboot
,
194 enum nvkm_secboot_falcon fid
)
199 return secboot
->func
->managed_falcons
& BIT(fid
);
203 nvkm_secboot_oneinit(struct nvkm_subdev
*subdev
)
205 struct nvkm_secboot
*sb
= nvkm_secboot(subdev
);
208 /* Call chip-specific init function */
210 ret
= sb
->func
->init(sb
);
212 nvkm_error(subdev
, "Secure Boot initialization failed: %d\n",
218 * Build all blobs - the same blobs can be used to perform secure boot
221 if (sb
->func
->prepare_blobs
)
222 ret
= sb
->func
->prepare_blobs(sb
);
228 nvkm_secboot_fini(struct nvkm_subdev
*subdev
, bool suspend
)
230 struct nvkm_secboot
*sb
= nvkm_secboot(subdev
);
234 ret
= sb
->func
->fini(sb
, suspend
);
240 nvkm_secboot_dtor(struct nvkm_subdev
*subdev
)
242 struct nvkm_secboot
*sb
= nvkm_secboot(subdev
);
246 ret
= sb
->func
->dtor(sb
);
251 static const struct nvkm_subdev_func
253 .oneinit
= nvkm_secboot_oneinit
,
254 .fini
= nvkm_secboot_fini
,
255 .dtor
= nvkm_secboot_dtor
,
259 nvkm_secboot_ctor(const struct nvkm_secboot_func
*func
,
260 struct nvkm_device
*device
, int index
,
261 struct nvkm_secboot
*sb
)
265 nvkm_subdev_ctor(&nvkm_secboot
, device
, index
, &sb
->subdev
);
268 /* setup the performing falcon's base address and masks */
269 switch (func
->boot_falcon
) {
270 case NVKM_SECBOOT_FALCON_PMU
:
271 sb
->devidx
= NVKM_SUBDEV_PMU
;
275 nvkm_error(&sb
->subdev
, "invalid secure boot falcon\n");
279 nvkm_debug(&sb
->subdev
, "securely managed falcons:\n");
280 for_each_set_bit(fid
, &sb
->func
->managed_falcons
,
281 NVKM_SECBOOT_FALCON_END
)
282 nvkm_debug(&sb
->subdev
, "- %s\n", managed_falcons_names
[fid
]);