aboutsummaryrefslogtreecommitdiff
path: root/core/mem_region.c
diff options
context:
space:
mode:
authorNicholas Piggin <npiggin@gmail.com>2018-03-26 15:06:01 +1000
committerStewart Smith <stewart@linux.vnet.ibm.com>2018-03-27 00:42:19 -0500
commitc32943bfc1e254176ecab564fdb4752403a48cab (patch)
treebe750a051ce726c93cd399b89ed1eb7bcbf1c939 /core/mem_region.c
parent336f306555d02505a429a0a01b06bf7130c36c03 (diff)
downloadskiboot-c32943bfc1e254176ecab564fdb4752403a48cab.zip
skiboot-c32943bfc1e254176ecab564fdb4752403a48cab.tar.gz
skiboot-c32943bfc1e254176ecab564fdb4752403a48cab.tar.bz2
core/fast-reboot: zero memory after fast reboot
This improves the security and predictability of the fast reboot environment. There cannot be a secure fence between fast reboots, because a malicious OS can modify the firmware itself. However a well-behaved OS can have a reasonable expectation that OS memory regions it has modified will be cleared upon fast reboot. The memory is zeroed after all other CPUs come up from fast reboot, just before the new kernel is loaded and booted into. This allows image preloading to run concurrently, and will allow parallelisation of the clearing in future. Signed-off-by: Nicholas Piggin <npiggin@gmail.com> Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
Diffstat (limited to 'core/mem_region.c')
-rw-r--r--core/mem_region.c64
1 files changed, 64 insertions, 0 deletions
diff --git a/core/mem_region.c b/core/mem_region.c
index aa0c850..2e0b321 100644
--- a/core/mem_region.c
+++ b/core/mem_region.c
@@ -1171,6 +1171,70 @@ void mem_region_release_unused(void)
unlock(&mem_region_lock);
}
+static void mem_clear_range(uint64_t s, uint64_t e)
+{
+ uint64_t res_start, res_end;
+
+ /* Skip exception vectors */
+ if (s < EXCEPTION_VECTORS_END)
+ s = EXCEPTION_VECTORS_END;
+
+ /* Skip kernel preload area */
+ res_start = (uint64_t)KERNEL_LOAD_BASE;
+ res_end = res_start + KERNEL_LOAD_SIZE;
+
+ if (s >= res_start && s < res_end)
+ s = res_end;
+ if (e > res_start && e <= res_end)
+ e = res_start;
+ if (e <= s)
+ return;
+ if (s < res_start && e > res_end) {
+ mem_clear_range(s, res_start);
+ mem_clear_range(res_end, e);
+ return;
+ }
+
+ /* Skip initramfs preload area */
+ res_start = (uint64_t)INITRAMFS_LOAD_BASE;
+ res_end = res_start + INITRAMFS_LOAD_SIZE;
+
+ if (s >= res_start && s < res_end)
+ s = res_end;
+ if (e > res_start && e <= res_end)
+ e = res_start;
+ if (e <= s)
+ return;
+ if (s < res_start && e > res_end) {
+ mem_clear_range(s, res_start);
+ mem_clear_range(res_end, e);
+ return;
+ }
+
+ prlog(PR_NOTICE, "Clearing region %llx-%llx\n", s, e);
+ memset((void *)s, 0, e - s);
+}
+
+void mem_region_clear_unused(void)
+{
+ struct mem_region *r;
+
+ lock(&mem_region_lock);
+ assert(mem_regions_finalised);
+
+ prlog(PR_NOTICE, "Clearing unused memory:\n");
+ list_for_each(&regions, r, list) {
+ /* If it's not unused, ignore it. */
+ if (!(r->type == REGION_OS))
+ continue;
+
+ assert(r != &skiboot_heap);
+
+ mem_clear_range(r->start, r->start + r->len);
+ }
+ unlock(&mem_region_lock);
+}
+
static void mem_region_add_dt_reserved_node(struct dt_node *parent,
struct mem_region *region)
{