starfive: refresh patches
[openwrt/staging/stintel.git] target/linux/starfive/patches-6.1/0116-RISC-V-Add-arch-functions-to-support-hibernation-sus.patch
From 06f1d699e923c3f09869439cdb603e36302c2611 Mon Sep 17 00:00:00 2001
From: Sia Jee Heng <jeeheng.sia@starfivetech.com>
Date: Thu, 30 Mar 2023 14:43:21 +0800
Subject: [PATCH 116/122] RISC-V: Add arch functions to support
 hibernation/suspend-to-disk

Low-level arch functions were created to support hibernation.
swsusp_arch_suspend() reuses code from __cpu_suspend_enter() to write
the CPU state onto the stack, then calls swsusp_save() to save the
memory image.

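In outline, __cpu_suspend_enter() is a call that returns twice:
nonzero on the initial (save) path, and zero again via
__hibernate_cpu_resume() once the image has been restored. A condensed
sketch of the swsusp_arch_suspend() added below (CSR save/restore and
cache/TLB maintenance omitted):

    int swsusp_arch_suspend(void)
    {
            int ret = 0;

            if (__cpu_suspend_enter(hibernate_cpu_context)) {
                    /* First return: CPU context is on the stack; snapshot memory. */
                    ret = swsusp_save();
            } else {
                    /* Second return: we are running the restored image. */
                    in_suspend = 0;
            }

            return ret;
    }
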
An arch-specific hibernation header is implemented and is used by the
arch_hibernation_header_restore() and arch_hibernation_header_save()
functions. The header consists of the satp value, the hartid, and the
cpu_resume address. The kernel build version is also saved into the
hibernation image header to make sure that only the same kernel is
restored on resume.

swsusp_arch_resume() creates a temporary page table that covers only
the linear map. It copies the restore code to a 'safe' page, then
starts to restore the memory image. Once completed, it restores the
original kernel's page table. It then calls into
__hibernate_cpu_resume() to restore the CPU context. Finally, it
follows the normal hibernation path back to the hibernation core.

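As an outline of the resume flow implemented below (all names are
from this patch):

    swsusp_arch_resume()                   [hibernate.c]
      -> temp_pgtable_mapping()            build a temporary linear-map table
      -> relocate_restore_code()           copy the restore code to a safe page
      -> hibernate_restore_image()         [hibernate-asm.S]
           -> hibernate_core_restore_code  switch satp to the temporary table,
                                           copy pages back via restore_pblist
           -> __hibernate_cpu_resume()     switch satp to the saved page table,
                                           restore CSRs/registers and return 0
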
To enable hibernation/suspend-to-disk on RISC-V, the following config
options need to be enabled (see the example that follows):
- CONFIG_HIBERNATION
- CONFIG_ARCH_HIBERNATION_HEADER
- CONFIG_ARCH_HIBERNATION_POSSIBLE

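In practice only CONFIG_HIBERNATION has to be set explicitly; the two
ARCH_ options above are def_bool and are enabled automatically by this
patch's Kconfig changes. A sketch of how to exercise the feature (the
resume device below is a placeholder, not part of this patch):

    # .config fragment
    CONFIG_HIBERNATION=y

    # boot with resume=<swap device> (e.g. resume=/dev/mmcblk0p3), then:
    echo disk > /sys/power/state
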
Signed-off-by: Sia Jee Heng <jeeheng.sia@starfivetech.com>
Reviewed-by: Ley Foon Tan <leyfoon.tan@starfivetech.com>
Reviewed-by: Mason Huo <mason.huo@starfivetech.com>
Reviewed-by: Conor Dooley <conor.dooley@microchip.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
---
 arch/riscv/Kconfig                 |   8 +-
 arch/riscv/include/asm/assembler.h |  20 ++
 arch/riscv/include/asm/suspend.h   |  19 ++
 arch/riscv/kernel/Makefile         |   1 +
 arch/riscv/kernel/asm-offsets.c    |   5 +
 arch/riscv/kernel/hibernate-asm.S  |  77 ++++++
 arch/riscv/kernel/hibernate.c      | 427 +++++++++++++++++++++++++++++
 7 files changed, 556 insertions(+), 1 deletion(-)
 create mode 100644 arch/riscv/kernel/hibernate-asm.S
 create mode 100644 arch/riscv/kernel/hibernate.c

--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -52,7 +52,7 @@ config RISCV
 	select CLONE_BACKWARDS
 	select CLINT_TIMER if !MMU
 	select COMMON_CLK
-	select CPU_PM if CPU_IDLE
+	select CPU_PM if CPU_IDLE || HIBERNATION
 	select EDAC_SUPPORT
 	select GENERIC_ARCH_TOPOLOGY
 	select GENERIC_ATOMIC64 if !64BIT
@@ -715,6 +715,12 @@ menu "Power management options"

 source "kernel/power/Kconfig"

+config ARCH_HIBERNATION_POSSIBLE
+	def_bool y
+
+config ARCH_HIBERNATION_HEADER
+	def_bool HIBERNATION
+
 endmenu # "Power management options"

 menu "CPU Power Management"
--- a/arch/riscv/include/asm/assembler.h
+++ b/arch/riscv/include/asm/assembler.h
@@ -59,4 +59,24 @@
 	REG_L	s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
 .endm

+/*
+ * copy_page - copy 1 page (4KB) of data from source to destination
+ * @a0 - destination
+ * @a1 - source
+ */
+	.macro	copy_page a0, a1
+	lui	a2, 0x1
+	add	a2, a2, a0
+1 :
+	REG_L	t0, 0(a1)
+	REG_L	t1, SZREG(a1)
+
+	REG_S	t0, 0(a0)
+	REG_S	t1, SZREG(a0)
+
+	addi	a0, a0, 2 * SZREG
+	addi	a1, a1, 2 * SZREG
+	bne	a2, a0, 1b
+	.endm
+
 #endif /* __ASM_ASSEMBLER_H */
--- a/arch/riscv/include/asm/suspend.h
+++ b/arch/riscv/include/asm/suspend.h
@@ -21,6 +21,11 @@ struct suspend_context {
 #endif
 };

+/*
+ * Used by hibernation core and cleared during resume sequence
+ */
+extern int in_suspend;
+
 /* Low-level CPU suspend entry function */
 int __cpu_suspend_enter(struct suspend_context *context);

@@ -36,4 +41,18 @@ int __cpu_resume_enter(unsigned long har
 /* Used to save and restore the CSRs */
 void suspend_save_csrs(struct suspend_context *context);
 void suspend_restore_csrs(struct suspend_context *context);
+
+/* Low-level API to support hibernation */
+int swsusp_arch_suspend(void);
+int swsusp_arch_resume(void);
+int arch_hibernation_header_save(void *addr, unsigned int max_size);
+int arch_hibernation_header_restore(void *addr);
+int __hibernate_cpu_resume(void);
+
+/* Used to resume on the CPU we hibernated on */
+int hibernate_resume_nonboot_cpu_disable(void);
+
+asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp,
+					unsigned long cpu_resume);
+asmlinkage int hibernate_core_restore_code(void);
 #endif
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_MODULES)	+= module.o
 obj-$(CONFIG_MODULE_SECTIONS)	+= module-sections.o

 obj-$(CONFIG_CPU_PM)		+= suspend_entry.o suspend.o
+obj-$(CONFIG_HIBERNATION)	+= hibernate.o hibernate-asm.o

 obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o ftrace.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= mcount-dyn.o
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -9,6 +9,7 @@
 #include <linux/kbuild.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
+#include <linux/suspend.h>
 #include <asm/kvm_host.h>
 #include <asm/thread_info.h>
 #include <asm/ptrace.h>
@@ -116,6 +117,10 @@ void asm_offsets(void)

 	OFFSET(SUSPEND_CONTEXT_REGS, suspend_context, regs);

+	OFFSET(HIBERN_PBE_ADDR, pbe, address);
+	OFFSET(HIBERN_PBE_ORIG, pbe, orig_address);
+	OFFSET(HIBERN_PBE_NEXT, pbe, next);
+
 	OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero);
 	OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);
 	OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);
--- /dev/null
+++ b/arch/riscv/kernel/hibernate-asm.S
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Hibernation low level support for RISCV.
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ *
+ * Author: Jee Heng Sia <jeeheng.sia@starfivetech.com>
+ */
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/csr.h>
+
+#include <linux/linkage.h>
+
+/*
+ * int __hibernate_cpu_resume(void)
+ * Switch back to the hibernated image's page table prior to restoring the CPU
+ * context.
+ *
+ * Always returns 0
+ */
+ENTRY(__hibernate_cpu_resume)
+	/* switch to hibernated image's page table. */
+	csrw	CSR_SATP, s0
+	sfence.vma
+
+	REG_L	a0, hibernate_cpu_context
+
+	suspend_restore_csrs
+	suspend_restore_regs
+
+	/* Return zero value. */
+	mv	a0, zero
+
+	ret
+END(__hibernate_cpu_resume)
+
+/*
+ * Prepare to restore the image.
+ * a0: satp of saved page tables.
+ * a1: satp of temporary page tables.
+ * a2: cpu_resume.
+ */
+ENTRY(hibernate_restore_image)
+	mv	s0, a0
+	mv	s1, a1
+	mv	s2, a2
+	REG_L	s4, restore_pblist
+	REG_L	a1, relocated_restore_code
+
+	jalr	a1
+END(hibernate_restore_image)
+
+/*
+ * The below code will be executed from a 'safe' page.
+ * It first switches to the temporary page table, then starts to copy the pages
+ * back to the original memory location. Finally, it jumps to __hibernate_cpu_resume()
+ * to restore the CPU context.
+ */
+ENTRY(hibernate_core_restore_code)
+	/* switch to temp page table. */
+	csrw	CSR_SATP, s1
+	sfence.vma
+.Lcopy:
+	/* The below code will restore the hibernated image. */
+	REG_L	a1, HIBERN_PBE_ADDR(s4)
+	REG_L	a0, HIBERN_PBE_ORIG(s4)
+
+	copy_page	a0, a1
+
+	REG_L	s4, HIBERN_PBE_NEXT(s4)
+	bnez	s4, .Lcopy
+
+	jalr	s2
+END(hibernate_core_restore_code)
--- /dev/null
+++ b/arch/riscv/kernel/hibernate.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Hibernation support for RISCV
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ *
+ * Author: Jee Heng Sia <jeeheng.sia@starfivetech.com>
+ */
+
+#include <asm/barrier.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+#include <asm/set_memory.h>
+#include <asm/smp.h>
+#include <asm/suspend.h>
+
+#include <linux/cpu.h>
+#include <linux/memblock.h>
+#include <linux/pm.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <linux/utsname.h>
+
+/* The logical cpu number we should resume on, initialised to a non-cpu number. */
+static int sleep_cpu = -EINVAL;
+
+/* Pointer to the temporary resume page table. */
+static pgd_t *resume_pg_dir;
+
+/* CPU context to be saved. */
+struct suspend_context *hibernate_cpu_context;
+EXPORT_SYMBOL_GPL(hibernate_cpu_context);
+
+unsigned long relocated_restore_code;
+EXPORT_SYMBOL_GPL(relocated_restore_code);
+
+/**
+ * struct arch_hibernate_hdr_invariants - container to store kernel build version.
+ * @uts_version: to save the build number and date so that we do not resume with
+ *		 a different kernel.
+ */
+struct arch_hibernate_hdr_invariants {
+	char uts_version[__NEW_UTS_LEN + 1];
+};
+
+/**
+ * struct arch_hibernate_hdr - helper parameters that help us to restore the image.
+ * @invariants: container to store kernel build version.
+ * @hartid: to make sure same boot_cpu executes the hibernate/restore code.
+ * @saved_satp: original page table used by the hibernated image.
+ * @restore_cpu_addr: the kernel's image address to restore the CPU context.
+ */
+static struct arch_hibernate_hdr {
+	struct arch_hibernate_hdr_invariants invariants;
+	unsigned long hartid;
+	unsigned long saved_satp;
+	unsigned long restore_cpu_addr;
+} resume_hdr;
+
+static void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
+{
+	memset(i, 0, sizeof(*i));
+	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
+}
+
+/*
+ * Check if the given pfn is in the 'nosave' section.
+ */
+int pfn_is_nosave(unsigned long pfn)
+{
+	unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
+	unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);
+
+	return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn));
+}
+
+void notrace save_processor_state(void)
+{
+	WARN_ON(num_online_cpus() != 1);
+}
+
+void notrace restore_processor_state(void)
+{
+}
+
+/*
+ * Helper parameters need to be saved to the hibernation image header.
+ */
+int arch_hibernation_header_save(void *addr, unsigned int max_size)
+{
+	struct arch_hibernate_hdr *hdr = addr;
+
+	if (max_size < sizeof(*hdr))
+		return -EOVERFLOW;
+
+	arch_hdr_invariants(&hdr->invariants);
+
+	hdr->hartid = cpuid_to_hartid_map(sleep_cpu);
+	hdr->saved_satp = csr_read(CSR_SATP);
+	hdr->restore_cpu_addr = (unsigned long)__hibernate_cpu_resume;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(arch_hibernation_header_save);
+
+/*
+ * Retrieve the helper parameters from the hibernation image header.
+ */
+int arch_hibernation_header_restore(void *addr)
+{
+	struct arch_hibernate_hdr_invariants invariants;
+	struct arch_hibernate_hdr *hdr = addr;
+	int ret = 0;
+
+	arch_hdr_invariants(&invariants);
+
+	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
+		pr_crit("Hibernate image not generated by this kernel!\n");
+		return -EINVAL;
+	}
+
+	sleep_cpu = riscv_hartid_to_cpuid(hdr->hartid);
+	if (sleep_cpu < 0) {
+		pr_crit("Hibernated on a CPU not known to this kernel!\n");
+		sleep_cpu = -EINVAL;
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_SMP
+	ret = bringup_hibernate_cpu(sleep_cpu);
+	if (ret) {
+		sleep_cpu = -EINVAL;
+		return ret;
+	}
+#endif
+	resume_hdr = *hdr;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(arch_hibernation_header_restore);
+
+int swsusp_arch_suspend(void)
+{
+	int ret = 0;
+
+	if (__cpu_suspend_enter(hibernate_cpu_context)) {
+		sleep_cpu = smp_processor_id();
+		suspend_save_csrs(hibernate_cpu_context);
+		ret = swsusp_save();
+	} else {
+		suspend_restore_csrs(hibernate_cpu_context);
+		flush_tlb_all();
+		flush_icache_all();
+
+		/*
+		 * Tell the hibernation core that we've just restored the memory.
+		 */
+		in_suspend = 0;
+		sleep_cpu = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int temp_pgtable_map_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start,
+				unsigned long end, pgprot_t prot)
+{
+	pte_t *src_ptep;
+	pte_t *dst_ptep;
+
+	if (pmd_none(READ_ONCE(*dst_pmdp))) {
+		dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC);
+		if (!dst_ptep)
+			return -ENOMEM;
+
+		pmd_populate_kernel(NULL, dst_pmdp, dst_ptep);
+	}
+
+	dst_ptep = pte_offset_kernel(dst_pmdp, start);
+	src_ptep = pte_offset_kernel(src_pmdp, start);
+
+	do {
+		pte_t pte = READ_ONCE(*src_ptep);
+
+		if (pte_present(pte))
+			set_pte(dst_ptep, __pte(pte_val(pte) | pgprot_val(prot)));
+	} while (dst_ptep++, src_ptep++, start += PAGE_SIZE, start < end);
+
+	return 0;
+}
+
+static int temp_pgtable_map_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
+				unsigned long end, pgprot_t prot)
+{
+	unsigned long next;
+	unsigned long ret;
+	pmd_t *src_pmdp;
+	pmd_t *dst_pmdp;
+
+	if (pud_none(READ_ONCE(*dst_pudp))) {
+		dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC);
+		if (!dst_pmdp)
+			return -ENOMEM;
+
+		pud_populate(NULL, dst_pudp, dst_pmdp);
+	}
+
+	dst_pmdp = pmd_offset(dst_pudp, start);
+	src_pmdp = pmd_offset(src_pudp, start);
+
+	do {
+		pmd_t pmd = READ_ONCE(*src_pmdp);
+
+		next = pmd_addr_end(start, end);
+
+		if (pmd_none(pmd))
+			continue;
+
+		if (pmd_leaf(pmd)) {
+			set_pmd(dst_pmdp, __pmd(pmd_val(pmd) | pgprot_val(prot)));
+		} else {
+			ret = temp_pgtable_map_pte(dst_pmdp, src_pmdp, start, next, prot);
+			if (ret)
+				return -ENOMEM;
+		}
+	} while (dst_pmdp++, src_pmdp++, start = next, start != end);
+
+	return 0;
+}
+
+static int temp_pgtable_map_pud(p4d_t *dst_p4dp, p4d_t *src_p4dp, unsigned long start,
+				unsigned long end, pgprot_t prot)
+{
+	unsigned long next;
+	unsigned long ret;
+	pud_t *dst_pudp;
+	pud_t *src_pudp;
+
+	if (p4d_none(READ_ONCE(*dst_p4dp))) {
+		dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
+		if (!dst_pudp)
+			return -ENOMEM;
+
+		p4d_populate(NULL, dst_p4dp, dst_pudp);
+	}
+
+	dst_pudp = pud_offset(dst_p4dp, start);
+	src_pudp = pud_offset(src_p4dp, start);
+
+	do {
+		pud_t pud = READ_ONCE(*src_pudp);
+
+		next = pud_addr_end(start, end);
+
+		if (pud_none(pud))
+			continue;
+
+		if (pud_leaf(pud)) {
+			set_pud(dst_pudp, __pud(pud_val(pud) | pgprot_val(prot)));
+		} else {
+			ret = temp_pgtable_map_pmd(dst_pudp, src_pudp, start, next, prot);
+			if (ret)
+				return -ENOMEM;
+		}
+	} while (dst_pudp++, src_pudp++, start = next, start != end);
+
+	return 0;
+}
+
+static int temp_pgtable_map_p4d(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
+				unsigned long end, pgprot_t prot)
+{
+	unsigned long next;
+	unsigned long ret;
+	p4d_t *dst_p4dp;
+	p4d_t *src_p4dp;
+
+	if (pgd_none(READ_ONCE(*dst_pgdp))) {
+		dst_p4dp = (p4d_t *)get_safe_page(GFP_ATOMIC);
+		if (!dst_p4dp)
+			return -ENOMEM;
+
+		pgd_populate(NULL, dst_pgdp, dst_p4dp);
+	}
+
+	dst_p4dp = p4d_offset(dst_pgdp, start);
+	src_p4dp = p4d_offset(src_pgdp, start);
+
+	do {
+		p4d_t p4d = READ_ONCE(*src_p4dp);
+
+		next = p4d_addr_end(start, end);
+
+		if (p4d_none(p4d))
+			continue;
+
+		if (p4d_leaf(p4d)) {
+			set_p4d(dst_p4dp, __p4d(p4d_val(p4d) | pgprot_val(prot)));
+		} else {
+			ret = temp_pgtable_map_pud(dst_p4dp, src_p4dp, start, next, prot);
+			if (ret)
+				return -ENOMEM;
+		}
+	} while (dst_p4dp++, src_p4dp++, start = next, start != end);
+
+	return 0;
+}
+
+static int temp_pgtable_mapping(pgd_t *pgdp, unsigned long start, unsigned long end, pgprot_t prot)
+{
+	pgd_t *dst_pgdp = pgd_offset_pgd(pgdp, start);
+	pgd_t *src_pgdp = pgd_offset_k(start);
+	unsigned long next;
+	unsigned long ret;
+
+	do {
+		pgd_t pgd = READ_ONCE(*src_pgdp);
+
+		next = pgd_addr_end(start, end);
+
+		if (pgd_none(pgd))
+			continue;
+
+		if (pgd_leaf(pgd)) {
+			set_pgd(dst_pgdp, __pgd(pgd_val(pgd) | pgprot_val(prot)));
+		} else {
+			ret = temp_pgtable_map_p4d(dst_pgdp, src_pgdp, start, next, prot);
+			if (ret)
+				return -ENOMEM;
+		}
+	} while (dst_pgdp++, src_pgdp++, start = next, start != end);
+
+	return 0;
+}
+
+static unsigned long relocate_restore_code(void)
+{
+	void *page = (void *)get_safe_page(GFP_ATOMIC);
+
+	if (!page)
+		return -ENOMEM;
+
+	copy_page(page, hibernate_core_restore_code);
+
+	/* Make the page containing the relocated code executable. */
+	set_memory_x((unsigned long)page, 1);
+
+	return (unsigned long)page;
+}
+
+int swsusp_arch_resume(void)
+{
+	unsigned long end = (unsigned long)pfn_to_virt(max_low_pfn);
+	unsigned long start = PAGE_OFFSET;
+	int ret;
+
+	/*
+	 * Memory allocated by get_safe_page() will be dealt with by the hibernation core;
+	 * we don't need to free it here.
+	 */
+	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+	if (!resume_pg_dir)
+		return -ENOMEM;
+
+	/*
+	 * Create a temporary page table and map the whole linear region as executable and
+	 * writable.
+	 */
+	ret = temp_pgtable_mapping(resume_pg_dir, start, end, __pgprot(_PAGE_WRITE | _PAGE_EXEC));
+	if (ret)
+		return ret;
+
+	/* Move the restore code to a new page so that it doesn't get overwritten by itself. */
+	relocated_restore_code = relocate_restore_code();
+	if (relocated_restore_code == -ENOMEM)
+		return -ENOMEM;
+
+	/*
+	 * Map the __hibernate_cpu_resume() address into the temporary page table so that
+	 * the restore code can jump to it once it has finished restoring the image, and
+	 * so that execution doesn't find itself in a different address space after
+	 * switching over to the original page table used by the hibernated image.
+	 * The __hibernate_cpu_resume() mapping is unnecessary for RV32 since the kernel
+	 * and linear addresses are identical, but they differ for RV64. To ensure
+	 * consistency, we map it for both RV32 and RV64 kernels.
+	 * Additionally, we should ensure that the page is writable before restoring
+	 * the image.
+	 */
+	start = (unsigned long)resume_hdr.restore_cpu_addr;
+	end = start + PAGE_SIZE;
+
+	ret = temp_pgtable_mapping(resume_pg_dir, start, end, __pgprot(_PAGE_WRITE));
+	if (ret)
+		return ret;
+
+	hibernate_restore_image(resume_hdr.saved_satp, (PFN_DOWN(__pa(resume_pg_dir)) | satp_mode),
+				resume_hdr.restore_cpu_addr);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP_SMP
+int hibernate_resume_nonboot_cpu_disable(void)
+{
+	if (sleep_cpu < 0) {
+		pr_err("Failing to resume from hibernate on an unknown CPU\n");
+		return -ENODEV;
+	}
+
+	return freeze_secondary_cpus(sleep_cpu);
+}
+#endif
+
+static int __init riscv_hibernate_init(void)
+{
+	hibernate_cpu_context = kzalloc(sizeof(*hibernate_cpu_context), GFP_KERNEL);
+
+	if (WARN_ON(!hibernate_cpu_context))
+		return -ENOMEM;
+
+	return 0;
+}
+
+early_initcall(riscv_hibernate_init);