/*
* MIT License
*
* Copyright (c) 2018, Sergey Matyukevich
* (c) 2020, Santiago Pagani <santiagopagani@gmail.com>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#include <raspi/entry.h>
#include <uk/arch/lcpu.h>
#include <uk/asm.h>
#include <uk/arch/types.h>
#include <uk/plat/common/lcpu.h>
#include <uk/syscall.h>
#include <uk/plat/config.h>
/* ventry label
 *
 * Emit one exception-vector entry: a single branch to \label, placed at
 * the start of a 128-byte (2^7) slot. The AArch64 architecture spaces
 * vector table entries 0x80 bytes apart, hence the `.align 7`.
 */
.macro ventry label
.align 7
b \label
.endm
/* EXCHANGE_SP_WITH_X0
 *
 * Swap SP and x0 without using any additional scratch register, via the
 * three-step add/sub exchange (same idea as the XOR-swap trick). Uses
 * `add`/`sub` (not `adds`/`subs`), so the NZCV flags are not modified.
 * The three steps are order-dependent; do not reorder.
 */
.macro EXCHANGE_SP_WITH_X0
add sp, sp, x0 // new_sp = sp + x0
sub x0, sp, x0 // new_x0 = new_sp - x0 = sp + x0 - x0 = sp
sub sp, sp, x0 // new_sp = new_sp - new_x0 = sp + x0 - sp = x0
.endm
/* SAVE_REGS is_irq
 *
 * Exception-entry register save. Selects the stack to build the trap
 * frame on (trap stack, IRQ stack, or — for SVC traps — the per-LCPU
 * auxiliary stack), stores the interrupted context's SP into the frame,
 * then saves x0-x30, ELR_EL1, SPSR_EL1 and ESR_EL1.
 *
 * \is_irq: non-zero selects the IRQ half of the SP_EL0 stack area.
 *
 * Clobbers: TPIDRRO_EL0 (used as scratch), x21-x23 (left holding
 * ELR/SPSR/ESR copies), NZCV (via `cmp`).
 */
.macro SAVE_REGS, is_irq
/* Use TPIDRRO_EL0 as scratch register. It is fine to do so because
 * it will always hold a value the application can't modify and we will
 * always be able to restore it to its desired known value anytime we
 * want. Thus, temporarily store x0.
 */
msr tpidrro_el0, x0
/* Fetch middle of `lcpu_irqntrap_sp`:
 * CPU_EXCEPT_STACK_SIZE CPU_EXCEPT_STACK_SIZE
 * <---------------------><--------------------->
 * |============================================|
 * | | |
 * | trap stack | IRQ stack |
 * | | |
 * |=============================================
 * ^
 * SP_EL0
 */
mrs x0, sp_el0
/* Make it so that SP contains SP_EL0 and x0 contains old SP */
EXCHANGE_SP_WITH_X0
.if \is_irq != 0
/* If we are an IRQ, use the IRQ stack instead. */
add sp, sp, #CPU_EXCEPT_STACK_SIZE
.endif
/* Store old SP previously saved into x0.
 * NOTE(review): this slot at [sp, #-16] is assumed to line up with
 * [sp, #__SP_OFFSET] after the `sub sp, sp, #__TRAP_STACK_SIZE` on the
 * non-syscall path below (both symbols come from the included headers)
 * — confirm against raspi/entry.h.
 */
str x0, [sp, #-16]
/* Is this trap a system call? Extract the EC (exception class) field
 * from ESR_EL1 and compare against the "SVC from AArch64" class.
 */
mrs x0, esr_el1
and x0, x0, #ESR_EC_MASK
orr x0, xzr, x0, lsr #ESR_EC_SHIFT /* x0 = EC field (orr+lsr == plain lsr) */
cmp x0, #ESR_EL1_EC_SVC64
bne 0f
/* If we are in a system call trap then switch to auxiliary stack
 * from the current lcpu pointer (TPIDR_EL1 holds the lcpu struct).
 */
mrs x0, tpidr_el1
ldr x0, [x0, #LCPU_AUXSP_OFFSET]
/* NOTE: We should normally align the stack before doing this
 * subtraction because we must ensure that the `ectx` field
 * is aligned to the corresponding ECTX alignment.
 * However, this is guaranteed to already be the case for the
 * auxiliary stack because it is allocated with this exact alignment
 * in mind.
 */
sub x0, x0, #UK_SYSCALL_CTX_SIZE
/* We now have in SP the trap stack and in x0 the auxiliary stack */
EXCHANGE_SP_WITH_X0 /* Switch them */
/* Restore old SP we stored before system call check: after the swap,
 * x0 points at the trap stack where [x0, #-16] was written above.
 */
ldr x0, [x0, #-16]
str x0, [sp, #__SP_OFFSET] /* Store old SP in auxiliary stack */
b 1f
0:
/* Non-syscall path: reserve the whole trap frame on the current stack */
sub sp, sp, #__TRAP_STACK_SIZE
1:
/* Restore x0 from the scratch system register */
mrs x0, tpidrro_el0
/* Save general purpose registers x0-x29 in pairs */
stp x0, x1, [sp, #16 * 0]
stp x2, x3, [sp, #16 * 1]
stp x4, x5, [sp, #16 * 2]
stp x6, x7, [sp, #16 * 3]
stp x8, x9, [sp, #16 * 4]
stp x10, x11, [sp, #16 * 5]
stp x12, x13, [sp, #16 * 6]
stp x14, x15, [sp, #16 * 7]
stp x16, x17, [sp, #16 * 8]
stp x18, x19, [sp, #16 * 9]
stp x20, x21, [sp, #16 * 10]
stp x22, x23, [sp, #16 * 11]
stp x24, x25, [sp, #16 * 12]
stp x26, x27, [sp, #16 * 13]
stp x28, x29, [sp, #16 * 14]
/* Save LR and exception PC (return address of the interrupted code) */
mrs x21, elr_el1
stp x30, x21, [sp, #16 * 15]
/* Save pstate and exception status register */
mrs x22, spsr_el1
mrs x23, esr_el1
stp x22, x23, [sp, #16 * 16]
.endm
/* RESTORE_REGS
 *
 * Exception-return register restore: mirror of SAVE_REGS. Reloads
 * SPSR/ESR/ELR and x0-x30 from the trap frame, switches back to the
 * interrupted context's stack, then executes `eret`.
 *
 * x18/x19 are deliberately restored LAST: x19 is needed as a pointer to
 * the trap frame after SP has already been switched back, and x18 is
 * used to carry the saved SP across that switch.
 */
.macro RESTORE_REGS
/* Mask IRQ to make sure restore would not be interrupted by IRQ
 * (DAIFSet bit 1 == I bit)
 */
msr daifset, #2
/* Restore pstate and exception status register */
ldp x22, x23, [sp, #16 * 16]
msr spsr_el1, x22
msr esr_el1, x23
/* Restore LR and exception PC */
ldp x30, x21, [sp, #16 * 15]
msr elr_el1, x21
/* Restore general purpose registers */
ldp x28, x29, [sp, #16 * 14]
ldp x26, x27, [sp, #16 * 13]
ldp x24, x25, [sp, #16 * 12]
ldp x22, x23, [sp, #16 * 11]
ldp x20, x21, [sp, #16 * 10]
/* Skip x18, x19 — still needed below to switch stacks */
ldp x16, x17, [sp, #16 * 8]
ldp x14, x15, [sp, #16 * 7]
ldp x12, x13, [sp, #16 * 6]
ldp x10, x11, [sp, #16 * 5]
ldp x8, x9, [sp, #16 * 4]
ldp x6, x7, [sp, #16 * 3]
ldp x4, x5, [sp, #16 * 2]
ldp x2, x3, [sp, #16 * 1]
ldp x0, x1, [sp, #16 * 0]
/* Restore stack pointer for exception from EL1: x18 = saved SP,
 * x19 = trap-frame base (old SP of this handler)
 */
ldr x18, [sp, #__SP_OFFSET]
mov x19, sp
mov sp, x18
/* Restore x18, x19 from the trap frame via x19 */
ldp x18, x19, [x19, #16 * 9]
eret
.endm
/* Bad Abort numbers — `reason` argument for invalid_trap_handler */
#define BAD_SYNC 0
#define BAD_IRQ 1
#define BAD_FIQ 2
#define BAD_ERROR 3
/* el_invalid(name, reason, el): emit a handler `name_invalid`, aligned
 * to a 64-byte boundary, that saves the trap frame and tail-calls
 * invalid_trap_handler(regs, el, reason, far).
 *
 * BUGFIX: the last line of this #define previously ended with a `\`
 * line continuation, which pulled the first instantiation below into
 * the macro body. Because cpp never re-expands a macro inside its own
 * expansion, every later use then emitted the literal, unexpanded text
 * `el_invalid(el1_sync, BAD_SYNC, 1);` into the assembler input.
 */
#define el_invalid(name, reason, el) \
.align 6; \
name##_invalid: \
SAVE_REGS 0; \
mov x0, sp; \
mov x1, el; \
mov x2, #(reason); \
mrs x3, far_el1; \
b invalid_trap_handler; \
ENDPROC(name##_invalid);
/* Instantiate one `*_invalid` catch-all handler per (exception level,
 * vector kind) pair referenced by the vector tables below.
 */
el_invalid(el1_sync, BAD_SYNC, 1);
el_invalid(el0_sync, BAD_SYNC, 0);
el_invalid(el1_irq, BAD_IRQ, 1);
el_invalid(el0_irq, BAD_IRQ, 0);
el_invalid(el1_fiq, BAD_FIQ, 1);
el_invalid(el0_fiq, BAD_FIQ, 0);
el_invalid(el1_error, BAD_ERROR, 1);
el_invalid(el0_error, BAD_ERROR, 0);
el_invalid(el2_sync, BAD_SYNC, 2);
el_invalid(el3_sync, BAD_SYNC, 3);
el_invalid(el2_irq, BAD_IRQ, 2);
el_invalid(el3_irq, BAD_IRQ, 3);
el_invalid(el2_fiq, BAD_FIQ, 2);
el_invalid(el3_fiq, BAD_FIQ, 3);
el_invalid(el2_error, BAD_ERROR, 2);
el_invalid(el3_error, BAD_ERROR, 3);
/*
 * Exception vectors.
 *
 * EL3 vector table: 16 entries of 0x80 bytes each, base aligned to
 * 2 KiB (2^11) as required for VBAR_EL3. All EL3 events are treated
 * as fatal and routed to the catch-all `*_invalid` handlers.
 */
.align 11
.globl vectors_el3
vectors_el3:
ventry el3_sync_invalid /* Synchronous EL3t */
ventry el3_irq_invalid /* IRQ EL3t */
ventry el3_fiq_invalid /* FIQ EL3t */
ventry el3_error_invalid /* Error EL3t */
ventry el3_sync_invalid /* Synchronous EL3h */
ventry el3_irq_invalid /* IRQ EL3h */
ventry el3_fiq_invalid /* FIQ EL3h */
ventry el3_error_invalid /* Error EL3h */
ventry el2_sync_invalid /* Synchronous 64-bit EL2 */
ventry el2_irq_invalid /* IRQ 64-bit EL2 */
ventry el2_fiq_invalid /* FIQ 64-bit EL2 */
ventry el2_error_invalid /* Error 64-bit EL2 */
ventry el2_sync_invalid /* Synchronous 32-bit EL2 */
ventry el2_irq_invalid /* IRQ 32-bit EL2 */
ventry el2_fiq_invalid /* FIQ 32-bit EL2 */
ventry el2_error_invalid /* Error 32-bit EL2 */
/* EL2 vector table (VBAR_EL2): 2 KiB-aligned, 16 slots. All EL2
 * events are treated as fatal and routed to `*_invalid` handlers.
 */
.align 11
.globl vectors_el2
vectors_el2:
ventry el2_sync_invalid /* Synchronous EL2t */
ventry el2_irq_invalid /* IRQ EL2t */
ventry el2_fiq_invalid /* FIQ EL2t */
ventry el2_error_invalid /* Error EL2t */
ventry el2_sync_invalid /* Synchronous EL2h */
ventry el2_irq_invalid /* IRQ EL2h */
ventry el2_fiq_invalid /* FIQ EL2h */
ventry el2_error_invalid /* Error EL2h */
ventry el1_sync_invalid /* Synchronous 64-bit EL1 */
ventry el1_irq_invalid /* IRQ 64-bit EL1 */
ventry el1_fiq_invalid /* FIQ 64-bit EL1 */
ventry el1_error_invalid /* Error 64-bit EL1 */
ventry el1_sync_invalid /* Synchronous 32-bit EL1 */
ventry el1_irq_invalid /* IRQ 32-bit EL1 */
ventry el1_fiq_invalid /* FIQ 32-bit EL1 */
ventry el1_error_invalid /* Error 32-bit EL1 */
/* EL1 vector table (VBAR_EL1): 2 KiB-aligned, 16 slots. Only the
 * "current EL with SP_ELx" (EL1h) sync and IRQ slots have real
 * handlers; everything else is fatal.
 */
.align 11
.globl vectors_el1
vectors_el1:
ventry el1_sync_invalid /* Synchronous EL1t */
ventry el1_irq_invalid /* IRQ EL1t */
ventry el1_fiq_invalid /* FIQ EL1t */
ventry el1_error_invalid /* Error EL1t */
ventry el1_sync /* Synchronous EL1h */
ventry el1_irq /* IRQ EL1h */
ventry el1_fiq_invalid /* FIQ EL1h */
ventry el1_error_invalid /* Error EL1h */
ventry el0_sync_invalid /* Synchronous 64-bit EL0 */
ventry el0_irq_invalid /* IRQ 64-bit EL0 */
ventry el0_fiq_invalid /* FIQ 64-bit EL0 */
ventry el0_error_invalid /* Error 64-bit EL0 */
ventry el0_sync_invalid /* Synchronous 32-bit EL0 */
ventry el0_irq_invalid /* IRQ 32-bit EL0 */
ventry el0_fiq_invalid /* FIQ 32-bit EL0 */
ventry el0_error_invalid /* Error 32-bit EL0 */
/* el1_sync: synchronous exception taken from EL1h (includes syscalls).
 * Builds the trap frame (non-IRQ stack), then calls
 * trap_el1_sync(regs, far) and returns via RESTORE_REGS/eret.
 */
.align 6
el1_sync:
SAVE_REGS 0
mov x0, sp /* x0 = pointer to saved trap frame */
mrs x1, FAR_EL1 /* x1 = faulting address, if applicable */
bl trap_el1_sync
RESTORE_REGS
/* el1_irq: IRQ taken from EL1h. Builds the trap frame on the IRQ
 * stack, re-enables Debug (D=8), SError (A=4) and FIQ (F=1) — IRQs
 * (I=2) stay masked — then calls trap_el1_irq().
 */
.align 6
el1_irq:
SAVE_REGS 1
msr daifclr, #(8 | 4 | 1)
bl trap_el1_irq
RESTORE_REGS