Wrap assembly comments consistently (google#2430)
Some lines were much longer than others. They are now consistently
wrapped at 70 columns. Why so short, why not 80 or 90? Because we
sometimes need to browse these files during class, and there we don’t
have a lot of horizontal space when using large fonts.
mgeisler authored Nov 22, 2024
1 parent 2256525 commit 1262da8
Showing 3 changed files with 65 additions and 47 deletions.
41 changes: 25 additions & 16 deletions src/bare-metal/aps/examples/entry.S
@@ -34,18 +34,22 @@
.set .L_TCR_TG0_4KB, 0x0 << 14
/* 4 KiB granule size for TTBR1_EL1. */
.set .L_TCR_TG1_4KB, 0x2 << 30
-/* Disable translation table walk for TTBR1_EL1, generating a translation fault instead. */
+/*
+ * Disable translation table walk for TTBR1_EL1, generating a
+ * translation fault instead.
+ */
.set .L_TCR_EPD1, 0x1 << 23
/* Translation table walks for TTBR0_EL1 are inner sharable. */
.set .L_TCR_SH_INNER, 0x3 << 12
/*
- * Translation table walks for TTBR0_EL1 are outer write-back read-allocate write-allocate
- * cacheable.
+ * Translation table walks for TTBR0_EL1 are outer write-back
+ * read-allocate write-allocate cacheable.
*/
.set .L_TCR_RGN_OWB, 0x1 << 10
/*
- * Translation table walks for TTBR0_EL1 are inner write-back read-allocate write-allocate
- * cacheable.
+ * Translation table walks for TTBR0_EL1 are inner write-back
+ * read-allocate write-allocate cacheable.
*/
.set .L_TCR_RGN_IWB, 0x1 << 8
/* Size offset for TTBR0_EL1 is 2**39 bytes (512 GiB). */
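For context, a sketch (not part of this commit): constants like these are normally OR-ed together into a single value that is later written to tcr_el1. The combination below reuses the names defined above, in the same style the file uses for .Lsctlrval further down, but the exact set of fields in the file's real TCR value is an assumption.

/* Sketch: fold the TCR_EL1 fields above into one value. */
.set .Ltcrval_sketch, .L_TCR_TG0_4KB | .L_TCR_TG1_4KB
.set .Ltcrval_sketch, .Ltcrval_sketch | .L_TCR_EPD1
.set .Ltcrval_sketch, .Ltcrval_sketch | .L_TCR_SH_INNER
.set .Ltcrval_sketch, .Ltcrval_sketch | .L_TCR_RGN_OWB
.set .Ltcrval_sketch, .Ltcrval_sketch | .L_TCR_RGN_IWB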
@@ -61,7 +65,9 @@
.set .L_SCTLR_ELx_C, 0x1 << 2
/* EL0 and EL1 stage 1 MMU enabled. */
.set .L_SCTLR_ELx_M, 0x1 << 0
-/* Privileged Access Never is unchanged on taking an exception to EL1. */
+/*
+ * Privileged Access Never is unchanged on taking an exception to EL1.
+ */
.set .L_SCTLR_EL1_SPAN, 0x1 << 23
/* SETEND instruction disabled at EL0 in aarch32 mode. */
.set .L_SCTLR_EL1_SED, 0x1 << 8
@@ -72,18 +78,20 @@
.set .Lsctlrval, .Lsctlrval | .L_SCTLR_ELx_I | .L_SCTLR_EL1_SPAN | .L_SCTLR_EL1_RES1

/**
- * This is a generic entry point for an image. It carries out the operations required to prepare the
- * loaded image to be run. Specifically, it zeroes the bss section using registers x25 and above,
- * prepares the stack, enables floating point, and sets up the exception vector. It preserves x0-x3
- * for the Rust entry point, as these may contain boot parameters.
+ * This is a generic entry point for an image. It carries out the
+ * operations required to prepare the loaded image to be run.
+ * Specifically, it zeroes the bss section using registers x25 and
+ * above, prepares the stack, enables floating point, and sets up the
+ * exception vector. It preserves x0-x3 for the Rust entry point, as
+ * these may contain boot parameters.
*/
// ANCHOR: entry
.section .init.entry, "ax"
.global entry
entry:
/*
- * Load and apply the memory management configuration, ready to enable MMU and
- * caches.
+ * Load and apply the memory management configuration, ready to
+ * enable MMU and caches.
*/
adrp x30, idmap
msr ttbr0_el1, x30
@@ -101,8 +109,9 @@ entry:
mov_i x30, .Lsctlrval

/*
- * Ensure everything before this point has completed, then invalidate any
- * potentially stale local TLB entries before they start being used.
+ * Ensure everything before this point has completed, then
+ * invalidate any potentially stale local TLB entries before they
+ * start being used.
*/
isb
tlbi vmalle1
Expand All @@ -111,8 +120,8 @@ entry:
isb

/*
- * Configure sctlr_el1 to enable MMU and cache and don't proceed until this
- * has completed.
+ * Configure sctlr_el1 to enable MMU and cache and don't proceed
+ * until this has completed.
*/
msr sctlr_el1, x30
isb
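The entry code above loads .Lsctlrval with a mov_i macro that is defined elsewhere in entry.S and not shown in this diff. As a sketch of what such a load-64-bit-immediate macro typically looks like (an assumption for illustration, not the file's actual definition), it can be built from one movz plus three movk instructions:

/* Sketch: load an arbitrary 64-bit immediate, 16 bits at a time. */
.macro mov_i, reg:req, imm:req
	movz \reg, :abs_g3:\imm        /* Bits 63:48, zeroing the rest. */
	movk \reg, :abs_g2_nc:\imm     /* Bits 47:32, keeping the rest. */
	movk \reg, :abs_g1_nc:\imm     /* Bits 31:16. */
	movk \reg, :abs_g0_nc:\imm     /* Bits 15:0. */
.endm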
58 changes: 32 additions & 26 deletions src/bare-metal/aps/examples/exceptions.S
@@ -15,12 +15,13 @@
*/

/**
- * Saves the volatile registers onto the stack. This currently takes 14
- * instructions, so it can be used in exception handlers with 18 instructions
- * left.
+ * Saves the volatile registers onto the stack. This currently takes
+ * 14 instructions, so it can be used in exception handlers with 18
+ * instructions left.
 *
- * On return, x0 and x1 are initialised to elr_el2 and spsr_el2 respectively,
- * which can be used as the first and second arguments of a subsequent call.
+ * On return, x0 and x1 are initialised to elr_el1 and spsr_el1
+ * respectively, which can be used as the first and second arguments
+ * of a subsequent call.
*/
.macro save_volatile_to_stack
/* Reserve stack space and save registers x0-x18, x29 & x30. */
@@ -37,19 +38,19 @@
stp x29, x30, [sp, #8 * 20]

/*
- * Save elr_el1 & spsr_el1. This such that we can take nested exception
- * and still be able to unwind.
+ * Save elr_el1 & spsr_el1. This is so that we can take a nested
+ * exception and still be able to unwind.
*/
mrs x0, elr_el1
mrs x1, spsr_el1
stp x0, x1, [sp, #8 * 22]
.endm

/**
- * Restores the volatile registers from the stack. This currently takes 14
- * instructions, so it can be used in exception handlers while still leaving 18
- * instructions left; if paired with save_volatile_to_stack, there are 4
- * instructions to spare.
+ * Restores the volatile registers from the stack. This currently
+ * takes 14 instructions, so it can be used in exception handlers
+ * while still leaving 18 instructions free; if paired with
+ * save_volatile_to_stack, there are 4 instructions to spare.
*/
.macro restore_volatile_from_stack
/* Restore registers x2-x18, x29 & x30. */
@@ -64,7 +65,9 @@
ldr x18, [sp, #8 * 18]
ldp x29, x30, [sp, #8 * 20]

-/* Restore registers elr_el1 & spsr_el1, using x0 & x1 as scratch. */
+/*
+ * Restore registers elr_el1 & spsr_el1, using x0 & x1 as scratch.
+ */
ldp x0, x1, [sp, #8 * 22]
msr elr_el1, x0
msr spsr_el1, x1
@@ -74,13 +77,16 @@
.endm
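As a usage sketch (the handler name is hypothetical, not shown in this diff): pairing the two macros gives a complete exception stub. Under the AAPCS64 calling convention, the x0 and x1 values set up by save_volatile_to_stack arrive in the Rust handler as its first two arguments.

	save_volatile_to_stack          /* x0 = elr_el1, x1 = spsr_el1. */
	bl handle_sync_exception        /* Hypothetical extern "C" Rust handler. */
	restore_volatile_from_stack
	eret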

/**
- * This is a generic handler for exceptions taken at the current EL while using
- * SP0. It behaves similarly to the SPx case by first switching to SPx, doing
- * the work, then switching back to SP0 before returning.
+ * This is a generic handler for exceptions taken at the current EL
+ * while using SP0. It behaves similarly to the SPx case by first
+ * switching to SPx, doing the work, then switching back to SP0 before
+ * returning.
 *
- * Switching to SPx and calling the Rust handler takes 16 instructions. To
- * restore and return we need an additional 16 instructions, so we can implement
- * the whole handler within the allotted 32 instructions.
+ * Switching to SPx and calling the Rust handler takes 16
+ * instructions. To restore and return we need an additional 16
+ * instructions, so we can implement the whole handler within the
+ * allotted 32 instructions.
*/
.macro current_exception_sp0 handler:req
msr spsel, #1
@@ -92,16 +98,16 @@
.endm

/**
- * This is a generic handler for exceptions taken at the current EL while using
- * SPx. It saves volatile registers, calls the Rust handler, restores volatile
- * registers, then returns.
+ * This is a generic handler for exceptions taken at the current EL
+ * while using SPx. It saves volatile registers, calls the Rust
+ * handler, restores volatile registers, then returns.
 *
- * This also works for exceptions taken from EL0, if we don't care about
- * non-volatile registers.
+ * This also works for exceptions taken from EL0, if we don't care
+ * about non-volatile registers.
 *
- * Saving state and jumping to the Rust handler takes 15 instructions, and
- * restoring and returning also takes 15 instructions, so we can fit the whole
- * handler in 30 instructions, under the limit of 32.
+ * Saving state and jumping to the Rust handler takes 15 instructions,
+ * and restoring and returning also takes 15 instructions, so we can
+ * fit the whole handler in 30 instructions, under the limit of 32.
*/
.macro current_exception_spx handler:req
save_volatile_to_stack
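A note on the 32-instruction budget these comments keep referring to: AArch64 vector table entries are spaced 0x80 bytes apart, and every A64 instruction is 4 bytes, so each entry holds at most 0x80 / 4 = 32 instructions. A sketch of how the macros might be instantiated in a vector table (the alignments are architectural; the section and handler names are assumptions):

.section .text.vector_table_el1, "ax"
.balign 0x800                  /* VBAR_EL1 requires 2 KiB alignment. */
vector_table_el1:
.balign 0x80                   /* Synchronous, current EL, SP0. */
	current_exception_sp0 sync_exception_current
.balign 0x80                   /* IRQ, current EL, SP0. */
	current_exception_sp0 irq_current
/* ... and so on for the remaining 0x80-byte entries. */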
13 changes: 8 additions & 5 deletions src/bare-metal/aps/examples/image.ld
@@ -15,8 +15,8 @@
*/

/*
- * Code will start running at this symbol which is placed at the start of the
- * image.
+ * Code will start running at this symbol which is placed at the start
+ * of the image.
*/
ENTRY(entry)

@@ -53,8 +53,8 @@ SECTIONS
rodata_end = .;

/*
- * Collect together the read-write data including .bss at the end which
- * will be zero'd by the entry code.
+ * Collect together the read-write data including .bss at the end
+ * which will be zero'd by the entry code.
*/
.data : ALIGN(4096) {
data_begin = .;
@@ -67,7 +67,10 @@
data_end = .;
} >image

-/* Everything beyond this point will not be included in the binary. */
+/*
+ * Everything beyond this point will not be included in the
+ * binary.
+ */
bin_end = .;

/* The entry point code assumes that .bss is 16-byte aligned. */
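The .bss-zeroing loop that this alignment guarantee exists for lives in entry.S and is not shown in this diff. A minimal sketch, assuming bss_begin/bss_end symbols in the script's naming style and relying on the 16-byte alignment noted above:

	adrp x29, bss_begin
	add x29, x29, :lo12:bss_begin  /* x29 = start of .bss. */
	adrp x30, bss_end
	add x30, x30, :lo12:bss_end    /* x30 = end of .bss. */
0:	cmp x29, x30
	b.eq 1f
	stp xzr, xzr, [x29], #16       /* Zero 16 bytes per iteration. */
	b 0b
1: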
