[coreboot-gerrit] Patch set updated for coreboot: 821e165 coreboot arm64: Add support for arm64 into coreboot framework

Isaac Christensen (isaac.christensen@se-eng.com) gerrit at coreboot.org
Mon Sep 22 20:01:53 CEST 2014


Isaac Christensen (isaac.christensen at se-eng.com) just uploaded a new patch set to gerrit, which you can find at http://review.coreboot.org/6915

-gerrit

commit 821e165e4a11fee3050fa78ee22debf8ae922bd8
Author: Furquan Shaikh <furquan at google.com>
Date:   Mon Apr 28 16:39:40 2014 -0700

    coreboot arm64: Add support for arm64 into coreboot framework
    
    Add support for building the different coreboot stages (bootblock, romstage
    and ramstage) for the arm64 architecture. Most of the files have been copied
    over from arm/ or from earlier arm64-generic work.
    
    Signed-off-by: Furquan Shaikh <furquan at google.com>
    Reviewed-on: https://chromium-review.googlesource.com/197397
    Reviewed-by: Aaron Durbin <adurbin at chromium.org>
    Commit-Queue: Furquan Shaikh <furquan at chromium.org>
    Tested-by: Furquan Shaikh <furquan at chromium.org>
    (cherry picked from commit 033ba96516805502673ac7404bc97e6ce4e2a934)
    
    This patch is essentially a squash of aarch64 changes made by
    these patches:
    
    d955885 coreboot: Rename coreboot_ram stage to ramstage
    a492761 cbmem console: Locate the preram console with a symbol instead of a sect
    96e7f0e aarch64: Enable early icache and migrate SCTLR from EL3
    3f854dc aarch64: Pass coreboot table in jmp_to_elf_entry
    ab3ecaf aarch64/foundation-armv8: Set up RAM area and enter ramstage
    25fd2e9 aarch64: Remove CAR definitions from early_variables.h
    65bf77d aarch64/foundation-armv8: Enable DYNAMIC_CBMEM
    9484873 aarch64: Change default exception level to EL2
    7a152c3 aarch64: Fix formatting of exception registers dump
    6946464 aarch64: Implement basic exception handling
    c732a9d aarch64/foundation-armv8: Basic bootblock implementation
    3bc412c aarch64: Comment out some parts of code to allow build
    ab5be71 Add initial aarch64 support
    
    The ramstage support is the only portion that has been tested
    on actual hardware. Bootblock and romstage support may require
    modifications to run on hardware.
    
    Change-Id: Icd59bec55c963a471a50e30972a8092e4c9d2fb2
    Signed-off-by: Isaac Christensen <isaac.christensen at se-eng.com>
---
 Makefile.inc                                  |   5 +-
 src/Kconfig                                   |   6 +
 src/arch/arm64/Kconfig                        |  14 ++
 src/arch/arm64/Makefile.inc                   | 137 ++++++++++++++
 src/arch/arm64/armv8/Kconfig                  |   9 +
 src/arch/arm64/armv8/Makefile.inc             |  74 ++++++++
 src/arch/arm64/armv8/bootblock.S              | 101 ++++++++++
 src/arch/arm64/armv8/bootblock_simple.c       |  73 +++++++
 src/arch/arm64/armv8/cache.c                  | 148 +++++++++++++++
 src/arch/arm64/armv8/cpu.S                    | 131 +++++++++++++
 src/arch/arm64/armv8/exception.c              | 129 +++++++++++++
 src/arch/arm64/armv8/exception_asm.S          | 103 ++++++++++
 src/arch/arm64/boot.c                         |  33 ++++
 src/arch/arm64/bootblock.ld                   |  59 ++++++
 src/arch/arm64/cbmem.c                        |  30 +++
 src/arch/arm64/cpu.c                          |  42 +++++
 src/arch/arm64/div0.c                         |  33 ++++
 src/arch/arm64/eabi_compat.c                  |  28 +++
 src/arch/arm64/early_console.c                |  75 ++++++++
 src/arch/arm64/id.S                           |  20 ++
 src/arch/arm64/include/arch/asm.h             |  38 ++++
 src/arch/arm64/include/arch/boot/boot.h       |   8 +
 src/arch/arm64/include/arch/byteorder.h       |  27 +++
 src/arch/arm64/include/arch/early_variables.h |  35 ++++
 src/arch/arm64/include/arch/hlt.h             |   9 +
 src/arch/arm64/include/arch/io.h              | 137 ++++++++++++++
 src/arch/arm64/include/arch/pci_ops.h         |  29 +++
 src/arch/arm64/include/arch/stages.h          |  29 +++
 src/arch/arm64/include/armv8/arch/barrier.h   |  52 +++++
 src/arch/arm64/include/armv8/arch/cache.h     | 262 ++++++++++++++++++++++++++
 src/arch/arm64/include/armv8/arch/cpu.h       |  52 +++++
 src/arch/arm64/include/armv8/arch/exception.h |  38 ++++
 src/arch/arm64/include/armv8/arch/rules.h     |  34 ++++
 src/arch/arm64/include/bootblock_common.h     |  11 ++
 src/arch/arm64/include/clocks.h               |  44 +++++
 src/arch/arm64/include/smp/spinlock.h         |   6 +
 src/arch/arm64/include/stdint.h               |  60 ++++++
 src/arch/arm64/memcpy.S                       |  52 +++++
 src/arch/arm64/memmove.S                      |  55 ++++++
 src/arch/arm64/memset.S                       |  52 +++++
 src/arch/arm64/ramstage.ld                    | 136 +++++++++++++
 src/arch/arm64/romstage.ld                    |  89 +++++++++
 src/arch/arm64/stages.c                       |  53 ++++++
 src/arch/arm64/tables.c                       |  83 ++++++++
 src/arch/arm64/timestamp.c                    |  29 +++
 toolchain.inc                                 |   3 +
 util/cbfstool/common.c                        |   2 +-
 47 files changed, 2673 insertions(+), 2 deletions(-)

diff --git a/Makefile.inc b/Makefile.inc
index c41945a..382eead 100644
--- a/Makefile.inc
+++ b/Makefile.inc
@@ -68,7 +68,7 @@ PHONY+= clean-abuild coreboot lint lint-stable build-dirs
 subdirs-y := src/lib src/console src/device src/ec src/southbridge src/soc
 subdirs-y += src/northbridge src/superio src/drivers src/cpu src/vendorcode
 subdirs-y += util/cbfstool util/sconfig util/nvramtool
-subdirs-y += src/arch/arm src/arch/x86
+subdirs-y += src/arch/arm src/arch/arm64 src/arch/x86
 subdirs-y += src/mainboard/$(MAINBOARDDIR)
 
 subdirs-y += site-local
@@ -565,6 +565,9 @@ bootsplash.jpg-type := bootsplash
 ifeq ($(CONFIG_ARCH_ROMSTAGE_ARM),y)
 ROMSTAGE_ELF := romstage.elf
 endif
+ifeq ($(CONFIG_ARCH_ROMSTAGE_ARM64),y)
+ROMSTAGE_ELF := romstage.elf
+endif
 ifeq ($(CONFIG_ARCH_ROMSTAGE_X86_32),y)
 ROMSTAGE_ELF := romstage_xip.elf
 endif
diff --git a/src/Kconfig b/src/Kconfig
index 327e1bf..afcaab6 100644
--- a/src/Kconfig
+++ b/src/Kconfig
@@ -214,8 +214,13 @@ config ARCH_ARM
 	bool
 	default n
 
+config ARCH_ARM64
+	bool
+	default n
+
 source src/arch/x86/Kconfig
 source src/arch/arm/Kconfig
+source src/arch/arm64/Kconfig
 
 source src/vendorcode/Kconfig
 
@@ -274,6 +279,7 @@ config TPM
 	default n
 	select LPC_TPM if ARCH_X86
 	select I2C_TPM if ARCH_ARM
+	select I2C_TPM if ARCH_ARM64
 	help
 	  Enable this option to enable TPM support in coreboot.
 
diff --git a/src/arch/arm64/Kconfig b/src/arch/arm64/Kconfig
new file mode 100644
index 0000000..dc0e332
--- /dev/null
+++ b/src/arch/arm64/Kconfig
@@ -0,0 +1,14 @@
+config ARCH_BOOTBLOCK_ARM64
+	bool
+	default n
+	select ARCH_ARM64
+
+config ARCH_ROMSTAGE_ARM64
+	bool
+	default n
+
+config ARCH_RAMSTAGE_ARM64
+	bool
+	default n
+
+source src/arch/arm64/armv8/Kconfig
diff --git a/src/arch/arm64/Makefile.inc b/src/arch/arm64/Makefile.inc
new file mode 100644
index 0000000..64370d4
--- /dev/null
+++ b/src/arch/arm64/Makefile.inc
@@ -0,0 +1,137 @@
+################################################################################
+##
+## This file is part of the coreboot project.
+##
+## Copyright (C) 2014 Google Inc.
+## Copyright (C) 2012-2013 The ChromiumOS Authors
+## Copyright (C) 2012 Alexandru Gagniuc <mr.nuke.me at gmail.com>
+## Copyright (C) 2009-2010 coresystems GmbH
+## Copyright (C) 2009 Ronald G. Minnich
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; version 2 of the License.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, write to the Free Software
+## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+##
+################################################################################
+
+################################################################################
+# Take care of subdirectories
+################################################################################
+
+subdirs-y += armv8/
+
+################################################################################
+# ARM64 specific options
+################################################################################
+
+ifeq ($(CONFIG_ARCH_ROMSTAGE_ARM64),y)
+CBFSTOOL_PRE1_OPTS = -m arm64 -b $(CONFIG_BOOTBLOCK_ROM_OFFSET) -H $(CONFIG_CBFS_HEADER_ROM_OFFSET) -o $(CONFIG_CBFS_ROM_OFFSET)
+endif
+
+################################################################################
+# bootblock
+################################################################################
+
+ifeq ($(CONFIG_ARCH_BOOTBLOCK_ARM64),y)
+
+bootblock-y += div0.c
+bootblock-$(CONFIG_BOOTBLOCK_CONSOLE) += early_console.c
+
+bootblock-y += id.S
+$(obj)/arch/arm64/id.bootblock.o: $(obj)/build.h
+
+bootblock-y += stages.c
+bootblock-y += eabi_compat.c
+
+bootblock-y += memset.S
+bootblock-y += memcpy.S
+bootblock-y += memmove.S
+
+# Build the bootblock
+
+$(objcbfs)/bootblock.debug: $(src)/arch/arm64/bootblock.ld $(obj)/ldoptions $$(bootblock-objs) $(obj)/config.h
+	@printf "    LINK       $(subst $(obj)/,,$(@))\n"
+ifeq ($(CONFIG_COMPILER_LLVM_CLANG),y)
+	$(LD_bootblock) -nostdlib -m armelf_linux_eabi --gc-sections -static -o $@ -L$(obj) $< -T $(src)/arch/arm64/bootblock.ld
+else
+	$(CC_bootblock) $(CFLAGS_bootblock) -nostdlib -Wl,--gc-sections -static -o $@ -L$(obj) -T $(src)/arch/arm64/bootblock.ld -Wl,--start-group $(bootblock-objs) -Wl,--end-group
+endif
+
+endif # CONFIG_ARCH_BOOTBLOCK_ARM64
+
+################################################################################
+# romstage
+################################################################################
+
+ifeq ($(CONFIG_ARCH_ROMSTAGE_ARM64),y)
+
+romstage-y += stages.c
+romstage-y += div0.c
+romstage-y += early_console.c
+romstage-y += eabi_compat.c
+romstage-y += memset.S
+romstage-y += memcpy.S
+romstage-y += memmove.S
+romstage-$(CONFIG_COLLECT_TIMESTAMPS) += timestamp.c
+
+# Build the romstage
+
+$(objcbfs)/romstage.debug: $$(romstage-objs) $(src)/arch/arm64/romstage.ld $(obj)/ldoptions
+	@printf "    LINK       $(subst $(obj)/,,$(@))\n"
+ifeq ($(CONFIG_COMPILER_LLVM_CLANG),y)
+	$(LD_romstage) -nostdlib --gc-sections -static -o $@ -L$(obj) $(romstage-objs) -T $(src)/arch/arm64/romstage.ld
+else
+	$(CC_romstage) $(CFLAGS_romstage) -nostdlib -Wl,--gc-sections -static -o $@ -L$(obj) -T $(src)/arch/arm64/romstage.ld -Wl,--start-group $(romstage-objs) -Wl,--end-group
+endif
+
+endif # CONFIG_ARCH_ROMSTAGE_ARM64
+
+################################################################################
+# ramstage
+################################################################################
+
+ifeq ($(CONFIG_ARCH_RAMSTAGE_ARM64),y)
+
+ramstage-y += stages.c
+ramstage-y += div0.c
+ramstage-y += cpu.c
+ramstage-y += eabi_compat.c
+ramstage-y += boot.c
+ramstage-y += tables.c
+ramstage-y += memset.S
+ramstage-y += memcpy.S
+ramstage-y += memmove.S
+
+rmodules_$(ARCH-ramstage-y) += memset.S
+rmodules_$(ARCH-ramstage-y) += memcpy.S
+rmodules_$(ARCH-ramstage-y) += memmove.S
+rmodules_$(ARCH-ramstage-y) += eabi_compat.c
+
+VBOOT_STUB_DEPS += $(obj)/arch/arm64/eabi_compat.rmodules.o
+
+$(eval $(call create_class_compiler,rmodules,arm64))
+
+ramstage-$(CONFIG_COLLECT_TIMESTAMPS) += timestamp.c
+
+ramstage-srcs += src/mainboard/$(MAINBOARDDIR)/mainboard.c
+
+# Build the ramstage
+
+$(objcbfs)/ramstage.debug: $$(ramstage-objs) $(src)/arch/arm64/ramstage.ld $(obj)/ldoptions
+	@printf "    LINK       $(subst $(obj)/,,$(@))\n"
+ifeq ($(CONFIG_COMPILER_LLVM_CLANG),y)
+	$(LD_ramstage) -nostdlib -m armelf_linux_eabi --gc-sections -o $@ --wrap __divdi3 --wrap __udivdi3 --wrap __moddi3 --wrap __umoddi3 --wrap __uidiv --start-group $(ramstage-objs) --end-group -T $(src)/arch/arm64/ramstage.ld
+else
+	$(CC_ramstage) $(CFLAGS_ramstage) -nostdlib -Wl,--gc-sections -static -o $@ -L$(obj) -Wl,--start-group $(ramstage-objs) -Wl,--end-group -T $(src)/arch/arm64/ramstage.ld
+endif
+
+endif # CONFIG_ARCH_RAMSTAGE_ARM64
diff --git a/src/arch/arm64/armv8/Kconfig b/src/arch/arm64/armv8/Kconfig
new file mode 100644
index 0000000..fc2e668
--- /dev/null
+++ b/src/arch/arm64/armv8/Kconfig
@@ -0,0 +1,9 @@
+config ARCH_BOOTBLOCK_ARM_V8_64
+	def_bool n
+	select ARCH_BOOTBLOCK_ARM64
+config ARCH_ROMSTAGE_ARM_V8_64
+	def_bool n
+	select ARCH_ROMSTAGE_ARM64
+config ARCH_RAMSTAGE_ARM_V8_64
+	def_bool n
+	select ARCH_RAMSTAGE_ARM64
diff --git a/src/arch/arm64/armv8/Makefile.inc b/src/arch/arm64/armv8/Makefile.inc
new file mode 100644
index 0000000..973b391
--- /dev/null
+++ b/src/arch/arm64/armv8/Makefile.inc
@@ -0,0 +1,74 @@
+################################################################################
+##
+## This file is part of the coreboot project.
+##
+## Copyright (C) 2014 The ChromiumOS Authors
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; version 2 of the License.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+## GNU General Public License for more details.
+##
+## You should have received a copy of the GNU General Public License
+## along with this program; if not, write to the Free Software
+## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+##
+################################################################################
+
+armv8_flags = -march=armv8-a -I$(src)/arch/arm64/include/armv8/ -D__COREBOOT_ARM_ARCH__=8
+
+armv8_asm_flags = $(armv8_flags)
+
+################################################################################
+## bootblock
+################################################################################
+ifeq ($(CONFIG_ARCH_BOOTBLOCK_ARM_V8_64),y)
+
+ifneq ($(CONFIG_ARM_BOOTBLOCK_CUSTOM),y)
+bootblock-y += bootblock.S
+endif
+bootblock-$(CONFIG_ARM_BOOTBLOCK_SIMPLE) += bootblock_simple.c
+bootblock-$(CONFIG_ARM_BOOTBLOCK_NORMAL) += bootblock_normal.c
+bootblock-y += cache.c
+bootblock-y += cpu.S
+bootblock-$(CONFIG_BOOTBLOCK_CONSOLE) += exception.c
+bootblock-$(CONFIG_BOOTBLOCK_CONSOLE) += exception_asm.S
+
+bootblock-c-ccopts += $(armv8_flags)
+bootblock-S-ccopts += $(armv8_asm_flags)
+
+endif
+
+################################################################################
+## romstage
+################################################################################
+ifeq ($(CONFIG_ARCH_ROMSTAGE_ARM_V8_64),y)
+
+romstage-y += cache.c
+romstage-y += cpu.S
+romstage-y += exception.c
+romstage-y += exception_asm.S
+
+romstage-c-ccopts += $(armv8_flags)
+romstage-S-ccopts += $(armv8_asm_flags)
+
+endif
+
+################################################################################
+## ramstage
+################################################################################
+ifeq ($(CONFIG_ARCH_RAMSTAGE_ARM_V8_64),y)
+
+ramstage-y += cache.c
+ramstage-y += cpu.S
+ramstage-y += exception.c
+ramstage-y += exception_asm.S
+
+ramstage-c-ccopts += $(armv8_flags)
+ramstage-S-ccopts += $(armv8_asm_flags)
+
+endif
diff --git a/src/arch/arm64/armv8/bootblock.S b/src/arch/arm64/armv8/bootblock.S
new file mode 100644
index 0000000..e65515f
--- /dev/null
+++ b/src/arch/arm64/armv8/bootblock.S
@@ -0,0 +1,101 @@
+/*
+ * Early initialization code for aarch64 (a.k.a. armv8)
+ *
+ * Copyright 2013  Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of
+ * the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+.section ".start", "a", %progbits
+.globl _start
+_start: b	reset
+	.balignl 16,0xdeadbeef
+
+_cbfs_master_header:
+	/* The CBFS master header is inserted by cbfstool at the first
+	 * aligned offset after the above anchor string is found.
+	 * Hence, we leave some space for it.
+	 * Assumes 64-byte alignment.
+	 */
+	.skip 128
+
+reset:
+	/*
+	 * Set the cpu to SVC32 mode and unmask aborts. Aborts might happen
+	 * before logging is turned on and may crash the machine, but at least
+	 * the problem will show up near the code that causes it.
+	 */
+	/* FIXME: Not using supervisor mode, does it apply for aarch64? */
+
+	msr	daifclr, #0xc /* Unmask Debug and System exceptions */
+	msr	daifset, #0x3 /* Mask IRQ, FIQ */
+
+	bl	arm_init_caches
+
+	/*
+	 * Initialize the stack to a known value. This is used to check for
+	 * stack overflow later in the boot process.
+	 */
+	ldr	x0, .Stack
+	ldr	x1, .Stack_size
+	sub	x0, x0, x1
+	ldr	x1, .Stack
+	ldr	x2, =0xdeadbeefdeadbeef
+init_stack_loop:
+	str	x2, [x0]
+	add	x0, x0, #8
+	cmp	x0, x1
+	bne	init_stack_loop
+
+/* Set stackpointer in internal RAM to call bootblock main() */
+call_bootblock:
+	ldr	x0, .Stack /* Set up stack pointer */
+	mov	sp, x0
+	ldr	x0, =0x00000000
+
+	sub	sp, sp, #16
+
+	/*
+	 * Switch to EL2 already, because Linux requires entry in
+	 * EL1 or EL2; see its "Booting AArch64 Linux" document.
+	 */
+	bl	switch_el3_to_el2
+	bl	main
+
+.align 3
+.Stack:
+	.word CONFIG_STACK_TOP
+.align 3
+.Stack_size:
+	.word CONFIG_STACK_SIZE
+	.section ".id", "a", %progbits
+
+	.globl __id_start
+__id_start:
+ver:
+	.asciz COREBOOT_VERSION
+vendor:
+	.asciz CONFIG_MAINBOARD_VENDOR
+part:
+	.asciz CONFIG_MAINBOARD_PART_NUMBER
+.long __id_end - ver  /* Reverse offset to the version string */
+.long __id_end - vendor  /* Reverse offset to the vendor id */
+.long __id_end - part    /* Reverse offset to the part number */
+.long CONFIG_ROM_SIZE    /* Size of this romimage */
+	.globl __id_end
+
+__id_end:
+.previous
diff --git a/src/arch/arm64/armv8/bootblock_simple.c b/src/arch/arm64/armv8/bootblock_simple.c
new file mode 100644
index 0000000..d8339d1
--- /dev/null
+++ b/src/arch/arm64/armv8/bootblock_simple.c
@@ -0,0 +1,73 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of
+ * the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#include <bootblock_common.h>
+#include <arch/cache.h>
+#include <arch/hlt.h>
+#include <arch/stages.h>
+#include <arch/exception.h>
+#include <cbfs.h>
+#include <console/console.h>
+
+static int boot_cpu(void)
+{
+	/*
+	 * FIXME: This is a stub for now. All non-boot CPUs should be
+	 * waiting for an interrupt. We could move the chunk of assembly
+	 * which puts them to sleep in here...
+	 */
+	return 1;
+}
+
+void main(void)
+{
+	const char *stage_name = CONFIG_CBFS_PREFIX"/romstage";
+	void *entry = NULL;
+
+	/* Globally disable MMU, caches, and branch prediction (these should
+	 * be disabled by default on reset) */
+	dcache_mmu_disable();
+
+	/*
+	 * Re-enable icache and branch prediction. MMU and dcache will be
+	 * set up later.
+	 *
+	 * Note: If booting from USB, we need to disable branch prediction
+	 * before copying from USB into RAM (FIXME: why?)
+	 */
+
+	if (boot_cpu()) {
+		//bootblock_cpu_init();
+		//bootblock_mainboard_init();
+	}
+
+#if CONFIG_BOOTBLOCK_CONSOLE
+	console_init();
+	exception_init();
+#endif
+
+	entry = cbfs_load_stage(CBFS_DEFAULT_MEDIA, stage_name);
+
+	printk(BIOS_SPEW, "stage_name %s, entry %p\n", stage_name, entry);
+
+	if (entry) stage_exit(entry);
+	hlt();
+}
diff --git a/src/arch/arm64/armv8/cache.c b/src/arch/arm64/armv8/cache.c
new file mode 100644
index 0000000..a0eff46
--- /dev/null
+++ b/src/arch/arm64/armv8/cache.c
@@ -0,0 +1,148 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * cache.c: Cache maintenance routines for ARMv8 (aarch64)
+ *
+ * Reference: ARM Architecture Reference Manual, ARMv8-A edition
+ */
+
+#include <stdint.h>
+
+#include <arch/cache.h>
+
+void tlb_invalidate_all(void)
+{
+	 /* TLBIALL includes dTLB and iTLB on systems that have them. */
+	 tlbiall(current_el());
+	 dsb();
+	 isb();
+}
+
+unsigned int dcache_line_bytes(void)
+{
+	uint32_t ccsidr;
+	static unsigned int line_bytes = 0;
+
+	if (line_bytes)
+		return line_bytes;
+
+	ccsidr = read_ccsidr();
+	/* [2:0] - Indicates (Log2(number of words in cache line)) - 2 */
+	line_bytes = 1 << ((ccsidr & 0x7) + 2);	/* words per line */
+	line_bytes *= sizeof(uint32_t);	/* bytes per word */
+
+	return line_bytes;
+}
+
+enum dcache_op {
+	OP_DCCSW,
+	OP_DCCISW,
+	OP_DCISW,
+	OP_DCCIVAC,
+	OP_DCCVAC,
+	OP_DCIVAC,
+};
+
+/*
+ * Do a dcache operation by virtual address. This is useful for maintaining
+ * coherency in drivers which do DMA transfers and only need to perform
+ * cache maintenance on a particular memory range rather than the entire cache.
+ */
+static void dcache_op_va(void const *addr, size_t len, enum dcache_op op)
+{
+	uint64_t line, linesize;
+
+	linesize = dcache_line_bytes();
+	line = (uint64_t)addr & ~(linesize - 1);
+
+	dsb();
+	while ((void *)line < addr + len) {
+		switch(op) {
+		case OP_DCCIVAC:
+			dccivac(line);
+			break;
+		case OP_DCCVAC:
+			dccvac(line);
+			break;
+		case OP_DCIVAC:
+			dcivac(line);
+			break;
+		default:
+			break;
+		}
+		line += linesize;
+	}
+	isb();
+}
+
+void dcache_clean_by_va(void const *addr, size_t len)
+{
+	dcache_op_va(addr, len, OP_DCCVAC);
+}
+
+void dcache_clean_invalidate_by_va(void const *addr, size_t len)
+{
+	dcache_op_va(addr, len, OP_DCCIVAC);
+}
+
+void dcache_invalidate_by_va(void const *addr, size_t len)
+{
+	dcache_op_va(addr, len, OP_DCIVAC);
+}
+
+/*
+ * CAUTION: This implementation assumes that coreboot never uses non-identity
+ * page tables for pages containing executed code. If you ever want to violate
+ * this assumption, have fun figuring out the associated problems on your own.
+ */
+void dcache_mmu_disable(void)
+{
+	uint32_t sctlr;
+
+	flush_dcache_all();
+	sctlr = read_sctlr(current_el());
+	sctlr &= ~(SCTLR_C | SCTLR_M);
+	write_sctlr(sctlr, current_el());
+}
+
+void dcache_mmu_enable(void)
+{
+	uint32_t sctlr;
+
+	sctlr = read_sctlr(current_el());
+	sctlr |= SCTLR_C | SCTLR_M;
+	write_sctlr(sctlr, current_el());
+}
+
+void cache_sync_instructions(void)
+{
+	flush_dcache_all();	/* includes trailing DSB (in assembly) */
+	iciallu();		/* includes BPIALLU (architecturally) */
+	dsb();
+	isb();
+}
diff --git a/src/arch/arm64/armv8/cpu.S b/src/arch/arm64/armv8/cpu.S
new file mode 100644
index 0000000..1eecc2b
--- /dev/null
+++ b/src/arch/arm64/armv8/cpu.S
@@ -0,0 +1,131 @@
+/*
+ * Based on arch/arm/include/asm/cacheflush.h
+ *
+ * Copyright (C) 1999-2002 Russell King.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <arch/asm.h>
+
+/*
+ *	flush_dcache_all()
+ *
+ *	Flush the whole D-cache.
+ *
+ *	Corrupted registers: x0-x7, x9-x11
+ * 	From: Linux arch/arm64/mm/cache.S
+ */
+ENTRY(flush_dcache_all)
+	dsb	sy		// ensure ordering with previous memory accesses
+	mrs	x0, clidr_el1		// read clidr
+	and	x3, x0, #0x7000000	// extract loc from clidr
+	lsr	x3, x3, #23		// left align loc bit field
+	cbz	x3, finished		// if loc is 0, then no need to clean
+	mov	x10, #0			// start clean at cache level 0
+loop1:
+	add	x2, x10, x10, lsr #1	// work out 3x current cache level
+	lsr	x1, x0, x2		// extract cache type bits from clidr
+	and	x1, x1, #7		// mask of the bits for current cache only
+	cmp	x1, #2			// see what cache we have at this level
+	b.lt	skip			// skip if no cache, or just i-cache
+	mrs	x9, daif		// make CSSELR and CCSIDR access atomic
+	msr	csselr_el1, x10		// select current cache level in csselr
+	isb				// isb to sync the new csselr & ccsidr
+	mrs	x1, ccsidr_el1		// read the new ccsidr
+	msr	daif, x9
+	and	x2, x1, #7		// extract the length of the cache lines
+	add	x2, x2, #4		// add 4 (line length offset)
+	mov	x4, #0x3ff
+	and	x4, x4, x1, lsr #3	// find maximum number on the way size
+	clz	x5, x4			// find bit position of way size increment
+	mov	x7, #0x7fff
+	and	x7, x7, x1, lsr #13	// extract max number of the index size
+loop2:
+	mov	x9, x4			// create working copy of max way size
+loop3:
+	lsl	x6, x9, x5
+	orr	x11, x10, x6		// factor way and cache number into x11
+	lsl	x6, x7, x2
+	orr	x11, x11, x6		// factor index number into x11
+	dc	cisw, x11		// clean & invalidate by set/way
+	subs	x9, x9, #1		// decrement the way
+	b.ge	loop3
+	subs	x7, x7, #1		// decrement the index
+	b.ge	loop2
+skip:
+	add	x10, x10, #2		// increment cache number
+	cmp	x3, x10
+	b.gt	loop1
+finished:
+	mov	x10, #0			// switch back to cache level 0
+	msr	csselr_el1, x10		// select current cache level in csselr
+	dsb	sy
+	isb
+	ret
+ENDPROC(flush_dcache_all)
+
+/*
+ * Bring an ARMv8 processor we just gained control of (e.g. from IROM) into a
+ * known state regarding caches/SCTLR. Completely cleans and invalidates
+ * icache/dcache, disables MMU and dcache (if active), and enables unaligned
+ * accesses, icache and branch prediction (if inactive). Clobbers x4 and x5.
+ */
+ENTRY(arm_init_caches)
+	/* w4: SCTLR, return address: x8 (stay valid for the whole function) */
+	mov	x8, x30
+	/* XXX: Assume that we always start running at EL3 */
+	mrs	x4, sctlr_el3
+
+	/* FIXME: How to enable branch prediction on ARMv8? */
+
+	/* Flush and invalidate dcache */
+	bl	flush_dcache_all
+
+	/* Deactivate MMU (0), Alignment Check (1) and DCache (2) */
+	and	x4, x4, # ~(1 << 0) & ~(1 << 1) & ~(1 << 2)
+	/* Activate ICache (12) already for speed */
+	orr	x4, x4, #(1 << 12)
+	msr	sctlr_el3, x4
+
+	/* Invalidate icache and TLB for good measure */
+	ic	iallu
+	tlbi	alle3
+	dsb	sy
+	isb
+
+	ret	x8
+ENDPROC(arm_init_caches)
+
+/* Based on u-boot transition.S */
+ENTRY(switch_el3_to_el2)
+	mov	x0, #0x5b1	/* Non-secure EL0/EL1 | HVC | 64bit EL2 */
+	msr	scr_el3, x0
+	msr	cptr_el3, xzr	/* Disable coprocessor traps to EL3 */
+	mov	x0, #0x33ff
+	msr	cptr_el2, x0	/* Disable coprocessor traps to EL2 */
+
+	/* Return to the EL2_SP2 mode from EL3 */
+	mov	x0, sp
+	msr	sp_el2, x0	/* Migrate SP */
+	mrs	x0, vbar_el3
+	msr	vbar_el2, x0	/* Migrate VBAR */
+	mrs	x0, sctlr_el3
+	msr	sctlr_el2, x0	/* Migrate SCTLR */
+	mov	x0, #0x3c9
+	msr	spsr_el3, x0	/* EL2_SP2 | D | A | I | F */
+	msr	elr_el3, x30
+	eret
+ENDPROC(switch_el3_to_el2)
diff --git a/src/arch/arm64/armv8/exception.c b/src/arch/arm64/armv8/exception.c
new file mode 100644
index 0000000..31e3131
--- /dev/null
+++ b/src/arch/arm64/armv8/exception.c
@@ -0,0 +1,129 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright 2013 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <types.h>
+#include <arch/cache.h>
+#include <arch/exception.h>
+#include <console/console.h>
+
+void exception_sync_el0(uint64_t *regs, uint64_t esr);
+void exception_irq_el0(uint64_t *regs, uint64_t esr);
+void exception_fiq_el0(uint64_t *regs, uint64_t esr);
+void exception_serror_el0(uint64_t *regs, uint64_t esr);
+void exception_sync(uint64_t *regs, uint64_t esr);
+void exception_irq(uint64_t *regs, uint64_t esr);
+void exception_fiq(uint64_t *regs, uint64_t esr);
+void exception_serror(uint64_t *regs, uint64_t esr);
+
+static void print_regs(uint64_t *regs)
+{
+	int i;
+
+	/* ELR contains the restart PC at target exception level */
+	printk(BIOS_ERR, "ELR = 0x%016llx        ", regs[0]);
+	printk(BIOS_ERR, "X00 = 0x%016llx\n", regs[1]);
+
+	for (i = 2; i < 31; i+=2) {
+		printk(BIOS_ERR, "X%02d = 0x%016llx        ", i - 1, regs[i]);
+		printk(BIOS_ERR, "X%02d = 0x%016llx\n", i, regs[i + 1]);
+	}
+}
+
+void exception_sync_el0(uint64_t *regs, uint64_t esr)
+{
+	printk(BIOS_ERR, "exception _sync_el0 (ESR = 0x%08llx)\n", esr);
+	print_regs(regs);
+	die("exception");
+}
+
+void exception_irq_el0(uint64_t *regs, uint64_t esr)
+{
+	printk(BIOS_ERR, "exception _irq_el0 (ESR = 0x%08llx)\n", esr);
+	print_regs(regs);
+	die("exception");
+}
+
+void exception_fiq_el0(uint64_t *regs, uint64_t esr)
+{
+	printk(BIOS_ERR, "exception _fiq_el0 (ESR = 0x%08llx)\n", esr);
+	print_regs(regs);
+	die("exception");
+}
+
+void exception_serror_el0(uint64_t *regs, uint64_t esr)
+{
+	printk(BIOS_ERR, "exception _serror_el0 (ESR = 0x%08llx)\n", esr);
+	print_regs(regs);
+	die("exception");
+}
+
+void exception_sync(uint64_t *regs, uint64_t esr)
+{
+	printk(BIOS_ERR, "exception _sync (ESR = 0x%08llx)\n", esr);
+	print_regs(regs);
+	die("exception");
+}
+
+void exception_irq(uint64_t *regs, uint64_t esr)
+{
+	printk(BIOS_ERR, "exception _irq (ESR = 0x%08llx)\n", esr);
+	print_regs(regs);
+	die("exception");
+}
+
+void exception_fiq(uint64_t *regs, uint64_t esr)
+{
+	printk(BIOS_ERR, "exception _fiq (ESR = 0x%08llx)\n", esr);
+	print_regs(regs);
+	die("exception");
+}
+
+void exception_serror(uint64_t *regs, uint64_t esr)
+{
+	printk(BIOS_ERR, "exception _serror (ESR = 0x%08llx)\n", esr);
+	print_regs(regs);
+	die("exception");
+}
+
+void exception_init(void)
+{
+	//uint32_t sctlr = read_sctlr();
+	/* Handle exceptions in ARM mode. */
+	//sctlr &= ~SCTLR_TE;
+	/* Set V=0 in SCTLR so VBAR points to the exception vector table. */
+	//sctlr &= ~SCTLR_V;
+	/* Enforce alignment temporarily. */
+	//write_sctlr(sctlr);
+
+	extern uint32_t exception_table[];
+	set_vbar((uintptr_t)exception_table);
+
+	printk(BIOS_DEBUG, "Exception handlers installed.\n");
+}
diff --git a/src/arch/arm64/armv8/exception_asm.S b/src/arch/arm64/armv8/exception_asm.S
new file mode 100644
index 0000000..b1f1a94
--- /dev/null
+++ b/src/arch/arm64/armv8/exception_asm.S
@@ -0,0 +1,103 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <arch/asm.h>
+
+	.text
+
+	.align 11
+	.global exception_table
+exception_table:
+	.align 7
+	bl	exception_prologue
+	bl	exception_sync_el0
+	.align 7
+	bl	exception_prologue
+	bl	exception_irq_el0
+	.align 7
+	bl	exception_prologue
+	bl	exception_fiq_el0
+	.align 7
+	bl	exception_prologue
+	bl	exception_serror_el0
+	.align 7
+	bl	exception_prologue
+	bl	exception_sync
+	.align 7
+	bl	exception_prologue
+	bl	exception_irq
+	.align 7
+	bl	exception_prologue
+	bl	exception_fiq
+	.align 7
+	bl	exception_prologue
+	bl	exception_serror
+
+/*
+ * Save state (register file + ELR) to stack
+ * and set arguments x0 and x1 for exception call
+ */
+ENTRY(exception_prologue)
+	stp	x29, x30, [sp, #-16]!
+	stp	x27, x28, [sp, #-16]!
+	stp	x25, x26, [sp, #-16]!
+	stp	x23, x24, [sp, #-16]!
+	stp	x21, x22, [sp, #-16]!
+	stp	x19, x20, [sp, #-16]!
+	stp	x17, x18, [sp, #-16]!
+	stp	x15, x16, [sp, #-16]!
+	stp	x13, x14, [sp, #-16]!
+	stp	x11, x12, [sp, #-16]!
+	stp	x9, x10, [sp, #-16]!
+	stp	x7, x8, [sp, #-16]!
+	stp	x5, x6, [sp, #-16]!
+	stp	x3, x4, [sp, #-16]!
+	stp	x1, x2, [sp, #-16]!
+
+	/* FIXME: Don't assume always running in EL2 */
+	mrs	x1, elr_el2
+	stp	x1, x0, [sp, #-16]!
+
+	mrs	x1, esr_el2
+	mov	x0, sp
+
+	ret
+ENDPROC(exception_prologue)
+
+	.global exception_stack_end
+exception_stack_end:
+	.quad 0
+
+exception_handler:
+	.word 0
+
+	.global set_vbar
+set_vbar:
+	msr	vbar_el2, x0
+	ret
diff --git a/src/arch/arm64/boot.c b/src/arch/arm64/boot.c
new file mode 100644
index 0000000..6fb0e65
--- /dev/null
+++ b/src/arch/arm64/boot.c
@@ -0,0 +1,33 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2013 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <arch/cache.h>
+#include <arch/stages.h>
+#include <cbmem.h>
+#include <console/console.h>
+
+void jmp_to_elf_entry(void *entry, unsigned long buffer, unsigned long size)
+{
+	void (*doit)(void *) = entry;
+	void *cb_tables = cbmem_find(CBMEM_ID_CBTABLE);
+
+	printk(BIOS_SPEW, "entry    = %p\n", entry);
+	cache_sync_instructions();
+	doit(cb_tables);
+}
diff --git a/src/arch/arm64/bootblock.ld b/src/arch/arm64/bootblock.ld
new file mode 100644
index 0000000..acce1f1
--- /dev/null
+++ b/src/arch/arm64/bootblock.ld
@@ -0,0 +1,59 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2006 Advanced Micro Devices, Inc.
+ * Copyright (C) 2008-2010 coresystems GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* We use ELF as output format. So that we can debug the code in some form. */
+OUTPUT_FORMAT("elf64-littleaarch64", "elf64-littleaarch64", "elf64-littleaarch64")
+OUTPUT_ARCH(aarch64)
+INCLUDE ldoptions
+
+PHDRS
+{
+	to_load PT_LOAD;
+}
+
+ENTRY(_start)
+TARGET(binary)
+SECTIONS
+{
+	. = CONFIG_BOOTBLOCK_BASE;
+
+	.bootblock . : {
+		*(.text._start);
+		KEEP(*(.id));
+		*(.text);
+		*(.text.*);
+		*(.rodata);
+		*(.rodata.*);
+		*(.data);
+		*(.data.*);
+		*(.bss);
+		*(.bss.*);
+		*(.sbss);
+		*(.sbss.*);
+	} : to_load = 0xff
+
+	/DISCARD/ : {
+		*(.comment)
+		*(.note)
+		*(.comment.*)
+		*(.note.*)
+		*(.ARM.*)
+	}
+}
diff --git a/src/arch/arm64/cbmem.c b/src/arch/arm64/cbmem.c
new file mode 100644
index 0000000..b33e9b8
--- /dev/null
+++ b/src/arch/arm64/cbmem.c
@@ -0,0 +1,30 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <cbmem.h>
+
+unsigned long get_top_of_ram(void);
+
+unsigned long get_top_of_ram(void)
+{
+        return CONFIG_RAMTOP;
+}
+
+void *cbmem_top(void)
+{
+        return (void *)get_top_of_ram();
+}
diff --git a/src/arch/arm64/cpu.c b/src/arch/arm64/cpu.c
new file mode 100644
index 0000000..f90c759
--- /dev/null
+++ b/src/arch/arm64/cpu.c
@@ -0,0 +1,42 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2013 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <stdlib.h>
+#include <arch/cpu.h>
+
+/* Return the cpu struct which is at the high memory address of the stack.
+ */
+struct cpu_info *cpu_info(void)
+{
+	uintptr_t addr = ALIGN((uintptr_t)__builtin_frame_address(0),
+		CONFIG_STACK_SIZE);
+	addr -= sizeof(struct cpu_info);
+	return (void *)addr;
+}
+
diff --git a/src/arch/arm64/div0.c b/src/arch/arm64/div0.c
new file mode 100644
index 0000000..ab06ad3
--- /dev/null
+++ b/src/arch/arm64/div0.c
@@ -0,0 +1,33 @@
+/*
+ * (C) Copyright 2002
+ * Wolfgang Denk, DENX Software Engineering, wd at denx.de.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <console/console.h>
+
+void __div0(void); // called from asm so no need for a prototype in a header
+
+/* Replacement (=dummy) for the GNU/Linux division-by-zero handler */
+/* recursion is ok here because we have no formats ... */
+void __div0 (void)
+{
+	printk(BIOS_EMERG, "DIVIDE BY ZERO! continuing ... \n");
+}
diff --git a/src/arch/arm64/eabi_compat.c b/src/arch/arm64/eabi_compat.c
new file mode 100644
index 0000000..c27c54e
--- /dev/null
+++ b/src/arch/arm64/eabi_compat.c
@@ -0,0 +1,28 @@
+/*
+ * Utility functions needed for (some) EABI conformant tool chains.
+ *
+ * (C) Copyright 2009 Wolfgang Denk <wd at denx.de>
+ *
+ * This program is Free Software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ */
+
+#include <console/console.h>
+
+int raise (int signum) __attribute__((used));
+int raise (int signum)
+{
+	return 0;
+}
+
+void __aeabi_unwind_cpp_pr0(void) __attribute__((used));
+void __aeabi_unwind_cpp_pr0(void)
+{
+};
+
+void __aeabi_unwind_cpp_pr1(void) __attribute__((used));
+void __aeabi_unwind_cpp_pr1(void)
+{
+};
diff --git a/src/arch/arm64/early_console.c b/src/arch/arm64/early_console.c
new file mode 100644
index 0000000..aa287a3
--- /dev/null
+++ b/src/arch/arm64/early_console.c
@@ -0,0 +1,75 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of
+ * the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ */
+
+#include <console/console.h>
+#include <console/vtxprintf.h>
+
+/* FIXME: need to make console driver more generic */
+void console_tx_byte(unsigned char byte)
+{
+	if (byte == '\n')
+		console_tx_byte('\r');
+
+#if CONFIG_CONSOLE_SERIAL8250MEM
+	if (oxford_oxpcie_present) {
+		uart8250_mem_tx_byte(
+			CONFIG_OXFORD_OXPCIE_BASE_ADDRESS + 0x1000, byte);
+	}
+#endif
+#if CONFIG_CONSOLE_SERIAL_UART
+	uart_tx_byte(byte);
+#endif
+#if CONFIG_USBDEBUG
+	usbdebug_tx_byte(0, byte);
+#endif
+#if CONFIG_CONSOLE_CBMEM && !defined(__BOOT_BLOCK__)
+	cbmemc_tx_byte(byte);
+#endif
+}
+
+void console_tx_flush(void)
+{
+#if CONFIG_CONSOLE_SERIAL8250MEM
+	uart8250_mem_tx_flush(CONFIG_OXFORD_OXPCIE_BASE_ADDRESS + 0x1000);
+#endif
+#if CONFIG_CONSOLE_SERIAL_UART
+	uart_tx_flush();
+#endif
+#if CONFIG_USBDEBUG
+	usbdebug_tx_flush(0);
+#endif
+}
+
+int do_printk(int msg_level, const char *fmt, ...)
+{
+	va_list args;
+	int i;
+
+	if (msg_level > console_loglevel) {
+		return 0;
+	}
+
+	va_start(args, fmt);
+	i = vtxprintf(console_tx_byte, fmt, args);
+	va_end(args);
+
+	console_tx_flush();
+
+	return i;
+}
diff --git a/src/arch/arm64/id.S b/src/arch/arm64/id.S
new file mode 100644
index 0000000..a588f1e
--- /dev/null
+++ b/src/arch/arm64/id.S
@@ -0,0 +1,20 @@
+#include <build.h>
+
+	.section ".id", "a", %progbits
+
+	.globl __id_start
+__id_start:
+ver:
+	.asciz COREBOOT_VERSION
+vendor:
+	.asciz CONFIG_MAINBOARD_VENDOR
+part:
+	.asciz CONFIG_MAINBOARD_PART_NUMBER
+.long __id_end - ver  /* Reverse offset to the version string */
+.long __id_end - vendor  /* Reverse offset to the vendor id */
+.long __id_end - part    /* Reverse offset to the part number */
+.long CONFIG_ROM_SIZE                               /* Size of this romimage */
+	.globl __id_end
+
+__id_end:
+.previous
diff --git a/src/arch/arm64/include/arch/asm.h b/src/arch/arm64/include/arch/asm.h
new file mode 100644
index 0000000..7760bad
--- /dev/null
+++ b/src/arch/arm64/include/arch/asm.h
@@ -0,0 +1,38 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2013 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __ARM_ARM64_ASM_H
+#define __ARM_ARM64_ASM_H
+
+#define ALIGN .align 0
+
+#define ENDPROC(name) \
+	.type name, %function; \
+	END(name)
+
+#define ENTRY(name) \
+	.section .text.name, "ax", %progbits; \
+	.global name; \
+	ALIGN; \
+	name:
+
+#define END(name) \
+	.size name, .-name
+
+#endif	/* __ARM_ARM64_ASM_H */
diff --git a/src/arch/arm64/include/arch/boot/boot.h b/src/arch/arm64/include/arch/boot/boot.h
new file mode 100644
index 0000000..16763c6
--- /dev/null
+++ b/src/arch/arm64/include/arch/boot/boot.h
@@ -0,0 +1,8 @@
+#ifndef ASM_ARM64_BOOT_H
+#define ASM_ARM64_BOOT_H
+
+#define ELF_CLASS	ELFCLASS64
+#define ELF_DATA	ELFDATA2LSB
+#define ELF_ARCH	EM_AARCH64
+
+#endif /* ASM_ARM64_BOOT_H */
diff --git a/src/arch/arm64/include/arch/byteorder.h b/src/arch/arm64/include/arch/byteorder.h
new file mode 100644
index 0000000..8dc069f
--- /dev/null
+++ b/src/arch/arm64/include/arch/byteorder.h
@@ -0,0 +1,27 @@
+#ifndef _BYTEORDER_H
+#define _BYTEORDER_H
+
+#define __LITTLE_ENDIAN 1234
+
+#include <stdint.h>
+#include <swab.h>
+
+#define cpu_to_le64(x) ((uint64_t)(x))
+#define le64_to_cpu(x) ((uint64_t)(x))
+#define cpu_to_le32(x) ((uint32_t)(x))
+#define le32_to_cpu(x) ((uint32_t)(x))
+#define cpu_to_le16(x) ((uint16_t)(x))
+#define le16_to_cpu(x) ((uint16_t)(x))
+#define cpu_to_be64(x) swab64(x)
+#define be64_to_cpu(x) swab64(x)
+#define cpu_to_be32(x) swab32((x))
+#define be32_to_cpu(x) swab32((x))
+#define cpu_to_be16(x) swab16((x))
+#define be16_to_cpu(x) swab16((x))
+
+#define ntohll(x) be64_to_cpu(x)
+#define htonll(x) cpu_to_be64(x)
+#define ntohl(x)  be32_to_cpu(x)
+#define htonl(x)  cpu_to_be32(x)
+
+#endif /* _BYTEORDER_H */
diff --git a/src/arch/arm64/include/arch/early_variables.h b/src/arch/arm64/include/arch/early_variables.h
new file mode 100644
index 0000000..3d9fa26
--- /dev/null
+++ b/src/arch/arm64/include/arch/early_variables.h
@@ -0,0 +1,35 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2011 The ChromiumOS Authors.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
+ */
+
+#ifndef ARCH_EARLY_VARIABLES_H
+#define ARCH_EARLY_VARIABLES_H
+
+#if CONFIG_CAR_MIGRATION
+	#error "This is ARM, silly... we don't have CAR here."
+#endif
+
+#define CAR_GLOBAL
+
+#define CAR_MIGRATE(migrate_fn_)
+static inline void *car_get_var_ptr(void *var) { return var; }
+#define car_get_var(var) (var)
+#define car_set_var(var, val) do { (var) = (val); } while (0)
+static inline void car_migrate_variables(void) { }
+
+#endif
diff --git a/src/arch/arm64/include/arch/hlt.h b/src/arch/arm64/include/arch/hlt.h
new file mode 100644
index 0000000..285b6f8
--- /dev/null
+++ b/src/arch/arm64/include/arch/hlt.h
@@ -0,0 +1,9 @@
+#ifndef ARCH_HLT_H
+#define ARCH_HLT_H
+
+static inline __attribute__((always_inline)) void hlt(void)
+{
+	for (;;) ;
+}
+
+#endif /* ARCH_HLT_H */
diff --git a/src/arch/arm64/include/arch/io.h b/src/arch/arm64/include/arch/io.h
new file mode 100644
index 0000000..49d851d
--- /dev/null
+++ b/src/arch/arm64/include/arch/io.h
@@ -0,0 +1,137 @@
+/*
+ * Based on (linux) arch/arm/include/asm/io.h
+ *
+ * Copyright (C) 1996-2000 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright 2013 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_ARM_IO_H
+#define __ASM_ARM_IO_H
+
+#include <types.h>
+#include <arch/byteorder.h>
+#include <arch/barrier.h>
+
+/*
+ * Generic IO read/write.  These perform native-endian accesses.
+ */
+static inline void __raw_writeb(u8 val, volatile void *addr)
+{
+	asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr));
+}
+
+static inline void __raw_writew(u16 val, volatile void *addr)
+{
+	asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr));
+}
+
+static inline void __raw_writel(u32 val, volatile void *addr)
+{
+	asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr));
+}
+
+static inline void __raw_writeq(u64 val, volatile void *addr)
+{
+	asm volatile("str %0, [%1]" : : "r" (val), "r" (addr));
+}
+
+static inline u8 __raw_readb(const volatile void *addr)
+{
+	u8 val;
+	asm volatile("ldrb %w0, [%1]" : "=r" (val) : "r" (addr));
+	return val;
+}
+
+static inline u16 __raw_readw(const volatile void *addr)
+{
+	u16 val;
+	asm volatile("ldrh %w0, [%1]" : "=r" (val) : "r" (addr));
+	return val;
+}
+
+static inline u32 __raw_readl(const volatile void *addr)
+{
+	u32 val;
+	asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
+	return val;
+}
+
+static inline u64 __raw_readq(const volatile void *addr)
+{
+	u64 val;
+	asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr));
+	return val;
+}
+
+/* IO barriers */
+#define __iormb()		rmb()
+#define __iowmb()		wmb()
+
+#define mmiowb()		do { } while (0)
+
+/*
+ * Relaxed I/O memory access primitives. These follow the Device memory
+ * ordering rules but do not guarantee any ordering relative to Normal memory
+ * accesses.
+ */
+#define readb_relaxed(c)	({ u8  __u = __raw_readb(c); __u; })
+#define readw_relaxed(c)	({ u16 __u = le16_to_cpu((__force __le16)__raw_readw(c)); __u; })
+#define readl_relaxed(c)	({ u32 __u = le32_to_cpu((__force __le32)__raw_readl(c)); __u; })
+
+#define writeb_relaxed(v,c)	((void)__raw_writeb((v),(c)))
+#define writew_relaxed(v,c)	((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
+#define writel_relaxed(v,c)	((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
+
+/*
+ * I/O memory access primitives. Reads are ordered relative to any
+ * following Normal memory access. Writes are ordered relative to any prior
+ * Normal memory access.
+ */
+#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
+#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+
+#define writeb(v,c)		({ __iowmb(); writeb_relaxed((v),(c)); })
+#define writew(v,c)		({ __iowmb(); writew_relaxed((v),(c)); })
+#define writel(v,c)		({ __iowmb(); writel_relaxed((v),(c)); })
+
+#define inb_p(addr)	inb(addr)
+#define inw_p(addr)	inw(addr)
+#define inl_p(addr)	inl(addr)
+
+#define outb_p(x, addr)	outb((x), (addr))
+#define outw_p(x, addr)	outw((x), (addr))
+#define outl_p(x, addr)	outl((x), (addr))
+
+#define insb_p(port,to,len)	insb(port,to,len)
+#define insw_p(port,to,len)	insw(port,to,len)
+#define insl_p(port,to,len)	insl(port,to,len)
+
+#define outsb_p(port,from,len)	outsb(port,from,len)
+#define outsw_p(port,from,len)	outsw(port,from,len)
+#define outsl_p(port,from,len)	outsl(port,from,len)
+
+/*
+ * String version of I/O memory access operations.
+ */
+extern void __memcpy_fromio(void *, const volatile void *, size_t);
+extern void __memcpy_toio(volatile void *, const void *, size_t);
+extern void __memset_io(volatile void *, int, size_t);
+
+#define memset_io(c,v,l)	__memset_io((c),(v),(l))
+#define memcpy_fromio(a,c,l)	__memcpy_fromio((a),(c),(l))
+#define memcpy_toio(c,a,l)	__memcpy_toio((c),(a),(l))
+
+#endif	/* __ASM_ARM_IO_H */
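
Purely illustrative, not part of the patch: a minimal sketch of how the ordered
accessors above might drive a memory-mapped UART. The register addresses and
status bit are hypothetical placeholders, not taken from any real SoC.

#include <arch/io.h>

/* Hypothetical MMIO registers, for illustration only. */
#define UART_DATA	((void *)0x10009000)
#define UART_STAT	((void *)0x10009018)
#define UART_TX_FULL	(1u << 5)	/* hypothetical "TX FIFO full" flag */

static void uart_putc_sketch(char c)
{
	/* readl() issues __iormb() after the load and writeb() issues
	 * __iowmb() before the store, keeping the device access ordered
	 * against surrounding Normal memory accesses. */
	while (readl(UART_STAT) & UART_TX_FULL)
		;
	writeb(c, UART_DATA);
}
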
diff --git a/src/arch/arm64/include/arch/pci_ops.h b/src/arch/arm64/include/arch/pci_ops.h
new file mode 100644
index 0000000..26a2c9b
--- /dev/null
+++ b/src/arch/arm64/include/arch/pci_ops.h
@@ -0,0 +1,29 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2013 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef ARCH_ARM64_PCI_OPS_H
+#define ARCH_ARM64_PCI_OPS_H
+
+/* V8 has PCI in some form. We will need to fill this in. */
+static inline const struct pci_bus_operations *pci_config_default(void)
+{
+	return NULL;
+}
+
+#endif
diff --git a/src/arch/arm64/include/arch/stages.h b/src/arch/arm64/include/arch/stages.h
new file mode 100644
index 0000000..e7a2401
--- /dev/null
+++ b/src/arch/arm64/include/arch/stages.h
@@ -0,0 +1,29 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2013 The ChromiumOS Authors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __ARCH_STAGES_H
+#define __ARCH_STAGES_H
+
+extern void main(void);
+
+void stage_entry(void) __attribute__((section(".text.stage_entry.aarch64")));
+void stage_exit(void *);
+void jmp_to_elf_entry(void *entry, unsigned long buffer, unsigned long size);
+
+#endif
diff --git a/src/arch/arm64/include/armv8/arch/barrier.h b/src/arch/arm64/include/armv8/arch/barrier.h
new file mode 100644
index 0000000..dfcf5a5
--- /dev/null
+++ b/src/arch/arm64/include/armv8/arch/barrier.h
@@ -0,0 +1,52 @@
+/*
+ * Based on arch/arm/include/asm/barrier.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_ARM_BARRIER_H
+#define __ASM_ARM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#define sev()		asm volatile("sev" : : : "memory")
+#define wfe()		asm volatile("wfe" : : : "memory")
+#define wfi()		asm volatile("wfi" : : : "memory")
+
+#define isb()		asm volatile("isb" : : : "memory")
+#define dsb()		asm volatile("dsb sy" : : : "memory")
+
+#define mb()		dsb()
+#define rmb()		asm volatile("dsb ld" : : : "memory")
+#define wmb()		asm volatile("dsb st" : : : "memory")
+
+#ifndef CONFIG_SMP
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#else
+#define smp_mb()	asm volatile("dmb ish" : : : "memory")
+#define smp_rmb()	asm volatile("dmb ishld" : : : "memory")
+#define smp_wmb()	asm volatile("dmb ishst" : : : "memory")
+#endif
+
+#define read_barrier_depends()		do { } while(0)
+#define smp_read_barrier_depends()	do { } while(0)
+
+#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
+#define nop()		asm volatile("nop");
+
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* __ASM_ARM_BARRIER_H */
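
Illustrative only, not part of the patch: a hedged sketch of the message-passing
pattern wmb()/rmb() above are meant for. The shared variables and the two-core
setup are assumptions made for the example.

#include <arch/barrier.h>

/* Hypothetical state shared between two observers (e.g. two cores). */
static volatile unsigned int shared_data;
static volatile int shared_flag;

static void publish_sketch(unsigned int value)
{
	shared_data = value;
	wmb();			/* "dsb st": data becomes visible before the flag */
	shared_flag = 1;
}

static unsigned int consume_sketch(void)
{
	while (!shared_flag)
		;
	rmb();			/* "dsb ld": flag is observed before the data is read */
	return shared_data;
}
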
diff --git a/src/arch/arm64/include/armv8/arch/cache.h b/src/arch/arm64/include/armv8/arch/cache.h
new file mode 100644
index 0000000..325b857
--- /dev/null
+++ b/src/arch/arm64/include/armv8/arch/cache.h
@@ -0,0 +1,262 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2013 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * cache.h: Cache maintenance API for ARM64
+ */
+
+#ifndef ARM_ARM64_CACHE_H
+#define ARM_ARM64_CACHE_H
+
+#include <config.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/* SCTLR_ELx common bits */
+#define SCTLR_M		(1 << 0)	/* MMU enable			*/
+#define SCTLR_A		(1 << 1)	/* Alignment check enable	*/
+#define SCTLR_C		(1 << 2)	/* Data/unified cache enable	*/
+#define SCTLR_SA	(1 << 3)	/* Stack alignment check enable	*/
+#define SCTLR_I		(1 << 12)	/* Instruction cache enable	*/
+#define SCTLR_WXN	(1 << 19)	/* Write permission implies XN	*/
+#define SCTLR_EE	(1 << 25)	/* Exception endianness		*/
+
+/* SCTLR_EL1 bits */
+#define SCTLR_EL1_CP15B	(1 << 5)	/* CP15 barrier enable		*/
+#define SCTLR_EL1_ITD	(1 << 7)	/* IT disable			*/
+#define SCTLR_EL1_SED	(1 << 8)	/* SETEND disable		*/
+#define SCTLR_EL1_UMA	(1 << 9)	/* User mask access		*/
+#define SCTLR_EL1_DZE	(1 << 14)	/* DC ZVA instruction at EL0	*/
+#define SCTLR_EL1_UCT	(1 << 15)	/* CTR_EL0 register EL0 access	*/
+#define SCTLR_EL1_NTWI	(1 << 16)	/* Not trap WFI		 	*/
+#define SCTLR_EL1_NTWE	(1 << 18)	/* Not trap WFE		 	*/
+#define SCTLR_EL1_E0E	(1 << 24)	/* Exception endianness at EL0	*/
+#define SCTLR_EL1_UCI	(1 << 26)	/* EL0 access to cache instructions */
+
+/*
+ * Utility macro that emits one of three instruction variants according to the
+ * exception level (EL) passed; the EL number is concatenated between the insa
+ * and insb parts.
+ */
+#define SWITCH_EL(insa, insb, el) if (el == 1) asm volatile(insa "1" insb); \
+	else if (el == 2) asm volatile (insa "2" insb); \
+	else asm volatile (insa "3" insb)
+
+/* get current exception level (EL1-EL3) */
+static inline uint32_t current_el(void)
+{
+	uint32_t el;
+	asm volatile ("mrs %0, CurrentEL" : "=r" (el));
+	return el >> 2;
+}
+
+/*
+ * Sync primitives
+ */
+
+/* data memory barrier */
+static inline void dmb(void)
+{
+	asm volatile ("dmb sy" : : : "memory");
+}
+
+/* data sync barrier */
+static inline void dsb(void)
+{
+	asm volatile ("dsb sy" : : : "memory");
+}
+
+/* instruction sync barrier */
+static inline void isb(void)
+{
+	asm volatile ("isb sy" : : : "memory");
+}
+
+/*
+ * Low-level TLB maintenance operations
+ */
+
+/* invalidate entire unified TLB */
+static inline void tlbiall(uint32_t el)
+{
+	SWITCH_EL("tlbi alle", : : : "memory", el);
+}
+
+/* invalidate unified TLB by VA, all ASID (EL1) */
+static inline void tlbivaa(uint64_t va)
+{
+	asm volatile("tlbi vaae1, %0" : : "r" (va) : "memory");
+}
+
+/* write translation table base register 0 (TTBR0_ELx) */
+static inline void write_ttbr0(uint64_t val, uint32_t el)
+{
+	SWITCH_EL("msr ttbr0_el", ", %0" : : "r" (val) : "memory", el);
+}
+
+/* read translation control register (TCR_ELx) */
+static inline uint64_t read_tcr(uint32_t el)
+{
+	uint64_t val = 0;
+	SWITCH_EL("mrs %0, tcr_el", : "=r" (val), el);
+	return val;
+}
+
+/* write translation control register (TCR_ELx) */
+static inline void write_tcr(uint64_t val, uint32_t el)
+{
+	SWITCH_EL("msr tcr_el", ", %0" : : "r" (val) : "memory", el);
+}
+
+/*
+ * Low-level cache maintenance operations
+ */
+
+/* data cache clean and invalidate by VA to PoC */
+static inline void dccivac(uint64_t va)
+{
+	asm volatile ("dc civac, %0" : : "r" (va) : "memory");
+}
+
+/* data cache clean and invalidate by set/way */
+static inline void dccisw(uint64_t val)
+{
+	asm volatile ("dc cisw, %0" : : "r" (val) : "memory");
+}
+
+/* data cache clean by VA to PoC */
+static inline void dccvac(uint64_t va)
+{
+	asm volatile ("dc cvac, %0" : : "r" (va) : "memory");
+}
+
+/* data cache clean by set/way */
+static inline void dccsw(uint64_t val)
+{
+	asm volatile ("dc csw, %0" : : "r" (val) : "memory");
+}
+
+/* data cache invalidate by VA to PoC */
+static inline void dcivac(uint64_t va)
+{
+	asm volatile ("dc ivac, %0" : : "r" (va) : "memory");
+}
+
+/* data cache invalidate by set/way */
+static inline void dcisw(uint64_t val)
+{
+	asm volatile ("dc isw, %0" : : "r" (val) : "memory");
+}
+
+/* instruction cache invalidate all */
+static inline void iciallu(void)
+{
+	asm volatile ("ic iallu" : : : "memory");
+}
+
+/*
+ * Cache register functions
+ */
+
+/* read cache level ID register (CLIDR_EL1) */
+static inline uint32_t read_clidr(void)
+{
+	uint32_t val = 0;
+	asm volatile ("mrs %0, clidr_el1" : "=r" (val));
+	return val;
+}
+
+/* read cache size ID register (CCSIDR_EL1) */
+static inline uint32_t read_ccsidr(void)
+{
+	uint32_t val = 0;
+	asm volatile ("mrs %0, ccsidr_el1" : "=r" (val));
+	return val;
+}
+
+/* read cache size selection register (CSSELR_EL1) */
+static inline uint32_t read_csselr(void)
+{
+	uint32_t val = 0;
+	asm volatile ("mrs %0, csselr_el1" : "=r" (val));
+	return val;
+}
+
+/* write to cache size selection register (CSSELR_EL1) */
+static inline void write_csselr(uint32_t val)
+{
+	/*
+	 * Bits [3:1] - Cache level + 1 (0b000 = L1, 0b110 = L7, 0b111 is rsvd)
+	 * Bit 0 - 0 = data or unified cache, 1 = instruction cache
+	 */
+	asm volatile ("msr csselr_el1, %0" : : "r" (val));
+	isb();	/* ISB to sync the change to CCSIDR_EL1 */
+}
+
+/* read system control register (SCTLR_ELx) */
+static inline uint32_t read_sctlr(uint32_t el)
+{
+	uint32_t val;
+	SWITCH_EL("mrs %0, sctlr_el", : "=r" (val), el);
+	return val;
+}
+
+/* write system control register (SCTLR_ELx) */
+static inline void write_sctlr(uint32_t val, uint32_t el)
+{
+	SWITCH_EL("msr sctlr_el", ", %0" : : "r" (val) : "cc", el);
+	isb();
+}
+
+
+/* dcache clean by virtual address to PoC */
+void dcache_clean_by_va(void const *addr, size_t len);
+
+/* dcache clean and invalidate by virtual address to PoC */
+void dcache_clean_invalidate_by_va(void const *addr, size_t len);
+
+/* dcache invalidate by virtual address to PoC */
+void dcache_invalidate_by_va(void const *addr, size_t len);
+
+/* dcache invalidate all */
+void flush_dcache_all(void);
+
+/* returns number of bytes per cache line */
+unsigned int dcache_line_bytes(void);
+
+/* dcache and MMU disable */
+void dcache_mmu_disable(void);
+
+/* dcache and MMU enable */
+void dcache_mmu_enable(void);
+
+/* perform all icache/dcache maintenance needed after loading new code */
+void cache_sync_instructions(void);
+
+/* tlb invalidate all */
+void tlb_invalidate_all(void);
+
+#endif /* ARM_ARM64_CACHE_H */
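
Illustrative only: a hedged sketch of how the accessors above combine to switch
the instruction cache on at the current exception level. It is not a copy of
the patch's armv8/cache.c; the function name is made up for the example.

#include <arch/cache.h>

static void icache_enable_sketch(void)
{
	uint32_t el = current_el();		/* 1, 2 or 3 */
	uint32_t sctlr = read_sctlr(el);	/* SWITCH_EL picks sctlr_el1/2/3 */

	iciallu();				/* invalidate the whole icache first */
	dsb();
	isb();
	write_sctlr(sctlr | SCTLR_I, el);	/* set the I bit; write_sctlr() ends with an isb() */
}
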
diff --git a/src/arch/arm64/include/armv8/arch/cpu.h b/src/arch/arm64/include/armv8/arch/cpu.h
new file mode 100644
index 0000000..5a8f145
--- /dev/null
+++ b/src/arch/arm64/include/armv8/arch/cpu.h
@@ -0,0 +1,52 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2012 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
+ */
+
+#ifndef __ARCH_CPU_H__
+#define __ARCH_CPU_H__
+
+#define asmlinkage
+
+#if !defined(__PRE_RAM__)
+#include <device/device.h>
+
+struct cpu_driver {
+	struct device_operations *ops;
+	struct cpu_device_id *id_table;
+};
+
+struct thread;
+
+struct cpu_info {
+	device_t cpu;
+	unsigned long index;
+#if CONFIG_COOP_MULTITASKING
+	struct thread *thread;
+#endif
+};
+
+struct cpuinfo_arm {
+	uint8_t arm;		/* CPU family */
+	uint8_t arm_vendor;	/* CPU vendor */
+	uint8_t arm_model;
+};
+
+#endif
+
+struct cpu_info *cpu_info(void);
+#endif /* __ARCH_CPU_H__ */
diff --git a/src/arch/arm64/include/armv8/arch/exception.h b/src/arch/arm64/include/armv8/arch/exception.h
new file mode 100644
index 0000000..5987d85
--- /dev/null
+++ b/src/arch/arm64/include/armv8/arch/exception.h
@@ -0,0 +1,38 @@
+/*
+ * This file is part of the libpayload project.
+ *
+ * Copyright 2013 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ARCH_EXCEPTION_H
+#define _ARCH_EXCEPTION_H
+
+#include <stdint.h>
+
+void exception_init(void);
+void set_vbar(uint64_t vbar);
+
+#endif
diff --git a/src/arch/arm64/include/armv8/arch/rules.h b/src/arch/arm64/include/armv8/arch/rules.h
new file mode 100644
index 0000000..a790365
--- /dev/null
+++ b/src/arch/arm64/include/armv8/arch/rules.h
@@ -0,0 +1,34 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _ARCH_RULES_H
+#define _ARCH_RULES_H
+
+/* For romstage and ramstage, always build with the simple device model, i.e.
+ * PCI, PNP and CPU functions operate without use of the devicetree.
+ *
+ * For ramstage, an individual source file may define __SIMPLE_DEVICE__
+ * before including any header files to force that particular file to be
+ * built with the simple device model.
+ */
+
+#if defined(__PRE_RAM__)
+#define __SIMPLE_DEVICE__
+#endif
+
+#endif /* _ARCH_RULES_H */
diff --git a/src/arch/arm64/include/bootblock_common.h b/src/arch/arm64/include/bootblock_common.h
new file mode 100644
index 0000000..2fa705f
--- /dev/null
+++ b/src/arch/arm64/include/bootblock_common.h
@@ -0,0 +1,11 @@
+#ifdef CONFIG_BOOTBLOCK_CPU_INIT
+#include CONFIG_BOOTBLOCK_CPU_INIT
+#endif
+
+#ifdef CONFIG_BOOTBLOCK_MAINBOARD_INIT
+#include CONFIG_BOOTBLOCK_MAINBOARD_INIT
+#else
+static void bootblock_mainboard_init(void)
+{
+}
+#endif
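
Illustrative only: the Kconfig symbols above name C files that are textually
included into the bootblock, so a board can override the empty default. A
hypothetical override might look like the sketch below; the file path and the
helper it calls are assumptions, not part of this patch.

/* hypothetical src/mainboard/<vendor>/<board>/bootblock.c,
 * selected via CONFIG_BOOTBLOCK_MAINBOARD_INIT */
void board_early_uart_setup(void);	/* hypothetical helper */

static void bootblock_mainboard_init(void)
{
	/* e.g. route the UART pads so early console output works */
	board_early_uart_setup();
}
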
diff --git a/src/arch/arm64/include/clocks.h b/src/arch/arm64/include/clocks.h
new file mode 100644
index 0000000..8f35303
--- /dev/null
+++ b/src/arch/arm64/include/clocks.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2011 The Chromium OS Authors.
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/* Standard clock speeds */
+
+/*
+ * We define some commonly-used clock speeds to avoid errors, since long
+ * numbers are hard to read.
+ *
+ * The format of the label is
+ * CLK_x_yU where:
+ *	x is the integer speed
+ *	y is the fractional part which can be omitted if 0
+ *	U is the units (blank for Hz, K or M for kHz and MHz)
+ *
+ * Please order the items by increasing Hz
+ */
+enum {
+	CLK_32768	= 32768,
+	CLK_20M		= 20000000,
+	CLK_24M		= 24000000,
+	CLK_144M	= 144000000,
+	CLK_216M	= 216000000,
+	CLK_300M	= 300000000,
+};
+
diff --git a/src/arch/arm64/include/smp/spinlock.h b/src/arch/arm64/include/smp/spinlock.h
new file mode 100644
index 0000000..8a89d1f
--- /dev/null
+++ b/src/arch/arm64/include/smp/spinlock.h
@@ -0,0 +1,6 @@
+#ifndef ARCH_SMP_SPINLOCK_H
+#define ARCH_SMP_SPINLOCK_H
+
+#error "spinlocks: implement this for ARM64"
+
+#endif
diff --git a/src/arch/arm64/include/stdint.h b/src/arch/arm64/include/stdint.h
new file mode 100644
index 0000000..2907d8e
--- /dev/null
+++ b/src/arch/arm64/include/stdint.h
@@ -0,0 +1,60 @@
+#ifndef ARM64_STDINT_H
+#define ARM64_STDINT_H
+
+/* Exact integral types */
+typedef unsigned char      uint8_t;
+typedef signed char        int8_t;
+
+typedef unsigned short     uint16_t;
+typedef signed short       int16_t;
+
+typedef unsigned int       uint32_t;
+typedef signed int         int32_t;
+
+typedef unsigned long long uint64_t;
+typedef signed long long   int64_t;
+
+/* Small types */
+typedef unsigned char      uint_least8_t;
+typedef signed char        int_least8_t;
+
+typedef unsigned short     uint_least16_t;
+typedef signed short       int_least16_t;
+
+typedef unsigned int       uint_least32_t;
+typedef signed int         int_least32_t;
+
+typedef unsigned long long uint_least64_t;
+typedef signed long long   int_least64_t;
+
+/* Fast Types */
+typedef unsigned char      uint_fast8_t;
+typedef signed char        int_fast8_t;
+
+typedef unsigned int       uint_fast16_t;
+typedef signed int         int_fast16_t;
+
+typedef unsigned int       uint_fast32_t;
+typedef signed int         int_fast32_t;
+
+typedef unsigned long long uint_fast64_t;
+typedef signed long long   int_fast64_t;
+
+typedef long long int      intmax_t;
+typedef unsigned long long uintmax_t;
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+
+
+/* Types for `void *' pointers.  */
+typedef s64             intptr_t;
+typedef u64		uintptr_t;
+
+#endif /* ARM64_STDINT_H */
diff --git a/src/arch/arm64/memcpy.S b/src/arch/arm64/memcpy.S
new file mode 100644
index 0000000..ccad4cf
--- /dev/null
+++ b/src/arch/arm64/memcpy.S
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <arch/asm.h>
+
+/*
+ * Copy a buffer from src to dest (alignment handled by the hardware)
+ *
+ * Parameters:
+ *	x0 - dest
+ *	x1 - src
+ *	x2 - n
+ * Returns:
+ *	x0 - dest
+ */
+ENTRY(memcpy)
+	mov	x4, x0
+	subs	x2, x2, #8
+	b.mi	2f
+1:	ldr	x3, [x1], #8
+	subs	x2, x2, #8
+	str	x3, [x4], #8
+	b.pl	1b
+2:	adds	x2, x2, #4
+	b.mi	3f
+	ldr	w3, [x1], #4
+	sub	x2, x2, #4
+	str	w3, [x4], #4
+3:	adds	x2, x2, #2
+	b.mi	4f
+	ldrh	w3, [x1], #2
+	sub	x2, x2, #2
+	strh	w3, [x4], #2
+4:	adds	x2, x2, #1
+	b.mi	5f
+	ldrb	w3, [x1]
+	strb	w3, [x4]
+5:	ret
+ENDPROC(memcpy)
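
For readers less familiar with A64 assembly, here is a hedged C rendering of
the copy strategy above: bulk 8-byte copies, then at most one 4-, 2- and 1-byte
tail copy. The real routine relies on the hardware to handle unaligned
accesses, so the casts below are only a sketch.

#include <stddef.h>
#include <stdint.h>

static void *memcpy_sketch(void *dest, const void *src, size_t n)
{
	uint8_t *d = dest;
	const uint8_t *s = src;

	while (n >= 8) {			/* label 1: 8 bytes per iteration */
		*(uint64_t *)d = *(const uint64_t *)s;
		d += 8; s += 8; n -= 8;
	}
	if (n >= 4) {				/* label 2 */
		*(uint32_t *)d = *(const uint32_t *)s;
		d += 4; s += 4; n -= 4;
	}
	if (n >= 2) {				/* label 3 */
		*(uint16_t *)d = *(const uint16_t *)s;
		d += 2; s += 2; n -= 2;
	}
	if (n)					/* label 4 */
		*d = *s;
	return dest;
}
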
diff --git a/src/arch/arm64/memmove.S b/src/arch/arm64/memmove.S
new file mode 100644
index 0000000..1f9e263
--- /dev/null
+++ b/src/arch/arm64/memmove.S
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <arch/asm.h>
+/*
+ * Move a buffer from src to dest (alignment handled by the hardware).
+ * If dest <= src, call memcpy, otherwise copy in reverse order.
+ *
+ * Parameters:
+ *	x0 - dest
+ *	x1 - src
+ *	x2 - n
+ * Returns:
+ *	x0 - dest
+ */
+ENTRY(memmove)
+	cmp	x0, x1
+	b.ls	memcpy
+	add	x4, x0, x2
+	add	x1, x1, x2
+	subs	x2, x2, #8
+	b.mi	2f
+1:	ldr	x3, [x1, #-8]!
+	subs	x2, x2, #8
+	str	x3, [x4, #-8]!
+	b.pl	1b
+2:	adds	x2, x2, #4
+	b.mi	3f
+	ldr	w3, [x1, #-4]!
+	sub	x2, x2, #4
+	str	w3, [x4, #-4]!
+3:	adds	x2, x2, #2
+	b.mi	4f
+	ldrh	w3, [x1, #-2]!
+	sub	x2, x2, #2
+	strh	w3, [x4, #-2]!
+4:	adds	x2, x2, #1
+	b.mi	5f
+	ldrb	w3, [x1, #-1]
+	strb	w3, [x4, #-1]
+5:	ret
+ENDPROC(memmove)
diff --git a/src/arch/arm64/memset.S b/src/arch/arm64/memset.S
new file mode 100644
index 0000000..0cea55d
--- /dev/null
+++ b/src/arch/arm64/memset.S
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <arch/asm.h>
+
+/*
+ * Fill in the buffer with character c (alignment handled by the hardware)
+ *
+ * Parameters:
+ *	x0 - buf
+ *	x1 - c
+ *	x2 - n
+ * Returns:
+ *	x0 - buf
+ */
+ENTRY(memset)
+	mov	x4, x0
+	and	w1, w1, #0xff
+	orr	w1, w1, w1, lsl #8
+	orr	w1, w1, w1, lsl #16
+	orr	x1, x1, x1, lsl #32
+	subs	x2, x2, #8
+	b.mi	2f
+1:	str	x1, [x4], #8
+	subs	x2, x2, #8
+	b.pl	1b
+2:	adds	x2, x2, #4
+	b.mi	3f
+	sub	x2, x2, #4
+	str	w1, [x4], #4
+3:	adds	x2, x2, #2
+	b.mi	4f
+	sub	x2, x2, #2
+	strh	w1, [x4], #2
+4:	adds	x2, x2, #1
+	b.mi	5f
+	strb	w1, [x4]
+5:	ret
+ENDPROC(memset)
diff --git a/src/arch/arm64/ramstage.ld b/src/arch/arm64/ramstage.ld
new file mode 100644
index 0000000..e878820
--- /dev/null
+++ b/src/arch/arm64/ramstage.ld
@@ -0,0 +1,136 @@
+/*
+ *	Memory map:
+ *
+ *	CONFIG_RAMBASE		: text segment
+ *				: rodata segment
+ *				: data segment
+ *				: bss segment
+ *				: stack
+ *				: heap
+ */
+/*
+ * Copyright 2013 Google Inc.
+ * Bootstrap code for the STPC Consumer
+ * Copyright (c) 1999 by Net Insight AB. All Rights Reserved.
+ */
+
+/*
+ *	Written by Johan Rydberg, based on work by Daniel Kahlin.
+ *      Rewritten by Eric Biederman
+ *  2005.12 yhlu add ramstage cross the vga font buffer handling
+ */
+
+/* We use ELF as output format so that we can debug the code in some form. */
+INCLUDE ldoptions
+
+ENTRY(stage_entry)
+
+PHDRS
+{
+	to_load PT_LOAD;
+}
+
+SECTIONS
+{
+	. = CONFIG_SYS_SDRAM_BASE;
+	/* First we place the code and read only data (typically const declared).
+	 * This could theoretically be placed in rom.
+	 */
+	.text : {
+		_text = .;
+		_start = .;
+		*(.text.stage_entry.aarch64);
+		*(.text);
+		*(.text.*);
+		. = ALIGN(16);
+		_etext = .;
+	} : to_load
+
+	.ctors : {
+		. = ALIGN(0x100);
+		__CTOR_LIST__ = .;
+		*(.ctors);
+		LONG(0);
+		__CTOR_END__ = .;
+	}
+
+	.rodata : {
+		_rodata = .;
+		. = ALIGN(4);
+		console_drivers = .;
+		*(.rodata.console_drivers)
+		econsole_drivers = . ;
+		. = ALIGN(4);
+		pci_drivers = . ;
+		*(.rodata.pci_driver)
+		epci_drivers = . ;
+		cpu_drivers = . ;
+		*(.rodata.cpu_driver)
+		ecpu_drivers = . ;
+		_bs_init_begin = .;
+		*(.bs_init)
+		_bs_init_end = .;
+		*(.rodata)
+		*(.rodata.*)
+		/* kevinh/Ispiri - Added an align, because the objcopy tool
+		 * incorrectly converts sections that are not long word aligned.
+		 */
+		 . = ALIGN(4);
+
+		_erodata = .;
+	}
+	/* After the code we place initialized data (typically initialized
+	 * global variables). This gets copied into RAM by startup code.
+	 * __data_start and __data_end show where in RAM this should be placed,
+	 * whereas __data_loadstart and __data_loadend show where in ROM to
+	 * copy from.
+	 */
+	.data : {
+		_data = .;
+		*(.data)
+		_edata = .;
+	}
+
+	/* .bss does not contain data; it is just space that should be
+	 * zero-initialized on startup (typically uninitialized global
+	 * variables). crt0.S fills the area between _bss and _ebss with zeroes.
+	 */
+	_bss = .;
+	.bss . : {
+		*(.bss)
+		*(.sbss)
+		*(COMMON)
+	}
+	_ebss = .;
+	_end = .;
+
+	/* coreboot really "ends" here. Only heap and stack are placed after
+	 * this line.
+	 */
+
+	_heap = .;
+	.heap . : {
+		/* Reserve CONFIG_HEAP_SIZE bytes for the heap. */
+		. = CONFIG_HEAP_SIZE;
+		. = ALIGN(4);
+	}
+	_eheap = .;
+
+	_stack = CONFIG_STACK_BOTTOM;
+	_estack = CONFIG_STACK_TOP;
+
+	/* The ram segment. This includes all memory used by the memory
+	 * resident copy of coreboot, except the tables that are produced on
+	 * the fly, but including stack and heap.
+	 */
+	_ram_seg = _text;
+	_eram_seg = _eheap;
+
+	/* Discard the sections we don't need/want */
+
+	/DISCARD/ : {
+		*(.comment)
+		*(.note)
+		*(.note.*)
+	}
+}
diff --git a/src/arch/arm64/romstage.ld b/src/arch/arm64/romstage.ld
new file mode 100644
index 0000000..d05fdd1
--- /dev/null
+++ b/src/arch/arm64/romstage.ld
@@ -0,0 +1,89 @@
+/*
+ *	Memory map:
+ *
+ *	CONFIG_ROMSTAGE_BASE	: text segment
+ *				: rodata segment
+ *				: data segment
+ *				: bss segment
+ *				: stack
+ *				: heap
+ */
+/*
+ * Bootstrap code for the STPC Consumer
+ * Copyright (c) 1999 by Net Insight AB. All Rights Reserved.
+ */
+
+/*
+ *	Written by Johan Rydberg, based on work by Daniel Kahlin.
+ *      Rewritten by Eric Biederman
+ *  2005.12 yhlu add ramstage cross the vga font buffer handling
+ */
+
+/* We use ELF as output format so that we can debug the code in some form. */
+OUTPUT_FORMAT("elf64-littleaarch64", "elf64-littleaarch64", "elf64-littleaarch64")
+OUTPUT_ARCH(aarch64)
+INCLUDE ldoptions
+
+PHDRS
+{
+	to_load PT_LOAD;
+}
+
+ENTRY(stage_entry)
+
+SECTIONS
+{
+	. = CONFIG_ROMSTAGE_BASE;
+
+	.romtext . : {
+		_start = .;
+		*(.text.stage_entry.aarch64);
+		*(.text.startup);
+		*(.text);
+		*(.text.*);
+	} : to_load
+
+	.romdata . : {
+		*(.rodata);
+		*(.rodata.*);
+		*(.machine_param);
+		*(.data);
+		*(.data.*);
+		. = ALIGN(8);
+		_erom = .;
+	}
+
+	__image_copy_end = .;
+
+	/* .bss does not contain data; it is just space that should be
+	 * zero-initialized on startup (typically uninitialized global
+	 * variables). crt0.S fills the area between _bss and _ebss with zeroes.
+	 */
+	.bss . : {
+		. = ALIGN(8);
+		_bss = .;
+		*(.bss)
+		*(.bss.*)
+		*(.sbss)
+		*(.sbss.*)
+		_ebss = .;
+	}
+
+	_end = .;
+
+	preram_cbmem_console = CONFIG_CBMEM_CONSOLE_PRERAM_BASE;
+
+	/* Discard the sections we don't need/want */
+	/DISCARD/ : {
+		*(.comment)
+		*(.note)
+		*(.comment.*)
+		*(.note.*)
+		*(.eh_frame);
+	}
+}
diff --git a/src/arch/arm64/stages.c b/src/arch/arm64/stages.c
new file mode 100644
index 0000000..e1ab21a
--- /dev/null
+++ b/src/arch/arm64/stages.c
@@ -0,0 +1,53 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * This file contains entry/exit functions for each stage during coreboot
+ * execution (bootblock entry and ramstage exit will depend on external
+ * loading).
+ *
+ * Entry points must be placed at the location the previous stage jumps
+ * to (the lowest address in the stage image). This is done by giving
+ * stage_entry() its own section in .text and placing it first in the
+ * linker script.
+ */
+
+#include <arch/stages.h>
+#include <arch/cache.h>
+
+void stage_entry(void)
+{
+	main();
+}
+
+/* 'doit' was once marked 'noreturn'. There is no apparent harm in leaving it
+ * as something we can return from, and in the one case where we call a
+ * payload, the payload is allowed to return, so leave it that way.
+ */
+void stage_exit(void *addr)
+{
+	void (*doit)(void) = addr;
+	/*
+	 * Most stages load code so we need to sync caches here. Should maybe
+	 * go into cbfs_load_stage() instead...
+	 */
+	cache_sync_instructions();
+	doit();
+}
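
Illustrative only: a hedged sketch of how a caller might hand off to freshly
loaded code through stage_exit(); the loader function is a hypothetical
placeholder, not part of this patch.

#include <arch/stages.h>

void *load_next_stage(void);	/* hypothetical: loads the image, returns its entry point */

static void handoff_sketch(void)
{
	void *entry = load_next_stage();

	stage_exit(entry);	/* syncs the icache with the new code, then jumps */
}
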
diff --git a/src/arch/arm64/tables.c b/src/arch/arm64/tables.c
new file mode 100644
index 0000000..49fab9f
--- /dev/null
+++ b/src/arch/arm64/tables.c
@@ -0,0 +1,83 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (C) 2003 Eric Biederman
+ * Copyright (C) 2005 Steve Magnani
+ * Copyright (C) 2008-2009 coresystems GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <console/console.h>
+#include <cpu/cpu.h>
+#include <boot/tables.h>
+#include <boot/coreboot_tables.h>
+#include <string.h>
+#include <cbmem.h>
+#include <lib.h>
+
+#define MAX_COREBOOT_TABLE_SIZE (8 * 1024)
+
+#if ! CONFIG_DYNAMIC_CBMEM
+/*
+ * TODO: "High" tables are a convention used on x86. Maybe we can
+ * clean up that naming at some point.
+ */
+uint64_t high_tables_base = 0;
+uint64_t high_tables_size;
+#endif
+
+void cbmem_arch_init(void)
+{
+}
+
+struct lb_memory *write_tables(void)
+{
+	unsigned long table_pointer, new_table_pointer;
+
+#if ! CONFIG_DYNAMIC_CBMEM
+	if (!high_tables_base) {
+		printk(BIOS_ERR, "ERROR: high_tables_base is not set.\n");
+	}
+
+	printk(BIOS_DEBUG, "high_tables_base: %llx.\n", high_tables_base);
+#endif
+
+	post_code(0x9d);
+
+	table_pointer = (unsigned long)cbmem_add(CBMEM_ID_CBTABLE,
+						MAX_COREBOOT_TABLE_SIZE);
+	if (!table_pointer) {
+		printk(BIOS_ERR, "Could not add CBMEM for coreboot table.\n");
+		return NULL;
+	}
+
+	new_table_pointer = write_coreboot_table(0UL, 0UL,
+				table_pointer, table_pointer);
+
+	if (new_table_pointer > (table_pointer + MAX_COREBOOT_TABLE_SIZE)) {
+		printk(BIOS_ERR, "coreboot table didn't fit (%lx/%x bytes)\n",
+			   new_table_pointer - table_pointer, MAX_COREBOOT_TABLE_SIZE);
+	}
+
+	printk(BIOS_DEBUG, "coreboot table: %ld bytes.\n",
+			new_table_pointer - table_pointer);
+
+	post_code(0x9e);
+
+	/* Print CBMEM sections */
+	cbmem_list();
+
+	return get_lb_mem();
+}
diff --git a/src/arch/arm64/timestamp.c b/src/arch/arm64/timestamp.c
new file mode 100644
index 0000000..e6a8159
--- /dev/null
+++ b/src/arch/arm64/timestamp.c
@@ -0,0 +1,29 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2014 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
+ */
+
+#include <timestamp.h>
+#include <timer.h>
+
+uint64_t timestamp_get(void)
+{
+	struct mono_time timestamp;
+	timer_monotonic_get(&timestamp);
+	return (uint64_t)timestamp.microseconds;
+}
+
diff --git a/toolchain.inc b/toolchain.inc
index 326e474..89173d1 100644
--- a/toolchain.inc
+++ b/toolchain.inc
@@ -56,12 +56,15 @@ COREBOOT_STANDARD_STAGES := bootblock romstage ramstage
 ARCHDIR-i386    := x86
 ARCHDIR-x86_32  := x86
 ARCHDIR-arm     := arm
+ARCHDIR-arm64   := arm64
 
 CFLAGS_arm += \
 	-mno-unaligned-access\
 	-mthumb\
 	-mthumb-interwork
 
+CFLAGS_arm64 := -ffunction-sections -fdata-sections
+
 toolchain_to_dir = \
 	$(foreach arch,$(ARCH_SUPPORTED),\
 	$(eval CPPFLAGS_$(arch) += \
diff --git a/util/cbfstool/common.c b/util/cbfstool/common.c
index 07a098e..7347fcb 100644
--- a/util/cbfstool/common.c
+++ b/util/cbfstool/common.c
@@ -130,7 +130,7 @@ static struct {
 	uint32_t arch;
 	const char *name;
 } arch_names[] = {
-	{ CBFS_ARCHITECTURE_AARCH64, "aarch64" },
+	{ CBFS_ARCHITECTURE_AARCH64, "arm64" },
 	{ CBFS_ARCHITECTURE_ARM, "arm" },
 	{ CBFS_ARCHITECTURE_X86, "x86" },
 	{ CBFS_ARCHITECTURE_UNKNOWN, "unknown" }


