9ca8f72a92
As things currently stand, traditional EFI boot loaders and the EFI boot
stub are carrying essentially the same initialisation code required to
set up an EFI machine for booting a kernel. There's really no need to
have this code in two places, and the hope is that, with this new
protocol, initialisation and booting of the kernel can be left solely to
the kernel's EFI boot stub. The responsibilities of the boot loader then
become:

  o Loading the kernel image from boot media

    File system code still needs to be carried by boot loaders for the
    scenario where the kernel and initrd files reside on a file system
    that the EFI firmware doesn't natively understand, such as ext4, etc.

  o Providing a user interface

    Boot loaders still need to display any menus/interfaces, for example
    to allow the user to select from a list of kernels.

Bump the boot protocol number because we added the 'handover_offset'
field to indicate the location of the handover protocol entry point.

Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Peter Jones <pjones@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
Acked-and-Tested-by: Matthew Garrett <mjg@redhat.com>
Link: http://lkml.kernel.org/r/1342689828-16815-1-git-send-email-matt@console-pimps.org
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
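For context, here is a minimal sketch (not part of the patch, and not a
reference implementation) of how a boot loader might consume the new
'handover_offset' field. It assumes the kernel's own struct boot_params and
setup_header definitions from asm/bootparam.h; handover_to_kernel() and its
image_handle/system_table/kernel_base parameters are made-up names used
purely for illustration.

	#include <stdint.h>
	#include <asm/bootparam.h>	/* struct boot_params, setup_header */

	typedef void (*handover_entry_t)(void *image_handle,
					 void *system_table,
					 struct boot_params *bp);

	/*
	 * The loader is assumed to have already loaded the kernel image at
	 * kernel_base and filled in the usual setup_header fields in bp
	 * (cmd_line_ptr, ramdisk_image/ramdisk_size, ...).
	 */
	static void handover_to_kernel(void *image_handle, void *system_table,
				       struct boot_params *bp, void *kernel_base)
	{
		/*
		 * The handover entry point sits hdr.handover_offset bytes
		 * into the kernel image (a 64-bit loader starts 0x200
		 * further in).
		 */
		handover_entry_t entry = (handover_entry_t)
			((uint8_t *)kernel_base + bp->hdr.handover_offset);

		/* The EFI boot stub performs all remaining EFI setup. */
		entry(image_handle, system_table, bp);
	}

With something like this in place, the loader's remaining work really is just
the two items listed above: fetching the kernel and initrd from the file
system and presenting a menu; everything EFI-specific happens in the stub.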
		
			
				
	
	
		
/*
 *  linux/boot/head.S
 *
 *  Copyright (C) 1991, 1992, 1993  Linus Torvalds
 */

/*
 *  head.S contains the 32-bit startup code.
 *
 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
 * the page directory will exist. The startup code will be overwritten by
 * the page directory. [According to comments etc elsewhere on a compressed
 * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
 *
 * Page 0 is deliberately kept safe, since System Management Mode code in
 * laptops may need to access the BIOS data stored there.  This is also
 * useful for future device drivers that access the BIOS via VM86 mode.
 */

/*
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
	.text

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/boot.h>
#include <asm/asm-offsets.h>

	__HEAD
ENTRY(startup_32)
#ifdef CONFIG_EFI_STUB
	jmp	preferred_addr

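	/*
	 * PE/COFF entry point, used when the firmware runs the bzImage
	 * directly as an EFI application: no boot loader is involved, so
	 * boot_params must be built from scratch.
	 */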
	.balign	0x10
	/*
	 * We don't need the return address, so set up the stack so
	 * efi_main() can find its arguments.
	 */
	add	$0x4, %esp

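	/*
	 * Build a boot_params structure from the EFI environment; its
	 * address comes back in %eax (zero on failure).
	 */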
	call	make_boot_params
	cmpl	$0, %eax
	je	1f
	movl	0x4(%esp), %esi
	movl	(%esp), %ecx
	pushl	%eax
	pushl	%esi
	pushl	%ecx

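	/*
	 * EFI handover protocol entry point, at a fixed offset (0x30) from
	 * startup_32.  This is the location that the new 'handover_offset'
	 * boot header field advertises to boot loaders.
	 */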
	.org 0x30,0x90
	call	efi_main
	cmpl	$0, %eax
	movl	%eax, %esi
	jne	2f
1:
	/* EFI init failed, so hang. */
	hlt
	jmp	1b
2:
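	/*
	 * Use a call/pop to discover where we are actually executing, then
	 * rebase by (code32_start - pref_address) from boot_params and
	 * continue at preferred_addr within that image.
	 */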
	call	3f
3:
	popl	%eax
	subl	$3b, %eax
	subl	BP_pref_address(%esi), %eax
	add	BP_code32_start(%esi), %eax
	leal	preferred_addr(%eax), %eax
	jmp	*%eax

preferred_addr:
#endif
	cld
	/*
	 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
	 * us to not reload segments
	 */
	testb	$(1<<6), BP_loadflags(%esi)
	jnz	1f

	cli
	movl	$__BOOT_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %fs
	movl	%eax, %gs
	movl	%eax, %ss
1:

/*
 * Calculate the delta between where we were compiled to run
 * at and where we were actually loaded at.  This can only be done
 * with a short local call on x86.  Nothing else will tell us what
 * address we are running at.  The reserved chunk of the real-mode
 * data at 0x1e4 (defined as a scratch field) is used as the stack
 * for this calculation. Only 4 bytes are needed.
 */
	leal	(BP_scratch+4)(%esi), %esp
	call	1f
1:	popl	%ebp
	subl	$1b, %ebp

/*
 * %ebp contains the address we are loaded at by the boot loader and %ebx
 * contains the address where we should move the kernel image temporarily
 * for safe in-place decompression.
 */

#ifdef CONFIG_RELOCATABLE
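	/* Round our load address up to the required kernel alignment. */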
	movl	%ebp, %ebx
	movl	BP_kernel_alignment(%esi), %eax
	decl	%eax
	addl	%eax, %ebx
	notl	%eax
	andl	%eax, %ebx
#else
	movl	$LOAD_PHYSICAL_ADDR, %ebx
#endif

	/* Target address to relocate to for decompression */
	addl	$z_extract_offset, %ebx

	/* Set up the stack */
	leal	boot_stack_end(%ebx), %esp

	/* Zero EFLAGS */
	pushl	$0
	popfl

/*
 * Copy the compressed kernel to the end of our buffer
 * where decompression in place becomes safe.
 */
	pushl	%esi
	leal	(_bss-4)(%ebp), %esi
	leal	(_bss-4)(%ebx), %edi
	movl	$(_bss - startup_32), %ecx
	shrl	$2, %ecx
	std
	rep	movsl
	cld
	popl	%esi

/*
 * Jump to the relocated address.
 */
	leal	relocated(%ebx), %eax
	jmp	*%eax
ENDPROC(startup_32)

	.text
relocated:

/*
 * Clear BSS (stack is currently empty)
 */
	xorl	%eax, %eax
	leal	_bss(%ebx), %edi
	leal	_ebss(%ebx), %ecx
	subl	%edi, %ecx
	shrl	$2, %ecx
	rep	stosl

/*
 * Adjust our own GOT
 */
	leal	_got(%ebx), %edx
	leal	_egot(%ebx), %ecx
1:
	cmpl	%ecx, %edx
	jae	2f
	addl	%ebx, (%edx)
	addl	$4, %edx
	jmp	1b
2:

/*
 * Do the decompression, and jump to the new kernel.
 */
	leal	z_extract_offset_negative(%ebx), %ebp
				/* push arguments for decompress_kernel: */
	pushl	%ebp		/* output address */
	pushl	$z_input_len	/* input_len */
	leal	input_data(%ebx), %eax
	pushl	%eax		/* input_data */
	leal	boot_heap(%ebx), %eax
	pushl	%eax		/* heap area */
	pushl	%esi		/* real mode pointer */
	call	decompress_kernel
	addl	$20, %esp

#if CONFIG_RELOCATABLE
/*
 * Find the address of the relocations.
 */
	leal	z_output_len(%ebp), %edi

/*
 * Calculate the delta between where vmlinux was compiled to run
 * and where it was actually loaded.
 */
	movl	%ebp, %ebx
	subl	$LOAD_PHYSICAL_ADDR, %ebx
	jz	2f	/* Nothing to be done if loaded at compiled addr. */
/*
 * Process relocations.
 */

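/*
 * The relocation table sits at the end of the decompressed image and is
 * walked backwards until the terminating zero entry; each non-zero entry
 * is the link-time virtual address of a 32-bit word that must be adjusted
 * by the load delta in %ebx.
 */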
1:	subl	$4, %edi
	movl	(%edi), %ecx
	testl	%ecx, %ecx
	jz	2f
	addl	%ebx, -__PAGE_OFFSET(%ebx, %ecx)
	jmp	1b
2:
#endif

/*
 * Jump to the decompressed kernel.
 */
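	/* %esi still holds the boot_params pointer the kernel proper expects
	   (it is callee-saved across decompress_kernel). */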
	xorl	%ebx, %ebx
	jmp	*%ebp

/*
 * Stack and heap for decompression
 */
	.bss
	.balign 4
boot_heap:
	.fill BOOT_HEAP_SIZE, 1, 0
boot_stack:
	.fill BOOT_STACK_SIZE, 1, 0
boot_stack_end: