IT博客汇
  • 首页
  • 精华
  • 技术
  • 设计
  • 资讯
  • 扯淡
  • 权利声明
  • 登录 注册

    Linux内存初始化(C语言部分)

    Liu Yutao发表于 2016-03-15 11:04:00
    love 0

    这篇博客接着上篇博客,继续介绍Linux内核启动过程中内存的初始化过程。

    相比于汇编代码,分析C代码有一个优势,因为在之前的汇编代码中已经开启了分页模式,所以可以通过一些symbol直接在某些函数上设置断点,然后通过gdb进行调试。如何用gdb调试内核可以参考这篇博客。

    进入x86_64_start_kernel

    之前我们讲到,在secondary_startup_64最后,我们通过far jump进入了C语言实现的函数x86_64_start_kernel,那么这篇我们就从这个函数开始讲起。

    这个函数在arch/x86/kernel/head64.c文件中,该函数有一个参数,是char * real_mode_data,这个参数是在之前通过movq %rsi, %rdi传进来的。

    在该函数的开头,先做了一些sanity检查:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    
      /*
       * Build-time sanity checks on the kernel image and module
       * area mappings. (these are purely build-time and produce no code)
       */
      /* Module area must sit above the kernel text mapping base... */
      BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
      /* ...leaving room below it for a full-sized kernel image. */
      BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
      /* Kernel image + modules together fit in two PUD-sized regions. */
      BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
      /* Both region bases must be PMD (large-page) aligned. */
      BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
      BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
      /* Modules are mapped strictly above the kernel start address. */
      BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
      /* Kernel and module areas must share a single PGD entry. */
      BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
            (__START_KERNEL & PGDIR_MASK)));
      /* Fixmap region must lie above the end of the module area. */
      BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);
    

    主要是针对Module地址的检查(module被映射在0xffffffffa0000000上面)。

    之后做了一个操作,将之前建立的identity-map给清除掉了,因为我们现在已经用高地址进行页表翻译了,所以那个identity-map也就没有用了:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    
    /* Wipe all early page tables except for the kernel symbol map */
    static void __init reset_early_page_tables(void)
    {
      unsigned long i;

      /* Clear every PGD slot except the last one, which holds the
         kernel high mapping (__START_KERNEL_map region). */
      for (i = 0; i < PTRS_PER_PGD-1; i++)
        early_level4_pgt[i].pgd = 0;

      /* Restart the early page-table page allocator from scratch. */
      next_early_pgt = 0;

      /* Reload CR3 so stale identity-map TLB entries are dropped. */
      write_cr3(__pa(early_level4_pgt));
    }

      /* Kill off the identity-map trampoline */
      reset_early_page_tables();
    

    注意这里有一个__pa(early_level4_pgt),我们来看一下__pa的定义:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    
    /*
     * Translate a kernel virtual address to a physical address.
     * Works for both the kernel text mapping (__START_KERNEL_map) and the
     * direct mapping (PAGE_OFFSET), selecting the right offset without an
     * explicit range check on the input.
     */
    static inline unsigned long __phys_addr_nodebug(unsigned long x)
    {
      unsigned long y = x - __START_KERNEL_map;

      /* use the carry flag to determine if x was < __START_KERNEL_map */
      x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));

      return x;
    }

    #define __phys_addr(x)  __phys_addr_nodebug(x)
    #define __pa(x)   __phys_addr((unsigned long)(x))
    

    之后调用clear_bss,即将bss中的内容清零:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    
    /* Don't add a printk in there. printk relies on the PDA which is not initialized 
       yet. */
    /* Zero the .bss section; the linker symbols __bss_start/__bss_stop
       delimit it. */
    static void __init clear_bss(void)
    {
      memset(__bss_start, 0,
             (unsigned long) __bss_stop - (unsigned long) __bss_start);
    }

      /* clear bss before set_intr_gate with early_idt_handler */
      clear_bss();
    

    然后设置IDT(中断描述符表)中的gate,并通过lidt指令将IDT加载到IDTR寄存器:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    
    /* Load the IDT register (IDTR) from the given descriptor pointer
       via the lidt instruction. */
    static inline void native_load_idt(const struct desc_ptr *dtr)
    {
      asm volatile("lidt %0"::"m" (*dtr));
    }

    #define load_idt(dtr) native_load_idt(dtr)

      /* Install an early handler for every CPU exception vector,
         then activate the IDT. */
      for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
        set_intr_gate(i, early_idt_handlers[i]);
      load_idt((const struct desc_ptr *)&idt_descr);
    

    然后调用copy_bootdata:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    
    /*
     * Copy boot parameters out of the real-mode data area into the
     * kernel's own storage, then copy the kernel command line (if any)
     * into boot_command_line.
     */
    static void __init copy_bootdata(char *real_mode_data)
    {
      char * command_line;
      unsigned long cmd_line_ptr;

      memcpy(&boot_params, real_mode_data, sizeof boot_params);
      sanitize_boot_params(&boot_params);
      cmd_line_ptr = get_cmd_line_ptr();
      if (cmd_line_ptr) {
        /* cmd_line_ptr is a physical address; map it via __va first. */
        command_line = __va(cmd_line_ptr);
        memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
      }
    }

      /* real_mode_data arrives as a physical address as well. */
      copy_bootdata(__va(real_mode_data));
    

    这里面主要是copy一些boot的参数,之后调用load_ucode_bsp和early_printk,这里都不详述。

    然后设置init_level4_pgt:

    1
    2
    3
    
      /* Start from an empty top-level page table. */
      clear_page(init_level4_pgt);
      /* set init_level4_pgt kernel high mapping: slot 511 is the PGD
         entry covering __START_KERNEL_map, inherited from the early
         page table. */
      init_level4_pgt[511] = early_level4_pgt[511];
    

    后来还有一些函数调用和boot相关的,这里也不细说,最后调用start_kernel:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    
    /* First C function after the assembly entry; real_mode_data is the
       pointer handed over in %rsi/%rdi by secondary_startup_64.
       (Body elided in this excerpt.) */
    asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
    {
      ...

      x86_64_start_reservations(real_mode_data);
    }

    /* Reserve boot-time memory regions, then enter the generic
       start_kernel(). (Body elided in this excerpt.) */
    void __init x86_64_start_reservations(char *real_mode_data)
    {
      ...

      start_kernel();
    }
    

    start_kernel

    下面进入start_kernel函数,该函数定义在init/main.c文件中。

    里面调用了很多函数来做各种目的的初始化,其中和内存初始化相关的函数调用如下:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    
    /* Generic kernel entry (init/main.c); only the memory-init related
       calls are shown here, the rest is elided. */
    asmlinkage void __init start_kernel(void)
    {
      ...
      setup_arch(&command_line);   /* arch-specific setup, incl. paging */
      ...
      setup_per_cpu_areas();       /* per-CPU static area allocation */
      ...
      /* NOTE(review): upstream symbol is build_all_zonelists — verify. */
      build_all_zonelist(NULL, NULL);
      page_alloc_init();
      ...
      mm_init();                   /* sets up buddy/slab allocators */
      ...
      setup_per_cpu_pageset();
    }
    

    如下图所示(截图自这里):

    start_kernel

    下面我们逐个函数进行介绍。

    setup_arch

    x86的setup_arch定义在arch/x86/kernel/setup.c文件中,其中和内存初始化相关的函数如下所示:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    
    /* x86 arch setup (arch/x86/kernel/setup.c); memory-related calls
       only, remainder elided. */
    void __init setup_arch(char **cmdline_p)
    {
      /* Obtain the E820 physical memory map from boot data. */
      setup_memory_map();
      e820_reserve_setup_data();

      /* Record kernel image section boundaries in init_mm. */
      init_mm.start_code = (unsigned long) _text;
      init_mm.end_code = (unsigned long) _etext;
      init_mm.end_data = (unsigned long) _edata;
      init_mm.brk = _brk_end;

      e820_add_kernel_range();
      ...
      /* Unmap stale high-mapping PMDs outside [_text, _brk_end]. */
      cleanup_highmap();
      ...
      /* Build the direct mapping of all physical memory. */
      init_mem_mapping();
      early_trap_pf_init();
      ...
      x86_init.paging.pagetable_init(); // native_pagetable_init() -> paging_init (arch/x86/mm/init_64.c)
      ...
    }
    

    其中,前面一直是在通过BIOS获得E820内存分布(e820请查阅这篇博客),以及初始化init_mm。我们从cleanup_highmap开始分析,该函数在arch/x86/mm/init_64.c中:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    
    /*
     * The head.S code sets up the kernel high mapping:
     *
     *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
     *
     * phys_base holds the negative offset to the kernel, which is added
     * to the compile time generated pmds. This results in invalid pmds up
     * to the point where we hit the physaddr 0 mapping.
     *
     * We limit the mappings to the region from _text to _brk_end.  _brk_end
     * is rounded up to the 2MB boundary. This catches the invalid pmds as
     * well, as they are located before _text:
     */
    void __init cleanup_highmap(void)
    {
      unsigned long vaddr = __START_KERNEL_map;
      unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
      unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
      pmd_t *pmd = level2_kernel_pgt;

      /*
       * Native path, max_pfn_mapped is not set yet.
       * Xen has valid max_pfn_mapped set in
       *  arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
       */
      if (max_pfn_mapped)
        vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

      /* Walk the kernel PMD page one 2MB entry at a time and unmap
         everything outside [_text, _brk_end]. */
      for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
        if (pmd_none(*pmd))
          continue;
        if (vaddr < (unsigned long) _text || vaddr > end)
          set_pmd(pmd, __pmd(0));
      }
    }
    

    这段代码非常好理解,加上看注释,可以知道其功能就是将小于_text和大于_brk_end的地址都从页表中unmap掉。

    接下来是init_mem_mapping这个函数,该函数位于arch/x86/mm/init.c:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    
    /* Build the direct mapping of physical memory (excerpt; parts
       elided). */
    void __init init_mem_mapping(void)
    {
      ...
      end = max_pfn << PAGE_SHIFT;

      /* the ISA range is always mapped regardless of memory holes */
      init_memory_mapping(0, ISA_END_ADDRESS);

      /* Map the rest of RAM, working from high addresses downward. */
      memory_map_top_down(ISA_END_ADDRESS, end);

      if (max_pfn > max_low_pfn) {
        /* can we preserve max_low_pfn ?*/
        max_low_pfn = max_pfn;
      }

      /* Switch to the final kernel page table. */
      load_cr3(swapper_pg_dir);
      __flush_tlb_all();

      early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
    }
    

    这里面虽然代码少,但是信息量还是蛮大的,我们一个一个来看。

    首先是init_memory_mapping:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    
    /*
     * Setup the direct mapping of the physical memory at PAGE_OFFSET.
     * This runs before bootmem is initialized and gets pages directly from
     * the physical memory. To access them they are temporarily mapped.
     */
    unsigned long __init_refok init_memory_mapping(unsigned long start,
                     unsigned long end)
    {
      struct map_range mr[NR_RANGE_MR];
      unsigned long ret = 0;
      int nr_range, i;

      pr_info("init_memory_mapping: [mem %#010lx-%#010lx]\n",
             start, end - 1);

      /* Partition [start, end) into ranges by usable page size. */
      memset(mr, 0, sizeof(mr));
      nr_range = split_mem_range(mr, 0, start, end);

      /* Populate page tables for each range with its page-size mask. */
      for (i = 0; i < nr_range; i++)
        ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
                   mr[i].page_size_mask);

      /* Record the newly mapped pfn range for later bookkeeping. */
      add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

      return ret >> PAGE_SHIFT;
    }
    

    这里注释中提到的PAGE_OFFSET值为0xffff880000000000(0xffff880000000000到0xffffc7ffffffffff为所有物理地址的direct mapping)。

    这里有两个主要的函数,我们先来看split_mem_range(位于arch/x86/mm/init.c):

    1
    2
    3
    4
    5
    6
    
    /* Split [start, end) into map_range entries grouped by the largest
       page size usable for each sub-range (1G / 2M / 4K), merging
       adjacent entries with the same alignment. (Body elided in this
       excerpt.) */
    static int __meminit split_mem_range(struct map_range *mr, int nr_range,
                 unsigned long start,
                 unsigned long end)
    {
      ...
    }
    

    里面代码比较复杂,和之前在分析xen代码中某个函数有点像,这里就不逐段分析。简单说一下它做了什么吧。split_mem_range的作用就是将整个物理地址段进行了一个分类,把所有地址分为三类:

    • 大于1G的地址段
    • 2M到1G的地址段
    • 其它

    然后将start到end的物理地址段分别塞进这些段中,然后将每个段的信息保存在mr这个数据结构中。这个数据结构包括了每个地址段的起始地址、结束地址、以及alignment。最后有一个merge过程,将mr中相邻且alignment相同的项进行合并。

    最后分出来的地址段的结果如下图所示:

    split_mem_range

    另外一个函数为kernel_physical_mapping_init(位于arch/x86/mm/init_64.c):

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    17
    18
    19
    20
    21
    22
    23
    24
    25
    26
    27
    28
    29
    30
    31
    32
    33
    34
    35
    36
    37
    38
    39
    40
    41
    42
    43
    
    /*
     * Populate the kernel page tables with the direct mapping for the
     * physical range [start, end), using the page sizes allowed by
     * page_size_mask. Returns the last physical address mapped.
     */
    unsigned long __meminit
    kernel_physical_mapping_init(unsigned long start,
               unsigned long end,
               unsigned long page_size_mask)
    {
      bool pgd_changed = false;
      unsigned long next, last_map_addr = end;
      unsigned long addr;

      /* Work in direct-mapping virtual addresses from here on. */
      start = (unsigned long)__va(start);
      end = (unsigned long)__va(end);
      addr = start;

      /* One iteration per PGD entry covering [start, end). */
      for (; start < end; start = next) {
        pgd_t *pgd = pgd_offset_k(start);
        pud_t *pud;

        next = (start & PGDIR_MASK) + PGDIR_SIZE;

        /* PGD entry already present: descend and fill the PUD level. */
        if (pgd_val(*pgd)) {
          pud = (pud_t *)pgd_page_vaddr(*pgd);
          last_map_addr = phys_pud_init(pud, __pa(start),
                 __pa(end), page_size_mask);
          continue;
        }

        /* Otherwise allocate a fresh PUD page, fill it, and hook it in. */
        pud = alloc_low_page();
        last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
                 page_size_mask);

        spin_lock(&init_mm.page_table_lock);
        pgd_populate(&init_mm, pgd, pud);
        spin_unlock(&init_mm.page_table_lock);
        pgd_changed = true;
      }

      /* New PGD entries must be propagated to all other page tables. */
      if (pgd_changed)
        sync_global_pgds(addr, end - 1);

      __flush_tlb_all();

      return last_map_addr;
    }
    

    这是一个非常关键的函数,它的作用就是填充页表,将所有之前探寻到并且分割好的物理地址映射到对应的虚拟内存中,并在页表中体现出来。我们来逐段分析:

    首先通过__va这个宏将物理地址转换成其对应的(direct mapping)虚拟地址,即加上0xffff880000000000。

    1
    2
    
      /* Convert the physical range to its direct-mapping virtual
         addresses (adds PAGE_OFFSET). */
      start = (unsigned long)__va(start);
      end = (unsigned long)__va(end);
    

    然后就是传统的走页表过程了,这里有个宏需要说明:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    
    #define swapper_pg_dir init_level4_pgt;
    
    struct mm_struct init_mm = {
      .pgd = swapper_pg_dir,
      ...
    }
    
    #define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
    
    #define pgd_offset_k(address) pgd_offset(&init_mm, (address))
    
    pgd_t *pgd = pgd_offset_k(start);
    

    也就是说,在这个时候,页表基址(pgd)从原来的early_level4_pgt变成了init_level4_pgt,这个数据结构同样是在arch/x86/kernel/head_64.S中定义的:

    1
    2
    3
    4
    5
    6
    7
    8
    9
    10
    11
    12
    13
    14
    15
    16
    
    /* Final kernel top-level page table, statically initialized.
       Entries are physical addresses (virtual minus __START_KERNEL_map)
       plus page-table flag bits. */
    NEXT_PAGE(init_level4_pgt)
      /* Slot 0: identity mapping of low memory. */
      .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
      /* Slot L4_PAGE_OFFSET: direct mapping, reusing the same PUD. */
      .org    init_level4_pgt + L4_PAGE_OFFSET*8, 0
      .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
      .org    init_level4_pgt + L4_START_KERNEL*8, 0
      /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
      .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE

    NEXT_PAGE(level3_ident_pgt)
      /* Only the first PUD entry is populated; the rest are zero. */
      .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
      .fill 511, 8, 0
    NEXT_PAGE(level2_ident_pgt)
      /* Since I easily can, map the first 1G.
       * Don't set NX because code runs from these pages.
       */
      PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
    

    因此,加上init_level4_pgt这个页表后,内存的分布图如下所示:

    init level4 pgt

    所以kernel_physical_mapping_init后面的代码就是根据不同mr数据结构中的地址段将这个页表进行填充,这里就不详述了。

    在执行完init_memory_mapping之后,init_mem_mapping函数又执行了一个memory_map_top_down函数,里面其实也是根据不同的地址段,连续调用init_range_memory_mapping,从而间接调用init_memory_mapping函数。

    最后,将swapper_pg_dir加载进cr3,完成页表的转换。

    现在让我们回到setup_arch,调用paging_init(位于arch/x86/mm/init_64.c)。里面主要完成一些zones的初始化,不详述。

    再次回到start_kernel,在setup_arch之后还陆续调用了几个和percpu以及memory zones,memory allocator相关的函数,这里也不详细说了。

    这个系列就先简单介绍到这里,其实后面还有很多内容没有详细介绍,主要目的是搞清楚内核是如何创建页表的。



沪ICP备19023445号-2号
友情链接