Function source code

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source File: arch/x86/kernel/crash_dump_32.c  Create Date: 2022-07-27 09:37:12
首页 Copyright©Brick

(Original kernel source line numbers 33–80; the per-line number gutter was flattened into the text during page extraction.)
/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *  space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *  otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 *
 * Calling copy_to_user() in atomic context is not desirable. Hence first
 * copying the data to a pre-allocated kernel page and then copying to user
 * space in non-atomic context.
 *
 * Return: number of bytes copied on success, 0 for a zero-length request,
 * or -EFAULT on an invalid pfn, a missing bounce page, or a failed
 * copy_to_user().
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
                               size_t csize, unsigned long offset, int userbuf)
{
    void  *vaddr;
 
    /* Zero-length request: nothing to copy. */
    if (!csize)
        return 0;
 
    /* Bail out if the pfn is not a valid page of the crashed kernel. */
    if (!is_crashed_pfn_valid(pfn))
        return -EFAULT;
 
    /* Temporarily map the old-memory page; we are in atomic context
     * until the matching kunmap_atomic() below. */
    vaddr = kmap_atomic_pfn(pfn);
 
    if (!userbuf) {
        /* Kernel-space destination: safe to copy directly while mapped. */
        memcpy(buf, (vaddr + offset), csize);
        kunmap_atomic(vaddr);
    } else {
        /* User-space destination needs the pre-allocated bounce page
         * (kdump_buf_page, set up elsewhere in this file). */
        if (!kdump_buf_page) {
            printk(KERN_WARNING "Kdump: Kdump buffer page not"
                " allocated\n");
            kunmap_atomic(vaddr);
            return -EFAULT;
        }
        /*
         * Stage the whole page into the kernel bounce buffer, then drop
         * the atomic mapping BEFORE copy_to_user(), which may sleep
         * (see the rationale in the function header above).
         */
        copy_page(kdump_buf_page, vaddr);
        kunmap_atomic(vaddr);
        if (copy_to_user(buf, (kdump_buf_page + offset), csize))
            return -EFAULT;
    }
 
    return csize;
}