Function report |
Source Code: mm/hugetlb.c |
Create Date:2022-07-28 15:28:53 |
Last Modify:2020-03-12 14:18:49 | Copyright©Brick |
Name:hugetlb_no_page
Proto:static vm_fault_t hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, unsigned long idx, unsigned long address, pte_t *ptep, unsigned int flags)
Type:vm_fault_t
Parameter:
Type | Parameter
---|---
struct mm_struct * | mm
struct vm_area_struct * | vma
struct address_space * | mapping
unsigned long | idx
unsigned long | address
pte_t * | ptep
unsigned int | flags
3793 | h = hstate_vma(vma) |
3794 | ret = VM_FAULT_SIGBUS |
3795 | anon_rmap = 0 |
3800 | haddr = address & huge_page_mask(h) |
3801 | bool new_page = false |
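Lines 3793–3801 set up the per-fault local state. A minimal excerpt-style sketch of this prologue, reconstructed from the annotations above (assuming a ~v5.x mm/hugetlb.c; not a standalone compilable unit):

```c
static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			struct address_space *mapping, unsigned long idx,
			unsigned long address, pte_t *ptep, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);	/* hstate (page size, pools) backing this VMA */
	vm_fault_t ret = VM_FAULT_SIGBUS;	/* pessimistic default */
	int anon_rmap = 0;			/* becomes 1 for private (anonymous) mappings */
	struct page *page;
	pte_t new_pte;
	spinlock_t *ptl;
	unsigned long haddr = address & huge_page_mask(h);	/* huge-page-aligned fault address */
	bool new_page = false;			/* set once a page is freshly allocated */

	/* ... body as annotated below ... */
```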
3808 | If is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED) Then |
3809 | pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", current->pid)
3811 | Return ret |
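Lines 3808–3811 bail out early when a failed COW in the original mapper left this VMA with HPAGE_RESV_UNMAPPED set; there is no reservation to satisfy the fault, so the task is killed with SIGBUS. A sketch (helper names as in mainline):

```c
	/*
	 * The original mapper failed a COW and unmapped pages from this
	 * (child) VMA; no huge page reservation is left to use, so warn
	 * and let the caller deliver SIGBUS.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
				    current->pid);
		return ret;		/* still VM_FAULT_SIGBUS */
	}
```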
3818 | retry : |
3820 | If Not page Then |
3828 | If userfaultfd_missing(vma) Then |
3830 | struct vm_fault vmf = { .vma = vma, .address = haddr, .flags = flags, }
3848 | hash = hugetlb_fault_mutex_hash(mapping, idx)
3850 | ret = handle_userfault(&vmf, VM_UFFD_MISSING)
3851 | mutex_lock( & hugetlb_fault_mutex_table[hash]) |
3852 | Go to out |
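When the page-cache lookup (line 3819; find_lock_page() in mainline) finds nothing and the VMA is registered for userfaultfd missing-page tracking, the fault is handed to userspace. The hugetlb fault mutex has to be dropped across handle_userfault() and retaken before returning. A sketch, assuming the two-argument hugetlb_fault_mutex_hash() of ~v5.5+:

```c
		if (userfaultfd_missing(vma)) {
			u32 hash;
			struct vm_fault vmf = {
				.vma = vma,
				.address = haddr,
				.flags = flags,
			};

			/*
			 * The fault mutex must not be held while userspace
			 * handles the fault; reacquire it afterwards so the
			 * caller can unlock unconditionally.
			 */
			hash = hugetlb_fault_mutex_hash(mapping, idx);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			ret = handle_userfault(&vmf, VM_UFFD_MISSING);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}
```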
3855 | page = alloc_huge_page(vma, haddr, 0) |
3869 | ptl = huge_pte_lock(h, mm, ptep) |
3870 | If Not huge_pte_none(huge_ptep_get(ptep)) Then |
3875 | spin_unlock(ptl) |
3877 | Go to out |
3879 | clear_huge_page(page, address, pages_per_huge_page(h)) |
3880 | __SetPageUptodate(page) |
3881 | new_page = true |
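Lines 3855–3881 allocate and initialize a fresh huge page; the pte re-check under huge_pte_lock() at lines 3869–3877 sits inside the allocation-failure path, where mainline distinguishes a genuine pool shortage from a race with page migration. A sketch of this stretch, reconstructed from ~v5.x mainline (the annotation above abbreviates the failure handling):

```c
		page = alloc_huge_page(vma, haddr, 0);
		if (IS_ERR(page)) {
			/*
			 * Allocation may have failed because this page is
			 * being migrated: re-check the pte under the lock and
			 * either report success (someone installed an entry),
			 * wait for the migration entry and retry, or turn the
			 * error into a fault result.
			 */
			if (is_hugetlb_entry_migration(huge_ptep_get(ptep))) {
				ptl = huge_pte_lock(h, mm, ptep);
				if (!huge_pte_none(huge_ptep_get(ptep))) {
					ret = 0;
					spin_unlock(ptl);
					goto out;
				}
				spin_unlock(ptl);
				migration_entry_wait_huge(vma, mm, ptep);
				goto retry;
			}
			ret = vmf_error(PTR_ERR(page));
			goto out;
		}
		clear_huge_page(page, address, pages_per_huge_page(h));	/* zero every subpage */
		__SetPageUptodate(page);
		new_page = true;
```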
3883 | If vma->vm_flags & VM_MAYSHARE Then |
3891 | Else |
3894 | ret = VM_FAULT_OOM |
3895 | Go to backout_unlocked |
3897 | anon_rmap = 1 |
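A freshly allocated page is either inserted into the file's page cache (shared mappings, VM_MAYSHARE) or prepared for anonymous rmap (private mappings). A sketch of lines 3883–3897, with the page-cache error handling filled in from ~v5.x mainline:

```c
		if (vma->vm_flags & VM_MAYSHARE) {
			int err = huge_add_to_page_cache(page, mapping, idx);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;	/* lost the race; use the other task's page */
				goto out;
			}
		} else {
			lock_page(page);
			if (unlikely(anon_vma_prepare(vma))) {
				ret = VM_FAULT_OOM;
				goto backout_unlocked;
			}
			anon_rmap = 1;	/* add the anonymous rmap once the pte lock is held */
		}
```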
3899 | Else |
3908 | Go to backout_unlocked |
3918 | If flags & FAULT_FLAG_WRITE && Not (vma->vm_flags & VM_SHARED) Then |
3919 | If vma_needs_reservation(h, vma, haddr) < 0 Then |
3920 | ret = VM_FAULT_OOM |
3921 | Go to backout_unlocked |
3924 | vma_end_reservation(h, vma, haddr) |
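For a write fault on a private mapping the page will be copied immediately after it is mapped, so the reservation needed for that copy is settled here, before the page-table spinlock is taken (reservation bookkeeping may allocate and sleep). A sketch of lines 3918–3924:

```c
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		if (vma_needs_reservation(h, vma, haddr) < 0) {
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}
		/* Just decrements the count, does not deallocate */
		vma_end_reservation(h, vma, haddr);
	}
```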
3927 | ptl = huge_pte_lock(h, mm, ptep) |
3932 | ret = 0 |
3933 | If Not huge_pte_none(huge_ptep_get(ptep)) Then Go to backout |
3936 | If anon_rmap Then |
3938 | hugepage_add_new_anon_rmap(page, vma, haddr) |
3939 | Else page_dup_rmap(page, true) |
3941 | new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_SHARED)))
3943 | set_huge_pte_at(mm, haddr, ptep, new_pte) |
3945 | hugetlb_count_add(pages_per_huge_page(h), mm) |
3946 | If flags & FAULT_FLAG_WRITE && Not (vma->vm_flags & VM_SHARED) Then |
3951 | spin_unlock(ptl) |
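Lines 3927–3951 install the mapping under the huge pte lock: re-check that the pte is still none, hook the page into the right reverse map, build and set the pte, account it in the hugetlb RSS counter, and for a private write fault perform the copy-on-write right away. A sketch with helpers as in ~v5.x mainline (where the COW helper was still called hugetlb_cow()):

```c
	ptl = huge_pte_lock(h, mm, ptep);
	/* (mainline also re-checks i_size here to guard against truncation) */
	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;			/* raced with another fault */

	if (anon_rmap)
		hugepage_add_new_anon_rmap(page, vma, haddr);
	else
		page_dup_rmap(page, true);	/* page-cache page: take a compound map reference */

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, haddr, ptep, new_pte);

	hugetlb_count_add(pages_per_huge_page(h), mm);
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization: do the COW now rather than taking a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
	}

	spin_unlock(ptl);
```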
3958 | If new_page Then set_page_huge_active(page)
3962 | out : |
3963 | Return ret |
3965 | backout : |
3966 | spin_unlock(ptl) |
3967 | backout_unlocked : |
3971 | Go to out |
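The epilogue and the backout labels unwind in reverse order: only a freshly allocated page is marked huge-active, the page lock from the lookup/allocation path is dropped, and on error the pte lock (backout) and then the page plus its reservation (backout_unlocked) are released. A sketch of lines 3958–3971, with the calls the annotation skips filled in from ~v5.x mainline:

```c
	/* Only newly allocated pages are made active here. */
	if (new_page)
		set_page_huge_active(page);	/* never called for a tail page */

	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(ptl);
backout_unlocked:
	unlock_page(page);
	restore_reserve_on_error(h, vma, haddr, page);
	put_page(page);
	goto out;
```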
Name | Description
---|---
hugetlb_fault |
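The only caller listed above is hugetlb_fault(), which takes the hugetlb fault mutex, reads the huge pte and dispatches to hugetlb_no_page() only when that pte is still none. A hedged sketch of that dispatch as it appears in ~v5.x (label name as in mainline):

```c
	/* Inside hugetlb_fault(), with hugetlb_fault_mutex_table[hash] held: */
	entry = huge_ptep_get(ptep);
	if (huge_pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
		goto out_mutex;		/* drop the fault mutex and return ret */
	}
```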