Function report
Source Code: mm/huge_memory.c
Create Date: 2022-07-28 16:02:38
Last Modify: 2020-03-12 14:18:49
Copyright © Brick
Name:__split_huge_pmd_locked
Proto:static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, unsigned long haddr, bool freeze)
Type:void
Parameter:

Type | Name
---|---
struct vm_area_struct * | vma
pmd_t * | pmd
unsigned long | haddr
bool | freeze
2147 | bool young, write, soft_dirty, pmd_migration = false |
2151 | VM_BUG_ON(haddr & ~HPAGE_PMD_MASK) |
2154 | VM_BUG_ON(!is_pmd_migration_entry( * pmd) && !pmd_trans_huge( * pmd) && !pmd_devmap( * pmd)) |
2157 | count_vm_event(THP_SPLIT_PMD)
2159 | If Not vma_is_anonymous(vma) Then |
2160 | _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd) |
2165 | If arch_needs_pgtable_deposit() Then zap_deposited_table(mm, pmd) |
2167 | If vma_is_dax(vma) Then Return |
2169 | page = pmd_page(_pmd)
2170 | If Not PageDirty(page) && pmd_dirty(_pmd) Then set_page_dirty(page)
2172 | If Not PageReferenced(page) && pmd_young(_pmd) Then SetPageReferenced(page) |
2176 | add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR)
2177 | Return |
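
Lines 2159-2177 above are the non-anonymous (file-backed) branch. A minimal sketch of how this branch reads, reconstructed from the annotations and from the kernel source of roughly this era (the report skips the rmap/refcount release between 2172 and 2176, so those two calls are an assumption):

```c
/* Sketch of the file-backed branch (reconstructed, not verbatim kernel code). */
if (!vma_is_anonymous(vma)) {
	_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
	/* The huge page is being unmapped, so drop any deposited page table. */
	if (arch_needs_pgtable_deposit())
		zap_deposited_table(mm, pmd);
	if (vma_is_dax(vma))
		return;
	page = pmd_page(_pmd);
	/* Carry dirty/accessed state from the PMD over to the struct page. */
	if (!PageDirty(page) && pmd_dirty(_pmd))
		set_page_dirty(page);
	if (!PageReferenced(page) && pmd_young(_pmd))
		SetPageReferenced(page);
	page_remove_rmap(page, true);	/* assumed: not shown in the report */
	put_page(page);			/* assumed: not shown in the report */
	add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
	return;
}
```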
2178 | Else if is_huge_zero_pmd( * pmd) Then |
2188 | Return __split_huge_zero_page_pmd(vma, haddr, pmd) |
2211 | old_pmd = pmdp_invalidate(vma, haddr, pmd) |
2217 | entry = pmd_to_swp_entry(old_pmd) |
2218 | page = pfn_to_page(swp_offset(entry))
2219 | write = is_write_migration_entry(entry) |
2220 | young = false |
2221 | soft_dirty = pmd_swp_soft_dirty(old_pmd) |
2222 | Else |
2223 | page = pmd_page(old_pmd)
2224 | If pmd_dirty(old_pmd) Then SetPageDirty(page) |
2228 | soft_dirty = pmd_soft_dirty(old_pmd) |
2230 | VM_BUG_ON_PAGE(!page_count(page), page) |
2231 | page_ref_add(page, HPAGE_PMD_NR - 1) |
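
Lines 2211-2231 invalidate the huge PMD and decode its old value into page, write, young and soft_dirty, then take one extra page reference per small PTE that will be installed. A hedged sketch, using the migration-entry helper names of this kernel era (the two assignments marked "assumed" are not shown in the report):

```c
/* Sketch: decode the invalidated PMD (migration entry vs. present huge PMD). */
old_pmd = pmdp_invalidate(vma, haddr, pmd);

pmd_migration = is_pmd_migration_entry(old_pmd);
if (pmd_migration) {
	swp_entry_t entry = pmd_to_swp_entry(old_pmd);

	page = pfn_to_page(swp_offset(entry));
	write = is_write_migration_entry(entry);
	young = false;
	soft_dirty = pmd_swp_soft_dirty(old_pmd);
} else {
	page = pmd_page(old_pmd);
	if (pmd_dirty(old_pmd))
		SetPageDirty(page);
	write = pmd_write(old_pmd);	/* assumed: not shown in the report */
	young = pmd_young(old_pmd);	/* assumed: not shown in the report */
	soft_dirty = pmd_soft_dirty(old_pmd);
}
VM_BUG_ON_PAGE(!page_count(page), page);
page_ref_add(page, HPAGE_PMD_NR - 1);	/* one reference per future PTE */
```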
2238 | pmd_populate(mm, & _pmd, pgtable) |
2240 | Loop for each i < HPAGE_PMD_NR (one PTE per subpage)
2247 | If freeze || pmd_migration Then |
2249 | swp_entry = make_migration_entry(page + i, write) |
2250 | entry = swp_entry_to_pte(swp_entry)
2251 | If soft_dirty Then entry = pte_swp_mksoft_dirty(entry) |
2253 | Else |
2256 | If Not write Then entry = pte_wrprotect(entry) |
2260 | If soft_dirty Then entry = pte_mksoft_dirty(entry) |
2263 | pte = pte_offset_map( & _pmd, addr) |
2265 | set_pte_at(mm, addr, pte, entry) |
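
The loop starting at 2240 installs one PTE per subpage under the temporary _pmd. The report skips the construction of the present PTE between lines 2253 and 2256; the mk_pte()/maybe_mkwrite() sequence below is an assumption based on the kernel source of this era:

```c
/* Sketch of the per-subpage PTE loop (present-pte construction assumed). */
for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
	pte_t entry, *pte;

	if (freeze || pmd_migration) {
		/* Map the subpage with a migration swap entry instead. */
		swp_entry_t swp_entry = make_migration_entry(page + i, write);

		entry = swp_entry_to_pte(swp_entry);
		if (soft_dirty)
			entry = pte_swp_mksoft_dirty(entry);
	} else {
		/* Assumed: build a normal present PTE for subpage i. */
		entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
		entry = maybe_mkwrite(entry, vma);
		if (!write)
			entry = pte_wrprotect(entry);
		if (!young)
			entry = pte_mkold(entry);	/* assumed */
		if (soft_dirty)
			entry = pte_mksoft_dirty(entry);
	}
	pte = pte_offset_map(&_pmd, addr);
	set_pte_at(mm, addr, pte, entry);
	atomic_inc(&page[i]._mapcount);	/* each subpage gets a pte-level mapcount */
	pte_unmap(pte);
}
```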
2274 | If compound_mapcount(page) > 1 && Not TestSetPageDoubleMap(page) Then |
2279 | If atomic_add_negative( - 1, compound_mapcount_ptr(page)) Then |
2282 | If TestClearPageDoubleMap(page) Then |
2289 | smp_wmb() |
2290 | pmd_populate(mm, pmd, pgtable) |
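
Lines 2274-2290 adjust the compound mapcount so page_mapped() stays correct while the mapping switches from one PMD to HPAGE_PMD_NR PTEs, and only then publish the withdrawn page table through the real pmd. A hedged sketch (the loop bodies are not shown in the report and are assumed from the source of this era):

```c
/* Sketch: mapcount fix-up, then make the new page table visible. */
if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
	/* Assumed: take a pte-level mapcount on every subpage. */
	for (i = 0; i < HPAGE_PMD_NR; i++)
		atomic_inc(&page[i]._mapcount);
}

if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
	/* The last compound (PMD-level) mapping is gone. */
	if (TestClearPageDoubleMap(page)) {
		/* Assumed: drop the duplicate pte-level mapcounts again. */
		for (i = 0; i < HPAGE_PMD_NR; i++)
			atomic_dec(&page[i]._mapcount);
	}
}

smp_wmb();	/* make the PTEs visible before the PMD that points at them */
pmd_populate(mm, pmd, pgtable);
```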
2292 | If freeze Then |
2293 | Loop for each i < HPAGE_PMD_NR
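
The report ends at the loop header on 2293. In the kernel source of this era the freeze case then drops the pte-level rmap and the per-subpage reference taken at 2231; the loop body below is an assumption on that basis:

```c
/* Sketch of the freeze tail (loop body assumed). */
if (freeze) {
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		page_remove_rmap(page + i, false);	/* pte-level rmap */
		put_page(page + i);
	}
}
```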
Caller:

Name | Description
---|---
__split_huge_pmd |
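
For context, the caller listed above holds the PMD spinlock around this function. A much simplified sketch of that call site (the real __split_huge_pmd also handles mmu notifiers and other cases, omitted here):

```c
/* Simplified sketch of the call site in __split_huge_pmd() (not verbatim). */
spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);

if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))
	__split_huge_pmd_locked(vma, pmd, haddr, freeze);
spin_unlock(ptl);
```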