Function report |
Source Code:mm\rmap.c |
Create Date:2022-07-28 14:57:00 |
| Last Modify:2020-03-12 14:18:49 | Copyright©Brick |
| home page | Tree |
| Annotation kernel can get tool activity | Download SCCT | Chinese |
Name: try_to_unmap_one (@arg: enum ttu_flags will be passed to this argument)
Proto:static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg)
Type:bool
Parameter:
| Type | Parameter | Name |
|---|---|---|
| struct page * | page | |
| struct vm_area_struct * | vma | |
| unsigned long | address | |
| void * | arg |
| 1377 | bool ret = true |
| 1382 | If flags & TTU_MUNLOCK (munlock mode) && Not (vma->vm_flags & VM_LOCKED) Then Return true |
| 1385 | If IS_ENABLED(CONFIG_MIGRATION) (evaluates to 1 if CONFIG_MIGRATION is set to 'y' or 'm', 0 otherwise) && flags & migration mode && is_zone_device_page(page) && Not is_device_private_page(page) Then Return true |
| 1389 | If flags & split huge PMD if any Then |
| 1410 | adjust_range_if_pmd_sharing_possible(vma, & start, & end) |
| 1413 | mmu_notifier_invalidate_range_start( & range) |
| 1431 | If Not (flags & ignore mlock ) Then |
| 1432 | If vma->vm_flags & VM_LOCKED Then |
| 1445 | If flags & munlock mode Then Continue |
| 1450 | VM_BUG_ON_PAGE(!pte, page) |
| 1456 | If huge_pmd_unshare(mm, & address, pte) Then |
| 1464 | flush_cache_range(vma, start, end) |
| 1465 | flush_tlb_range(vma, start, end) |
| 1466 | mmu_notifier_invalidate_range(mm, start, end) |
| 1478 | page_vma_mapped_walk_done( & pvmw) |
| 1479 | Break |
| 1489 | pteval = ptep_get_and_clear(mm, address, pte) |
| 1496 | entry = make_migration_entry(page, 0) |
| 1497 | swp_pte = swp_entry_to_pte(entry) (converts the arch-independent swp_entry_t into the arch-dependent pte representation) |
| 1498 | If pte_soft_dirty(pteval) Then swp_pte = pte_swp_mksoft_dirty(swp_pte) |
| 1500 | set_pte_at(mm, address, pte, swp_pte) |
| 1513 | Go to discard |
| 1517 | If ptep_clear_flush_young_notify(vma, address, pte) Then |
| 1526 | flush_cache_page(vma, address, pte_pfn( * pte)) |
| 1527 | If should_defer_flush(mm, flags) Then |
| 1536 | pteval = ptep_get_and_clear(mm, address, pte) |
| 1539 | Else |
| 1540 | pteval = ptep_clear_flush(vma, address, pte) |
| 1544 | If pte_dirty(pteval) (only valid when pte_present() is true; undefined behaviour otherwise) Then set_page_dirty(page) |
| 1548 | update_hiwater_rss(mm) |
| 1550 | If PageHWPoison(page) && Not (flags & corrupted page is recoverable ) Then |
| 1551 | pteval = swp_entry_to_pte(...) (converts the arch-independent swp_entry_t into the arch-dependent pte representation) |
| 1554 | set_huge_swap_pte_at(mm, address, pte, pteval, vma_mmu_pagesize(vma)) |
| 1557 | Else |
| 1558 | dec_mm_counter(mm, mm_counter(page)) |
| 1559 | set_pte_at(mm, address, pte, pteval) |
| 1573 | dec_mm_counter(mm, mm_counter(page)) |
| 1575 | mmu_notifier_invalidate_range(mm, address, address + PAGE_SIZE) |
| 1582 | If arch_unmap_one(...) < 0 (hook for architectures that keep metadata associated with a page) Then |
| 1583 | set_pte_at(mm, address, pte, pteval) |
| 1584 | ret = false |
| 1585 | page_vma_mapped_walk_done( & pvmw) |
| 1586 | Break |
| 1594 | entry = make_migration_entry(subpage, pte_write(pteval)) |
| 1596 | swp_pte = swp_entry_to_pte(entry) (converts the arch-independent swp_entry_t into the arch-dependent pte representation) |
| 1597 | If pte_soft_dirty(pteval) Then swp_pte = pte_swp_mksoft_dirty(swp_pte) |
| 1599 | set_pte_at(mm, address, pte, swp_pte) |
| 1605 | swp_entry_t entry = { .val = page_private(subpage) } |
| 1612 | WARN_ON_ONCE(1) |
| 1613 | ret = false |
| 1615 | mmu_notifier_invalidate_range(mm, address, address + PAGE_SIZE) |
| 1617 | page_vma_mapped_walk_done( & pvmw) |
| 1618 | Break |
| 1622 | If Not PageSwapBacked(page) Then |
| 1625 | mmu_notifier_invalidate_range(mm, address, address + PAGE_SIZE) |
| 1627 | dec_mm_counter(mm, MM_ANONPAGES) |
| 1628 | Go to discard |
| 1635 | set_pte_at(mm, address, pte, pteval) |
| 1636 | SetPageSwapBacked(page) |
| 1637 | ret = false |
| 1638 | page_vma_mapped_walk_done( & pvmw) |
| 1639 | Break |
| 1642 | If swap_duplicate(entry) < 0 Then |
| 1643 | set_pte_at(mm, address, pte, pteval) |
| 1644 | ret = false |
| 1645 | page_vma_mapped_walk_done( & pvmw) |
| 1646 | Break |
| 1648 | If arch_unmap_one(...) < 0 (hook for architectures that keep metadata associated with a page) Then |
| 1649 | set_pte_at(mm, address, pte, pteval) |
| 1650 | ret = false |
| 1651 | page_vma_mapped_walk_done( & pvmw) |
| 1652 | Break |
| 1655 | spin_lock( & mmlist_lock) |
| 1658 | spin_unlock( & mmlist_lock) |
| 1660 | dec_mm_counter(mm, MM_ANONPAGES) |
| 1661 | inc_mm_counter(mm, MM_SWAPENTS) |
| 1662 | swp_pte = swp_entry_to_pte(entry) (converts the arch-independent swp_entry_t into the arch-dependent pte representation) |
| 1663 | If pte_soft_dirty(pteval) Then swp_pte = pte_swp_mksoft_dirty(swp_pte) |
| 1665 | set_pte_at(mm, address, pte, swp_pte) |
| 1667 | mmu_notifier_invalidate_range(mm, address, address + PAGE_SIZE) |
| 1669 | Else |
| 1682 | discard : |
| 1694 | mmu_notifier_invalidate_range_end( & range) |
| 1696 | Return ret |
| Source code conversion tool public plug-in interface | X |
|---|---|
| Support c/c++/esqlc/java Oracle/Informix/Mysql Plug-in can realize: logical Report Code generation and batch code conversion |