Function Source Code

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source File: mm/vmscan.c (lines 1907-2005)    Create Date: 2022-07-27 15:42:23

/*
 * shrink_inactive_list() is a helper for shrink_node().  It returns the number
 * of reclaimed pages
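 *
 * Overall flow: throttle if too many pages are already isolated, take a
 * batch of pages off the tail of the inactive list, run them through
 * shrink_page_list(), put back whatever could not be reclaimed and update
 * the reclaim statistics.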
 */
static noinline_for_stack unsigned long
shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
             struct scan_control *sc, enum lru_list lru)
{
    LIST_HEAD(page_list);
    unsigned long nr_scanned;
    unsigned long nr_reclaimed = 0;
    unsigned long nr_taken;
    struct reclaim_stat stat;
    int file = is_file_lru(lru);
    enum vm_event_item item;
    struct pglist_data *pgdat = lruvec_pgdat(lruvec);
    struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
    bool stalled = false;
 
    /*
     * Throttle direct reclaim when too many pages from this node are
     * already isolated; wait briefly for them to be put back or freed.
     */
    while (unlikely(too_many_isolated(pgdat, file, sc))) {
        if (stalled)
            return 0;
 
        /* wait a bit for the reclaimer. */
        msleep(100);
        stalled = true;
 
        /* We are about to die and free our memory. Return now. */
        if (fatal_signal_pending(current))
            return SWAP_CLUSTER_MAX;
    }
 
    /* Drain the per-CPU LRU pagevecs so their pages can be isolated below. */
    lru_add_drain();
 
    spin_lock_irq(&pgdat->lru_lock);
 
    /* Move up to nr_to_scan pages from the tail of this LRU onto page_list. */
    nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
                     &nr_scanned, sc, lru);
 
    /* file is 0 or 1, selecting NR_ISOLATED_ANON or NR_ISOLATED_FILE. */
    __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
    reclaim_stat->recent_scanned[file] += nr_taken;
 
    item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
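    /*
     * Scans by kswapd and by direct reclaim are counted separately; the
     * global vmstat counters are only bumped for non-cgroup reclaim, while
     * the memcg counters are always updated.
     */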
    if (!cgroup_reclaim(sc))
        __count_vm_events(item, nr_scanned);
    __count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
    spin_unlock_irq(&pgdat->lru_lock);
 
    if (nr_taken == 0)
        return 0;
 
    /* Try to reclaim the isolated pages; stat reports their fate. */
    nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, 0,
                &stat, false);
 
    spin_lock_irq(&pgdat->lru_lock);
 
    item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
    if (!cgroup_reclaim(sc))
        __count_vm_events(item, nr_reclaimed);
    __count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
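    /* Re-activated pages count as rotated for the anon/file LRU balance. */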
    reclaim_stat->recent_rotated[0] += stat.nr_activate[0];
    reclaim_stat->recent_rotated[1] += stat.nr_activate[1];
 
    /*
     * Put the pages that were not reclaimed back on their LRU lists; pages
     * whose last reference went away are left on page_list to be freed.
     */
    move_pages_to_lru(lruvec, &page_list);
 
    __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
 
    spin_unlock_irq(&pgdat->lru_lock);
 
    /* Uncharge and free whatever move_pages_to_lru() left on page_list. */
    mem_cgroup_uncharge_list(&page_list);
    free_unref_page_list(&page_list);
 
    /*
     * If dirty pages are scanned that are not queued for IO, it
     * implies that flushers are not doing their job. This can
     * happen when memory pressure pushes dirty pages to the end of
     * the LRU before the dirty limits are breached and the dirty
     * data has expired. It can also happen when the proportion of
     * dirty pages grows not through writes but through memory
     * pressure reclaiming all the clean cache. And in some cases,
     * the flushers simply cannot keep up with the allocation
     * rate. Nudge the flusher threads in case they are asleep.
     */
    if (stat.nr_unqueued_dirty == nr_taken)
        wakeup_flusher_threads(WB_REASON_VMSCAN);
 
    /* Propagate the batch statistics so shrink_node() can detect congestion. */
    sc->nr.dirty += stat.nr_dirty;
    sc->nr.congested += stat.nr_congested;
    sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
    sc->nr.writeback += stat.nr_writeback;
    sc->nr.immediate += stat.nr_immediate;
    sc->nr.taken += nr_taken;
    if (file)
        sc->nr.file_taken += nr_taken;
 
    trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
            nr_scanned, nr_reclaimed, &stat, sc->priority, file);
    return nr_reclaimed;
}