Function source code

Linux Kernel

v5.5.9

Source File: mm/mlock.c

/*
 * Fill up pagevec for __munlock_pagevec using pte walk
 *
 * The function expects that the struct page corresponding to @start address is
 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
 *
 * The rest of @pvec is filled by subsequent pages within the same pmd and same
 * zone, as long as the ptes are present and vm_normal_page() succeeds. These
 * pages also get pinned.
 *
 * Returns the address of the next page that should be scanned. This equals
 * @start + PAGE_SIZE when no page could be added by the pte walk.
 */
static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
            struct vm_area_struct *vma, struct zone *zone,
            unsigned long start, unsigned long end)
{
    pte_t *pte;
    spinlock_t *ptl;
 
    /*
     * Initialize pte walk starting at the already pinned page where we
     * are sure that there is a pte, as it was pinned under the same
     * mmap_sem write op.
     */
    pte = get_locked_pte(vma->vm_mm, start, &ptl);
    /* Make sure we do not cross the page table boundary */
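    /*
     * The pte pointer obtained above is only valid within a single page
     * table page, so @end is clamped at every level below rather than
     * relying on any one helper alone.
     */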
    end = pgd_addr_end(start, end);
    end = p4d_addr_end(start, end);
    end = pud_addr_end(start, end);
    end = pmd_addr_end(start, end);
 
    /* The page next to the pinned page is the first we will try to get */
    start += PAGE_SIZE;
    while (start < end) {
        struct page *page = NULL;
        pte++;
        if (pte_present(*pte))
            page = vm_normal_page(vma, start, *pte);
        /*
         * Break if page could not be obtained or the page's
         * node+zone does not match
         */
        if (!page || page_zone(page) != zone)
            break;
 
        /*
         * Do not use pagevec for PTE-mapped THP,
         * munlock_vma_pages_range() will handle them.
         */
        if (PageTransCompound(page))
            break;
 
        get_page(page);
        /*
         * Increase the address that will be returned *before* the
         * eventual break due to pvec becoming full by adding the page
         */
        start += PAGE_SIZE;
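        /* pagevec_add() returns the slots left; 0 means pvec is now full */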
        if (pagevec_add(pvec, page) == 0)
            break;
    }
    pte_unmap_unlock(pte, ptl);
    return start;
}
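
For context, here is a minimal sketch of how this helper is driven by its caller. It is modeled on munlock_vma_pages_range() in the same file, with the THP and page_mask handling trimmed away; munlock_range_sketch is a hypothetical name used for illustration, while follow_page(), __munlock_pagevec() and the pagevec API are the real mm/ internals the v5.5 caller uses. Treat it as an outline of the calling contract, not a verbatim copy of the kernel code.

/*
 * Simplified caller loop (modeled on munlock_vma_pages_range();
 * THP handling and the page_mask stride are omitted for brevity).
 */
static void munlock_range_sketch(struct vm_area_struct *vma,
            unsigned long start, unsigned long end)
{
    while (start < end) {
        struct pagevec pvec;
        struct zone *zone;
        struct page *page;

        pagevec_init(&pvec);
        /*
         * Pin the page at @start, satisfying the precondition of
         * __munlock_pagevec_fill(): the first page is already held.
         */
        page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
        if (page && !IS_ERR(page) && !PageTransCompound(page)) {
            pagevec_add(&pvec, page);
            zone = page_zone(page);
            /*
             * Batch neighbouring small pages from the same zone;
             * the return value is the next address to scan.
             */
            start = __munlock_pagevec_fill(&pvec, vma, zone,
                        start, end);
            __munlock_pagevec(&pvec, zone);
        } else {
            /* THP and error paths of the real caller omitted */
            if (page && !IS_ERR(page))
                put_page(page);
            start += PAGE_SIZE;
        }
        cond_resched();
    }
}

Note how the return value of __munlock_pagevec_fill() feeds straight back into the scan position: when the pte walk could not add any page it is simply start + PAGE_SIZE, so the outer loop still makes progress one page at a time, and when pages were batched the loop resumes after the last page added to the pagevec.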