Function source code

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source File: mm/zsmalloc.c  Create Date: 2022-07-27 18:03:19
Home Copyright © Brick

2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @name: pool name to be created
 *
 * This function must be called before anything when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(const char *name)
{
    int i;
    struct zs_pool *pool;
    struct size_class *prev_class = NULL;

    /* Zeroed allocation: all size_class slots start NULL, so the
     * error path can safely tear down a partially built pool. */
    pool = kzalloc(sizeof(*pool), GFP_KERNEL);
    if (!pool)
        return NULL;

    /* Set up the deferred-free machinery before any zspage can exist. */
    init_deferred_free(pool);

    /* Private copy of the caller's name; released by zs_destroy_pool(). */
    pool->name = kstrdup(name, GFP_KERNEL);
    if (!pool->name)
        goto err;

#ifdef CONFIG_COMPACTION
    /* Waiters block here until an in-flight page migration finishes. */
    init_waitqueue_head(&pool->migration_wait);
#endif

    /* Per-pool kmem caches (handles/zspages); nonzero means failure. */
    if (create_cache(pool))
        goto err;

    /*
     * Iterate reversely, because, size of size_class that we want to use
     * for merging should be larger or equal to current size.
     */
    for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
        int size;
        int pages_per_zspage;
        int objs_per_zspage;
        struct size_class *class;
        int fullness = 0;

        /* Class object size grows linearly with index, clamped at max. */
        size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
        if (size > ZS_MAX_ALLOC_SIZE)
            size = ZS_MAX_ALLOC_SIZE;
        pages_per_zspage = get_pages_per_zspage(size);
        objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;

        /*
         * We iterate from biggest down to smallest classes,
         * so huge_class_size holds the size of the first huge
         * class. Any object bigger than or equal to that will
         * endup in the huge class.
         */
        if (pages_per_zspage != 1 && objs_per_zspage != 1 &&
                !huge_class_size) {
            huge_class_size = size;
            /*
             * The object uses ZS_HANDLE_SIZE bytes to store the
             * handle. We need to subtract it, because zs_malloc()
             * unconditionally adds handle size before it performs
             * size class search - so object may be smaller than
             * huge class size, yet it still can end up in the huge
             * class because it grows by ZS_HANDLE_SIZE extra bytes
             * right before class lookup.
             */
            huge_class_size -= (ZS_HANDLE_SIZE - 1);
        }

        /*
         * size_class is used for normal zsmalloc operation such
         * as alloc/free for that size. Although it is natural that we
         * have one size_class for each size, there is a chance that we
         * can get more memory utilization if we use one size_class for
         * many different sizes whose size_class have same
         * characteristics. So, we makes size_class point to
         * previous size_class if possible.
         */
        if (prev_class) {
            if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
                /* Alias this index to the larger, compatible class. */
                pool->size_class[i] = prev_class;
                continue;
            }
        }

        class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
        if (!class)
            goto err;

        class->size = size;
        class->index = i;
        class->pages_per_zspage = pages_per_zspage;
        class->objs_per_zspage = objs_per_zspage;
        spin_lock_init(&class->lock);
        pool->size_class[i] = class;
        /* Empty per-fullness-group zspage lists for this class. */
        for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
                            fullness++)
            INIT_LIST_HEAD(&class->fullness_list[fullness]);

        prev_class = class;
    }

    /* debug only, don't abort if it fails */
    zs_pool_stat_create(pool, name);

    if (zs_register_migration(pool))
        goto err;

    /*
     * Not critical since shrinker is only used to trigger internal
     * defragmentation of the pool which is pretty optional thing.  If
     * registration fails we still can use the pool normally and user can
     * trigger compaction manually. Thus, ignore return code.
     */
    zs_register_shrinker(pool);

    return pool;

err:
    /* zs_destroy_pool() copes with a partially initialized pool. */
    zs_destroy_pool(pool);
    return NULL;
}