Function source code |
Source File:mm\mempool.c |
Create Date:2022-07-27 15:27:30 |
Home | Copyright © Brick |
739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 | #endif /* MM_SLAB_H */ #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON) static void poison_error(mempool_t *pool, void *element, size_t size, size_t byte) { const int nr = pool->curr_nr; const int start = max_t( int , byte - (BITS_PER_LONG / 8), 0); const int end = min_t( int , byte + (BITS_PER_LONG / 8), size); int i; pr_err( "BUG: mempool element poison mismatch\n" ); pr_err( "Mempool %p size %zu\n" , pool, size); pr_err( " nr=%d @ %p: %s0x" , nr, element, start > 0 ? "... " : "" ); for (i = start; i < end; i++) pr_cont( "%x " , *(u8 *)(element + i)); pr_cont( "%s\n" , end < size ? "..." : "" ); dump_stack(); } static void __check_element(mempool_t *pool, void *element, size_t size) { u8 *obj = element; size_t i; for (i = 0; i < size; i++) { u8 exp = (i < size - 1) ? 
POISON_FREE : POISON_END; if (obj[i] != exp ) { poison_error(pool, element, size, i); return ; } } memset (obj, POISON_INUSE, size); } static void check_element(mempool_t *pool, void *element) { /* Mempools backed by slab allocator */ if (pool-> free == mempool_free_slab || pool-> free == mempool_kfree) __check_element(pool, element, ksize(element)); /* Mempools backed by page allocator */ if (pool-> free == mempool_free_pages) { int order = ( int )( long )pool->pool_data; void *addr = kmap_atomic(( struct page *)element); __check_element(pool, addr, 1UL << (PAGE_SHIFT + order)); kunmap_atomic(addr); } } static void __poison_element( void *element, size_t size) { u8 *obj = element; memset (obj, POISON_FREE, size - 1); obj[size - 1] = POISON_END; } static void poison_element(mempool_t *pool, void *element) { /* Mempools backed by slab allocator */ if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) __poison_element(element, ksize(element)); /* Mempools backed by page allocator */ if (pool->alloc == mempool_alloc_pages) { int order = ( int )( long )pool->pool_data; void *addr = kmap_atomic(( struct page *)element); __poison_element(addr, 1UL << (PAGE_SHIFT + order)); kunmap_atomic(addr); } } #else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */ static inline void check_element(mempool_t *pool, void *element) { } |