Function report

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source Code: kernel/bpf/bpf_lru_list.c    Create Date: 2022-07-28 13:12:44
Last Modify: 2020-03-12 14:18:49    Copyright © Brick
home page Tree
Annotation kernel can get tool activity · Download SCCT · Chinese

Name:bpf_common_lru_pop_free

Proto:static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru, u32 hash)

Type: struct bpf_lru_node *

Parameter:

Type            Parameter Name
struct bpf_lru *    lru
u32                 hash
435  clru = common_lru
439  cpu = raw_smp_processor_id() ("These macros fold the SMP functionality into a single CPU system")
441  loc_l = per_cpu_ptr(local_list, cpu)
443  raw_spin_lock_irqsave( & lock, flags)
445  node = __local_list_pop_free(loc_l)
446  If Not node Then
447  bpf_lru_list_pop_free_to_local(lru, loc_l)
448  node = __local_list_pop_free(loc_l)
451  If node Then __local_list_add_pending(lru, loc_l, cpu, node, hash)
454  raw_spin_unlock_irqrestore( & lock, flags)
456  If node Then Return node
467  first_steal = next_steal
468  steal = first_steal
469  Do
470  steal_loc_l = per_cpu_ptr(local_list, steal)
472  raw_spin_lock_irqsave( & lock, flags)
474  node = __local_list_pop_free(steal_loc_l)
475  If Not node Then node = __local_list_pop_pending(lru, steal_loc_l)
478  raw_spin_unlock_irqrestore( & lock, flags)
480  steal = get_next_cpu(steal)
481  Repeat (do/while) while Not node And steal != first_steal
483  next_steal = steal
485  If node Then
486  raw_spin_lock_irqsave( & lock, flags)
487  __local_list_add_pending(lru, loc_l, cpu, node, hash)
488  raw_spin_unlock_irqrestore( & lock, flags)
491  Return node
Caller
Name    Description
bpf_lru_pop_free