Function report

Linux Kernel

v5.5.9

Brick Technologies Co., Ltd

Source Code: block/blk-mq.c    Create Date: 2022-07-28 17:10:33
Last Modify: 2020-03-17 23:18:05    Copyright © Brick

Name:blk_mq_make_request

Proto:static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)

Type:blk_qc_t

Parameter:

Type                        Name
struct request_queue *      q
struct bio *                bio
1910  is_sync = op_is_sync(bio->bi_opf): reads are always treated as synchronous, as are requests with the FUA or PREFLUSH flag; other operations may be marked synchronous with REQ_SYNC.
1911  is_flush_fua = op_is_flush(bio->bi_opf): checks whether the bio is one that needs special treatment in the flush state machine.
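For reference, both predicates are simple bit tests on bio->bi_opf; as defined in include/linux/blk_types.h around this kernel version they read:

    static inline bool op_is_flush(unsigned int op)
    {
            return op & (REQ_FUA | REQ_PREFLUSH);
    }

    static inline bool op_is_sync(unsigned int op)
    {
            return (op & REQ_OP_MASK) == REQ_OP_READ ||
                    (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
    }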
1912  struct blk_mq_alloc_data data = { .flags = 0 }
1915  struct request *same_queue_rq = NULL
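The descriptor declared at line 1912 carries the inputs to request allocation and receives the chosen software/hardware queue contexts; struct blk_mq_alloc_data in block/blk-mq.h reads approximately:

    struct blk_mq_alloc_data {
            /* input parameter */
            struct request_queue *q;
            blk_mq_req_flags_t flags;
            unsigned int shallow_depth;
            unsigned int cmd_flags;

            /* input & output parameter */
            struct blk_mq_ctx *ctx;
            struct blk_mq_hw_ctx *hctx;
    };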
1919  blk_queue_bounce(q, &bio)
1920  __blk_queue_split(q, &bio, &nr_segs): split the bio in two if it exceeds the queue limits, chain the halves, and submit the second half; nr_segs returns the number of segments in the first half.
1922  If Not bio_integrity_prep(bio) Then Return BLK_QC_T_NONE (prepares the bio for integrity I/O; if it already carries an integrity payload generated by another kernel subsystem, it is passed through unchanged)
1925  If Not is_flush_fua && Not blk_queue_nomerges(q) && blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq) Then Return BLK_QC_T_NONE (tries to merge the bio with %current's plugged list; same_queue_rq is filled in when another request from the same queue is found)
1929  If blk_mq_sched_bio_merge(q, bio, nr_segs) Then Return BLK_QC_T_NONE
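Taken together, lines 1922–1929 form the early-exit fast path: a bio that fails integrity preparation or is merged into an existing request never allocates one. Condensed, the control flow is:

    if (!bio_integrity_prep(bio))
            return BLK_QC_T_NONE;           /* integrity payload could not be prepared */

    if (!is_flush_fua && !blk_queue_nomerges(q) &&
        blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
            return BLK_QC_T_NONE;           /* merged into %current's plugged list */

    if (blk_mq_sched_bio_merge(q, bio, nr_segs))
            return BLK_QC_T_NONE;           /* merged by the I/O scheduler */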
1932  rq_qos_throttle(q, bio)
1934  data.cmd_flags = bio->bi_opf (bottom bits request flags, top bits REQ_OP; use accessors)
1935  rq = blk_mq_get_request(q, bio, &data)
1936  If unlikely(!rq) Then (unlikely() tells the compiler the failure branch is the cold path)
1937  rq_qos_cleanup(q, bio)
1938  If bio->bi_opf & REQ_NOWAIT Then bio_wouldblock_error(bio)
1940  Return BLK_QC_T_NONE
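bio_wouldblock_error() completes the bio immediately instead of sleeping, which is what REQ_NOWAIT submitters (e.g. io_uring) expect; in include/linux/bio.h it is simply:

    static inline void bio_wouldblock_error(struct bio *bio)
    {
            bio->bi_status = BLK_STS_AGAIN;
            bio_endio(bio);
    }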
1943  trace_block_getrq(q, bio, bio->bi_opf): trace point recording that a free request entry in queue @q has been allocated to handle the pending block I/O operation.
1945  rq_qos_track(q, rq, bio)
1947  cookie = request_to_qc_t(data.hctx, rq)
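The cookie packs the hardware queue number together with the request's driver or scheduler tag, so completion polling can later locate the right queue; request_to_qc_t() in include/linux/blk-mq.h reads approximately:

    static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
                    struct request *rq)
    {
            if (rq->tag != -1)
                    return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);

            return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
                            BLK_QC_T_INTERNAL;
    }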
1949  blk_mq_bio_to_request(rq, bio, nr_segs)
1951  plug = blk_mq_plug(q, bio): get the caller context's plug; plugging, by design, may delay the insertion of bios into the elevator in order to increase merging opportunities.
1952  If unlikely(is_flush_fua) Then
1954  blk_insert_flush(rq): insert the new PREFLUSH/FUA request, bypassing the scheduler.
1955  blk_mq_run_hw_queue(data.hctx, true)
1956  Else if plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs || Not blk_queue_nonrot(q)) Then (use plugging when there is a single hardware queue, when the driver has a ->commit_rqs() hook and so uses bd->last intelligently, or when the disk is a slow rotational device, since sequential I/O benefits greatly from plug merging)
1965  request_count = plug->rq_count
1966  struct request *last = NULL
1968  If Not request_count Then trace_block_plug(q): trace point recording that request queue @q has been plugged, so block operation requests accumulate in the plug list instead of being sent to the device driver.
1970  Else last = list_entry_rq(plug->mq_list.prev)
1975  blk_flush_plug_list(plug, false): drain the plug list; this happens only when the plug has grown too large, as sketched below.
1979  blk_add_rq_to_plug(plug, rq)
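The flush at line 1975 is conditional: the plug is drained only once it holds BLK_MAX_REQUEST_COUNT requests or the last queued request has reached BLK_PLUG_FLUSH_SIZE bytes. The surrounding code (around lines 1972–1979) reads approximately:

    if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
        blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
            blk_flush_plug_list(plug, false);
            trace_block_plug(q);
    }

    blk_add_rq_to_plug(plug, rq);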
1980  Else if q->elevator Then
1981  blk_mq_sched_insert_request(rq, false, true, true): hand the request to the I/O scheduler (at_head = false, run_queue = true, async = true)
1982  Else if plug && Not blk_queue_nomerges(q) Then
1990  If list_empty(&plug->mq_list) Then same_queue_rq = NULL
1992  If same_queue_rq Then
1994  plug->rq_count-- (same_queue_rq has just been unlinked from the plug list)
1996  blk_add_rq_to_plug(plug, rq)
1997  trace_block_plug(q): trace point recording that request queue @q has been plugged.
1999  If same_queue_rq Then
2000  data.hctx = same_queue_rq->mq_hctx
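This limited-plugging branch (around lines 1990–2002) always plugs the new request; if the earlier merge attempt found a previously plugged request on the same queue, that older request is unlinked and issued directly. Approximately:

    if (same_queue_rq) {
            list_del_init(&same_queue_rq->queuelist);
            plug->rq_count--;
    }
    blk_add_rq_to_plug(plug, rq);
    trace_block_plug(q);

    if (same_queue_rq) {
            data.hctx = same_queue_rq->mq_hctx;
            trace_block_unplug(q, 1, true);
            blk_mq_try_issue_directly(data.hctx, same_queue_rq, &cookie);
    }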
2005  Else if (q->nr_hw_queues > 1 && is_sync) || Not data.hctx->dispatch_busy Then (dispatch_busy is an exponentially weighted moving average maintained by blk_mq_update_dispatch_busy() and used to decide whether the hardware queue is busy)
2007  blk_mq_try_issue_directly(data.hctx, rq, &cookie)
2008  Else
2009  blk_mq_sched_insert_request(rq, false, true, true)
2012  Return cookie
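The returned cookie is what polled submitters hand back to blk_poll() to spin for completion. A minimal caller-side sketch, assuming a poll-capable queue and a REQ_HIPRI bio:

    blk_qc_t cookie = submit_bio(bio);      /* ends up in blk_mq_make_request() for blk-mq queues */

    if (blk_qc_t_valid(cookie))
            blk_poll(q, cookie, true);      /* spin until the request completes */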