Function Source Code

Linux Kernel v5.5.9

Source file: fs/aio.c (lines 1084-1158)

/* aio_complete
 *  Called when the io request on the given iocb is complete.
 */
static void aio_complete(struct aio_kiocb *iocb)
{
    struct kioctx   *ctx = iocb->ki_ctx;
    struct aio_ring *ring;
    struct io_event *ev_page, *event;
    unsigned tail, pos, head;
    unsigned long   flags;
 
    /*
     * Add a completion event to the ring buffer. Must be done holding
     * ctx->completion_lock to prevent other code from messing with the tail
     * pointer since we might be called from irq context.
     */
    spin_lock_irqsave(&ctx->completion_lock, flags);
 
    tail = ctx->tail;
    pos = tail + AIO_EVENTS_OFFSET;
 
    if (++tail >= ctx->nr_events)
        tail = 0;
 
    ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
    event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
    *event = iocb->ki_res;
 
    kunmap_atomic(ev_page);
    flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
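    /*
     * Index math, for readers new to this ring: the first ring page
     * begins with the struct aio_ring header and so holds only
     * AIO_EVENTS_FIRST_PAGE events, while every later page holds
     * AIO_EVENTS_PER_PAGE.  Biasing 'tail' by AIO_EVENTS_OFFSET (the
     * difference between the two, defined earlier in fs/aio.c) makes
     * the divide/modulo above land on the correct page and slot.
     */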
 
    pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
         (void __user *)(unsigned long)iocb->ki_res.obj,
         iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
 
    /* after flagging the request as done, we
     * must never even look at it again
     */
    smp_wmb();  /* make event visible before updating tail */
 
    ctx->tail = tail;
 
    ring = kmap_atomic(ctx->ring_pages[0]);
    head = ring->head;
    ring->tail = tail;
    kunmap_atomic(ring);
    flush_dcache_page(ctx->ring_pages[0]);
 
    ctx->completed_events++;
    if (ctx->completed_events > 1)
        refill_reqs_available(ctx, head, tail);
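    /*
     * refill_reqs_available() (defined earlier in fs/aio.c) compares
     * completed_events against what still sits in the ring between
     * head and tail, and returns the surplus (events userspace has
     * already consumed) to ctx->reqs_available so those submission
     * slots can be reused.
     */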
    spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
    pr_debug("added to ring %p at [%u]\n", iocb, tail);
 
    /*
     * Check if the user asked us to deliver the result through an
     * eventfd. The eventfd_signal() function is safe to be called
     * from IRQ context.
     */
    if (iocb->ki_eventfd)
        eventfd_signal(iocb->ki_eventfd, 1);
 
    /*
     * We have to order our ring_info tail store above and test
     * of the wait list below outside the wait lock.  This is
     * like in wake_up_bit() where clearing a bit has to be
     * ordered with the unlocked test.
     */
    smp_mb();
 
    if (waitqueue_active(&ctx->wait))
        wake_up(&ctx->wait);
}
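
The userspace side, for context: what follows is a minimal sketch (not part of fs/aio.c) of a consumer for the events aio_complete() publishes, using the raw io_setup/io_submit/io_getevents syscalls. The ring size of 128, the file name, and the buffer size are arbitrary illustrative choices; error handling is omitted for brevity.

#include <fcntl.h>
#include <linux/aio_abi.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    aio_context_t ctx = 0;
    struct iocb cb;
    struct iocb *cbs[1] = { &cb };
    struct io_event ev;
    static char buf[4096];
    int fd = open("/etc/hostname", O_RDONLY);   /* illustrative file */

    /* io_setup() creates the kioctx and its mmap'ed aio_ring. */
    syscall(__NR_io_setup, 128, &ctx);

    memset(&cb, 0, sizeof(cb));
    cb.aio_lio_opcode = IOCB_CMD_PREAD;
    cb.aio_fildes = fd;
    cb.aio_buf = (unsigned long)buf;
    cb.aio_nbytes = sizeof(buf);

    syscall(__NR_io_submit, ctx, 1, cbs);

    /*
     * Blocks until aio_complete() copies iocb->ki_res into the ring,
     * publishes the new tail, and wakes ctx->wait; ev.res then holds
     * the byte count (or negative errno) stored above as ki_res.res.
     */
    syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL);
    printf("read %lld bytes\n", (long long)ev.res);

    syscall(__NR_io_destroy, ctx);
    close(fd);
    return 0;
}

Setting cb.aio_flags = IOCB_FLAG_RESFD and cb.aio_resfd to an eventfd file descriptor before io_submit() is what leaves iocb->ki_eventfd non-NULL above, so completion additionally signals the eventfd via eventfd_signal().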