include/block/aio.h @ 8c116b0e

/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#include "qemu/typedefs.h"
#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"

typedef struct BlockDriverAIOCB BlockDriverAIOCB;
typedef void BlockDriverCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel)(BlockDriverAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockDriverAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockDriverCompletionFunc *cb;
    void *opaque;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockDriverCompletionFunc *cb, void *opaque);
void qemu_aio_release(void *p);
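
/*
 * Illustrative sketch, not part of this header: drivers typically embed
 * BlockDriverAIOCB as the first field of their own AIOCB type and let
 * qemu_aio_get() size the allocation via AIOCBInfo.aiocb_size.  The
 * MyAIOCB type and my_aio_cancel() below are hypothetical names.
 *
 *     typedef struct MyAIOCB {
 *         BlockDriverAIOCB common;        // must be the first field
 *         int fd;                         // driver-specific state
 *     } MyAIOCB;
 *
 *     static void my_aio_cancel(BlockDriverAIOCB *acb) { ... }
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .cancel     = my_aio_cancel,
 *         .aiocb_size = sizeof(MyAIOCB),
 *     };
 *
 *     MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *     ...
 *     acb->common.cb(acb->common.opaque, ret);    // complete the request
 *     qemu_aio_release(acb);
 */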

typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef void IOHandler(void *opaque);

struct AioContext {
    GSource source;

    /* The list of registered AIO handlers */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* This is a simple lock used to protect the aio_handlers list.
     * Specifically, it's used to ensure that no callbacks are removed while
     * we're walking and dispatching callbacks.
     */
    int walking_handlers;

    /* Lock to protect the list of bottom halves against concurrent
     * adders and deleters.
     */
    QemuMutex bh_lock;
    /* Anchor of the list of bottom halves belonging to the context */
    struct QEMUBH *first_bh;

    /* A simple lock used to protect the first_bh list, and ensure that
     * no callbacks are removed while we're walking and dispatching callbacks.
     */
    int walking_bh;

    /* Used for aio_notify.  */
    EventNotifier notifier;

    /* GPollFDs for aio_poll() */
    GArray *pollfds;

    /* Thread pool for performing work and receiving completion callbacks */
    struct ThreadPool *thread_pool;

    /* TimerLists for calling timers - one per clock type */
    QEMUTimerListGroup tlg;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously.  It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(void);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);
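
/*
 * Illustrative sketch, not part of this header: the typical lifecycle of
 * a standalone AioContext, assuming aio_context_new() returns a context
 * holding one reference (the usual convention for GSource-based objects).
 *
 *     AioContext *ctx = aio_context_new();
 *     ...                            // use ctx: handlers, timers, BHs
 *     aio_context_unref(ctx);        // drop the initial reference
 */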

/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_wait to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_wait again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);
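
/*
 * Illustrative sketch, not part of this header: waking an event loop that
 * is blocked in aio_poll() from another thread after publishing new work.
 * The shared 'work_pending' flag is a hypothetical name.
 *
 *     work_pending = true;           // make the new work visible
 *     aio_notify(ctx);               // force the loop to re-examine state
 */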

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that aio_bh_poll must not be called concurrently with itself.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);
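
/*
 * Illustrative sketch, not part of this header: creating and scheduling a
 * bottom half.  my_bh_cb and my_state are hypothetical names.
 *
 *     static void my_bh_cb(void *opaque)
 *     {
 *         struct MyState *s = opaque;
 *         ...                        // runs in the event loop
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, my_state);
 *     qemu_bh_schedule(bh);          // wait-free, safe from any thread
 */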

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * This function is asynchronous: the bottom half is only freed once the
 * event loop has finished dispatching it.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Make progress in completing pending AIO work.  This can issue new
 * pending aio as a result of executing I/O completion or bh callbacks.
 *
 * If there is no pending AIO operation or completion (bottom half),
 * return false.  If there are pending AIO operations or bottom halves,
 * return true.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);
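
/*
 * Illustrative sketch, not part of this header: draining all outstanding
 * work on a context by looping for as long as aio_poll() reports that
 * progress was made.
 *
 *     while (aio_poll(ctx, true)) {
 *         // each iteration dispatched at least one callback
 *     }
 */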

#ifdef CONFIG_POSIX
/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler2.  Unlike qemu_set_fd_handler2, these callbacks will
 * be invoked when using qemu_aio_wait().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque);
#endif
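
/*
 * Illustrative sketch (POSIX only), not part of this header: registering a
 * read handler for a file descriptor.  my_read_cb and my_state are
 * hypothetical names.
 *
 *     static void my_read_cb(void *opaque)
 *     {
 *         struct MyState *s = opaque;
 *         ...                        // the descriptor is readable
 *     }
 *
 *     aio_set_fd_handler(ctx, fd, my_read_cb, NULL, my_state);
 *     ...
 *     aio_set_fd_handler(ctx, fd, NULL, NULL, NULL);   // unregister
 */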

/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using qemu_aio_wait().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read);
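
/*
 * Illustrative sketch, not part of this header: pairing an EventNotifier
 * with a callback on the context.  my_notifier_cb is a hypothetical name.
 *
 *     static void my_notifier_cb(EventNotifier *e)
 *     {
 *         event_notifier_test_and_clear(e);  // re-arm the notifier
 *         ...
 *     }
 *
 *     EventNotifier e;
 *     event_notifier_init(&e, 0);
 *     aio_set_event_notifier(ctx, &e, my_notifier_cb);
 *     ...
 *     event_notifier_set(&e);        // from any thread: runs my_notifier_cb
 */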

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);
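
/*
 * Illustrative sketch, not part of this header: letting the default GLib
 * main loop drive an AioContext, assuming aio_get_g_source() returns a
 * referenced GSource that the caller must release.
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, NULL);    // NULL selects the default context
 *     g_source_unref(src);           // the main context now holds a ref
 */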

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Functions to operate on the main QEMU AioContext.  */

bool qemu_aio_wait(void);
void qemu_aio_set_event_notifier(EventNotifier *notifier,
                                 EventNotifierHandler *io_read);

#ifdef CONFIG_POSIX
void qemu_aio_set_fd_handler(int fd,
                             IOHandler *io_read,
                             IOHandler *io_write,
                             void *opaque);
#endif

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init. Use that
 * unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque);
}
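
/*
 * Illustrative sketch, not part of this header: arming a timer allocated
 * with aio_timer_new().  my_timer_cb and my_state are hypothetical names.
 *
 *     static void my_timer_cb(void *opaque)
 *     {
 *         ...                        // runs when the timer expires
 *     }
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  my_timer_cb, my_state);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 */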

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init(ts, ctx->tlg.tl[type], scale, cb, opaque);
}
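
/*
 * Illustrative sketch, not part of this header: embedding a timer in a
 * caller-allocated structure and initialising it in place.  MyState and
 * my_timer_cb are hypothetical names.
 *
 *     struct MyState {
 *         QEMUTimer timer;           // storage owned by the caller
 *         ...
 *     };
 *
 *     aio_timer_init(ctx, &s->timer, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                    my_timer_cb, s);
 *     timer_mod(&s->timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 */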

#endif