
/* hw/block/xen_disk.c @ 34b5d2c6 */


/*
 *  xen paravirt block device backend
 *
 *  (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *  Contributions after 2012-01-13 are licensed under the terms of the
 *  GNU GPL, version 2 or (at your option) any later version.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "hw/hw.h"
#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"

/* ------------------------------------------------------------- */

static int batch_maps   = 0;

static int max_requests = 32;
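/*
 * Tunables (summary derived from the code below): batch_maps is enabled in
 * blk_alloc() when xen_mode != XEN_EMULATE and lets ioreq_map()/ioreq_unmap()
 * handle all grants of a request with a single xc_gnttab_map_grant_refs()/
 * xc_gnttab_munmap() call rather than one call per page.  max_requests caps
 * how many ioreq structs ioreq_start() may allocate per device, i.e. the
 * number of requests that can be in flight at once.
 */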

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;
    uint8_t             mapped;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;
    int                 num_unmap;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    BlockAcctCookie     acct;
};

struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    bool                directiosafe;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* Persistent grants extension */
    gboolean            feature_persistent;
    GTree               *persistent_gnts;
    unsigned int        persistent_gnt_count;
    unsigned int        max_grants;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->postsync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    XenGnttab gnt = grant->blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xc_gnttab_munmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}

static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    };

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
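
/*
 * Illustrative example (hypothetical values): a READ with sector_number 100
 * and a single segment first_sect = 0, last_sect = 7 yields
 *     start = 100 * 512 = 51200 bytes,
 *     one iovec entry of (7 - 0 + 1) * 512 = 4096 bytes (a full grant page).
 * At this point iov_base only holds the in-page offset (first_sect * 512);
 * ioreq_map() later adds the address of the mapped grant page to it.
 */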

static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                    GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                    /* Add the grant to the list of grants that
                     * should be mapped
                     */
                    domids[new_maps] = ioreq->domids[i];
                    refs[new_maps] = ioreq->refs[i];
                    page[i] = NULL;
                    new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps)  {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent) {
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
              && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

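/*
 * Note on aio_inflight accounting (reading of the code below):
 * ioreq_runio_qemu_aio() takes one reference for the request as a whole and
 * one more per submitted AIO; the final qemu_aio_complete(ioreq, 0) call
 * drops the request's own reference, so the ioreq is only completed once
 * every submitted AIO (and an optional postsync flush) has called back.
 */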
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
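/*
 * Worked example: with the default max_requests = 32 above and the usual
 * BLKIF_MAX_SEGMENTS_PER_REQUEST of 11, the exact worst case would be
 *     32 * 11 + 31 * 10 + 1 = 663
 * grants, while the simplified bound reserves
 *     MAX_GRANTS(32, 11) = 2 * 32 * 11 = 704.
 */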

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
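    /*
     * Example: params "raw:/dev/vg0/guest-disk" splits into fileproto "raw"
     * and filename "/dev/vg0/guest-disk"; a params value without a ':'
     * (e.g. just "/path/to/disk.img") leaves fileproto as "<unset>" and is
     * used as the filename unchanged.
     */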
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info  |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info  |= VDISK_CDROM;
    }

    blkdev->file_blk  = BLOCK_SIZE;

    /* fill info
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}

static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;

    /* read-only ? */
    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = BDRV_O_CACHE_WB;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
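    /* 202 is the xvd block major; each disk spans 16 minor numbers, so
     * (assuming the conventional xvd numbering) xvda maps to index 0,
     * xvdb to index 1, and so on. */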
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (blkdev->bs) {
            Error *local_err = NULL;
            BlockDriver *drv = bdrv_find_whitelisted_format(blkdev->fileproto,
                                                           readonly);
            if (bdrv_open(blkdev->bs,
                          blkdev->filename, NULL, qflags, drv, &local_err) != 0)
            {
                xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
                              error_get_pretty(local_err));
                error_free(local_err);
                bdrv_unref(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs) {
            return -1;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
        /* blkdev->bs is not created by us, we get a reference
         * so we can bdrv_unref() unconditionally */
        bdrv_ref(blkdev->bs);
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blkdev->bs) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }
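    /* The x86_32 and x86_64 variants exist because 32-bit and 64-bit
     * frontends lay out blkif requests with different sizes/padding; the
     * frontend advertises its ABI via the xenstore "protocol" node and the
     * backend must use the matching ring layout below. */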

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                             NULL, NULL,
                                             (GDestroyNotify)destroy_grant);
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        bdrv_detach_dev(blkdev->bs, blkdev);
        bdrv_unref(blkdev->bs);
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->bs || blkdev->sring) {
        blk_disconnect(xendev);
    }

    /* Free persistent grants */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise    = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};