Revision bcbabae8 hw/virtio.c

b/hw/virtio.c
71 71
    VRing vring;
72 72
    target_phys_addr_t pa;
73 73
    uint16_t last_avail_idx;
74
    /* Last used index value we have signalled on */
75
    uint16_t signalled_used;
76

  
77
    /* True when the signalled_used value above is still meaningful */
78
    bool signalled_used_valid;
79

  
80
    /* Notification enabled? */
81
    bool notification;
82

  
74 83
    int inuse;
84

  
75 85
    uint16_t vector;
76 86
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
77 87
    VirtIODevice *vdev;
......
140 150
    return lduw_phys(pa);
141 151
}
142 152

  
153
static inline uint16_t vring_used_event(VirtQueue *vq)
154
{
155
    return vring_avail_ring(vq, vq->vring.num);
156
}
157

  
143 158
static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
144 159
{
145 160
    target_phys_addr_t pa;
......
161 176
    return lduw_phys(pa);
162 177
}
163 178

  
164
static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val)
179
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
165 180
{
166 181
    target_phys_addr_t pa;
167 182
    pa = vq->vring.used + offsetof(VRingUsed, idx);
168
    stw_phys(pa, vring_used_idx(vq) + val);
183
    stw_phys(pa, val);
169 184
}
170 185

  
171 186
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
......
182 197
    stw_phys(pa, lduw_phys(pa) & ~mask);
183 198
}
184 199

  
200
static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
201
{
202
    target_phys_addr_t pa;
203
    if (!vq->notification) {
204
        return;
205
    }
206
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
207
    stw_phys(pa, val);
208
}
209

  
185 210
void virtio_queue_set_notification(VirtQueue *vq, int enable)
186 211
{
187
    if (enable)
212
    vq->notification = enable;
213
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
214
        vring_avail_event(vq, vring_avail_idx(vq));
215
    } else if (enable) {
188 216
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
189
    else
217
    } else {
190 218
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
219
    }
191 220
}
192 221

  
193 222
int virtio_queue_ready(VirtQueue *vq)
......
233 262

  
234 263
void virtqueue_flush(VirtQueue *vq, unsigned int count)
235 264
{
265
    uint16_t old, new;
236 266
    /* Make sure buffer is written before we update index. */
237 267
    wmb();
238 268
    trace_virtqueue_flush(vq, count);
239
    vring_used_idx_increment(vq, count);
269
    old = vring_used_idx(vq);
270
    new = old + count;
271
    vring_used_idx_set(vq, new);
240 272
    vq->inuse -= count;
273
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)))
274
        vq->signalled_used_valid = false;
241 275
}
242 276

  
243 277
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
......
394 428
    max = vq->vring.num;
395 429

  
396 430
    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
431
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
432
        vring_avail_event(vq, vring_avail_idx(vq));
433
    }
397 434

  
398 435
    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
399 436
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
......
477 514
        vdev->vq[i].last_avail_idx = 0;
478 515
        vdev->vq[i].pa = 0;
479 516
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
517
        vdev->vq[i].signalled_used = 0;
518
        vdev->vq[i].signalled_used_valid = false;
519
        vdev->vq[i].notification = true;
480 520
    }
481 521
}
482 522

  
......
626 666
    virtio_notify_vector(vq->vdev, vq->vector);
627 667
}
628 668

  
/* Assuming the other side gave us event_idx == 'event' and we have
 * just moved our index from 'old' to 'new', decide whether an event
 * must be triggered.
 *
 * Note: Xen has similar hold-off logic in
 * include/xen/interface/io/ring.h, with req_event and req_prod
 * corresponding to event_idx + 1 and new respectively.  Note also
 * that req_event and req_prod in Xen start at 1, while event indexes
 * in virtio start at 0. */
static inline int vring_need_event(uint16_t event, uint16_t new, uint16_t old)
{
    /* Trigger iff the step from old to new crossed event + 1,
     * computed modulo 2^16 so wraparound is handled naturally. */
    uint16_t distance_past_event = (uint16_t)(new - event - 1);
    uint16_t distance_moved = (uint16_t)(new - old);

    return distance_past_event < distance_moved;
}
681

  
682
static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
683
{
684
    uint16_t old, new;
685
    bool v;
631 686
    /* Always notify when queue is empty (when feature acknowledge) */
632
    if ((vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT) &&
633
        (!(vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) ||
634
         (vq->inuse || vring_avail_idx(vq) != vq->last_avail_idx)))
687
    if (((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) &&
688
         !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx)) {
689
        return true;
690
    }
691

  
692
    if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
693
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
694
    }
695

  
696
    v = vq->signalled_used_valid;
697
    vq->signalled_used_valid = true;
698
    old = vq->signalled_used;
699
    new = vq->signalled_used = vring_used_idx(vq);
700
    return !v || vring_need_event(vring_used_event(vq), new, old);
701
}
702

  
703
void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
704
{
705
    if (!vring_notify(vdev, vq)) {
635 706
        return;
707
    }
636 708

  
637 709
    trace_virtio_notify(vdev, vq);
638 710
    vdev->isr |= 0x01;
......
715 787
        vdev->vq[i].vring.num = qemu_get_be32(f);
716 788
        vdev->vq[i].pa = qemu_get_be64(f);
717 789
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
790
        vdev->vq[i].signalled_used_valid = false;
791
        vdev->vq[i].notification = true;
718 792

  
719 793
        if (vdev->vq[i].pa) {
720 794
            uint16_t nheads;

Also available in: Unified diff