Revision 46daff13 cpus.c

--- a/cpus.c
+++ b/cpus.c
@@ -636,7 +636,8 @@
 #else /* CONFIG_IOTHREAD */
 
 QemuMutex qemu_global_mutex;
-static QemuMutex qemu_fair_mutex;
+static QemuCond qemu_io_proceeded_cond;
+static bool iothread_requesting_mutex;
 
 static QemuThread io_thread;
 
@@ -672,7 +673,7 @@
     qemu_cond_init(&qemu_system_cond);
     qemu_cond_init(&qemu_pause_cond);
     qemu_cond_init(&qemu_work_cond);
-    qemu_mutex_init(&qemu_fair_mutex);
+    qemu_cond_init(&qemu_io_proceeded_cond);
     qemu_mutex_init(&qemu_global_mutex);
     qemu_mutex_lock(&qemu_global_mutex);
 
@@ -755,17 +756,9 @@
         qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
     }
 
-    qemu_mutex_unlock(&qemu_global_mutex);
-
-    /*
-     * Users of qemu_global_mutex can be starved, having no chance
-     * to acquire it since this path will get to it first.
-     * So use another lock to provide fairness.
-     */
-    qemu_mutex_lock(&qemu_fair_mutex);
-    qemu_mutex_unlock(&qemu_fair_mutex);
-
-    qemu_mutex_lock(&qemu_global_mutex);
+    while (iothread_requesting_mutex) {
+        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
+    }
 
     for (env = first_cpu; env != NULL; env = env->next_cpu) {
         qemu_wait_io_event_common(env);
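The hand-off that the removed comment describes is easiest to see in isolation. Below is a minimal, self-contained pthreads sketch of that old scheme (all names, such as global_mutex, fair_mutex, and cpu_thread, are stand-ins for illustration, not QEMU code): the CPU thread holds the global lock almost continuously, so it periodically releases it and passes through a second mutex that any contender grabs first, which guarantees the contender a turn.

    /* fair_handoff.c -- sketch of the removed fairness dance.
     * Build: cc fair_handoff.c -o fair_handoff -lpthread */
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t fair_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int done;                  /* written under global_mutex */

    /* Stand-in for the TCG VCPU loop: it holds global_mutex nearly all
     * the time, so a bare unlock/relock would rarely let anyone else in. */
    static void *cpu_thread(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&global_mutex);
        while (!done) {
            /* ... run guest code under global_mutex ... */

            /* The hand-off: contenders acquire fair_mutex before
             * global_mutex, so passing through it guarantees a waiting
             * iothread gets its turn at global_mutex. */
            pthread_mutex_unlock(&global_mutex);
            pthread_mutex_lock(&fair_mutex);
            pthread_mutex_unlock(&fair_mutex);
            pthread_mutex_lock(&global_mutex);
        }
        pthread_mutex_unlock(&global_mutex);
        return NULL;
    }

    int main(void)
    {
        pthread_t cpu;
        pthread_create(&cpu, NULL, cpu_thread, NULL);

        /* The "iothread": goes through fair_mutex first, like the old
         * slow path in qemu_mutex_lock_iothread(). */
        for (int i = 0; i < 5; i++) {
            pthread_mutex_lock(&fair_mutex);
            pthread_mutex_lock(&global_mutex);
            pthread_mutex_unlock(&fair_mutex);
            printf("iothread acquired global_mutex (%d)\n", i);
            pthread_mutex_unlock(&global_mutex);
            usleep(1000);
        }

        pthread_mutex_lock(&fair_mutex);
        pthread_mutex_lock(&global_mutex);
        pthread_mutex_unlock(&fair_mutex);
        done = 1;
        pthread_mutex_unlock(&global_mutex);

        pthread_join(cpu, NULL);
        return 0;
    }

The cost of this scheme is that the VCPU thread performs the unlock/lock/unlock/lock sequence on every pass, contended or not, which is what this revision removes.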
@@ -908,12 +901,13 @@
     if (kvm_enabled()) {
         qemu_mutex_lock(&qemu_global_mutex);
     } else {
-        qemu_mutex_lock(&qemu_fair_mutex);
+        iothread_requesting_mutex = true;
         if (qemu_mutex_trylock(&qemu_global_mutex)) {
             qemu_cpu_kick_thread(first_cpu);
             qemu_mutex_lock(&qemu_global_mutex);
         }
-        qemu_mutex_unlock(&qemu_fair_mutex);
+        iothread_requesting_mutex = false;
+        qemu_cond_broadcast(&qemu_io_proceeded_cond);
     }
 }
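For comparison, here is the same scenario with the flag-and-condvar protocol this revision adopts, again as a self-contained pthreads sketch rather than QEMU code. Two liberties are taken: the request flag is a C11 atomic here, since the sketch has no equivalent of qemu_cpu_kick_thread() to force the VCPU thread out of guest code, and the VCPU stub simply polls the flag between work units. The shape of both sides otherwise matches the diff: the iothread announces its request, takes the lock (the kick on a failed trylock is elided), withdraws the request, and broadcasts; the VCPU thread sleeps on the condvar only while a request is pending.

    /* io_request.c -- sketch of the new flag+condvar protocol.
     * Build: cc io_request.c -o io_request -lpthread */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t io_proceeded_cond = PTHREAD_COND_INITIALIZER;
    static atomic_bool iothread_requesting_mutex;
    static bool done;                 /* written under global_mutex */

    /* VCPU side: instead of dropping and re-taking global_mutex on
     * every pass, it sleeps on the condvar only while the iothread
     * has announced that it wants the lock. */
    static void *cpu_thread(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&global_mutex);
        while (!done) {
            while (atomic_load(&iothread_requesting_mutex)) {
                /* cond_wait releases global_mutex; the iothread gets in here */
                pthread_cond_wait(&io_proceeded_cond, &global_mutex);
            }
            /* ... run guest code under global_mutex ... */
        }
        pthread_mutex_unlock(&global_mutex);
        return NULL;
    }

    /* iothread side, mirroring the new else-branch of
     * qemu_mutex_lock_iothread(): announce the request, take the lock,
     * withdraw the request, wake the VCPU thread. */
    static void iothread_lock_global(void)
    {
        atomic_store(&iothread_requesting_mutex, true);
        pthread_mutex_lock(&global_mutex);
        atomic_store(&iothread_requesting_mutex, false);
        pthread_cond_broadcast(&io_proceeded_cond);
    }

    int main(void)
    {
        pthread_t cpu;
        pthread_create(&cpu, NULL, cpu_thread, NULL);

        for (int i = 0; i < 5; i++) {
            iothread_lock_global();
            printf("iothread acquired global_mutex (%d)\n", i);
            pthread_mutex_unlock(&global_mutex);
        }

        iothread_lock_global();
        done = true;
        pthread_mutex_unlock(&global_mutex);

        pthread_join(cpu, NULL);
        return 0;
    }

The practical difference is that the VCPU thread now blocks only when the iothread has actually asked for the lock; when nobody is contending, the old mandatory unlock/relock round trip disappears.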