Revision c227f099

This diff renames scalar typedefs from the a_foo / an_foo style back to the conventional _t suffix (for example, a_ram_addr becomes ram_addr_t and an_array becomes array_t) and updates CODING_STYLE to describe the restored convention.

b/CODING_STYLE
40 40

  
41 41
3. Naming
42 42

  
43
Variables are lower_case_with_underscores; easy to type and read.
44
Structured type names are in CamelCase; harder to type but standing
45
out.  Scalar type names are a_lower_case_beginning_with_an a or an.
46
Do not use _t suffix if you are including any headers.
43
Variables are lower_case_with_underscores; easy to type and read.  Structured
44
type names are in CamelCase; harder to type but standing out.  Scalar type
45
names are lower_case_with_underscores_ending_with_a_t, like the POSIX
46
uint64_t and family.  Note that this last convention contradicts POSIX
47
and is therefore likely to be changed.
48

  
49
Typedefs are used to eliminate the redundant 'struct' keyword.  It is the
50
QEMU coding style.
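For readers of the updated section 3, a minimal illustration of the restored convention follows; every identifier in it is invented for the example, none comes from the QEMU tree:

    #include <stdint.h>

    typedef uint64_t disk_offset_t;          /* scalar type: lower case, _t suffix        */

    typedef struct DiskRegion {              /* structured type: CamelCase                */
        disk_offset_t start_offset;          /* variables: lower_case_with_underscores    */
        disk_offset_t length;
    } DiskRegion;                            /* typedef removes the repeated 'struct'     */

    static void disk_region_init(DiskRegion *region,
                                 disk_offset_t start_offset, disk_offset_t length)
    {
        region->start_offset = start_offset;
        region->length = length;
    }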
47 51

  
48 52
4. Block structure
49 53

  
b/balloon.h
16 16

  
17 17
#include "cpu-defs.h"
18 18

  
19
typedef a_ram_addr (QEMUBalloonEvent)(void *opaque, a_ram_addr target);
19
typedef ram_addr_t (QEMUBalloonEvent)(void *opaque, ram_addr_t target);
20 20

  
21 21
void qemu_add_balloon_handler(QEMUBalloonEvent *func, void *opaque);
22 22

  
23
void qemu_balloon(a_ram_addr target);
23
void qemu_balloon(ram_addr_t target);
24 24

  
25
a_ram_addr qemu_balloon_status(void);
25
ram_addr_t qemu_balloon_status(void);
26 26

  
27 27
#endif
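Going by the prototypes in this header alone, a balloon handler now takes and returns ram_addr_t. A hedged sketch of registering one (the state struct and handler body are hypothetical; only the two prototypes above are real):

    #include "balloon.h"

    typedef struct MyBalloonState {              /* hypothetical device state            */
        ram_addr_t current;
    } MyBalloonState;

    static ram_addr_t my_balloon_event(void *opaque, ram_addr_t target)
    {
        MyBalloonState *s = opaque;
        s->current = target;                     /* remember the requested target size   */
        return s->current;                       /* report the size actually reached     */
    }

    static void my_balloon_register(MyBalloonState *s)
    {
        qemu_add_balloon_handler(my_balloon_event, s);
    }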
b/block/vvfat.c
71 71
#endif
72 72

  
73 73
/* dynamic array functions */
74
typedef struct array {
74
typedef struct array_t {
75 75
    char* pointer;
76 76
    unsigned int size,next,item_size;
77
} an_array;
77
} array_t;
78 78

  
79
static inline void array_init(an_array* array,unsigned int item_size)
79
static inline void array_init(array_t* array,unsigned int item_size)
80 80
{
81 81
    array->pointer = NULL;
82 82
    array->size=0;
......
84 84
    array->item_size=item_size;
85 85
}
86 86

  
87
static inline void array_free(an_array* array)
87
static inline void array_free(array_t* array)
88 88
{
89 89
    if(array->pointer)
90 90
        free(array->pointer);
......
92 92
}
93 93

  
94 94
/* does not automatically grow */
95
static inline void* array_get(an_array* array,unsigned int index) {
95
static inline void* array_get(array_t* array,unsigned int index) {
96 96
    assert(index < array->next);
97 97
    return array->pointer + index * array->item_size;
98 98
}
99 99

  
100
static inline int array_ensure_allocated(an_array* array, int index)
100
static inline int array_ensure_allocated(array_t* array, int index)
101 101
{
102 102
    if((index + 1) * array->item_size > array->size) {
103 103
	int new_size = (index + 32) * array->item_size;
......
111 111
    return 0;
112 112
}
113 113

  
114
static inline void* array_get_next(an_array* array) {
114
static inline void* array_get_next(array_t* array) {
115 115
    unsigned int next = array->next;
116 116
    void* result;
117 117

  
......
124 124
    return result;
125 125
}
126 126

  
127
static inline void* array_insert(an_array* array,unsigned int index,unsigned int count) {
127
static inline void* array_insert(array_t* array,unsigned int index,unsigned int count) {
128 128
    if((array->next+count)*array->item_size>array->size) {
129 129
	int increment=count*array->item_size;
130 130
	array->pointer=qemu_realloc(array->pointer,array->size+increment);
......
141 141

  
142 142
/* this performs a "roll", so that the element which was at index_from becomes
143 143
 * index_to, but the order of all other elements is preserved. */
144
static inline int array_roll(an_array* array,int index_to,int index_from,int count)
144
static inline int array_roll(array_t* array,int index_to,int index_from,int count)
145 145
{
146 146
    char* buf;
147 147
    char* from;
......
174 174
    return 0;
175 175
}
176 176

  
177
static inline int array_remove_slice(an_array* array,int index, int count)
177
static inline int array_remove_slice(array_t* array,int index, int count)
178 178
{
179 179
    assert(index >=0);
180 180
    assert(count > 0);
......
185 185
    return 0;
186 186
}
187 187

  
188
static int array_remove(an_array* array,int index)
188
static int array_remove(array_t* array,int index)
189 189
{
190 190
    return array_remove_slice(array, index, 1);
191 191
}
192 192

  
193 193
/* return the index for a given member */
194
static int array_index(an_array* array, void* pointer)
194
static int array_index(array_t* array, void* pointer)
195 195
{
196 196
    size_t offset = (char*)pointer - array->pointer;
197 197
    assert((offset % array->item_size) == 0);
......
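To show how the renamed array_t reads in client code, a small usage sketch of the helpers above; the int elements are invented and the snippet assumes it sits in the same file as the static inline helpers:

    static void array_demo(void)
    {
        array_t numbers;
        int i;

        array_init(&numbers, sizeof(int));
        for (i = 0; i < 4; i++) {
            int *slot = array_get_next(&numbers);     /* append one element               */
            *slot = i * i;
        }
        assert(*(int *)array_get(&numbers, 2) == 4);  /* valid indices are < numbers.next */
        array_free(&numbers);
    }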
202 202
/* These structures are used to fake a disk and the VFAT filesystem.
203 203
 * For this reason we need to use __attribute__((packed)). */
204 204

  
205
typedef struct bootsector {
205
typedef struct bootsector_t {
206 206
    uint8_t jump[3];
207 207
    uint8_t name[8];
208 208
    uint16_t sector_size;
......
238 238
    uint8_t fat_type[8];
239 239
    uint8_t ignored[0x1c0];
240 240
    uint8_t magic[2];
241
} __attribute__((packed)) a_bootsector;
241
} __attribute__((packed)) bootsector_t;
242 242

  
243 243
typedef struct {
244 244
    uint8_t head;
245 245
    uint8_t sector;
246 246
    uint8_t cylinder;
247
} a_mbr_chs;
247
} mbr_chs_t;
248 248

  
249
typedef struct partition {
249
typedef struct partition_t {
250 250
    uint8_t attributes; /* 0x80 = bootable */
251
    a_mbr_chs start_CHS;
251
    mbr_chs_t start_CHS;
252 252
    uint8_t   fs_type; /* 0x1 = FAT12, 0x6 = FAT16, 0xe = FAT16_LBA, 0xb = FAT32, 0xc = FAT32_LBA */
253
    a_mbr_chs end_CHS;
253
    mbr_chs_t end_CHS;
254 254
    uint32_t start_sector_long;
255 255
    uint32_t length_sector_long;
256
} __attribute__((packed)) a_partition;
256
} __attribute__((packed)) partition_t;
257 257

  
258
typedef struct mbr {
258
typedef struct mbr_t {
259 259
    uint8_t ignored[0x1b8];
260 260
    uint32_t nt_id;
261 261
    uint8_t ignored2[2];
262
    a_partition partition[4];
262
    partition_t partition[4];
263 263
    uint8_t magic[2];
264
} __attribute__((packed)) a_mbr;
264
} __attribute__((packed)) mbr_t;
265 265

  
266
typedef struct direntry {
266
typedef struct direntry_t {
267 267
    uint8_t name[8];
268 268
    uint8_t extension[3];
269 269
    uint8_t attributes;
......
276 276
    uint16_t mdate;
277 277
    uint16_t begin;
278 278
    uint32_t size;
279
} __attribute__((packed)) a_direntry;
279
} __attribute__((packed)) direntry_t;
280 280

  
281 281
/* this structure are used to transparently access the files */
282 282

  
283
typedef struct mapping {
283
typedef struct mapping_t {
284 284
    /* begin is the first cluster, end is the last+1 */
285 285
    uint32_t begin,end;
286 286
    /* as s->directory is growable, no pointer may be used here */
......
308 308
	MODE_DIRECTORY = 4, MODE_FAKED = 8,
309 309
	MODE_DELETED = 16, MODE_RENAMED = 32 } mode;
310 310
    int read_only;
311
} a_mapping;
311
} mapping_t;
312 312

  
313 313
#ifdef DEBUG
314
static void print_direntry(const struct a_direntry*);
315
static void print_mapping(const struct a_mapping* mapping);
314
static void print_direntry(const struct direntry_t*);
315
static void print_mapping(const struct mapping_t* mapping);
316 316
#endif
317 317

  
318 318
/* here begins the real VVFAT driver */
......
323 323
    unsigned char first_sectors[0x40*0x200];
324 324

  
325 325
    int fat_type; /* 16 or 32 */
326
    an_array fat,directory,mapping;
326
    array_t fat,directory,mapping;
327 327

  
328 328
    unsigned int cluster_size;
329 329
    unsigned int sectors_per_cluster;
......
336 336
    uint32_t max_fat_value;
337 337

  
338 338
    int current_fd;
339
    a_mapping* current_mapping;
339
    mapping_t* current_mapping;
340 340
    unsigned char* cluster; /* points to current cluster */
341 341
    unsigned char* cluster_buffer; /* points to a buffer to hold temp data */
342 342
    unsigned int current_cluster;
......
347 347
    BlockDriverState* qcow;
348 348
    void* fat2;
349 349
    char* used_clusters;
350
    an_array commits;
350
    array_t commits;
351 351
    const char* path;
352 352
    int downcase_short_names;
353 353
} BDRVVVFATState;
......
356 356
 * if the position is outside the specified geometry, fill maximum value for CHS
357 357
 * and return 1 to signal overflow.
358 358
 */
359
static int sector2CHS(BlockDriverState* bs, a_mbr_chs * chs, int spos){
359
static int sector2CHS(BlockDriverState* bs, mbr_chs_t * chs, int spos){
360 360
    int head,sector;
361 361
    sector   = spos % (bs->secs);  spos/= bs->secs;
362 362
    head     = spos % (bs->heads); spos/= bs->heads;
......
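The two visible lines of sector2CHS() peel the sector and head off a linear sector number; the remaining quotient (elided in this view) is left for the cylinder. Worked through once with a made-up geometry of 63 sectors per track and 16 heads:

    /* spos = 1000, bs->secs = 63, bs->heads = 16 (illustrative geometry)  */
    /* sector   = 1000 % 63 = 55;   spos becomes 1000 / 63 = 15            */
    /* head     =   15 % 16 = 15;   spos becomes   15 / 16 =  0            */
    /* cylinder = the remaining spos, here 0                               */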
378 378
static void init_mbr(BDRVVVFATState* s)
379 379
{
380 380
    /* TODO: if the files mbr.img and bootsect.img exist, use them */
381
    a_mbr* real_mbr=(a_mbr*)s->first_sectors;
382
    a_partition* partition = &(real_mbr->partition[0]);
381
    mbr_t* real_mbr=(mbr_t*)s->first_sectors;
382
    partition_t* partition = &(real_mbr->partition[0]);
383 383
    int lba;
384 384

  
385 385
    memset(s->first_sectors,0,512);
......
425 425
    return len;
426 426
}
427 427

  
428
static inline a_direntry* create_long_filename(BDRVVVFATState* s,const char* filename)
428
static inline direntry_t* create_long_filename(BDRVVVFATState* s,const char* filename)
429 429
{
430 430
    char buffer[258];
431 431
    int length=short2long_name(buffer,filename),
432 432
        number_of_entries=(length+25)/26,i;
433
    a_direntry* entry;
433
    direntry_t* entry;
434 434

  
435 435
    for(i=0;i<number_of_entries;i++) {
436 436
	entry=array_get_next(&(s->directory));
......
450 450
    return array_get(&(s->directory),s->directory.next-number_of_entries);
451 451
}
452 452

  
453
static char is_free(const a_direntry* direntry)
453
static char is_free(const direntry_t* direntry)
454 454
{
455 455
    return direntry->name[0]==0xe5 || direntry->name[0]==0x00;
456 456
}
457 457

  
458
static char is_volume_label(const a_direntry* direntry)
458
static char is_volume_label(const direntry_t* direntry)
459 459
{
460 460
    return direntry->attributes == 0x28;
461 461
}
462 462

  
463
static char is_long_name(const a_direntry* direntry)
463
static char is_long_name(const direntry_t* direntry)
464 464
{
465 465
    return direntry->attributes == 0xf;
466 466
}
467 467

  
468
static char is_short_name(const a_direntry* direntry)
468
static char is_short_name(const direntry_t* direntry)
469 469
{
470 470
    return !is_volume_label(direntry) && !is_long_name(direntry)
471 471
	&& !is_free(direntry);
472 472
}
473 473

  
474
static char is_directory(const a_direntry* direntry)
474
static char is_directory(const direntry_t* direntry)
475 475
{
476 476
    return direntry->attributes & 0x10 && direntry->name[0] != 0xe5;
477 477
}
478 478

  
479
static inline char is_dot(const a_direntry* direntry)
479
static inline char is_dot(const direntry_t* direntry)
480 480
{
481 481
    return is_short_name(direntry) && direntry->name[0] == '.';
482 482
}
483 483

  
484
static char is_file(const a_direntry* direntry)
484
static char is_file(const direntry_t* direntry)
485 485
{
486 486
    return is_short_name(direntry) && !is_directory(direntry);
487 487
}
488 488

  
489
static inline uint32_t begin_of_direntry(const a_direntry* direntry)
489
static inline uint32_t begin_of_direntry(const direntry_t* direntry)
490 490
{
491 491
    return le16_to_cpu(direntry->begin)|(le16_to_cpu(direntry->begin_hi)<<16);
492 492
}
493 493

  
494
static inline uint32_t filesize_of_direntry(const a_direntry* direntry)
494
static inline uint32_t filesize_of_direntry(const direntry_t* direntry)
495 495
{
496 496
    return le32_to_cpu(direntry->size);
497 497
}
498 498

  
499
static void set_begin_of_direntry(a_direntry* direntry, uint32_t begin)
499
static void set_begin_of_direntry(direntry_t* direntry, uint32_t begin)
500 500
{
501 501
    direntry->begin = cpu_to_le16(begin & 0xffff);
502 502
    direntry->begin_hi = cpu_to_le16((begin >> 16) & 0xffff);
......
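begin_of_direntry() and set_begin_of_direntry() above are each other's inverse: the starting cluster is stored split across two little-endian 16-bit fields. Worked through for one value:

    /* cluster 0x00012345                                                          */
    /*   set:  begin    = cpu_to_le16(0x00012345 & 0xffff)         -> 0x2345       */
    /*         begin_hi = cpu_to_le16((0x00012345 >> 16) & 0xffff) -> 0x0001       */
    /*   get:  le16_to_cpu(begin) | (le16_to_cpu(begin_hi) << 16)  -> 0x00012345   */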
504 504

  
505 505
/* fat functions */
506 506

  
507
static inline uint8_t fat_chksum(const a_direntry* entry)
507
static inline uint8_t fat_chksum(const direntry_t* entry)
508 508
{
509 509
    uint8_t chksum=0;
510 510
    int i;
......
603 603

  
604 604
/* TODO: in create_short_filename, 0xe5->0x05 is not yet handled! */
605 605
/* TODO: in parse_short_filename, 0x05->0xe5 is not yet handled! */
606
static inline a_direntry* create_short_and_long_name(BDRVVVFATState* s,
606
static inline direntry_t* create_short_and_long_name(BDRVVVFATState* s,
607 607
	unsigned int directory_start, const char* filename, int is_dot)
608 608
{
609 609
    int i,j,long_index=s->directory.next;
610
    a_direntry* entry = NULL;
611
    a_direntry* entry_long = NULL;
610
    direntry_t* entry = NULL;
611
    direntry_t* entry_long = NULL;
612 612

  
613 613
    if(is_dot) {
614 614
	entry=array_get_next(&(s->directory));
......
646 646

  
647 647
    /* mangle duplicates */
648 648
    while(1) {
649
	a_direntry* entry1=array_get(&(s->directory),directory_start);
649
	direntry_t* entry1=array_get(&(s->directory),directory_start);
650 650
	int j;
651 651

  
652 652
	for(;entry1<entry;entry1++)
......
693 693
 */
694 694
static int read_directory(BDRVVVFATState* s, int mapping_index)
695 695
{
696
    a_mapping* mapping = array_get(&(s->mapping), mapping_index);
697
    a_direntry* direntry;
696
    mapping_t* mapping = array_get(&(s->mapping), mapping_index);
697
    direntry_t* direntry;
698 698
    const char* dirname = mapping->path;
699 699
    int first_cluster = mapping->begin;
700 700
    int parent_index = mapping->info.dir.parent_mapping_index;
701
    a_mapping* parent_mapping = (a_mapping*)
701
    mapping_t* parent_mapping = (mapping_t*)
702 702
        (parent_index >= 0 ? array_get(&(s->mapping), parent_index) : NULL);
703 703
    int first_cluster_of_parent = parent_mapping ? parent_mapping->begin : -1;
704 704

  
......
720 720
    while((entry=readdir(dir))) {
721 721
	unsigned int length=strlen(dirname)+2+strlen(entry->d_name);
722 722
        char* buffer;
723
	a_direntry* direntry;
723
	direntry_t* direntry;
724 724
        struct stat st;
725 725
	int is_dot=!strcmp(entry->d_name,".");
726 726
	int is_dotdot=!strcmp(entry->d_name,"..");
......
762 762

  
763 763
	/* create mapping for this file */
764 764
	if(!is_dot && !is_dotdot && (S_ISDIR(st.st_mode) || st.st_size)) {
765
	    s->current_mapping=(a_mapping*)array_get_next(&(s->mapping));
765
	    s->current_mapping=(mapping_t*)array_get_next(&(s->mapping));
766 766
	    s->current_mapping->begin=0;
767 767
	    s->current_mapping->end=st.st_size;
768 768
	    /*
......
788 788

  
789 789
    /* fill with zeroes up to the end of the cluster */
790 790
    while(s->directory.next%(0x10*s->sectors_per_cluster)) {
791
	a_direntry* direntry=array_get_next(&(s->directory));
792
	memset(direntry,0,sizeof(a_direntry));
791
	direntry_t* direntry=array_get_next(&(s->directory));
792
	memset(direntry,0,sizeof(direntry_t));
793 793
    }
794 794

  
795 795
/* TODO: if there are more entries, bootsector has to be adjusted! */
......
799 799
	int cur = s->directory.next;
800 800
	array_ensure_allocated(&(s->directory), ROOT_ENTRIES - 1);
801 801
	memset(array_get(&(s->directory), cur), 0,
802
		(ROOT_ENTRIES - cur) * sizeof(a_direntry));
802
		(ROOT_ENTRIES - cur) * sizeof(direntry_t));
803 803
    }
804 804

  
805 805
     /* reget the mapping, since s->mapping was possibly realloc()ed */
806
    mapping = (a_mapping*)array_get(&(s->mapping), mapping_index);
806
    mapping = (mapping_t*)array_get(&(s->mapping), mapping_index);
807 807
    first_cluster += (s->directory.next - mapping->info.dir.first_dir_index)
808 808
	* 0x20 / s->cluster_size;
809 809
    mapping->end = first_cluster;
810 810

  
811
    direntry = (a_direntry*)array_get(&(s->directory), mapping->dir_index);
811
    direntry = (direntry_t*)array_get(&(s->directory), mapping->dir_index);
812 812
    set_begin_of_direntry(direntry, mapping->begin);
813 813

  
814 814
    return 0;
......
830 830
}
831 831

  
832 832
#ifdef DBG
833
static a_direntry* get_direntry_for_mapping(BDRVVVFATState* s,a_mapping* mapping)
833
static direntry_t* get_direntry_for_mapping(BDRVVVFATState* s,mapping_t* mapping)
834 834
{
835 835
    if(mapping->mode==MODE_UNDEFINED)
836 836
	return 0;
837
    return (a_direntry*)(s->directory.pointer+sizeof(a_direntry)*mapping->dir_index);
837
    return (direntry_t*)(s->directory.pointer+sizeof(direntry_t)*mapping->dir_index);
838 838
}
839 839
#endif
840 840

  
841 841
static int init_directories(BDRVVVFATState* s,
842 842
	const char* dirname)
843 843
{
844
    a_bootsector* bootsector;
845
    a_mapping* mapping;
844
    bootsector_t* bootsector;
845
    mapping_t* mapping;
846 846
    unsigned int i;
847 847
    unsigned int cluster;
848 848

  
......
861 861
    i = 1+s->sectors_per_cluster*0x200*8/s->fat_type;
862 862
    s->sectors_per_fat=(s->sector_count+i)/i; /* round up */
863 863

  
864
    array_init(&(s->mapping),sizeof(a_mapping));
865
    array_init(&(s->directory),sizeof(a_direntry));
864
    array_init(&(s->mapping),sizeof(mapping_t));
865
    array_init(&(s->directory),sizeof(direntry_t));
866 866

  
867 867
    /* add volume label */
868 868
    {
869
	a_direntry* entry=array_get_next(&(s->directory));
869
	direntry_t* entry=array_get_next(&(s->directory));
870 870
	entry->attributes=0x28; /* archive | volume label */
871 871
	snprintf((char*)entry->name,11,"QEMU VVFAT");
872 872
    }
......
910 910
	    mapping->mode=MODE_NORMAL;
911 911
	    mapping->begin = cluster;
912 912
	    if (mapping->end > 0) {
913
		a_direntry* direntry = array_get(&(s->directory),
913
		direntry_t* direntry = array_get(&(s->directory),
914 914
			mapping->dir_index);
915 915

  
916 916
		mapping->end = cluster + 1 + (mapping->end-1)/s->cluster_size;
......
954 954

  
955 955
    s->current_mapping = NULL;
956 956

  
957
    bootsector=(a_bootsector*)(s->first_sectors+(s->first_sectors_number-1)*0x200);
957
    bootsector=(bootsector_t*)(s->first_sectors+(s->first_sectors_number-1)*0x200);
958 958
    bootsector->jump[0]=0xeb;
959 959
    bootsector->jump[1]=0x3e;
960 960
    bootsector->jump[2]=0x90;
......
1100 1100
{
1101 1101
    int index3=index1+1;
1102 1102
    while(1) {
1103
	a_mapping* mapping;
1103
	mapping_t* mapping;
1104 1104
	index3=(index1+index2)/2;
1105 1105
	mapping=array_get(&(s->mapping),index3);
1106 1106
	assert(mapping->begin < mapping->end);
......
1123 1123
    }
1124 1124
}
1125 1125

  
1126
static inline a_mapping* find_mapping_for_cluster(BDRVVVFATState* s,int cluster_num)
1126
static inline mapping_t* find_mapping_for_cluster(BDRVVVFATState* s,int cluster_num)
1127 1127
{
1128 1128
    int index=find_mapping_for_cluster_aux(s,cluster_num,0,s->mapping.next);
1129
    a_mapping* mapping;
1129
    mapping_t* mapping;
1130 1130
    if(index>=s->mapping.next)
1131 1131
        return NULL;
1132 1132
    mapping=array_get(&(s->mapping),index);
......
1140 1140
 * This function simply compares path == mapping->path. Since the mappings
1141 1141
 * are sorted by cluster, this is expensive: O(n).
1142 1142
 */
1143
static inline a_mapping* find_mapping_for_path(BDRVVVFATState* s,
1143
static inline mapping_t* find_mapping_for_path(BDRVVVFATState* s,
1144 1144
	const char* path)
1145 1145
{
1146 1146
    int i;
1147 1147

  
1148 1148
    for (i = 0; i < s->mapping.next; i++) {
1149
	a_mapping* mapping = array_get(&(s->mapping), i);
1149
	mapping_t* mapping = array_get(&(s->mapping), i);
1150 1150
	if (mapping->first_mapping_index < 0 &&
1151 1151
		!strcmp(path, mapping->path))
1152 1152
	    return mapping;
......
1155 1155
    return NULL;
1156 1156
}
1157 1157

  
1158
static int open_file(BDRVVVFATState* s,a_mapping* mapping)
1158
static int open_file(BDRVVVFATState* s,mapping_t* mapping)
1159 1159
{
1160 1160
    if(!mapping)
1161 1161
	return -1;
......
1182 1182
		|| s->current_mapping->begin>cluster_num
1183 1183
		|| s->current_mapping->end<=cluster_num) {
1184 1184
	    /* binary search of mappings for file */
1185
	    a_mapping* mapping=find_mapping_for_cluster(s,cluster_num);
1185
	    mapping_t* mapping=find_mapping_for_cluster(s,cluster_num);
1186 1186

  
1187 1187
	    assert(!mapping || (cluster_num>=mapping->begin && cluster_num<mapping->end));
1188 1188

  
......
1238 1238
    }
1239 1239
}
1240 1240

  
1241
static void print_direntry(const a_direntry* direntry)
1241
static void print_direntry(const direntry_t* direntry)
1242 1242
{
1243 1243
    int j = 0;
1244 1244
    char buffer[1024];
......
1270 1270
    }
1271 1271
}
1272 1272

  
1273
static void print_mapping(const a_mapping* mapping)
1273
static void print_mapping(const mapping_t* mapping)
1274 1274
{
1275 1275
    fprintf(stderr, "mapping (0x%x): begin, end = %d, %d, dir_index = %d, first_mapping_index = %d, name = %s, mode = 0x%x, " , (int)mapping, mapping->begin, mapping->end, mapping->dir_index, mapping->first_mapping_index, mapping->path, mapping->mode);
1276 1276
    if (mapping->mode & MODE_DIRECTORY)
......
1346 1346
 *
1347 1347
 */
1348 1348

  
1349
typedef struct commit {
1349
typedef struct commit_t {
1350 1350
    char* path;
1351 1351
    union {
1352 1352
	struct { uint32_t cluster; } rename;
......
1358 1358
    enum {
1359 1359
	ACTION_RENAME, ACTION_WRITEOUT, ACTION_NEW_FILE, ACTION_MKDIR
1360 1360
    } action;
1361
} a_commit;
1361
} commit_t;
1362 1362

  
1363 1363
static void clear_commits(BDRVVVFATState* s)
1364 1364
{
1365 1365
    int i;
1366 1366
DLOG(fprintf(stderr, "clear_commits (%d commits)\n", s->commits.next));
1367 1367
    for (i = 0; i < s->commits.next; i++) {
1368
	a_commit* commit = array_get(&(s->commits), i);
1368
	commit_t* commit = array_get(&(s->commits), i);
1369 1369
	assert(commit->path || commit->action == ACTION_WRITEOUT);
1370 1370
	if (commit->action != ACTION_WRITEOUT) {
1371 1371
	    assert(commit->path);
......
1379 1379
static void schedule_rename(BDRVVVFATState* s,
1380 1380
	uint32_t cluster, char* new_path)
1381 1381
{
1382
    a_commit* commit = array_get_next(&(s->commits));
1382
    commit_t* commit = array_get_next(&(s->commits));
1383 1383
    commit->path = new_path;
1384 1384
    commit->param.rename.cluster = cluster;
1385 1385
    commit->action = ACTION_RENAME;
......
1388 1388
static void schedule_writeout(BDRVVVFATState* s,
1389 1389
	int dir_index, uint32_t modified_offset)
1390 1390
{
1391
    a_commit* commit = array_get_next(&(s->commits));
1391
    commit_t* commit = array_get_next(&(s->commits));
1392 1392
    commit->path = NULL;
1393 1393
    commit->param.writeout.dir_index = dir_index;
1394 1394
    commit->param.writeout.modified_offset = modified_offset;
......
1398 1398
static void schedule_new_file(BDRVVVFATState* s,
1399 1399
	char* path, uint32_t first_cluster)
1400 1400
{
1401
    a_commit* commit = array_get_next(&(s->commits));
1401
    commit_t* commit = array_get_next(&(s->commits));
1402 1402
    commit->path = path;
1403 1403
    commit->param.new_file.first_cluster = first_cluster;
1404 1404
    commit->action = ACTION_NEW_FILE;
......
1406 1406

  
1407 1407
static void schedule_mkdir(BDRVVVFATState* s, uint32_t cluster, char* path)
1408 1408
{
1409
    a_commit* commit = array_get_next(&(s->commits));
1409
    commit_t* commit = array_get_next(&(s->commits));
1410 1410
    commit->path = path;
1411 1411
    commit->param.mkdir.cluster = cluster;
1412 1412
    commit->action = ACTION_MKDIR;
......
1431 1431

  
1432 1432
/* return 0 if parsed successfully, > 0 if no long name, < 0 if error */
1433 1433
static int parse_long_name(long_file_name* lfn,
1434
	const a_direntry* direntry)
1434
	const direntry_t* direntry)
1435 1435
{
1436 1436
    int i, j, offset;
1437 1437
    const unsigned char* pointer = (const unsigned char*)direntry;
......
1474 1474

  
1475 1475
/* returns 0 if successful, >0 if no short_name, and <0 on error */
1476 1476
static int parse_short_name(BDRVVVFATState* s,
1477
	long_file_name* lfn, a_direntry* direntry)
1477
	long_file_name* lfn, direntry_t* direntry)
1478 1478
{
1479 1479
    int i, j;
1480 1480

  
......
1566 1566
 */
1567 1567
typedef enum {
1568 1568
     USED_DIRECTORY = 1, USED_FILE = 2, USED_ANY = 3, USED_ALLOCATED = 4
1569
} e_used;
1569
} used_t;
1570 1570

  
1571 1571
/*
1572 1572
 * get_cluster_count_for_direntry() not only determines how many clusters
......
1579 1579
 * assumed to be *not* deleted (and *only* those).
1580 1580
 */
1581 1581
static uint32_t get_cluster_count_for_direntry(BDRVVVFATState* s,
1582
	a_direntry* direntry, const char* path)
1582
	direntry_t* direntry, const char* path)
1583 1583
{
1584 1584
    /*
1585 1585
     * This is a little bit tricky:
......
1605 1605
    uint32_t cluster_num = begin_of_direntry(direntry);
1606 1606
    uint32_t offset = 0;
1607 1607
    int first_mapping_index = -1;
1608
    a_mapping* mapping = NULL;
1608
    mapping_t* mapping = NULL;
1609 1609
    const char* basename2 = NULL;
1610 1610

  
1611 1611
    vvfat_close_current_file(s);
......
1730 1730
{
1731 1731
    int ret = 0;
1732 1732
    unsigned char* cluster = qemu_malloc(s->cluster_size);
1733
    a_direntry* direntries = (a_direntry*)cluster;
1734
    a_mapping* mapping = find_mapping_for_cluster(s, cluster_num);
1733
    direntry_t* direntries = (direntry_t*)cluster;
1734
    mapping_t* mapping = find_mapping_for_cluster(s, cluster_num);
1735 1735

  
1736 1736
    long_file_name lfn;
1737 1737
    int path_len = strlen(path);
......
1889 1889
     * (check_directory_consistency() will unmark those still present). */
1890 1890
    if (s->qcow)
1891 1891
	for (i = 0; i < s->mapping.next; i++) {
1892
	    a_mapping* mapping = array_get(&(s->mapping), i);
1892
	    mapping_t* mapping = array_get(&(s->mapping), i);
1893 1893
	    if (mapping->first_mapping_index < 0)
1894 1894
		mapping->mode |= MODE_DELETED;
1895 1895
	}
......
1929 1929
    int i;
1930 1930

  
1931 1931
    for (i = 0; i < s->mapping.next; i++) {
1932
	a_mapping* mapping = array_get(&(s->mapping), i);
1932
	mapping_t* mapping = array_get(&(s->mapping), i);
1933 1933

  
1934 1934
#define ADJUST_MAPPING_INDEX(name) \
1935 1935
	if (mapping->name >= offset) \
......
1942 1942
}
1943 1943

  
1944 1944
/* insert or update mapping */
1945
static a_mapping* insert_mapping(BDRVVVFATState* s,
1945
static mapping_t* insert_mapping(BDRVVVFATState* s,
1946 1946
	uint32_t begin, uint32_t end)
1947 1947
{
1948 1948
    /*
......
1953 1953
     * - replace name
1954 1954
     */
1955 1955
    int index = find_mapping_for_cluster_aux(s, begin, 0, s->mapping.next);
1956
    a_mapping* mapping = NULL;
1957
    a_mapping* first_mapping = array_get(&(s->mapping), 0);
1956
    mapping_t* mapping = NULL;
1957
    mapping_t* first_mapping = array_get(&(s->mapping), 0);
1958 1958

  
1959 1959
    if (index < s->mapping.next && (mapping = array_get(&(s->mapping), index))
1960 1960
	    && mapping->begin < begin) {
......
1971 1971
    mapping->begin = begin;
1972 1972
    mapping->end = end;
1973 1973

  
1974
DLOG(a_mapping* next_mapping;
1974
DLOG(mapping_t* next_mapping;
1975 1975
assert(index + 1 >= s->mapping.next ||
1976 1976
((next_mapping = array_get(&(s->mapping), index + 1)) &&
1977 1977
 next_mapping->begin >= end)));
1978 1978

  
1979
    if (s->current_mapping && first_mapping != (a_mapping*)s->mapping.pointer)
1979
    if (s->current_mapping && first_mapping != (mapping_t*)s->mapping.pointer)
1980 1980
	s->current_mapping = array_get(&(s->mapping),
1981 1981
		s->current_mapping - first_mapping);
1982 1982

  
......
1985 1985

  
1986 1986
static int remove_mapping(BDRVVVFATState* s, int mapping_index)
1987 1987
{
1988
    a_mapping* mapping = array_get(&(s->mapping), mapping_index);
1989
    a_mapping* first_mapping = array_get(&(s->mapping), 0);
1988
    mapping_t* mapping = array_get(&(s->mapping), mapping_index);
1989
    mapping_t* first_mapping = array_get(&(s->mapping), 0);
1990 1990

  
1991 1991
    /* free mapping */
1992 1992
    if (mapping->first_mapping_index < 0)
......
1998 1998
    /* adjust all references to mappings */
1999 1999
    adjust_mapping_indices(s, mapping_index, -1);
2000 2000

  
2001
    if (s->current_mapping && first_mapping != (a_mapping*)s->mapping.pointer)
2001
    if (s->current_mapping && first_mapping != (mapping_t*)s->mapping.pointer)
2002 2002
	s->current_mapping = array_get(&(s->mapping),
2003 2003
		s->current_mapping - first_mapping);
2004 2004

  
......
2009 2009
{
2010 2010
    int i;
2011 2011
    for (i = 0; i < s->mapping.next; i++) {
2012
	a_mapping* mapping = array_get(&(s->mapping), i);
2012
	mapping_t* mapping = array_get(&(s->mapping), i);
2013 2013
	if (mapping->dir_index >= offset)
2014 2014
	    mapping->dir_index += adjust;
2015 2015
	if ((mapping->mode & MODE_DIRECTORY) &&
......
2018 2018
    }
2019 2019
}
2020 2020

  
2021
static a_direntry* insert_direntries(BDRVVVFATState* s,
2021
static direntry_t* insert_direntries(BDRVVVFATState* s,
2022 2022
	int dir_index, int count)
2023 2023
{
2024 2024
    /*
2025 2025
     * make room in s->directory,
2026 2026
     * adjust_dirindices
2027 2027
     */
2028
    a_direntry* result = array_insert(&(s->directory), dir_index, count);
2028
    direntry_t* result = array_insert(&(s->directory), dir_index, count);
2029 2029
    if (result == NULL)
2030 2030
	return NULL;
2031 2031
    adjust_dirindices(s, dir_index, count);
......
2050 2050
static int commit_mappings(BDRVVVFATState* s,
2051 2051
	uint32_t first_cluster, int dir_index)
2052 2052
{
2053
    a_mapping* mapping = find_mapping_for_cluster(s, first_cluster);
2054
    a_direntry* direntry = array_get(&(s->directory), dir_index);
2053
    mapping_t* mapping = find_mapping_for_cluster(s, first_cluster);
2054
    direntry_t* direntry = array_get(&(s->directory), dir_index);
2055 2055
    uint32_t cluster = first_cluster;
2056 2056

  
2057 2057
    vvfat_close_current_file(s);
......
2083 2083

  
2084 2084
	if (!fat_eof(s, c1)) {
2085 2085
	    int i = find_mapping_for_cluster_aux(s, c1, 0, s->mapping.next);
2086
	    a_mapping* next_mapping = i >= s->mapping.next ? NULL :
2086
	    mapping_t* next_mapping = i >= s->mapping.next ? NULL :
2087 2087
		array_get(&(s->mapping), i);
2088 2088

  
2089 2089
	    if (next_mapping == NULL || next_mapping->begin > c1) {
......
2127 2127
static int commit_direntries(BDRVVVFATState* s,
2128 2128
	int dir_index, int parent_mapping_index)
2129 2129
{
2130
    a_direntry* direntry = array_get(&(s->directory), dir_index);
2130
    direntry_t* direntry = array_get(&(s->directory), dir_index);
2131 2131
    uint32_t first_cluster = dir_index == 0 ? 0 : begin_of_direntry(direntry);
2132
    a_mapping* mapping = find_mapping_for_cluster(s, first_cluster);
2132
    mapping_t* mapping = find_mapping_for_cluster(s, first_cluster);
2133 2133

  
2134 2134
    int factor = 0x10 * s->sectors_per_cluster;
2135 2135
    int old_cluster_count, new_cluster_count;
......
2207 2207
static int commit_one_file(BDRVVVFATState* s,
2208 2208
	int dir_index, uint32_t offset)
2209 2209
{
2210
    a_direntry* direntry = array_get(&(s->directory), dir_index);
2210
    direntry_t* direntry = array_get(&(s->directory), dir_index);
2211 2211
    uint32_t c = begin_of_direntry(direntry);
2212 2212
    uint32_t first_cluster = c;
2213
    a_mapping* mapping = find_mapping_for_cluster(s, c);
2213
    mapping_t* mapping = find_mapping_for_cluster(s, c);
2214 2214
    uint32_t size = filesize_of_direntry(direntry);
2215 2215
    char* cluster = qemu_malloc(s->cluster_size);
2216 2216
    uint32_t i;
......
2268 2268
{
2269 2269
    int i;
2270 2270
    for (i = 0; i < s->mapping.next; i++) {
2271
	a_mapping* mapping = array_get(&(s->mapping), i);
2271
	mapping_t* mapping = array_get(&(s->mapping), i);
2272 2272
	if (mapping->mode & MODE_DELETED) {
2273 2273
	    fprintf(stderr, "deleted\n");
2274 2274
	    continue;
2275 2275
	}
2276 2276
	assert(mapping->dir_index >= 0);
2277 2277
	assert(mapping->dir_index < s->directory.next);
2278
	a_direntry* direntry = array_get(&(s->directory), mapping->dir_index);
2278
	direntry_t* direntry = array_get(&(s->directory), mapping->dir_index);
2279 2279
	assert(mapping->begin == begin_of_direntry(direntry) || mapping->first_mapping_index >= 0);
2280 2280
	if (mapping->mode & MODE_DIRECTORY) {
2281 2281
	    assert(mapping->info.dir.first_dir_index + 0x10 * s->sectors_per_cluster * (mapping->end - mapping->begin) <= s->directory.next);
......
2291 2291
    int first_mapping = -1;
2292 2292

  
2293 2293
    for (i = 0; i < s->directory.next; i++) {
2294
	a_direntry* direntry = array_get(&(s->directory), i);
2294
	direntry_t* direntry = array_get(&(s->directory), i);
2295 2295

  
2296 2296
	if (is_short_name(direntry) && begin_of_direntry(direntry)) {
2297
	    a_mapping* mapping = find_mapping_for_cluster(s, begin_of_direntry(direntry));
2297
	    mapping_t* mapping = find_mapping_for_cluster(s, begin_of_direntry(direntry));
2298 2298
	    assert(mapping);
2299 2299
	    assert(mapping->dir_index == i || is_dot(direntry));
2300 2300
	    assert(mapping->begin == begin_of_direntry(direntry) || is_dot(direntry));
......
2305 2305
	    int j, count = 0;
2306 2306

  
2307 2307
	    for (j = 0; j < s->mapping.next; j++) {
2308
		a_mapping* mapping = array_get(&(s->mapping), j);
2308
		mapping_t* mapping = array_get(&(s->mapping), j);
2309 2309
		if (mapping->mode & MODE_DELETED)
2310 2310
		    continue;
2311 2311
		if (mapping->mode & MODE_DIRECTORY) {
......
2318 2318
			if (mapping->info.dir.parent_mapping_index < 0)
2319 2319
			    assert(j == 0);
2320 2320
			else {
2321
			    a_mapping* parent = array_get(&(s->mapping), mapping->info.dir.parent_mapping_index);
2321
			    mapping_t* parent = array_get(&(s->mapping), mapping->info.dir.parent_mapping_index);
2322 2322
			    assert(parent->mode & MODE_DIRECTORY);
2323 2323
			    assert(parent->info.dir.first_dir_index < mapping->info.dir.first_dir_index);
2324 2324
			}
......
2339 2339
#ifdef DEBUG
2340 2340
    fprintf(stderr, "handle_renames\n");
2341 2341
    for (i = 0; i < s->commits.next; i++) {
2342
	a_commit* commit = array_get(&(s->commits), i);
2342
	commit_t* commit = array_get(&(s->commits), i);
2343 2343
	fprintf(stderr, "%d, %s (%d, %d)\n", i, commit->path ? commit->path : "(null)", commit->param.rename.cluster, commit->action);
2344 2344
    }
2345 2345
#endif
2346 2346

  
2347 2347
    for (i = 0; i < s->commits.next;) {
2348
	a_commit* commit = array_get(&(s->commits), i);
2348
	commit_t* commit = array_get(&(s->commits), i);
2349 2349
	if (commit->action == ACTION_RENAME) {
2350
	    a_mapping* mapping = find_mapping_for_cluster(s,
2350
	    mapping_t* mapping = find_mapping_for_cluster(s,
2351 2351
		    commit->param.rename.cluster);
2352 2352
	    char* old_path = mapping->path;
2353 2353

  
......
2360 2360
		int l1 = strlen(mapping->path);
2361 2361
		int l2 = strlen(old_path);
2362 2362
		int diff = l1 - l2;
2363
		a_direntry* direntry = array_get(&(s->directory),
2363
		direntry_t* direntry = array_get(&(s->directory),
2364 2364
			mapping->info.dir.first_dir_index);
2365 2365
		uint32_t c = mapping->begin;
2366 2366
		int i = 0;
......
2368 2368
		/* recurse */
2369 2369
		while (!fat_eof(s, c)) {
2370 2370
		    do {
2371
			a_direntry* d = direntry + i;
2371
			direntry_t* d = direntry + i;
2372 2372

  
2373 2373
			if (is_file(d) || (is_directory(d) && !is_dot(d))) {
2374
			    a_mapping* m = find_mapping_for_cluster(s,
2374
			    mapping_t* m = find_mapping_for_cluster(s,
2375 2375
				    begin_of_direntry(d));
2376 2376
			    int l = strlen(m->path);
2377 2377
			    char* new_path = qemu_malloc(l + diff + 1);
......
2394 2394
	    array_remove(&(s->commits), i);
2395 2395
	    continue;
2396 2396
	} else if (commit->action == ACTION_MKDIR) {
2397
	    a_mapping* mapping;
2397
	    mapping_t* mapping;
2398 2398
	    int j, parent_path_len;
2399 2399

  
2400 2400
#ifdef __MINGW32__
......
2422 2422
	    parent_path_len = strlen(commit->path)
2423 2423
		- strlen(get_basename(commit->path)) - 1;
2424 2424
	    for (j = 0; j < s->mapping.next; j++) {
2425
		a_mapping* m = array_get(&(s->mapping), j);
2425
		mapping_t* m = array_get(&(s->mapping), j);
2426 2426
		if (m->first_mapping_index < 0 && m != mapping &&
2427 2427
			!strncmp(m->path, mapping->path, parent_path_len) &&
2428 2428
			strlen(m->path) == parent_path_len)
......
2450 2450
    vvfat_close_current_file(s);
2451 2451

  
2452 2452
    for (i = 0; !fail && i < s->commits.next; i++) {
2453
	a_commit* commit = array_get(&(s->commits), i);
2453
	commit_t* commit = array_get(&(s->commits), i);
2454 2454
	switch(commit->action) {
2455 2455
	case ACTION_RENAME: case ACTION_MKDIR:
2456 2456
	    assert(0);
2457 2457
	    fail = -2;
2458 2458
	    break;
2459 2459
	case ACTION_WRITEOUT: {
2460
	    a_direntry* entry = array_get(&(s->directory),
2460
	    direntry_t* entry = array_get(&(s->directory),
2461 2461
		    commit->param.writeout.dir_index);
2462 2462
	    uint32_t begin = begin_of_direntry(entry);
2463
	    a_mapping* mapping = find_mapping_for_cluster(s, begin);
2463
	    mapping_t* mapping = find_mapping_for_cluster(s, begin);
2464 2464

  
2465 2465
	    assert(mapping);
2466 2466
	    assert(mapping->begin == begin);
......
2474 2474
	}
2475 2475
	case ACTION_NEW_FILE: {
2476 2476
	    int begin = commit->param.new_file.first_cluster;
2477
	    a_mapping* mapping = find_mapping_for_cluster(s, begin);
2478
	    a_direntry* entry;
2477
	    mapping_t* mapping = find_mapping_for_cluster(s, begin);
2478
	    direntry_t* entry;
2479 2479
	    int i;
2480 2480

  
2481 2481
	    /* find direntry */
......
2530 2530
	deleted = 0;
2531 2531

  
2532 2532
	for (i = 1; i < s->mapping.next; i++) {
2533
	    a_mapping* mapping = array_get(&(s->mapping), i);
2533
	    mapping_t* mapping = array_get(&(s->mapping), i);
2534 2534
	    if (mapping->mode & MODE_DELETED) {
2535
		a_direntry* entry = array_get(&(s->directory),
2535
		direntry_t* entry = array_get(&(s->directory),
2536 2536
			mapping->dir_index);
2537 2537

  
2538 2538
		if (is_free(entry)) {
......
2550 2550
			}
2551 2551

  
2552 2552
			for (j = 1; j < s->mapping.next; j++) {
2553
			    a_mapping* m = array_get(&(s->mapping), j);
2553
			    mapping_t* m = array_get(&(s->mapping), j);
2554 2554
			    if (m->mode & MODE_DIRECTORY &&
2555 2555
				    m->info.dir.first_dir_index >
2556 2556
				    first_dir_index &&
......
2666 2666

  
2667 2667
    for (i = sector2cluster(s, sector_num);
2668 2668
	    i <= sector2cluster(s, sector_num + nb_sectors - 1);) {
2669
	a_mapping* mapping = find_mapping_for_cluster(s, i);
2669
	mapping_t* mapping = find_mapping_for_cluster(s, i);
2670 2670
	if (mapping) {
2671 2671
	    if (mapping->read_only) {
2672 2672
		fprintf(stderr, "Tried to write to write-protected file %s\n",
......
2678 2678
		int begin = cluster2sector(s, i);
2679 2679
		int end = begin + s->sectors_per_cluster, k;
2680 2680
		int dir_index;
2681
		const a_direntry* direntries;
2681
		const direntry_t* direntries;
2682 2682
		long_file_name lfn;
2683 2683

  
2684 2684
		lfn_init(&lfn);
......
2689 2689
		    end = sector_num + nb_sectors;
2690 2690
		dir_index  = mapping->dir_index +
2691 2691
		    0x10 * (begin - mapping->begin * s->sectors_per_cluster);
2692
		direntries = (a_direntry*)(buf + 0x200 * (begin - sector_num));
2692
		direntries = (direntry_t*)(buf + 0x200 * (begin - sector_num));
2693 2693

  
2694 2694
		for (k = 0; k < (end - begin) * 0x10; k++) {
2695 2695
		    /* do not allow non-ASCII filenames */
......
2702 2702
			    (direntries[k].attributes & 1)) {
2703 2703
			if (memcmp(direntries + k,
2704 2704
				    array_get(&(s->directory), dir_index + k),
2705
				    sizeof(a_direntry))) {
2705
				    sizeof(direntry_t))) {
2706 2706
			    fprintf(stderr, "Warning: tried to write to write-protected file\n");
2707 2707
			    return -1;
2708 2708
			}
......
2774 2774
    int size = sector2cluster(s, s->sector_count);
2775 2775
    s->used_clusters = calloc(size, 1);
2776 2776

  
2777
    array_init(&(s->commits), sizeof(a_commit));
2777
    array_init(&(s->commits), sizeof(commit_t));
2778 2778

  
2779 2779
    s->qcow_filename = qemu_malloc(1024);
2780 2780
    get_tmp_filename(s->qcow_filename, 1024);
......
2833 2833

  
2834 2834
#ifdef DEBUG
2835 2835
static void checkpoint(void) {
2836
    assert(((a_mapping*)array_get(&(vvv->mapping), 0))->end == 2);
2836
    assert(((mapping_t*)array_get(&(vvv->mapping), 0))->end == 2);
2837 2837
    check1(vvv);
2838 2838
    check2(vvv);
2839 2839
    assert(!vvv->current_mapping || vvv->current_fd || (vvv->current_mapping->mode & MODE_DIRECTORY));
2840 2840
#if 0
2841
    if (((a_direntry*)vvv->directory.pointer)[1].attributes != 0xf)
2841
    if (((direntry_t*)vvv->directory.pointer)[1].attributes != 0xf)
2842 2842
	fprintf(stderr, "Nonono!\n");
2843
    a_mapping* mapping;
2844
    a_direntry* direntry;
2843
    mapping_t* mapping;
2844
    direntry_t* direntry;
2845 2845
    assert(vvv->mapping.size >= vvv->mapping.item_size * vvv->mapping.next);
2846 2846
    assert(vvv->directory.size >= vvv->directory.item_size * vvv->directory.next);
2847 2847
    if (vvv->mapping.next<47)
b/console.c
110 110
    GRAPHIC_CONSOLE,
111 111
    TEXT_CONSOLE,
112 112
    TEXT_CONSOLE_FIXED_SIZE
113
} e_console_type;
113
} console_type_t;
114 114

  
115 115
/* ??? This is mis-named.
116 116
   It is used for both text and graphical consoles.  */
117 117
struct TextConsole {
118
    e_console_type console_type;
118
    console_type_t console_type;
119 119
    DisplayState *ds;
120 120
    /* Graphic console state.  */
121 121
    vga_hw_update_ptr hw_update;
......
183 183
    active_console = previous_active_console;
184 184
}
185 185

  
186
void vga_hw_text_update(a_console_ch *chardata)
186
void vga_hw_text_update(console_ch_t *chardata)
187 187
{
188 188
    if (active_console && active_console->hw_text_update)
189 189
        active_console->hw_text_update(active_console->hw, chardata);
......
1197 1197
    console_refresh(s);
1198 1198
}
1199 1199

  
1200
static void text_console_update(void *opaque, a_console_ch *chardata)
1200
static void text_console_update(void *opaque, console_ch_t *chardata)
1201 1201
{
1202 1202
    TextConsole *s = (TextConsole *) opaque;
1203 1203
    int i, j, src;
......
1236 1236
    return NULL;
1237 1237
}
1238 1238

  
1239
static TextConsole *new_console(DisplayState *ds, e_console_type console_type)
1239
static TextConsole *new_console(DisplayState *ds, console_type_t console_type)
1240 1240
{
1241 1241
    TextConsole *s;
1242 1242
    int i;
b/console.h
279 279
    return ds->surface->pf.bytes_per_pixel;
280 280
}
281 281

  
282
typedef unsigned long a_console_ch;
283
static inline void console_write_ch(a_console_ch *dest, uint32_t ch)
282
typedef unsigned long console_ch_t;
283
static inline void console_write_ch(console_ch_t *dest, uint32_t ch)
284 284
{
285 285
    cpu_to_le32wu((uint32_t *) dest, ch);
286 286
}
......
288 288
typedef void (*vga_hw_update_ptr)(void *);
289 289
typedef void (*vga_hw_invalidate_ptr)(void *);
290 290
typedef void (*vga_hw_screen_dump_ptr)(void *, const char *);
291
typedef void (*vga_hw_text_update_ptr)(void *, a_console_ch *);
291
typedef void (*vga_hw_text_update_ptr)(void *, console_ch_t *);
292 292

  
293 293
DisplayState *graphic_console_init(vga_hw_update_ptr update,
294 294
                                   vga_hw_invalidate_ptr invalidate,
......
299 299
void vga_hw_update(void);
300 300
void vga_hw_invalidate(void);
301 301
void vga_hw_screen_dump(const char *filename);
302
void vga_hw_text_update(a_console_ch *chardata);
302
void vga_hw_text_update(console_ch_t *chardata);
303 303

  
304 304
int is_graphic_console(void);
305 305
int is_fixedsize_console(void);
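For the console_ch_t rename, a hedged sketch of a text-mode update callback matching the vga_hw_text_update_ptr signature above; the 80x25 buffer size and the blanking behaviour are assumptions, not taken from this header:

    static void my_text_update(void *opaque, console_ch_t *chardata)
    {
        int i;

        for (i = 0; i < 80 * 25; i++) {           /* assumed: one console_ch_t per cell */
            console_write_ch(&chardata[i], ' ');  /* blank every cell                   */
        }
    }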
b/cpu-all.h
814 814
/* Return the physical page corresponding to a virtual one. Use it
815 815
   only for debugging because no protection checks are done. Return -1
816 816
   if no page found. */
817
a_target_phys_addr cpu_get_phys_page_debug(CPUState *env, target_ulong addr);
817
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);
818 818

  
819 819
#define CPU_LOG_TB_OUT_ASM (1 << 0)
820 820
#define CPU_LOG_TB_IN_ASM  (1 << 1)
......
847 847

  
848 848
extern int phys_ram_fd;
849 849
extern uint8_t *phys_ram_dirty;
850
extern a_ram_addr ram_size;
851
extern a_ram_addr last_ram_offset;
850
extern ram_addr_t ram_size;
851
extern ram_addr_t last_ram_offset;
852 852

  
853 853
/* physical memory access */
854 854

  
......
876 876
#define MIGRATION_DIRTY_FLAG 0x08
877 877

  
878 878
/* read dirty bit (return 0 or 1) */
879
static inline int cpu_physical_memory_is_dirty(a_ram_addr addr)
879
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
880 880
{
881 881
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
882 882
}
883 883

  
884
static inline int cpu_physical_memory_get_dirty(a_ram_addr addr,
884
static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
885 885
                                                int dirty_flags)
886 886
{
887 887
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
888 888
}
889 889

  
890
static inline void cpu_physical_memory_set_dirty(a_ram_addr addr)
890
static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
891 891
{
892 892
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
893 893
}
894 894

  
895
void cpu_physical_memory_reset_dirty(a_ram_addr start, a_ram_addr end,
895
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
896 896
                                     int dirty_flags);
897 897
void cpu_tlb_update_dirty(CPUState *env);
898 898

  
......
900 900

  
901 901
int cpu_physical_memory_get_dirty_tracking(void);
902 902

  
903
int cpu_physical_sync_dirty_bitmap(a_target_phys_addr start_addr,
904
                                   a_target_phys_addr end_addr);
903
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
904
                                   target_phys_addr_t end_addr);
905 905

  
906 906
void dump_exec_info(FILE *f,
907 907
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
......
911 911
 * batching which can make a major impact on performance when using
912 912
 * virtualization.
913 913
 */
914
void qemu_register_coalesced_mmio(a_target_phys_addr addr, a_ram_addr size);
914
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
915 915

  
916
void qemu_unregister_coalesced_mmio(a_target_phys_addr addr, a_ram_addr size);
916
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
917 917

  
918 918
/*******************************************/
919 919
/* host CPU ticks (if available) */
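The dirty-bitmap helpers keep their behaviour; only the address type changes. A hedged sketch of scanning and then clearing the migration dirty flag over a range, using just the functions and the MIGRATION_DIRTY_FLAG visible in this hunk (TARGET_PAGE_SIZE and the addresses are assumed):

    static void sync_dirty_range(ram_addr_t start, ram_addr_t end)
    {
        ram_addr_t addr;

        for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
                /* page was written since the last reset: copy it out here */
            }
        }
        cpu_physical_memory_reset_dirty(start, end, MIGRATION_DIRTY_FLAG);
    }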
b/cpu-common.h
10 10
#include "bswap.h"
11 11

  
12 12
/* address in the RAM (different from a physical address) */
13
typedef unsigned long a_ram_addr;
13
typedef unsigned long ram_addr_t;
14 14

  
15 15
/* memory API */
16 16

  
17
typedef void CPUWriteMemoryFunc(void *opaque, a_target_phys_addr addr, uint32_t value);
18
typedef uint32_t CPUReadMemoryFunc(void *opaque, a_target_phys_addr addr);
17
typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
18
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
19 19

  
20
void cpu_register_physical_memory_offset(a_target_phys_addr start_addr,
21
                                         a_ram_addr size,
22
                                         a_ram_addr phys_offset,
23
                                         a_ram_addr region_offset);
24
static inline void cpu_register_physical_memory(a_target_phys_addr start_addr,
25
                                                a_ram_addr size,
26
                                                a_ram_addr phys_offset)
20
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
21
                                         ram_addr_t size,
22
                                         ram_addr_t phys_offset,
23
                                         ram_addr_t region_offset);
24
static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
25
                                                ram_addr_t size,
26
                                                ram_addr_t phys_offset)
27 27
{
28 28
    cpu_register_physical_memory_offset(start_addr, size, phys_offset, 0);
29 29
}
30 30

  
31
a_ram_addr cpu_get_physical_page_desc(a_target_phys_addr addr);
32
a_ram_addr qemu_ram_alloc(a_ram_addr);
33
void qemu_ram_free(a_ram_addr addr);
31
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
32
ram_addr_t qemu_ram_alloc(ram_addr_t);
33
void qemu_ram_free(ram_addr_t addr);
34 34
/* This should only be used for ram local to a device.  */
35
void *qemu_get_ram_ptr(a_ram_addr addr);
35
void *qemu_get_ram_ptr(ram_addr_t addr);
36 36
/* This should not be used by devices.  */
37
a_ram_addr qemu_ram_addr_from_host(void *ptr);
37
ram_addr_t qemu_ram_addr_from_host(void *ptr);
38 38

  
39 39
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
40 40
                           CPUWriteMemoryFunc * const *mem_write,
41 41
                           void *opaque);
42 42
void cpu_unregister_io_memory(int table_address);
43 43

  
44
void cpu_physical_memory_rw(a_target_phys_addr addr, uint8_t *buf,
44
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
45 45
                            int len, int is_write);
46
static inline void cpu_physical_memory_read(a_target_phys_addr addr,
46
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
47 47
                                            uint8_t *buf, int len)
48 48
{
49 49
    cpu_physical_memory_rw(addr, buf, len, 0);
50 50
}
51
static inline void cpu_physical_memory_write(a_target_phys_addr addr,
51
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
52 52
                                             const uint8_t *buf, int len)
53 53
{
54 54
    cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
55 55
}
56
void *cpu_physical_memory_map(a_target_phys_addr addr,
57
                              a_target_phys_addr *plen,
56
void *cpu_physical_memory_map(target_phys_addr_t addr,
57
                              target_phys_addr_t *plen,
58 58
                              int is_write);
59
void cpu_physical_memory_unmap(void *buffer, a_target_phys_addr len,
60
                               int is_write, a_target_phys_addr access_len);
59
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
60
                               int is_write, target_phys_addr_t access_len);
61 61
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
62 62
void cpu_unregister_map_client(void *cookie);
63 63

  
64
uint32_t ldub_phys(a_target_phys_addr addr);
65
uint32_t lduw_phys(a_target_phys_addr addr);
66
uint32_t ldl_phys(a_target_phys_addr addr);
67
uint64_t ldq_phys(a_target_phys_addr addr);
68
void stl_phys_notdirty(a_target_phys_addr addr, uint32_t val);
69
void stq_phys_notdirty(a_target_phys_addr addr, uint64_t val);
70
void stb_phys(a_target_phys_addr addr, uint32_t val);
71
void stw_phys(a_target_phys_addr addr, uint32_t val);
72
void stl_phys(a_target_phys_addr addr, uint32_t val);
73
void stq_phys(a_target_phys_addr addr, uint64_t val);
74

  
75
void cpu_physical_memory_write_rom(a_target_phys_addr addr,
64
uint32_t ldub_phys(target_phys_addr_t addr);
65
uint32_t lduw_phys(target_phys_addr_t addr);
66
uint32_t ldl_phys(target_phys_addr_t addr);
67
uint64_t ldq_phys(target_phys_addr_t addr);
68
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
69
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
70
void stb_phys(target_phys_addr_t addr, uint32_t val);
71
void stw_phys(target_phys_addr_t addr, uint32_t val);
72
void stl_phys(target_phys_addr_t addr, uint32_t val);
73
void stq_phys(target_phys_addr_t addr, uint64_t val);
74

  
75
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
76 76
                                   const uint8_t *buf, int len);
77 77

  
78 78
#define IO_MEM_SHIFT       3
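A hedged sketch of the usual call sequence for the API above after the rename: allocate guest RAM, map it at a guest-physical address, then access it with the ld*/st* helpers. The address and size are invented, and the IO_MEM_* flag normally OR-ed into phys_offset is outside this hunk, so it is left out:

    static void my_device_map_ram(void)
    {
        ram_addr_t offset;
        uint32_t v;

        offset = qemu_ram_alloc(0x2000);                  /* 8 KB of guest RAM       */
        cpu_register_physical_memory(0x10000000, 0x2000, offset);

        stl_phys(0x10000000, 0xdeadbeef);                 /* store a 32-bit word     */
        v = ldl_phys(0x10000000);                         /* and load it back        */
        (void)v;
    }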
b/cpu-defs.h
95 95
       use the corresponding iotlb value.  */
96 96
#if TARGET_PHYS_ADDR_BITS == 64
97 97
    /* on i386 Linux make sure it is aligned */
98
    a_target_phys_addr addend __attribute__((aligned(8)));
98
    target_phys_addr_t addend __attribute__((aligned(8)));
99 99
#else
100
    a_target_phys_addr addend;
100
    target_phys_addr_t addend;
101 101
#endif
102 102
    /* padding to get a power of two size */
103 103
    uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) - 
104 104
                  (sizeof(target_ulong) * 3 + 
105
                   ((-sizeof(target_ulong) * 3) & (sizeof(a_target_phys_addr) - 1)) + 
106
                   sizeof(a_target_phys_addr))];
105
                   ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) + 
106
                   sizeof(target_phys_addr_t))];
107 107
} CPUTLBEntry;
108 108

  
109 109
#ifdef HOST_WORDS_BIGENDIAN
......
152 152
    volatile sig_atomic_t exit_request;                                 \
153 153
    /* The meaning of the MMU modes is defined in the target code. */   \
154 154
    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                  \
155
    a_target_phys_addr iotlb[NB_MMU_MODES][CPU_TLB_SIZE];               \
155
    target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE];               \
156 156
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];           \
157 157
    /* buffer for temporaries in the code generator */                  \
158 158
    long temp_buf[CPU_TEMP_BUF_NLONGS];                                 \
b/curses.c
40 40
#define FONT_HEIGHT 16
41 41
#define FONT_WIDTH 8
42 42

  
43
static a_console_ch screen[160 * 100];
43
static console_ch_t screen[160 * 100];
44 44
static WINDOW *screenpad = NULL;
45 45
static int width, height, gwidth, gheight, invalidate;
46 46
static int px, py, sminx, sminy, smaxx, smaxy;
......
158 158

  
159 159
#include "curses_keys.h"
160 160

  
161
static a_kbd_layout *kbd_layout = NULL;
161
static kbd_layout_t *kbd_layout = NULL;
162 162
static int keycode2keysym[CURSES_KEYS];
163 163

  
164 164
static void curses_refresh(DisplayState *ds)
b/curses_keys.h
243 243

  
244 244
};
245 245

  
246
static const a_name2keysym name2keysym[] = {
246
static const name2keysym_t name2keysym[] = {
247 247
    /* Plain ASCII */
248 248
    { "space", 0x020 },
249 249
    { "exclam", 0x021 },
b/darwin-user/commpage.c
109 109
    COMMPAGE_ENTRY(add_atomic_word64,     0, 0x1c0,  unimpl_commpage,       CALL_INDIRECT | HAS_PTR),
110 110

  
111 111
    COMMPAGE_ENTRY(mach_absolute_time,    0, 0x200,  unimpl_commpage,       CALL_INDIRECT),
112
    COMMPAGE_ENTRY(a_spinlockry,          1, 0x220,  unimpl_commpage,       CALL_INDIRECT),
112
    COMMPAGE_ENTRY(spinlock_try,          1, 0x220,  unimpl_commpage,       CALL_INDIRECT),
113 113
    COMMPAGE_ENTRY(spinlock_lock,         1, 0x260,  OSSpinLockLock,        CALL_DIRECT),
114 114
    COMMPAGE_ENTRY(spinlock_unlock,       1, 0x2a0,  OSSpinLockUnlock,      CALL_DIRECT),
115 115
    COMMPAGE_ENTRY(pthread_getspecific,   0, 0x2c0,  unimpl_commpage,       CALL_INDIRECT),
b/darwin-user/machload.c
101 101
    unsigned int    es;
102 102
    unsigned int    fs;
103 103
    unsigned int    gs;
104
};
104
} mach_i386_thread_state_t;
105 105

  
106 106
void bswap_i386_thread_state(struct mach_i386_thread_state *ts)
107 107
{
b/darwin-user/qemu.h
10 10

  
11 11
#include "gdbstub.h"
12 12

  
13
typedef siginfo_t a_target_siginfo;
13
typedef siginfo_t target_siginfo_t;
14 14
#define target_sigaction	sigaction
15 15
#ifdef TARGET_I386
16 16
struct target_pt_regs {
b/disas.h
22 22
struct elf32_sym;
23 23
struct elf64_sym;
24 24

  
25
typedef const char *(*lookup_symbol_t)(struct syminfo *s, a_target_phys_addr orig_addr);
25
typedef const char *(*lookup_symbol_t)(struct syminfo *s, target_phys_addr_t orig_addr);
26 26

  
27 27
struct syminfo {
28 28
    lookup_symbol_t lookup_symbol;
b/dma-helpers.c
18 18
    qsg->size = 0;
19 19
}
20 20

  
21
void qemu_sglist_add(QEMUSGList *qsg, a_target_phys_addr base,
22
                     a_target_phys_addr len)
21
void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
22
                     target_phys_addr_t len)
23 23
{
24 24
    if (qsg->nsg == qsg->nalloc) {
25 25
        qsg->nalloc = 2 * qsg->nalloc + 1;
......
44 44
    uint64_t sector_num;
45 45
    int is_write;
46 46
    int sg_cur_index;
47
    a_target_phys_addr sg_cur_byte;
47
    target_phys_addr_t sg_cur_byte;
48 48
    QEMUIOVector iov;
49 49
    QEMUBH *bh;
50 50
} DMAAIOCB;
......
82 82
static void dma_bdrv_cb(void *opaque, int ret)
83 83
{
84 84
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
85
    a_target_phys_addr cur_addr, cur_len;
85
    target_phys_addr_t cur_addr, cur_len;
86 86
    void *mem;
87 87

  
88 88
    dbs->acb = NULL;
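A hedged sketch of building a two-segment scatter-gather list with qemu_sglist_add(); the guest addresses are invented, and the qemu_sglist_init(QEMUSGList *, int alloc_hint) signature is assumed, since only the first line of its body appears in this diff:

    static void build_two_segment_list(QEMUSGList *qsg)
    {
        qemu_sglist_init(qsg, 2);                  /* assumed signature: hint of 2 segments */
        qemu_sglist_add(qsg, 0x1000, 512);         /* first 512-byte segment                */
        qemu_sglist_add(qsg, 0x3000, 512);         /* second, non-contiguous segment        */
    }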
b/dma.h
16 16
#include "block.h"
... This diff was truncated because it exceeds the maximum size that can be displayed.
