/*
 * pcie_aer.c
 *
 * Copyright (c) 2010 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "sysemu.h"
#include "pci_bridge.h"
#include "pcie.h"
#include "msix.h"
#include "msi.h"
#include "pci_internals.h"
#include "pcie_regs.h"

//#define DEBUG_PCIE
#ifdef DEBUG_PCIE
# define PCIE_DPRINTF(fmt, ...)                                         \
    fprintf(stderr, "%s:%d " fmt, __func__, __LINE__, ## __VA_ARGS__)
#else
# define PCIE_DPRINTF(fmt, ...) do {} while (0)
#endif
#define PCIE_DEV_PRINTF(dev, fmt, ...)                                  \
    PCIE_DPRINTF("%s:%x "fmt, (dev)->name, (dev)->devfn, ## __VA_ARGS__)

/* From 6.2.7 Error Listing and Rules. Table 6-2, 6-3 and 6-4 */
static uint32_t pcie_aer_uncor_default_severity(uint32_t status)
{
    switch (status) {
    case PCI_ERR_UNC_INTN:
    case PCI_ERR_UNC_DLP:
    case PCI_ERR_UNC_SDN:
    case PCI_ERR_UNC_RX_OVER:
    case PCI_ERR_UNC_FCP:
    case PCI_ERR_UNC_MALF_TLP:
        return PCI_ERR_ROOT_CMD_FATAL_EN;
    case PCI_ERR_UNC_POISON_TLP:
    case PCI_ERR_UNC_ECRC:
    case PCI_ERR_UNC_UNSUP:
    case PCI_ERR_UNC_COMP_TIME:
    case PCI_ERR_UNC_COMP_ABORT:
    case PCI_ERR_UNC_UNX_COMP:
    case PCI_ERR_UNC_ACSV:
    case PCI_ERR_UNC_MCBTLP:
    case PCI_ERR_UNC_ATOP_EBLOCKED:
    case PCI_ERR_UNC_TLP_PRF_BLOCKED:
        return PCI_ERR_ROOT_CMD_NONFATAL_EN;
    default:
        abort();
        break;
    }
    return PCI_ERR_ROOT_CMD_FATAL_EN;
}

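/*
 * Software-side error log, used to emulate multiple header recording:
 * while a first uncorrectable error is still latched in the AER
 * registers, later errors are queued here and replayed as the guest
 * clears each recorded error.
 */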
static int aer_log_add_err(PCIEAERLog *aer_log, const PCIEAERErr *err)
{
    if (aer_log->log_num == aer_log->log_max) {
        return -1;
    }
    memcpy(&aer_log->log[aer_log->log_num], err, sizeof *err);
    aer_log->log_num++;
    return 0;
}

static void aer_log_del_err(PCIEAERLog *aer_log, PCIEAERErr *err)
{
    assert(aer_log->log_num);
    *err = aer_log->log[0];
    aer_log->log_num--;
    memmove(&aer_log->log[0], &aer_log->log[1],
            aer_log->log_num * sizeof *err);
}

static void aer_log_clear_all_err(PCIEAERLog *aer_log)
{
    aer_log->log_num = 0;
}

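/*
 * Add the AER extended capability at config space offset 'offset':
 * allocate the software error log and set up the default values and the
 * writable/write-1-to-clear masks of the AER registers.
 */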
int pcie_aer_init(PCIDevice *dev, uint16_t offset)
{
    PCIExpressDevice *exp;

    pcie_add_capability(dev, PCI_EXT_CAP_ID_ERR, PCI_ERR_VER,
                        offset, PCI_ERR_SIZEOF);
    exp = &dev->exp;
    exp->aer_cap = offset;

    /* log_max is a property */
    if (dev->exp.aer_log.log_max == PCIE_AER_LOG_MAX_UNSET) {
        dev->exp.aer_log.log_max = PCIE_AER_LOG_MAX_DEFAULT;
    }
    /* clip down the value to avoid unreasonable memory usage */
    if (dev->exp.aer_log.log_max > PCIE_AER_LOG_MAX_LIMIT) {
        return -EINVAL;
    }
    dev->exp.aer_log.log = qemu_mallocz(sizeof dev->exp.aer_log.log[0] *
                                        dev->exp.aer_log.log_max);

    pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS,
                 PCI_ERR_UNC_SUPPORTED);

    pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER,
                 PCI_ERR_UNC_SEVERITY_DEFAULT);
    pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_SEVER,
                 PCI_ERR_UNC_SUPPORTED);

    pci_long_test_and_set_mask(dev->w1cmask + offset + PCI_ERR_COR_STATUS,
                               PCI_ERR_COR_STATUS);

    pci_set_long(dev->config + offset + PCI_ERR_COR_MASK,
                 PCI_ERR_COR_MASK_DEFAULT);
    pci_set_long(dev->wmask + offset + PCI_ERR_COR_MASK,
                 PCI_ERR_COR_SUPPORTED);

    /* capabilities and control. multiple header logging is supported */
    if (dev->exp.aer_log.log_max > 0) {
        pci_set_long(dev->config + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC |
                     PCI_ERR_CAP_MHRC);
        pci_set_long(dev->wmask + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE |
                     PCI_ERR_CAP_MHRE);
    } else {
        pci_set_long(dev->config + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC);
        pci_set_long(dev->wmask + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
    }

    switch (pcie_cap_get_type(dev)) {
    case PCI_EXP_TYPE_ROOT_PORT:
        /* this case will be set by pcie_aer_root_init() */
        /* fallthrough */
    case PCI_EXP_TYPE_DOWNSTREAM:
    case PCI_EXP_TYPE_UPSTREAM:
        pci_word_test_and_set_mask(dev->wmask + PCI_BRIDGE_CONTROL,
                                   PCI_BRIDGE_CTL_SERR);
        pci_long_test_and_set_mask(dev->w1cmask + PCI_STATUS,
                                   PCI_SEC_STATUS_RCV_SYSTEM_ERROR);
        break;
    default:
        /* nothing */
        break;
    }
    return 0;
}

void pcie_aer_exit(PCIDevice *dev)
{
    qemu_free(dev->exp.aer_log.log);
}

static void pcie_aer_update_uncor_status(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    PCIEAERLog *aer_log = &dev->exp.aer_log;

    uint16_t i;
    for (i = 0; i < aer_log->log_num; i++) {
        pci_long_test_and_set_mask(aer_cap + PCI_ERR_UNCOR_STATUS,
                                   dev->exp.aer_log.log[i].status);
    }
}

/*
 * return value:
 * true: error message needs to be sent up
 * false: error message is masked
 *
 * 6.2.6 Error Message Control
 * Figure 6-3
 * all pci express devices part
 */
static bool
pcie_aer_msg_alldev(PCIDevice *dev, const PCIEAERMsg *msg)
{
    if (!(pcie_aer_msg_is_uncor(msg) &&
          (pci_get_word(dev->config + PCI_COMMAND) & PCI_COMMAND_SERR))) {
        return false;
    }

    /* Signaled System Error
     *
     * 7.5.1.1 Command register
     * Bit 8 SERR# Enable
     *
     * When Set, this bit enables reporting of Non-fatal and Fatal
     * errors detected by the Function to the Root Complex. Note that
     * errors are reported if enabled either through this bit or through
     * the PCI Express specific bits in the Device Control register (see
     * Section 7.8.4).
     */
    pci_word_test_and_set_mask(dev->config + PCI_STATUS,
                               PCI_STATUS_SIG_SYSTEM_ERROR);

    if (!(msg->severity &
          pci_get_word(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL))) {
        return false;
    }

    /* send up error message */
    return true;
}

/* Get parent port to send up error message on.
 * TODO: clean up and open-code this logic */
static PCIDevice *pcie_aer_parent_port(PCIDevice *dev)
{
    PCIDevice *parent_port;
    if (pci_is_express(dev) &&
        pcie_cap_get_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
        /* Root port can notify system itself,
           or send the error message to root complex event collector. */
        /*
         * if root port is associated with an event collector,
         * return the root complex event collector here.
         * For now root complex event collector isn't supported.
         */
        parent_port = NULL;
    } else {
        parent_port = pci_bridge_get_device(dev->bus);
    }
    if (parent_port) {
        if (!pci_is_express(parent_port)) {
            /* just ignore it */
            return NULL;
        }
    }
    return parent_port;
}

/*
 * return value:
 * true: error message is sent up
 * false: error message is masked
 *
 * 6.2.6 Error Message Control
 * Figure 6-3
 * virtual pci bridge part
 */
static bool pcie_aer_msg_vbridge(PCIDevice *dev, const PCIEAERMsg *msg)
{
    uint16_t bridge_control = pci_get_word(dev->config + PCI_BRIDGE_CONTROL);

    if (pcie_aer_msg_is_uncor(msg)) {
        /* Received System Error */
        pci_word_test_and_set_mask(dev->config + PCI_SEC_STATUS,
                                   PCI_SEC_STATUS_RCV_SYSTEM_ERROR);
    }

    if (!(bridge_control & PCI_BRIDGE_CTL_SERR)) {
        return false;
    }
    return true;
}

void pcie_aer_root_set_vector(PCIDevice *dev, unsigned int vector)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    assert(vector < PCI_ERR_ROOT_IRQ_MAX);
    pci_long_test_and_clear_mask(aer_cap + PCI_ERR_ROOT_STATUS,
                                 PCI_ERR_ROOT_IRQ);
    pci_long_test_and_set_mask(aer_cap + PCI_ERR_ROOT_STATUS,
                               vector << PCI_ERR_ROOT_IRQ_SHIFT);
}

static unsigned int pcie_aer_root_get_vector(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
    return (root_status & PCI_ERR_ROOT_IRQ) >> PCI_ERR_ROOT_IRQ_SHIFT;
}

/*
 * return value:
 * true: error message is sent up
 * false: error message is masked
 *
 * 6.2.6 Error Message Control
 * Figure 6-3
 * root port part
 */
static bool pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
{
    bool msg_sent;
    uint16_t cmd;
    uint8_t *aer_cap;
    uint32_t root_cmd;
    uint32_t root_status;
    bool msi_trigger;

    msg_sent = false;
    cmd = pci_get_word(dev->config + PCI_COMMAND);
    aer_cap = dev->config + dev->exp.aer_cap;
    root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
    root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
    msi_trigger = false;

    if (cmd & PCI_COMMAND_SERR) {
        /* System Error.
         *
         * The way to report a System Error is platform specific and
         * isn't implemented in qemu right now, so just discard the
         * error for now.
         * An OS that cares about AER receives errors via the native
         * AER mechanism, so this shouldn't matter.
         */
    }

    /* Error Message Received: Root Error Status register */
    switch (msg->severity) {
    case PCI_ERR_ROOT_CMD_COR_EN:
        if (root_status & PCI_ERR_ROOT_COR_RCV) {
            root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
        } else {
            if (root_cmd & PCI_ERR_ROOT_CMD_COR_EN) {
                msi_trigger = true;
            }
            pci_set_word(aer_cap + PCI_ERR_ROOT_COR_SRC, msg->source_id);
        }
        root_status |= PCI_ERR_ROOT_COR_RCV;
        break;
    case PCI_ERR_ROOT_CMD_NONFATAL_EN:
        if (!(root_status & PCI_ERR_ROOT_NONFATAL_RCV) &&
            root_cmd & PCI_ERR_ROOT_CMD_NONFATAL_EN) {
            msi_trigger = true;
        }
        root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
        break;
    case PCI_ERR_ROOT_CMD_FATAL_EN:
        if (!(root_status & PCI_ERR_ROOT_FATAL_RCV) &&
            root_cmd & PCI_ERR_ROOT_CMD_FATAL_EN) {
            msi_trigger = true;
        }
        if (!(root_status & PCI_ERR_ROOT_UNCOR_RCV)) {
            root_status |= PCI_ERR_ROOT_FIRST_FATAL;
        }
        root_status |= PCI_ERR_ROOT_FATAL_RCV;
        break;
    default:
        abort();
        break;
    }
    if (pcie_aer_msg_is_uncor(msg)) {
        if (root_status & PCI_ERR_ROOT_UNCOR_RCV) {
            root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
        } else {
            pci_set_word(aer_cap + PCI_ERR_ROOT_SRC, msg->source_id);
        }
        root_status |= PCI_ERR_ROOT_UNCOR_RCV;
    }
    pci_set_long(aer_cap + PCI_ERR_ROOT_STATUS, root_status);

    if (root_cmd & msg->severity) {
        /* 6.2.4.1.2 Interrupt Generation */
        if (pci_msi_enabled(dev)) {
            if (msi_trigger) {
                pci_msi_notify(dev, pcie_aer_root_get_vector(dev));
            }
        } else {
            qemu_set_irq(dev->irq[dev->exp.aer_intx], 1);
        }
        msg_sent = true;
    }
    return msg_sent;
}

/*
 * 6.2.6 Error Message Control Figure 6-3
 *
 * Returns true in case the error needs to
 * be propagated up.
 * TODO: open-code.
 */
static bool pcie_send_aer_msg(PCIDevice *dev, const PCIEAERMsg *msg)
{
    uint8_t type;
    bool msg_sent;

    assert(pci_is_express(dev));

    type = pcie_cap_get_type(dev);
    if (type == PCI_EXP_TYPE_ROOT_PORT ||
        type == PCI_EXP_TYPE_UPSTREAM ||
        type == PCI_EXP_TYPE_DOWNSTREAM) {
        msg_sent = pcie_aer_msg_vbridge(dev, msg);
        if (!msg_sent) {
            return false;
        }
    }
    msg_sent = pcie_aer_msg_alldev(dev, msg);
    if (type == PCI_EXP_TYPE_ROOT_PORT && msg_sent) {
        pcie_aer_msg_root_port(dev, msg);
    }
    return msg_sent;
}

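/*
 * Walk from the signalling device towards the root complex, delivering
 * the error message at each level until it is masked or reaches a root
 * port.
 */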
static void pcie_aer_msg(PCIDevice *dev, const PCIEAERMsg *msg)
{
    while (dev) {
        if (!pcie_send_aer_msg(dev, msg)) {
            return;
        }
        dev = pcie_aer_parent_port(dev);
    }
}

static void pcie_aer_update_log(PCIDevice *dev, const PCIEAERErr *err)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint8_t first_bit = ffsl(err->status) - 1;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    int i;

    assert(err->status);
    assert(!(err->status & (err->status - 1)));

    errcap &= ~(PCI_ERR_CAP_FEP_MASK | PCI_ERR_CAP_TLP);
    errcap |= PCI_ERR_CAP_FEP(first_bit);

    if (err->flags & PCIE_AER_ERR_HEADER_VALID) {
        for (i = 0; i < ARRAY_SIZE(err->header); ++i) {
            /* 7.10.8 Header Log Register */
            uint8_t *header_log =
                aer_cap + PCI_ERR_HEADER_LOG + i * sizeof err->header[0];
            cpu_to_be32wu((uint32_t*)header_log, err->header[i]);
        }
    } else {
        assert(!(err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT));
        memset(aer_cap + PCI_ERR_HEADER_LOG, 0, PCI_ERR_HEADER_LOG_SIZE);
    }

    if ((err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT) &&
        (pci_get_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL2) &
         PCI_EXP_DEVCAP2_EETLPP)) {
        for (i = 0; i < ARRAY_SIZE(err->prefix); ++i) {
            /* 7.10.12 tlp prefix log register */
            uint8_t *prefix_log =
                aer_cap + PCI_ERR_TLP_PREFIX_LOG + i * sizeof err->prefix[0];
            cpu_to_be32wu((uint32_t*)prefix_log, err->prefix[i]);
        }
        errcap |= PCI_ERR_CAP_TLP;
    } else {
        memset(aer_cap + PCI_ERR_TLP_PREFIX_LOG, 0,
               PCI_ERR_TLP_PREFIX_LOG_SIZE);
    }
    pci_set_long(aer_cap + PCI_ERR_CAP, errcap);
}

static void pcie_aer_clear_log(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;

    pci_long_test_and_clear_mask(aer_cap + PCI_ERR_CAP,
                                 PCI_ERR_CAP_FEP_MASK | PCI_ERR_CAP_TLP);

    memset(aer_cap + PCI_ERR_HEADER_LOG, 0, PCI_ERR_HEADER_LOG_SIZE);
    memset(aer_cap + PCI_ERR_TLP_PREFIX_LOG, 0, PCI_ERR_TLP_PREFIX_LOG_SIZE);
}

static void pcie_aer_clear_error(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    PCIEAERLog *aer_log = &dev->exp.aer_log;
    PCIEAERErr err;

    if (!(errcap & PCI_ERR_CAP_MHRE) || !aer_log->log_num) {
        pcie_aer_clear_log(dev);
        return;
    }

    /*
     * If more errors are queued, set the corresponding bits in the
     * uncorrectable error status register again.
     * We emulate the uncorrectable error status register as W1CS, so the
     * bits have to be re-set here to support multiple error recording.
     *
     * 6.2.4.2 Multiple Error Handling (Advanced Error Reporting Capability)
     */
    pcie_aer_update_uncor_status(dev);

    aer_log_del_err(aer_log, &err);
    pcie_aer_update_log(dev, &err);
}

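/*
 * Latch an error into the AER registers.  With multiple header recording
 * enabled and a first error already latched, the new error is queued in
 * the software log instead; returns a negative value if that log is full.
 */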
static int pcie_aer_record_error(PCIDevice *dev,
                                 const PCIEAERErr *err)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    int fep = PCI_ERR_CAP_FEP(errcap);

    assert(err->status);
    assert(!(err->status & (err->status - 1)));

    if (errcap & PCI_ERR_CAP_MHRE &&
        (pci_get_long(aer_cap + PCI_ERR_UNCOR_STATUS) & (1U << fep))) {
        /* Not the first error: queue it */
        if (aer_log_add_err(&dev->exp.aer_log, err) < 0) {
            /* overflow */
            return -1;
        }
        return 0;
    }

    pcie_aer_update_log(dev, err);
    return 0;
}

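/* Scratch state shared by the error injection helpers below. */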
typedef struct PCIEAERInject {
    PCIDevice *dev;
    uint8_t *aer_cap;
    const PCIEAERErr *err;
    uint16_t devctl;
    uint16_t devsta;
    uint32_t error_status;
    bool unsupported_request;
    bool log_overflow;
    PCIEAERMsg msg;
} PCIEAERInject;

static bool pcie_aer_inject_cor_error(PCIEAERInject *inj,
                                      uint32_t uncor_status,
                                      bool is_advisory_nonfatal)
{
    PCIDevice *dev = inj->dev;

    inj->devsta |= PCI_EXP_DEVSTA_CED;
    if (inj->unsupported_request) {
        inj->devsta |= PCI_EXP_DEVSTA_URD;
    }
    pci_set_word(dev->config + dev->exp.exp_cap + PCI_EXP_DEVSTA, inj->devsta);

    if (inj->aer_cap) {
        uint32_t mask;
        pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_COR_STATUS,
                                   inj->error_status);
        mask = pci_get_long(inj->aer_cap + PCI_ERR_COR_MASK);
        if (mask & inj->error_status) {
            return false;
        }
        if (is_advisory_nonfatal) {
            uint32_t uncor_mask =
                pci_get_long(inj->aer_cap + PCI_ERR_UNCOR_MASK);
            if (!(uncor_mask & uncor_status)) {
                inj->log_overflow = !!pcie_aer_record_error(dev, inj->err);
            }
            pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                       uncor_status);
        }
    }

    if (inj->unsupported_request && !(inj->devctl & PCI_EXP_DEVCTL_URRE)) {
        return false;
    }
    if (!(inj->devctl & PCI_EXP_DEVCTL_CERE)) {
        return false;
    }

    inj->msg.severity = PCI_ERR_ROOT_CMD_COR_EN;
    return true;
}

static bool pcie_aer_inject_uncor_error(PCIEAERInject *inj, bool is_fatal)
{
    PCIDevice *dev = inj->dev;
    uint16_t cmd;

    if (is_fatal) {
        inj->devsta |= PCI_EXP_DEVSTA_FED;
    } else {
        inj->devsta |= PCI_EXP_DEVSTA_NFED;
    }
    if (inj->unsupported_request) {
        inj->devsta |= PCI_EXP_DEVSTA_URD;
    }
    /* Device Status is a 16 bit register: use a word access like the
     * correctable path above does. */
    pci_set_word(dev->config + dev->exp.exp_cap + PCI_EXP_DEVSTA, inj->devsta);

    if (inj->aer_cap) {
        uint32_t mask = pci_get_long(inj->aer_cap + PCI_ERR_UNCOR_MASK);
        if (mask & inj->error_status) {
            pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                       inj->error_status);
            return false;
        }

        inj->log_overflow = !!pcie_aer_record_error(dev, inj->err);
        pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                   inj->error_status);
    }

    cmd = pci_get_word(dev->config + PCI_COMMAND);
    if (inj->unsupported_request &&
        !(inj->devctl & PCI_EXP_DEVCTL_URRE) && !(cmd & PCI_COMMAND_SERR)) {
        return false;
    }
    if (is_fatal) {
        if (!((cmd & PCI_COMMAND_SERR) ||
              (inj->devctl & PCI_EXP_DEVCTL_FERE))) {
            return false;
        }
        inj->msg.severity = PCI_ERR_ROOT_CMD_FATAL_EN;
    } else {
        if (!((cmd & PCI_COMMAND_SERR) ||
              (inj->devctl & PCI_EXP_DEVCTL_NFERE))) {
            return false;
        }
        inj->msg.severity = PCI_ERR_ROOT_CMD_NONFATAL_EN;
    }
    return true;
}

/*
 * A non-Function-specific error must be recorded in all functions;
 * that is the responsibility of the caller of this function.
 * It is also the caller's responsibility to determine which function
 * should report the error.
 *
 * 6.2.4 Error Logging
 * 6.2.5 Sequence of Device Error Signaling and Logging Operations
 * Table 6-2: Flowchart Showing Sequence of Device Error Signaling and Logging
 *            Operations
 */
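/*
 * Illustrative sketch only (the real callers live outside this file):
 * a debug or monitor path might inject a non-fatal completion timeout
 * roughly like this, with source_id being the requester ID of the
 * reporting function:
 *
 *     PCIEAERErr err = {
 *         .status    = PCI_ERR_UNC_COMP_TIME,
 *         .source_id = (pci_bus_num(dev->bus) << 8) | dev->devfn,
 *         .flags     = 0,
 *     };
 *     pcie_aer_inject_error(dev, &err);
 *
 * A negative return value (-ENOSYS, -EINVAL) should be reported back to
 * whoever requested the injection.
 */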
int pcie_aer_inject_error(PCIDevice *dev, const PCIEAERErr *err)
{
    uint8_t *aer_cap = NULL;
    uint16_t devctl = 0;
    uint16_t devsta = 0;
    uint32_t error_status = err->status;
    PCIEAERInject inj;

    if (!pci_is_express(dev)) {
        return -ENOSYS;
    }

    if (err->flags & PCIE_AER_ERR_IS_CORRECTABLE) {
        error_status &= PCI_ERR_COR_SUPPORTED;
    } else {
        error_status &= PCI_ERR_UNC_SUPPORTED;
    }

    /* invalid status bit. one and only one bit must be set */
    if (!error_status || (error_status & (error_status - 1))) {
        return -EINVAL;
    }

    if (dev->exp.aer_cap) {
        uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
        aer_cap = dev->config + dev->exp.aer_cap;
        devctl = pci_get_long(exp_cap + PCI_EXP_DEVCTL);
        devsta = pci_get_long(exp_cap + PCI_EXP_DEVSTA);
    }

    inj.dev = dev;
    inj.aer_cap = aer_cap;
    inj.err = err;
    inj.devctl = devctl;
    inj.devsta = devsta;
    inj.error_status = error_status;
    inj.unsupported_request = !(err->flags & PCIE_AER_ERR_IS_CORRECTABLE) &&
        err->status == PCI_ERR_UNC_UNSUP;
    inj.log_overflow = false;

    if (err->flags & PCIE_AER_ERR_IS_CORRECTABLE) {
        if (!pcie_aer_inject_cor_error(&inj, 0, false)) {
            return 0;
        }
    } else {
        bool is_fatal =
            pcie_aer_uncor_default_severity(error_status) ==
            PCI_ERR_ROOT_CMD_FATAL_EN;
        if (aer_cap) {
            is_fatal =
                error_status & pci_get_long(aer_cap + PCI_ERR_UNCOR_SEVER);
        }
        if (!is_fatal && (err->flags & PCIE_AER_ERR_MAYBE_ADVISORY)) {
            inj.error_status = PCI_ERR_COR_ADV_NONFATAL;
            if (!pcie_aer_inject_cor_error(&inj, error_status, true)) {
                return 0;
            }
        } else {
            if (!pcie_aer_inject_uncor_error(&inj, is_fatal)) {
                return 0;
            }
        }
    }

    /* send up error message */
    inj.msg.source_id = err->source_id;
    pcie_aer_msg(dev, &inj.msg);

    if (inj.log_overflow) {
        PCIEAERErr header_log_overflow = {
            .status = PCI_ERR_COR_HL_OVERFLOW,
            .flags = PCIE_AER_ERR_IS_CORRECTABLE,
        };
        int ret = pcie_aer_inject_error(dev, &header_log_overflow);
        assert(!ret);
    }
    return 0;
}

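/*
 * Intended to be called from the device's config write handler after the
 * write has been applied (it inspects the already-updated registers):
 * emulate the write-1-to-clear behaviour of the Uncorrectable Error
 * Status register and advance the multiple header recording state.
 */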
void pcie_aer_write_config(PCIDevice *dev,
                           uint32_t addr, uint32_t val, int len)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    uint32_t first_error = 1U << PCI_ERR_CAP_FEP(errcap);
    uint32_t uncorsta = pci_get_long(aer_cap + PCI_ERR_UNCOR_STATUS);

    /* uncorrectable error */
    if (!(uncorsta & first_error)) {
        /* the bit that corresponds to the first error is cleared */
        pcie_aer_clear_error(dev);
    } else if (errcap & PCI_ERR_CAP_MHRE) {
        /* When PCI_ERR_CAP_MHRE is enabled and the first error isn't cleared,
         * nothing should happen, so we have to revert the modification to
         * the register.
         */
        pcie_aer_update_uncor_status(dev);
    } else {
        /* capability & control
         * PCI_ERR_CAP_MHRE might have been cleared, so clear the software
         * error log as well.
         */
        aer_log_clear_all_err(&dev->exp.aer_log);
    }
}

void pcie_aer_root_init(PCIDevice *dev)
{
    uint16_t pos = dev->exp.aer_cap;

    pci_set_long(dev->wmask + pos + PCI_ERR_ROOT_COMMAND,
                 PCI_ERR_ROOT_CMD_EN_MASK);
    pci_set_long(dev->w1cmask + pos + PCI_ERR_ROOT_STATUS,
                 PCI_ERR_ROOT_STATUS_REPORT_MASK);
}

void pcie_aer_root_reset(PCIDevice *dev)
{
    uint8_t* aer_cap = dev->config + dev->exp.aer_cap;

    pci_set_long(aer_cap + PCI_ERR_ROOT_COMMAND, 0);

    /*
     * Advanced Error Interrupt Message Number in Root Error Status Register
     * must be updated by chip dependent code because it's chip dependent
     * which number is used.
     */
}

static bool pcie_aer_root_does_trigger(uint32_t cmd, uint32_t status)
{
    return
        ((cmd & PCI_ERR_ROOT_CMD_COR_EN) && (status & PCI_ERR_ROOT_COR_RCV)) ||
        ((cmd & PCI_ERR_ROOT_CMD_NONFATAL_EN) &&
         (status & PCI_ERR_ROOT_NONFATAL_RCV)) ||
        ((cmd & PCI_ERR_ROOT_CMD_FATAL_EN) &&
         (status & PCI_ERR_ROOT_FATAL_RCV));
}

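/*
 * Handle a guest write to the Root Error Command register: a 0 -> 1
 * transition of a reporting enable bit fires an MSI if the corresponding
 * error has already been received, while the INTx level simply follows
 * the current command/status combination.
 */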
void pcie_aer_root_write_config(PCIDevice *dev,
                                uint32_t addr, uint32_t val, int len,
                                uint32_t root_cmd_prev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;

    /* root command register */
    uint32_t root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
    if (root_cmd & PCI_ERR_ROOT_CMD_EN_MASK) {
        /* 6.2.4.1.2 Interrupt Generation */

        /* 0 -> 1 */
        uint32_t root_cmd_set = (root_cmd_prev ^ root_cmd) & root_cmd;
        uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);

        if (pci_msi_enabled(dev)) {
            if (pcie_aer_root_does_trigger(root_cmd_set, root_status)) {
                pci_msi_notify(dev, pcie_aer_root_get_vector(dev));
            }
        } else {
            int int_level = pcie_aer_root_does_trigger(root_cmd, root_status);
            qemu_set_irq(dev->irq[dev->exp.aer_intx], int_level);
        }
    }
}

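/* Migration state for one recorded error and for the whole error log. */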
static const VMStateDescription vmstate_pcie_aer_err = {
    .name = "PCIE_AER_ERROR",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields     = (VMStateField[]) {
        VMSTATE_UINT32(status, PCIEAERErr),
        VMSTATE_UINT16(source_id, PCIEAERErr),
        VMSTATE_UINT16(flags, PCIEAERErr),
        VMSTATE_UINT32_ARRAY(header, PCIEAERErr, 4),
        VMSTATE_UINT32_ARRAY(prefix, PCIEAERErr, 4),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_PCIE_AER_ERRS(_field, _state, _field_num, _vmsd, _type) { \
    .name       = (stringify(_field)),                                    \
    .version_id = 0,                                                      \
    .num_offset = vmstate_offset_value(_state, _field_num, uint16_t),     \
    .size       = sizeof(_type),                                          \
    .vmsd       = &(_vmsd),                                               \
    .flags      = VMS_POINTER | VMS_VARRAY_UINT16 | VMS_STRUCT,           \
    .offset     = vmstate_offset_pointer(_state, _field, _type),          \
}

const VMStateDescription vmstate_pcie_aer_log = {
    .name = "PCIE_AER_ERROR_LOG",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields     = (VMStateField[]) {
        VMSTATE_UINT16(log_num, PCIEAERLog),
        VMSTATE_UINT16(log_max, PCIEAERLog),
        VMSTATE_PCIE_AER_ERRS(log, PCIEAERLog, log_num,
                              vmstate_pcie_aer_err, PCIEAERErr),
        VMSTATE_END_OF_LIST()
    }
};