/*
 * pcie_aer.c
 *
 * Copyright (c) 2010 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "sysemu.h"
#include "pci_bridge.h"
#include "pcie.h"
#include "msix.h"
#include "msi.h"
#include "pci_internals.h"
#include "pcie_regs.h"

//#define DEBUG_PCIE
#ifdef DEBUG_PCIE
# define PCIE_DPRINTF(fmt, ...)                                         \
    fprintf(stderr, "%s:%d " fmt, __func__, __LINE__, ## __VA_ARGS__)
#else
# define PCIE_DPRINTF(fmt, ...) do {} while (0)
#endif
#define PCIE_DEV_PRINTF(dev, fmt, ...)                                  \
    PCIE_DPRINTF("%s:%x "fmt, (dev)->name, (dev)->devfn, ## __VA_ARGS__)

/* From 6.2.7 Error Listing and Rules. Table 6-2, 6-3 and 6-4 */
static uint32_t pcie_aer_uncor_default_severity(uint32_t status)
{
    switch (status) {
    case PCI_ERR_UNC_INTN:
    case PCI_ERR_UNC_DLP:
    case PCI_ERR_UNC_SDN:
    case PCI_ERR_UNC_RX_OVER:
    case PCI_ERR_UNC_FCP:
    case PCI_ERR_UNC_MALF_TLP:
        return PCI_ERR_ROOT_CMD_FATAL_EN;
    case PCI_ERR_UNC_POISON_TLP:
    case PCI_ERR_UNC_ECRC:
    case PCI_ERR_UNC_UNSUP:
    case PCI_ERR_UNC_COMP_TIME:
    case PCI_ERR_UNC_COMP_ABORT:
    case PCI_ERR_UNC_UNX_COMP:
    case PCI_ERR_UNC_ACSV:
    case PCI_ERR_UNC_MCBTLP:
    case PCI_ERR_UNC_ATOP_EBLOCKED:
    case PCI_ERR_UNC_TLP_PRF_BLOCKED:
        return PCI_ERR_ROOT_CMD_NONFATAL_EN;
    default:
        abort();
        break;
    }
    return PCI_ERR_ROOT_CMD_FATAL_EN;
}

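/*
 * Helpers for the per-device software error log: errors that cannot be
 * reflected in the AER registers yet, because an earlier error is still
 * recorded there, are queued here and replayed once the guest clears the
 * currently recorded error.
 */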
static int aer_log_add_err(PCIEAERLog *aer_log, const PCIEAERErr *err)
{
    if (aer_log->log_num == aer_log->log_max) {
        return -1;
    }
    memcpy(&aer_log->log[aer_log->log_num], err, sizeof *err);
    aer_log->log_num++;
    return 0;
}

static void aer_log_del_err(PCIEAERLog *aer_log, PCIEAERErr *err)
{
    assert(aer_log->log_num);
    *err = aer_log->log[0];
    aer_log->log_num--;
    memmove(&aer_log->log[0], &aer_log->log[1],
            aer_log->log_num * sizeof *err);
}

static void aer_log_clear_all_err(PCIEAERLog *aer_log)
{
    aer_log->log_num = 0;
}

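/*
 * Add the AER extended capability at config space offset 'offset', allocate
 * the software error log and set up the default register values together
 * with their writable/W1C masks.  Returns 0 on success, -EINVAL if the
 * requested log size exceeds PCIE_AER_LOG_MAX_LIMIT.
 */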
int pcie_aer_init(PCIDevice *dev, uint16_t offset)
{
    PCIExpressDevice *exp;

    pcie_add_capability(dev, PCI_EXT_CAP_ID_ERR, PCI_ERR_VER,
                        offset, PCI_ERR_SIZEOF);
    exp = &dev->exp;
    exp->aer_cap = offset;

    /* log_max is a device property */
    if (dev->exp.aer_log.log_max == PCIE_AER_LOG_MAX_UNSET) {
        dev->exp.aer_log.log_max = PCIE_AER_LOG_MAX_DEFAULT;
    }
    /* clip down the value to avoid unreasonable memory usage */
    if (dev->exp.aer_log.log_max > PCIE_AER_LOG_MAX_LIMIT) {
        return -EINVAL;
    }
    dev->exp.aer_log.log = qemu_mallocz(sizeof dev->exp.aer_log.log[0] *
                                        dev->exp.aer_log.log_max);

    pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS,
                 PCI_ERR_UNC_SUPPORTED);

    pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER,
                 PCI_ERR_UNC_SEVERITY_DEFAULT);
    pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_SEVER,
                 PCI_ERR_UNC_SUPPORTED);

    pci_long_test_and_set_mask(dev->w1cmask + offset + PCI_ERR_COR_STATUS,
                               PCI_ERR_COR_STATUS);

    pci_set_long(dev->config + offset + PCI_ERR_COR_MASK,
                 PCI_ERR_COR_MASK_DEFAULT);
    pci_set_long(dev->wmask + offset + PCI_ERR_COR_MASK,
                 PCI_ERR_COR_SUPPORTED);

    /* capabilities and control. multiple header logging is supported */
    if (dev->exp.aer_log.log_max > 0) {
        pci_set_long(dev->config + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC |
                     PCI_ERR_CAP_MHRC);
        pci_set_long(dev->wmask + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE |
                     PCI_ERR_CAP_MHRE);
    } else {
        pci_set_long(dev->config + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC);
        pci_set_long(dev->wmask + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
    }

    switch (pcie_cap_get_type(dev)) {
    case PCI_EXP_TYPE_ROOT_PORT:
        /* this case will be set by pcie_aer_root_init() */
        /* fallthrough */
    case PCI_EXP_TYPE_DOWNSTREAM:
    case PCI_EXP_TYPE_UPSTREAM:
        pci_word_test_and_set_mask(dev->wmask + PCI_BRIDGE_CONTROL,
                                   PCI_BRIDGE_CTL_SERR);
        pci_long_test_and_set_mask(dev->w1cmask + PCI_STATUS,
                                   PCI_SEC_STATUS_RCV_SYSTEM_ERROR);
        break;
    default:
        /* nothing */
        break;
    }
    return 0;
}

void pcie_aer_exit(PCIDevice *dev)
{
    qemu_free(dev->exp.aer_log.log);
}

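/* Re-assert the status bit of every error still queued in the software log;
 * the Uncorrectable Error Status register is emulated as write-1-to-clear. */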
static void pcie_aer_update_uncor_status(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    PCIEAERLog *aer_log = &dev->exp.aer_log;

    uint16_t i;
    for (i = 0; i < aer_log->log_num; i++) {
        pci_long_test_and_set_mask(aer_cap + PCI_ERR_UNCOR_STATUS,
                                   dev->exp.aer_log.log[i].status);
    }
}

/*
 * return value:
 * true: error message needs to be sent up
 * false: error message is masked
 *
 * 6.2.6 Error Message Control
 * Figure 6-3
 * all pci express devices part
 */
static bool
pcie_aer_msg_alldev(PCIDevice *dev, const PCIEAERMsg *msg)
{
    if (!(pcie_aer_msg_is_uncor(msg) &&
          (pci_get_word(dev->config + PCI_COMMAND) & PCI_COMMAND_SERR))) {
        return false;
    }

    /* Signaled System Error
     *
     * 7.5.1.1 Command register
     * Bit 8 SERR# Enable
     *
     * When Set, this bit enables reporting of Non-fatal and Fatal
     * errors detected by the Function to the Root Complex. Note that
     * errors are reported if enabled either through this bit or through
     * the PCI Express specific bits in the Device Control register (see
     * Section 7.8.4).
     */
    pci_word_test_and_set_mask(dev->config + PCI_STATUS,
                               PCI_STATUS_SIG_SYSTEM_ERROR);

    if (!(msg->severity &
          pci_get_word(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL))) {
        return false;
    }

    /* send up error message */
    return true;
}

/*
 * return value:
 * true: error message is sent up
 * false: error message is masked
 *
 * 6.2.6 Error Message Control
 * Figure 6-3
 * virtual pci bridge part
 */
static bool pcie_aer_msg_vbridge(PCIDevice *dev, const PCIEAERMsg *msg)
{
    uint16_t bridge_control = pci_get_word(dev->config + PCI_BRIDGE_CONTROL);

    if (pcie_aer_msg_is_uncor(msg)) {
        /* Received System Error */
        pci_word_test_and_set_mask(dev->config + PCI_SEC_STATUS,
                                   PCI_SEC_STATUS_RCV_SYSTEM_ERROR);
    }

    if (!(bridge_control & PCI_BRIDGE_CTL_SERR)) {
        return false;
    }
    return true;
}

void pcie_aer_root_set_vector(PCIDevice *dev, unsigned int vector)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    assert(vector < PCI_ERR_ROOT_IRQ_MAX);
    pci_long_test_and_clear_mask(aer_cap + PCI_ERR_ROOT_STATUS,
                                 PCI_ERR_ROOT_IRQ);
    pci_long_test_and_set_mask(aer_cap + PCI_ERR_ROOT_STATUS,
                               vector << PCI_ERR_ROOT_IRQ_SHIFT);
}

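/* Extract the Advanced Error Interrupt Message Number from the Root Error
 * Status register; it selects the MSI/MSI-X vector used for AER interrupts. */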
static unsigned int pcie_aer_root_get_vector(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
    return (root_status & PCI_ERR_ROOT_IRQ) >> PCI_ERR_ROOT_IRQ_SHIFT;
}

/*
 * return value:
 * true: error message is sent up
 * false: error message is masked
 *
 * 6.2.6 Error Message Control
 * Figure 6-3
 * root port part
 */
static bool pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
{
    bool msg_sent;
    uint16_t cmd;
    uint8_t *aer_cap;
    uint32_t root_cmd;
    uint32_t root_status;
    bool msi_trigger;

    msg_sent = false;
    cmd = pci_get_word(dev->config + PCI_COMMAND);
    aer_cap = dev->config + dev->exp.aer_cap;
    root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
    root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
    msi_trigger = false;

    if (cmd & PCI_COMMAND_SERR) {
        /* System Error.
         *
         * The way to report a System Error is platform specific and
         * isn't implemented in qemu right now, so just discard the
         * error for now. An OS that cares about AER will receive errors
         * via the native AER mechanism, so this shouldn't matter.
         */
    }

    /* Error Message Received: Root Error Status register */
    switch (msg->severity) {
    case PCI_ERR_ROOT_CMD_COR_EN:
        if (root_status & PCI_ERR_ROOT_COR_RCV) {
            root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
        } else {
            if (root_cmd & PCI_ERR_ROOT_CMD_COR_EN) {
                msi_trigger = true;
            }
            pci_set_word(aer_cap + PCI_ERR_ROOT_COR_SRC, msg->source_id);
        }
        root_status |= PCI_ERR_ROOT_COR_RCV;
        break;
    case PCI_ERR_ROOT_CMD_NONFATAL_EN:
        if (!(root_status & PCI_ERR_ROOT_NONFATAL_RCV) &&
            root_cmd & PCI_ERR_ROOT_CMD_NONFATAL_EN) {
            msi_trigger = true;
        }
        root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
        break;
    case PCI_ERR_ROOT_CMD_FATAL_EN:
        if (!(root_status & PCI_ERR_ROOT_FATAL_RCV) &&
            root_cmd & PCI_ERR_ROOT_CMD_FATAL_EN) {
            msi_trigger = true;
        }
        if (!(root_status & PCI_ERR_ROOT_UNCOR_RCV)) {
            root_status |= PCI_ERR_ROOT_FIRST_FATAL;
        }
        root_status |= PCI_ERR_ROOT_FATAL_RCV;
        break;
    default:
        abort();
        break;
    }
    if (pcie_aer_msg_is_uncor(msg)) {
        if (root_status & PCI_ERR_ROOT_UNCOR_RCV) {
            root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
        } else {
            pci_set_word(aer_cap + PCI_ERR_ROOT_SRC, msg->source_id);
        }
        root_status |= PCI_ERR_ROOT_UNCOR_RCV;
    }
    pci_set_long(aer_cap + PCI_ERR_ROOT_STATUS, root_status);

    if (root_cmd & msg->severity) {
        /* 6.2.4.1.2 Interrupt Generation */
        if (pci_msi_enabled(dev)) {
            if (msi_trigger) {
                pci_msi_notify(dev, pcie_aer_root_get_vector(dev));
            }
        } else {
            qemu_set_irq(dev->irq[dev->exp.aer_intx], 1);
        }
        msg_sent = true;
    }
    return msg_sent;
}

/*
 * 6.2.6 Error Message Control Figure 6-3
 *
 * Walk up the bus tree from the device and propagate the error message.
 */
static void pcie_aer_msg(PCIDevice *dev, const PCIEAERMsg *msg)
{
    uint8_t type;

    while (dev) {
        if (!pci_is_express(dev)) {
            /* just ignore it */
            /* TODO: Shouldn't we set PCI_STATUS_SIG_SYSTEM_ERROR?
             * Consider e.g. a PCI bridge above a PCI Express device. */
            return;
        }

        type = pcie_cap_get_type(dev);
        if ((type == PCI_EXP_TYPE_ROOT_PORT ||
            type == PCI_EXP_TYPE_UPSTREAM ||
            type == PCI_EXP_TYPE_DOWNSTREAM) &&
            !pcie_aer_msg_vbridge(dev, msg)) {
            return;
        }
        if (!pcie_aer_msg_alldev(dev, msg)) {
            return;
        }
        if (type == PCI_EXP_TYPE_ROOT_PORT) {
            pcie_aer_msg_root_port(dev, msg);
            /* A root port can notify the system itself,
               or send the error message to a root complex event collector. */
            /*
             * If the root port is associated with an event collector,
             * return the root complex event collector here.
             * For now the root complex event collector isn't supported.
             */
            return;
        }
        dev = pci_bridge_get_device(dev->bus);
    }
}

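/*
 * Reflect a single error into the AER registers: point the First Error
 * Pointer at its status bit and latch the TLP header and prefix logs when
 * the error carries them.
 */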
static void pcie_aer_update_log(PCIDevice *dev, const PCIEAERErr *err)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint8_t first_bit = ffsl(err->status) - 1;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    int i;

    /* one and only one status bit may be set */
    assert(err->status);
    assert(!(err->status & (err->status - 1)));

    errcap &= ~(PCI_ERR_CAP_FEP_MASK | PCI_ERR_CAP_TLP);
    errcap |= PCI_ERR_CAP_FEP(first_bit);

    if (err->flags & PCIE_AER_ERR_HEADER_VALID) {
        for (i = 0; i < ARRAY_SIZE(err->header); ++i) {
            /* 7.10.8 Header Log Register */
            uint8_t *header_log =
                aer_cap + PCI_ERR_HEADER_LOG + i * sizeof err->header[0];
            cpu_to_be32wu((uint32_t*)header_log, err->header[i]);
        }
    } else {
        assert(!(err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT));
        memset(aer_cap + PCI_ERR_HEADER_LOG, 0, PCI_ERR_HEADER_LOG_SIZE);
    }

    if ((err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT) &&
        (pci_get_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL2) &
         PCI_EXP_DEVCAP2_EETLPP)) {
        for (i = 0; i < ARRAY_SIZE(err->prefix); ++i) {
            /* 7.10.12 TLP Prefix Log Register */
            uint8_t *prefix_log =
                aer_cap + PCI_ERR_TLP_PREFIX_LOG + i * sizeof err->prefix[0];
            cpu_to_be32wu((uint32_t*)prefix_log, err->prefix[i]);
        }
        errcap |= PCI_ERR_CAP_TLP;
    } else {
        memset(aer_cap + PCI_ERR_TLP_PREFIX_LOG, 0,
               PCI_ERR_TLP_PREFIX_LOG_SIZE);
    }
    pci_set_long(aer_cap + PCI_ERR_CAP, errcap);
}

static void pcie_aer_clear_log(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;

    pci_long_test_and_clear_mask(aer_cap + PCI_ERR_CAP,
                                 PCI_ERR_CAP_FEP_MASK | PCI_ERR_CAP_TLP);

    memset(aer_cap + PCI_ERR_HEADER_LOG, 0, PCI_ERR_HEADER_LOG_SIZE);
    memset(aer_cap + PCI_ERR_TLP_PREFIX_LOG, 0, PCI_ERR_TLP_PREFIX_LOG_SIZE);
}

static void pcie_aer_clear_error(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    PCIEAERLog *aer_log = &dev->exp.aer_log;
    PCIEAERErr err;

    if (!(errcap & PCI_ERR_CAP_MHRE) || !aer_log->log_num) {
        pcie_aer_clear_log(dev);
        return;
    }

    /*
     * If more errors are queued, set corresponding bits in uncorrectable
     * error status.
     * We emulate uncorrectable error status register as W1CS.
     * So set bit in uncorrectable error status here again for multiple
     * error recording support.
     *
     * 6.2.4.2 Multiple Error Handling (Advanced Error Reporting Capability)
     */
    pcie_aer_update_uncor_status(dev);

    aer_log_del_err(aer_log, &err);
    pcie_aer_update_log(dev, &err);
}

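/*
 * Record an error so the guest can retrieve it.  If multiple header recording
 * is enabled and the registers still hold the first error, queue the new one
 * in the software log instead; returns -1 if that log overflows, 0 otherwise.
 */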
static int pcie_aer_record_error(PCIDevice *dev,
                                 const PCIEAERErr *err)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    int fep = PCI_ERR_CAP_FEP(errcap);

    /* one and only one status bit may be set */
    assert(err->status);
    assert(!(err->status & (err->status - 1)));

    if (errcap & PCI_ERR_CAP_MHRE &&
        (pci_get_long(aer_cap + PCI_ERR_UNCOR_STATUS) & (1U << fep))) {
        /* not the first error: queue it */
        if (aer_log_add_err(&dev->exp.aer_log, err) < 0) {
            /* overflow */
            return -1;
        }
        return 0;
    }

    pcie_aer_update_log(dev, err);
    return 0;
}

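/* Scratch state shared by the error injection helpers below. */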
typedef struct PCIEAERInject {
    PCIDevice *dev;
    uint8_t *aer_cap;
    const PCIEAERErr *err;
    uint16_t devctl;
    uint16_t devsta;
    uint32_t error_status;
    bool unsupported_request;
    bool log_overflow;
    PCIEAERMsg msg;
} PCIEAERInject;

static bool pcie_aer_inject_cor_error(PCIEAERInject *inj,
                                      uint32_t uncor_status,
                                      bool is_advisory_nonfatal)
{
    PCIDevice *dev = inj->dev;

    inj->devsta |= PCI_EXP_DEVSTA_CED;
    if (inj->unsupported_request) {
        inj->devsta |= PCI_EXP_DEVSTA_URD;
    }
    pci_set_word(dev->config + dev->exp.exp_cap + PCI_EXP_DEVSTA, inj->devsta);

    if (inj->aer_cap) {
        uint32_t mask;
        pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_COR_STATUS,
                                   inj->error_status);
        mask = pci_get_long(inj->aer_cap + PCI_ERR_COR_MASK);
        if (mask & inj->error_status) {
            return false;
        }
        if (is_advisory_nonfatal) {
            uint32_t uncor_mask =
                pci_get_long(inj->aer_cap + PCI_ERR_UNCOR_MASK);
            if (!(uncor_mask & uncor_status)) {
                inj->log_overflow = !!pcie_aer_record_error(dev, inj->err);
            }
            pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                       uncor_status);
        }
    }

    if (inj->unsupported_request && !(inj->devctl & PCI_EXP_DEVCTL_URRE)) {
        return false;
    }
    if (!(inj->devctl & PCI_EXP_DEVCTL_CERE)) {
        return false;
    }

    inj->msg.severity = PCI_ERR_ROOT_CMD_COR_EN;
    return true;
}

static bool pcie_aer_inject_uncor_error(PCIEAERInject *inj, bool is_fatal)
{
    PCIDevice *dev = inj->dev;
    uint16_t cmd;

    if (is_fatal) {
        inj->devsta |= PCI_EXP_DEVSTA_FED;
    } else {
        inj->devsta |= PCI_EXP_DEVSTA_NFED;
    }
    if (inj->unsupported_request) {
        inj->devsta |= PCI_EXP_DEVSTA_URD;
    }
    /* the Device Status register is 16 bits wide */
    pci_set_word(dev->config + dev->exp.exp_cap + PCI_EXP_DEVSTA, inj->devsta);

    if (inj->aer_cap) {
        uint32_t mask = pci_get_long(inj->aer_cap + PCI_ERR_UNCOR_MASK);
        if (mask & inj->error_status) {
            pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                       inj->error_status);
            return false;
        }

        inj->log_overflow = !!pcie_aer_record_error(dev, inj->err);
        pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                   inj->error_status);
    }

    cmd = pci_get_word(dev->config + PCI_COMMAND);
    if (inj->unsupported_request &&
        !(inj->devctl & PCI_EXP_DEVCTL_URRE) && !(cmd & PCI_COMMAND_SERR)) {
        return false;
    }
    if (is_fatal) {
        if (!((cmd & PCI_COMMAND_SERR) ||
              (inj->devctl & PCI_EXP_DEVCTL_FERE))) {
            return false;
        }
        inj->msg.severity = PCI_ERR_ROOT_CMD_FATAL_EN;
    } else {
        if (!((cmd & PCI_COMMAND_SERR) ||
              (inj->devctl & PCI_EXP_DEVCTL_NFERE))) {
            return false;
        }
        inj->msg.severity = PCI_ERR_ROOT_CMD_NONFATAL_EN;
    }
    return true;
}

/*
 * A non-Function-specific error must be recorded in all Functions;
 * that is the responsibility of the caller of this function.
 * It is also the caller's responsibility to determine which Function
 * should report the error.
 *
 * 6.2.4 Error Logging
 * 6.2.5 Sequence of Device Error Signaling and Logging Operations
 * Figure 6-2: Flowchart Showing Sequence of Device Error Signaling and Logging
 *             Operations
 */
int pcie_aer_inject_error(PCIDevice *dev, const PCIEAERErr *err)
{
    uint8_t *aer_cap = NULL;
    uint16_t devctl = 0;
    uint16_t devsta = 0;
    uint32_t error_status = err->status;
    PCIEAERInject inj;

    if (!pci_is_express(dev)) {
        return -ENOSYS;
    }

    if (err->flags & PCIE_AER_ERR_IS_CORRECTABLE) {
        error_status &= PCI_ERR_COR_SUPPORTED;
    } else {
        error_status &= PCI_ERR_UNC_SUPPORTED;
    }

    /* invalid status bit: one and only one bit must be set */
    if (!error_status || (error_status & (error_status - 1))) {
        return -EINVAL;
    }

    if (dev->exp.aer_cap) {
        uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
        aer_cap = dev->config + dev->exp.aer_cap;
        devctl = pci_get_long(exp_cap + PCI_EXP_DEVCTL);
        devsta = pci_get_long(exp_cap + PCI_EXP_DEVSTA);
    }

    inj.dev = dev;
    inj.aer_cap = aer_cap;
    inj.err = err;
    inj.devctl = devctl;
    inj.devsta = devsta;
    inj.error_status = error_status;
    inj.unsupported_request = !(err->flags & PCIE_AER_ERR_IS_CORRECTABLE) &&
        err->status == PCI_ERR_UNC_UNSUP;
    inj.log_overflow = false;

    if (err->flags & PCIE_AER_ERR_IS_CORRECTABLE) {
        if (!pcie_aer_inject_cor_error(&inj, 0, false)) {
            return 0;
        }
    } else {
        bool is_fatal =
            pcie_aer_uncor_default_severity(error_status) ==
            PCI_ERR_ROOT_CMD_FATAL_EN;
        if (aer_cap) {
            is_fatal =
                error_status & pci_get_long(aer_cap + PCI_ERR_UNCOR_SEVER);
        }
        if (!is_fatal && (err->flags & PCIE_AER_ERR_MAYBE_ADVISORY)) {
            inj.error_status = PCI_ERR_COR_ADV_NONFATAL;
            if (!pcie_aer_inject_cor_error(&inj, error_status, true)) {
                return 0;
            }
        } else {
            if (!pcie_aer_inject_uncor_error(&inj, is_fatal)) {
                return 0;
            }
        }
    }

    /* send up error message */
    inj.msg.source_id = err->source_id;
    pcie_aer_msg(dev, &inj.msg);

    if (inj.log_overflow) {
        PCIEAERErr header_log_overflow = {
            .status = PCI_ERR_COR_HL_OVERFLOW,
            .flags = PCIE_AER_ERR_IS_CORRECTABLE,
        };
        int ret = pcie_aer_inject_error(dev, &header_log_overflow);
        assert(!ret);
    }
    return 0;
}

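/*
 * Illustrative sketch (not part of the original code): a caller, for example
 * an error-injection monitor command, might inject a correctable Bad TLP
 * error roughly as follows.  The device pointer and source_id computation
 * are hypothetical; only the PCIEAERErr fields used above are assumed.
 *
 *     PCIEAERErr err = {
 *         .status = PCI_ERR_COR_BAD_TLP,
 *         .source_id = (pci_bus_num(dev->bus) << 8) | dev->devfn,
 *         .flags = PCIE_AER_ERR_IS_CORRECTABLE,
 *     };
 *     if (pcie_aer_inject_error(dev, &err) < 0) {
 *         // not a PCI Express device, or no supported status bit set
 *     }
 */
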
void pcie_aer_write_config(PCIDevice *dev,
                           uint32_t addr, uint32_t val, int len)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    uint32_t first_error = 1U << PCI_ERR_CAP_FEP(errcap);
    uint32_t uncorsta = pci_get_long(aer_cap + PCI_ERR_UNCOR_STATUS);

    /* uncorrectable error */
    if (!(uncorsta & first_error)) {
        /* the bit that corresponds to the first error was cleared */
        pcie_aer_clear_error(dev);
    } else if (errcap & PCI_ERR_CAP_MHRE) {
        /* When PCI_ERR_CAP_MHRE is enabled and the first error isn't cleared,
         * nothing should happen, so revert the modification to the register.
         */
        pcie_aer_update_uncor_status(dev);
    } else {
        /* capability & control
         * PCI_ERR_CAP_MHRE might have been cleared, so also clear the
         * software error log.
         */
        aer_log_clear_all_err(&dev->exp.aer_log);
    }
}

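/* Root port specific setup: make the Root Error Command enable bits writable
 * and the Root Error Status reporting bits write-1-to-clear. */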
void pcie_aer_root_init(PCIDevice *dev)
{
    uint16_t pos = dev->exp.aer_cap;

    pci_set_long(dev->wmask + pos + PCI_ERR_ROOT_COMMAND,
                 PCI_ERR_ROOT_CMD_EN_MASK);
    pci_set_long(dev->w1cmask + pos + PCI_ERR_ROOT_STATUS,
                 PCI_ERR_ROOT_STATUS_REPORT_MASK);
}

void pcie_aer_root_reset(PCIDevice *dev)
{
    uint8_t* aer_cap = dev->config + dev->exp.aer_cap;

    pci_set_long(aer_cap + PCI_ERR_ROOT_COMMAND, 0);

    /*
     * Advanced Error Interrupt Message Number in Root Error Status Register
     * must be updated by chip dependent code because it's chip dependent
     * which number is used.
     */
}

static bool pcie_aer_root_does_trigger(uint32_t cmd, uint32_t status)
{
    return
        ((cmd & PCI_ERR_ROOT_CMD_COR_EN) && (status & PCI_ERR_ROOT_COR_RCV)) ||
        ((cmd & PCI_ERR_ROOT_CMD_NONFATAL_EN) &&
         (status & PCI_ERR_ROOT_NONFATAL_RCV)) ||
        ((cmd & PCI_ERR_ROOT_CMD_FATAL_EN) &&
         (status & PCI_ERR_ROOT_FATAL_RCV));
}

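/*
 * Called after a config write to the root port (root_cmd_prev holds the Root
 * Error Command value from before the write) to re-evaluate interrupt
 * generation: MSI is fired on a 0 -> 1 transition of an enable bit whose
 * error has already been received, while the INTx level simply tracks the
 * current command/status combination.
 */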
void pcie_aer_root_write_config(PCIDevice *dev,
                                uint32_t addr, uint32_t val, int len,
                                uint32_t root_cmd_prev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;

    /* root command register */
    uint32_t root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
    if (root_cmd & PCI_ERR_ROOT_CMD_EN_MASK) {
        /* 6.2.4.1.2 Interrupt Generation */

        /* 0 -> 1 */
        uint32_t root_cmd_set = (root_cmd_prev ^ root_cmd) & root_cmd;
        uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);

        if (pci_msi_enabled(dev)) {
            if (pcie_aer_root_does_trigger(root_cmd_set, root_status)) {
                pci_msi_notify(dev, pcie_aer_root_get_vector(dev));
            }
        } else {
            int int_level = pcie_aer_root_does_trigger(root_cmd, root_status);
            qemu_set_irq(dev->irq[dev->exp.aer_intx], int_level);
        }
    }
}

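/* Live migration: state descriptions for one logged error and for the
 * variable-length error log itself (sized at run time by log_num). */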
static const VMStateDescription vmstate_pcie_aer_err = {
    .name = "PCIE_AER_ERROR",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields     = (VMStateField[]) {
        VMSTATE_UINT32(status, PCIEAERErr),
        VMSTATE_UINT16(source_id, PCIEAERErr),
        VMSTATE_UINT16(flags, PCIEAERErr),
        VMSTATE_UINT32_ARRAY(header, PCIEAERErr, 4),
        VMSTATE_UINT32_ARRAY(prefix, PCIEAERErr, 4),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_PCIE_AER_ERRS(_field, _state, _field_num, _vmsd, _type) { \
    .name       = (stringify(_field)),                                    \
    .version_id = 0,                                                      \
    .num_offset = vmstate_offset_value(_state, _field_num, uint16_t),     \
    .size       = sizeof(_type),                                          \
    .vmsd       = &(_vmsd),                                               \
    .flags      = VMS_POINTER | VMS_VARRAY_UINT16 | VMS_STRUCT,           \
    .offset     = vmstate_offset_pointer(_state, _field, _type),          \
}

const VMStateDescription vmstate_pcie_aer_log = {
    .name = "PCIE_AER_ERROR_LOG",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields     = (VMStateField[]) {
        VMSTATE_UINT16(log_num, PCIEAERLog),
        VMSTATE_UINT16(log_max, PCIEAERLog),
        VMSTATE_PCIE_AER_ERRS(log, PCIEAERLog, log_num,
                              vmstate_pcie_aer_err, PCIEAERErr),
        VMSTATE_END_OF_LIST()
    }
};