1 /*
2 * Driver for Pondicherry2 memory controller.
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * [Derived from sb_edac.c]
16 *
17 * Translation of system physical addresses to DIMM addresses
18 * is a two stage process:
19 *
20 * First the Pondicherry 2 memory controller handles slice and channel interleaving
21 * in "sys2pmi()". This is (almost) completley common between platforms.
22 *
23 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
24 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
25 */
26
27 #include <linux/module.h>
28 #include <linux/init.h>
29 #include <linux/pci.h>
30 #include <linux/pci_ids.h>
31 #include <linux/slab.h>
32 #include <linux/delay.h>
33 #include <linux/edac.h>
34 #include <linux/mmzone.h>
35 #include <linux/smp.h>
36 #include <linux/bitmap.h>
37 #include <linux/math64.h>
38 #include <linux/mod_devicetable.h>
39 #include <asm/cpu_device_id.h>
40 #include <asm/intel-family.h>
41 #include <asm/processor.h>
42 #include <asm/mce.h>
43
44 #include "edac_mc.h"
45 #include "edac_module.h"
46 #include "pnd2_edac.h"
47
48 #define EDAC_MOD_STR "pnd2_edac"
49
50 #define APL_NUM_CHANNELS 4
51 #define DNV_NUM_CHANNELS 2
52 #define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */
53
/* Memory controller flavors handled by this driver */
enum type {
	APL,	/* Apollo Lake */
	DNV,	/* Denverton. All requests go to PMI CH0 on each slice (CH1 disabled) */
};
58
/* DRAM coordinates produced by the two-stage address translation */
struct dram_addr {
	int chan;	/* channel */
	int dimm;	/* DIMM within the channel */
	int rank;
	int bank;
	int row;
	int col;	/* column */
};
67
/* Per-memory-controller private data (mci->pvt_info) */
struct pnd2_pvt {
	int dimm_geom[APL_NUM_CHANNELS];	/* per-channel index into the dimms[] geometry table */
	u64 tolm, tohm;	/* NOTE(review): presumably top of low/high memory — set outside this chunk */
};
72
73 /*
74 * System address space is divided into multiple regions with
75 * different interleave rules in each. The as0/as1 regions
76 * have no interleaving at all. The as2 region is interleaved
77 * between two channels. The mot region is magic and may overlap
78 * other regions, with its interleave rules taking precedence.
79 * Addresses not in any of these regions are interleaved across
80 * all four channels.
81 */
static struct region {
	u64 base;
	u64 limit;	/* inclusive upper bound — see in_region() */
	u8 enabled;	/* non-zero once mk_region*() has populated base/limit */
} mot, as0, as1, as2;
87
/*
 * Platform abstraction: register access and address-decode hooks that
 * differ between the Apollo Lake and Denverton dunits.
 */
static struct dunit_ops {
	char *name;
	enum type type;		/* APL or DNV */
	int pmiaddr_shift;
	int pmiidx_shift;
	int channels;
	int dimms_per_channel;
	/* read one h/w register via the platform's access mechanism */
	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
	int (*get_registers)(void);	/* read all platform-specific dunit registers */
	int (*check_ecc)(void);
	void (*mk_region)(char *name, struct region *rp, void *asym);	/* build an asymmetric region */
	void (*get_dimm_config)(struct mem_ctl_info *mci);
	/* second stage decode: PMI address -> rank/bank/row/column */
	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
				   struct dram_addr *daddr, char *msg);
} *ops;
103
104 static struct mem_ctl_info *pnd2_mci;
105
106 #define PND2_MSG_SIZE 256
107
108 /* Debug macros */
109 #define pnd2_printk(level, fmt, arg...) \
110 edac_printk(level, "pnd2", fmt, ##arg)
111
112 #define pnd2_mc_printk(mci, level, fmt, arg...) \
113 edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
114
115 #define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
116 #define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
117 #define SELECTOR_DISABLED (-1)
118 #define _4GB (1ul << 32)
119
120 #define PMI_ADDRESS_WIDTH 31
121 #define PND_MAX_PHYS_BIT 39
122
123 #define APL_ASYMSHIFT 28
124 #define DNV_ASYMSHIFT 31
125 #define CH_HASH_MASK_LSB 6
126 #define SLICE_HASH_MASK_LSB 6
127 #define MOT_SLC_INTLV_BIT 12
128 #define LOG2_PMI_ADDR_GRANULARITY 5
129 #define MOT_SHIFT 24
130
131 #define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
132 #define U64_LSHIFT(val, s) ((u64)(val) << (s))
133
134 /*
135 * On Apollo Lake we access memory controller registers via a
136 * side-band mailbox style interface in a hidden PCI device
137 * configuration space.
138 */
139 static struct pci_bus *p2sb_bus;
140 #define P2SB_DEVFN PCI_DEVFN(0xd, 0)
141 #define P2SB_ADDR_OFF 0xd0
142 #define P2SB_DATA_OFF 0xd4
143 #define P2SB_STAT_OFF 0xd8
144 #define P2SB_ROUT_OFF 0xda
145 #define P2SB_EADD_OFF 0xdc
146 #define P2SB_HIDE_OFF 0xe1
147
148 #define P2SB_BUSY 1
149
150 #define P2SB_READ(size, off, ptr) \
151 pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
152 #define P2SB_WRITE(size, off, val) \
153 pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)
154
p2sb_is_busy(u16 * status)155 static bool p2sb_is_busy(u16 *status)
156 {
157 P2SB_READ(word, P2SB_STAT_OFF, status);
158
159 return !!(*status & P2SB_BUSY);
160 }
161
/*
 * Perform one 32-bit read through the P2SB sideband mailbox.
 * Returns -EAGAIN if the mailbox is already busy, -EBUSY if it never
 * completes, otherwise the completion code from status bits [2:1]
 * (callers treat any non-zero value as failure).
 */
static int _apl_rd_reg(int port, int off, int op, u32 *data)
{
	int retries = 0xff, ret;
	u16 status;
	u8 hidden;

	/* Unhide the P2SB device, if it's hidden */
	P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);

	if (p2sb_is_busy(&status)) {
		ret = -EAGAIN;
		goto out;
	}

	/* Program target port/offset, clear data/extended-address/routing,
	 * then kick off the transaction by setting the busy bit. */
	P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
	P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
	P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
	P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
	P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);

	/* Poll for completion, bounded by 'retries' iterations */
	while (p2sb_is_busy(&status)) {
		if (retries-- == 0) {
			ret = -EBUSY;
			goto out;
		}
	}

	P2SB_READ(dword, P2SB_DATA_OFF, data);
	ret = (status >> 1) & 0x3;
out:
	/* Hide the P2SB device, if it was hidden before */
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);

	return ret;
}
200
/*
 * Read a 4- or 8-byte register via the P2SB mailbox. An 8-byte read is
 * done as two 4-byte transactions (high dword first) and the two
 * completion codes are OR-ed together. Sizes other than 4 or 8 fall out
 * of the switch and return 0 without reading anything.
 */
static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	int ret = 0;

	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
	switch (sz) {
	case 8:
		ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
		/* fall through */
	case 4:
		ret |= _apl_rd_reg(port, off, op, (u32 *)data);
		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
					sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
		break;
	}

	return ret;
}
219
get_mem_ctrl_hub_base_addr(void)220 static u64 get_mem_ctrl_hub_base_addr(void)
221 {
222 struct b_cr_mchbar_lo_pci lo;
223 struct b_cr_mchbar_hi_pci hi;
224 struct pci_dev *pdev;
225
226 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
227 if (pdev) {
228 pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
229 pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
230 pci_dev_put(pdev);
231 } else {
232 return 0;
233 }
234
235 if (!lo.enable) {
236 edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
237 return 0;
238 }
239
240 return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
241 }
242
get_sideband_reg_base_addr(void)243 static u64 get_sideband_reg_base_addr(void)
244 {
245 struct pci_dev *pdev;
246 u32 hi, lo;
247 u8 hidden;
248
249 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
250 if (pdev) {
251 /* Unhide the P2SB device, if it's hidden */
252 pci_read_config_byte(pdev, 0xe1, &hidden);
253 if (hidden)
254 pci_write_config_byte(pdev, 0xe1, 0);
255
256 pci_read_config_dword(pdev, 0x10, &lo);
257 pci_read_config_dword(pdev, 0x14, &hi);
258 lo &= 0xfffffff0;
259
260 /* Hide the P2SB device, if it was hidden before */
261 if (hidden)
262 pci_write_config_byte(pdev, 0xe1, hidden);
263
264 pci_dev_put(pdev);
265 return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
266 } else {
267 return 0xfd000000;
268 }
269 }
270
271 #define DNV_MCHBAR_SIZE 0x8000
272 #define DNV_SB_PORT_SIZE 0x10000
/*
 * Denverton register read. Depending on op/port the register is reached
 * via (a) PCI config space of device 0x1980 (op == 4), (b) MMIO through
 * the memory controller hub base (op == 0, port == 0x4c), or (c) MMIO
 * through the sideband base plus a 64KB-per-port offset.
 */
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	char *base;
	u64 addr;
	unsigned long size;

	if (op == 4) {
		/* PCI configuration space read */
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address */
		if (op == 0 && port == 0x4c) {
			addr = get_mem_ctrl_hub_base_addr();
			if (!addr)
				return -ENODEV;
			size = DNV_MCHBAR_SIZE;
		} else {
			/* MMIO via sideband register base address */
			addr = get_sideband_reg_base_addr();
			if (!addr)
				return -ENODEV;
			addr += (port << 16);
			size = DNV_SB_PORT_SIZE;
		}

		base = ioremap((resource_size_t)addr, size);
		if (!base)
			return -ENODEV;

		/* 8-byte registers are read as two 32-bit halves */
		if (sz == 8)
			*(u32 *)(data + 4) = *(u32 *)(base + off + 4);
		*(u32 *)data = *(u32 *)(base + off);

		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
			(sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}
319
320 #define RD_REGP(regp, regname, port) \
321 ops->rd_reg(port, \
322 regname##_offset, \
323 regname##_r_opcode, \
324 regp, sizeof(struct regname), \
325 #regname)
326
327 #define RD_REG(regp, regname) \
328 ops->rd_reg(regname ## _port, \
329 regname##_offset, \
330 regname##_r_opcode, \
331 regp, sizeof(struct regname), \
332 #regname)
333
/* Top of low/high memory, derived from the TOLUD/TOUUD registers */
static u64 top_lm, top_hm;
static bool two_slices;
static bool two_channels; /* Both PMI channels in one slice enabled */

/* Channel bitmaps: symmetric, asymmetric, and their union */
static u8 sym_chan_mask;
static u8 asym_chan_mask;
static u8 chan_mask;

/* Address bit used to select slice/channel (-1 = disabled) and the
 * XOR hash masks used when hashing is enabled — all set in get_registers() */
static int slice_selector = -1;
static int chan_selector = -1;
static u64 slice_hash_mask;
static u64 chan_hash_mask;
346
/* Populate and enable a region with an explicit [base, limit] range. */
static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
{
	rp->base = base;
	rp->limit = limit;
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
}
354
/*
 * Populate a region from a base/mask pair (MOT region style). The mask
 * must be a contiguous run of high bits up to PND_MAX_PHYS_BIT and the
 * base must be aligned to it; otherwise the region is left disabled and
 * a firmware bug is reported.
 */
static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
	if (!mask) {
		pr_info(FW_BUG "MOT mask cannot be zero\n");
		return;
	}
	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
		pr_info(FW_BUG "MOT mask not power of two\n");
		return;
	}
	if (base & ~mask) {
		pr_info(FW_BUG "MOT region base/mask alignment error\n");
		return;
	}

	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
	rp->base = base;
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}
374
in_region(struct region * rp,u64 addr)375 static bool in_region(struct region *rp, u64 addr)
376 {
377 if (!rp->enabled)
378 return false;
379
380 return rp->base <= addr && addr <= rp->limit;
381 }
382
gen_sym_mask(struct b_cr_slice_channel_hash * p)383 static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
384 {
385 int mask = 0;
386
387 if (!p->slice_0_mem_disabled)
388 mask |= p->sym_slice0_channel_enabled;
389
390 if (!p->slice_1_disabled)
391 mask |= p->sym_slice1_channel_enabled << 2;
392
393 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
394 mask &= 0x5;
395
396 return mask;
397 }
398
gen_asym_mask(struct b_cr_slice_channel_hash * p,struct b_cr_asym_mem_region0_mchbar * as0,struct b_cr_asym_mem_region1_mchbar * as1,struct b_cr_asym_2way_mem_region_mchbar * as2way)399 static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
400 struct b_cr_asym_mem_region0_mchbar *as0,
401 struct b_cr_asym_mem_region1_mchbar *as1,
402 struct b_cr_asym_2way_mem_region_mchbar *as2way)
403 {
404 const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
405 int mask = 0;
406
407 if (as2way->asym_2way_interleave_enable)
408 mask = intlv[as2way->asym_2way_intlv_mode];
409 if (as0->slice0_asym_enable)
410 mask |= (1 << as0->slice0_asym_channel_select);
411 if (as1->slice1_asym_enable)
412 mask |= (4 << as1->slice1_asym_channel_select);
413 if (p->slice_0_mem_disabled)
414 mask &= 0xc;
415 if (p->slice_1_disabled)
416 mask &= 0x3;
417 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
418 mask &= 0x5;
419
420 return mask;
421 }
422
/* Cached copies of the h/w configuration registers, filled once by
 * get_registers() and read-only thereafter. */
static struct b_cr_tolud_pci tolud;
static struct b_cr_touud_lo_pci touud_lo;
static struct b_cr_touud_hi_pci touud_hi;
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;
432
433 /* Apollo Lake dunit */
434 /*
435 * Validated on board with just two DIMMs in the [0] and [2] positions
436 * in this array. Other port number matches documentation, but caution
437 * advised.
438 */
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };	/* sideband port per channel */
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];	/* per-channel DIMM/rank config */

/* Denverton dunit */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };	/* sideband port per channel */
static struct d_cr_dsch dsch;
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];
/* dmap..dmap5: per-channel address map registers used by dnv_pmi2mem() */
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
453
apl_mk_region(char * name,struct region * rp,void * asym)454 static void apl_mk_region(char *name, struct region *rp, void *asym)
455 {
456 struct b_cr_asym_mem_region0_mchbar *a = asym;
457
458 mk_region(name, rp,
459 U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
460 U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
461 GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
462 }
463
dnv_mk_region(char * name,struct region * rp,void * asym)464 static void dnv_mk_region(char *name, struct region *rp, void *asym)
465 {
466 struct b_cr_asym_mem_region_denverton *a = asym;
467
468 mk_region(name, rp,
469 U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
470 U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
471 GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
472 }
473
apl_get_registers(void)474 static int apl_get_registers(void)
475 {
476 int ret = -ENODEV;
477 int i;
478
479 if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
480 return -ENODEV;
481
482 /*
483 * RD_REGP() will fail for unpopulated or non-existent
484 * DIMM slots. Return success if we find at least one DIMM.
485 */
486 for (i = 0; i < APL_NUM_CHANNELS; i++)
487 if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
488 ret = 0;
489
490 return ret;
491 }
492
dnv_get_registers(void)493 static int dnv_get_registers(void)
494 {
495 int i;
496
497 if (RD_REG(&dsch, d_cr_dsch))
498 return -ENODEV;
499
500 for (i = 0; i < DNV_NUM_CHANNELS; i++)
501 if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
502 RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
503 RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
504 RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
505 RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
506 RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
507 RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
508 RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
509 return -ENODEV;
510
511 return 0;
512 }
513
/*
 * Read all the h/w config registers once here (they don't change at
 * run time). Figure out which address ranges have which interleave
 * characteristics: build the mot/as0/as1/as2 regions, the channel
 * masks, and the slice/channel selector bits and hash masks used by
 * sys2pmi().
 */
static int get_registers(void)
{
	/* Interleave selector bit per chash.interleave_mode value */
	const int intlv[] = { 10, 11, 12, 12 };

	if (RD_REG(&tolud, b_cr_tolud_pci) ||
	    RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
	    RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
	    RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
	    RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
	    RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
	    RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
	    RD_REG(&chash, b_cr_slice_channel_hash))
		return -ENODEV;

	/* Platform specific (APL or DNV) registers */
	if (ops->get_registers())
		return -ENODEV;

	if (ops->type == DNV) {
		/* PMI channel idx (always 0) for asymmetric region */
		asym0.slice0_asym_channel_select = 0;
		asym1.slice1_asym_channel_select = 0;
		/* PMI channel bitmap (always 1) for symmetric region */
		chash.sym_slice0_channel_enabled = 0x1;
		chash.sym_slice1_channel_enabled = 0x1;
	}

	if (asym0.slice0_asym_enable)
		ops->mk_region("as0", &as0, &asym0);

	if (asym1.slice1_asym_enable)
		ops->mk_region("as1", &as1, &asym1);

	if (asym_2way.asym_2way_interleave_enable) {
		mk_region("as2way", &as2,
			  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
			  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
	}

	if (mot_base.imr_en) {
		mk_region_mask("mot", &mot,
			       U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
			       U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
	}

	/* Top of low (below 4GB) and high memory */
	top_lm = U64_LSHIFT(tolud.tolud, 20);
	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

	/* Both slices active, each with at least one channel enabled */
	two_slices = !chash.slice_1_disabled &&
		     !chash.slice_0_mem_disabled &&
		     (chash.sym_slice0_channel_enabled != 0) &&
		     (chash.sym_slice1_channel_enabled != 0);
	/* Both PMI channels active in at least one slice */
	two_channels = !chash.ch_1_disabled &&
		       !chash.enable_pmi_dual_data_mode &&
		       ((chash.sym_slice0_channel_enabled == 3) ||
		        (chash.sym_slice1_channel_enabled == 3));

	sym_chan_mask = gen_sym_mask(&chash);
	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
	chan_mask = sym_chan_mask | asym_chan_mask;

	/* Pick the address bit(s) that select slice/channel; hvm_mode
	 * uses fixed high bits, otherwise the mode-dependent intlv[] bit */
	if (two_slices && !two_channels) {
		if (chash.hvm_mode)
			slice_selector = 29;
		else
			slice_selector = intlv[chash.interleave_mode];
	} else if (!two_slices && two_channels) {
		if (chash.hvm_mode)
			chan_selector = 29;
		else
			chan_selector = intlv[chash.interleave_mode];
	} else if (two_slices && two_channels) {
		if (chash.hvm_mode) {
			slice_selector = 29;
			chan_selector = 30;
		} else {
			slice_selector = intlv[chash.interleave_mode];
			chan_selector = intlv[chash.interleave_mode] + 1;
		}
	}

	if (two_slices) {
		if (!chash.hvm_mode)
			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
		if (!two_channels)
			slice_hash_mask |= BIT_ULL(slice_selector);
	}

	if (two_channels) {
		if (!chash.hvm_mode)
			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
		if (!two_slices)
			chan_hash_mask |= BIT_ULL(chan_selector);
	}

	return 0;
}
616
617 /* Get a contiguous memory address (remove the MMIO gap) */
remove_mmio_gap(u64 sys)618 static u64 remove_mmio_gap(u64 sys)
619 {
620 return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
621 }
622
623 /* Squeeze out one address bit, shift upper part down to fill gap */
remove_addr_bit(u64 * addr,int bitidx)624 static void remove_addr_bit(u64 *addr, int bitidx)
625 {
626 u64 mask;
627
628 if (bitidx == -1)
629 return;
630
631 mask = (1ull << bitidx) - 1;
632 *addr = ((*addr >> 1) & ~mask) | (*addr & mask);
633 }
634
635 /* XOR all the bits from addr specified in mask */
hash_by_mask(u64 addr,u64 mask)636 static int hash_by_mask(u64 addr, u64 mask)
637 {
638 u64 result = addr & mask;
639
640 result = (result >> 32) ^ result;
641 result = (result >> 16) ^ result;
642 result = (result >> 8) ^ result;
643 result = (result >> 4) ^ result;
644 result = (result >> 2) ^ result;
645 result = (result >> 1) ^ result;
646
647 return (int)result & 1;
648 }
649
/*
 * First stage decode. Take the system address and figure out which
 * second stage will deal with it based on interleave modes: writes the
 * PMI channel index to *pmiidx and the channel-local address to
 * *pmiaddr. Returns -EINVAL (with msg filled in) for non-DRAM
 * addresses.
 */
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
	/* MOT channel interleave bit depends on the slice count */
	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
					 MOT_CHAN_INTLV_BIT_1SLC_2CH;
	int slice_intlv_bit_rm = SELECTOR_DISABLED;
	int chan_intlv_bit_rm = SELECTOR_DISABLED;
	/* Determine if address is in the MOT region. */
	bool mot_hit = in_region(&mot, addr);
	/* Calculate the number of symmetric regions enabled. */
	int sym_channels = hweight8(sym_chan_mask);

	/*
	 * The amount we need to shift the asym base can be determined by the
	 * number of enabled symmetric channels.
	 * NOTE: This can only work because symmetric memory is not supposed
	 * to do a 3-way interleave.
	 */
	int sym_chan_shift = sym_channels >> 1;

	/* Give up if address is out of range, or in MMIO gap */
	if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
	    (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
		return -EINVAL;
	}

	/* Get a contiguous memory address (remove the MMIO gap) */
	contig_addr = remove_mmio_gap(addr);

	if (in_region(&as0, addr)) {
		/* Asymmetric region 0: fixed channel on slice 0 */
		*pmiidx = asym0.slice0_asym_channel_select;

		contig_base = remove_mmio_gap(as0.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
				  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as1, addr)) {
		/* Asymmetric region 1: fixed channel on slice 1 */
		*pmiidx = 2u + asym1.slice1_asym_channel_select;

		contig_base = remove_mmio_gap(as1.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
				  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
		/* Asymmetric 2-way region: interleave between two channels */
		bool channel1;

		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
			hash_by_mask(contig_addr, chan_hash_mask);
		*pmiidx |= (u32)channel1;

		contig_base = remove_mmio_gap(as2.base);
		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
		contig_offset = contig_addr - contig_base;
		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
	} else {
		/* Otherwise we're in normal, boring symmetric mode. */
		*pmiidx = 0u;

		if (two_slices) {
			bool slice1;

			/* Slice bit comes from the MOT bit or the hash */
			if (mot_hit) {
				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
			} else {
				slice_intlv_bit_rm = slice_selector;
				slice1 = hash_by_mask(addr, slice_hash_mask);
			}

			*pmiidx = (u32)slice1 << 1;
		}

		if (two_channels) {
			bool channel1;

			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
						     MOT_CHAN_INTLV_BIT_1SLC_2CH;

			if (mot_hit) {
				chan_intlv_bit_rm = mot_intlv_bit;
				channel1 = (addr >> mot_intlv_bit) & 1;
			} else {
				chan_intlv_bit_rm = chan_selector;
				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
			}

			*pmiidx |= (u32)channel1;
		}
	}

	/* Remove the chan_selector bit first */
	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
	/* Remove the slice bit (we remove it second because it must be lower) */
	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
	*pmiaddr = contig_addr;

	return 0;
}
758
759 /* Translate PMI address to memory (rank, row, bank, column) */
760 #define C(n) (0x10 | (n)) /* column */
761 #define B(n) (0x20 | (n)) /* bank */
762 #define R(n) (0x40 | (n)) /* row */
763 #define RS (0x80) /* rank */
764
765 /* addrdec values */
766 #define AMAP_1KB 0
767 #define AMAP_2KB 1
768 #define AMAP_4KB 2
769 #define AMAP_RSVD 3
770
771 /* dden values */
772 #define DEN_4Gb 0
773 #define DEN_8Gb 2
774
775 /* dwid values */
776 #define X8 0
777 #define X16 1
778
/*
 * Geometry table: for each supported combination of address decode
 * mode, device density and device width, bits[i] names the DRAM field
 * (via the C()/B()/R()/RS macros above) that PMI address bit i maps
 * to. A zero entry means that address bit must not be set. Consumed by
 * apl_pmi2mem().
 */
static struct dimm_geometry {
	u8 addrdec;	/* AMAP_1KB / AMAP_2KB / AMAP_4KB */
	u8 dden;	/* DEN_4Gb / DEN_8Gb */
	u8 dwid;	/* X8 / X16 */
	u8 rowbits, colbits;	/* number of row/column address bits */
	u16 bits[PMI_ADDRESS_WIDTH];	/* per-PMI-bit field assignment */
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	}
};
907
/*
 * Compute the bank-address hash contribution for bank bit 'idx',
 * returned already shifted into bit position idx. 'shft' is the
 * geometry's address decode mode (page-size dependent offset).
 */
static int bank_hash(u64 pmiaddr, int idx, int shft)
{
	int bhash = 0;

	switch (idx) {
	case 0:
		bhash = ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
		break;
	case 1:
		bhash = ((((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) ^
			 ((pmiaddr >> 22) & 1)) << 1;
		break;
	case 2:
		bhash = (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
		break;
	}

	return bhash;
}
927
/* Rank-select hash: parity of PMI address bits 16 and 10. */
static int rank_hash(u64 pmiaddr)
{
	u64 x = pmiaddr >> 10;

	return (int)((x >> 6) ^ x) & 1;
}
932
/*
 * Second stage decode (Apollo Lake). Compute rank, bank, row & column
 * by walking the PMI address bit by bit and routing each bit to the
 * field named by the channel's dimm_geometry table entry.
 */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;	/* field: C()/B()/R()/RS */
		idx = d->bits[i + skiprs] & 0xf;	/* bit position within field */

		/*
		 * On single rank DIMMs ignore the rank select bit
		 * and shift remainder of "bits[]" down one place.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			/* Bank address hashing, if enabled in DRP0 */
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			/* Rank select hashing, if enabled in DRP0 */
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			/* Zero table entry: this address bit must be clear */
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			goto done;
		}
	}

done:
	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;

	return 0;
}
1000
1001 /* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
1002 #define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
1003
/*
 * Denverton flavor of PMI-address to DRAM-address translation for
 * channel @pmiidx. The dmap..dmap5 register fields give, for each
 * rank/bank/row/column bit, the PMI address bit that drives it:
 * rank-select fields are offsets from PMI bit 13, all other fields
 * offsets from PMI bit 6 (see the "+ 13" / "+ 6" below).
 * Always returns 0; @mci and @msg are unused here.
 */
static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	/* Rank 0 or 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	/* Rank 2 or 3 */
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
	 * flip them if DIMM1 is larger than DIMM0.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	/* Bank address: BA0/BA1 + bank group bit(s); BG1 exists on DDR4 only */
	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	/* Optional bank hashing: XOR selected row/column bits into the bank */
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			/* DDR3 hashes a different (smaller) set of bits */
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	/*
	 * Row bits 0-13 are always mapped. Bits 14-17 exist only on larger
	 * parts; a field value of 31 marks the bit as not implemented
	 * (dnv_get_dimm_config uses the same sentinel to size the rows).
	 */
	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	/*
	 * Column bits 3-9, plus CA11 on DDR3 parts that map it (0x3f =
	 * unmapped). Column bits 0-2 and 10 are never extracted here —
	 * presumably below decode granularity / auto-precharge; confirm
	 * against the dunit register spec.
	 */
	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}
1079
check_channel(int ch)1080 static int check_channel(int ch)
1081 {
1082 if (drp0[ch].dramtype != 0) {
1083 pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
1084 return 1;
1085 } else if (drp0[ch].eccen == 0) {
1086 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1087 return 1;
1088 }
1089 return 0;
1090 }
1091
apl_check_ecc_active(void)1092 static int apl_check_ecc_active(void)
1093 {
1094 int i, ret = 0;
1095
1096 /* Check dramtype and ECC mode for each present DIMM */
1097 for (i = 0; i < APL_NUM_CHANNELS; i++)
1098 if (chan_mask & BIT(i))
1099 ret += check_channel(i);
1100 return ret ? -EINVAL : 0;
1101 }
1102
/* Non-zero when any of the four ranks described by @d is enabled */
#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
1104
check_unit(int ch)1105 static int check_unit(int ch)
1106 {
1107 struct d_cr_drp *d = &drp[ch];
1108
1109 if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
1110 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1111 return 1;
1112 }
1113 return 0;
1114 }
1115
dnv_check_ecc_active(void)1116 static int dnv_check_ecc_active(void)
1117 {
1118 int i, ret = 0;
1119
1120 for (i = 0; i < DNV_NUM_CHANNELS; i++)
1121 ret += check_unit(i);
1122 return ret ? -EINVAL : 0;
1123 }
1124
/*
 * Translate system address @addr to DRAM coordinates in @daddr.
 * Stage 1: common sys2pmi() interleave decode; stage 2: the platform
 * dunit's pmi2mem(). On failure @msg holds the reason.
 */
static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
				 struct dram_addr *daddr, char *msg)
{
	u64 pmiaddr;
	u32 pmiidx;
	int rc = sys2pmi(addr, &pmiidx, &pmiaddr, msg);

	if (rc)
		return rc;

	/* Strip platform-specific granularity; map PMI channel to DIMM channel */
	pmiaddr >>= ops->pmiaddr_shift;
	pmiidx >>= ops->pmiidx_shift;
	daddr->chan = pmiidx;

	rc = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
	if (rc)
		return rc;

	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);

	return 0;
}
1150
pnd2_mce_output_error(struct mem_ctl_info * mci,const struct mce * m,struct dram_addr * daddr)1151 static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
1152 struct dram_addr *daddr)
1153 {
1154 enum hw_event_mc_err_type tp_event;
1155 char *optype, msg[PND2_MSG_SIZE];
1156 bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
1157 bool overflow = m->status & MCI_STATUS_OVER;
1158 bool uc_err = m->status & MCI_STATUS_UC;
1159 bool recov = m->status & MCI_STATUS_S;
1160 u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
1161 u32 mscod = GET_BITFIELD(m->status, 16, 31);
1162 u32 errcode = GET_BITFIELD(m->status, 0, 15);
1163 u32 optypenum = GET_BITFIELD(m->status, 4, 6);
1164 int rc;
1165
1166 tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
1167 HW_EVENT_ERR_CORRECTED;
1168
1169 /*
1170 * According with Table 15-9 of the Intel Architecture spec vol 3A,
1171 * memory errors should fit in this mask:
1172 * 000f 0000 1mmm cccc (binary)
1173 * where:
1174 * f = Correction Report Filtering Bit. If 1, subsequent errors
1175 * won't be shown
1176 * mmm = error type
1177 * cccc = channel
1178 * If the mask doesn't match, report an error to the parsing logic
1179 */
1180 if (!((errcode & 0xef80) == 0x80)) {
1181 optype = "Can't parse: it is not a mem";
1182 } else {
1183 switch (optypenum) {
1184 case 0:
1185 optype = "generic undef request error";
1186 break;
1187 case 1:
1188 optype = "memory read error";
1189 break;
1190 case 2:
1191 optype = "memory write error";
1192 break;
1193 case 3:
1194 optype = "addr/cmd error";
1195 break;
1196 case 4:
1197 optype = "memory scrubbing error";
1198 break;
1199 default:
1200 optype = "reserved";
1201 break;
1202 }
1203 }
1204
1205 /* Only decode errors with an valid address (ADDRV) */
1206 if (!(m->status & MCI_STATUS_ADDRV))
1207 return;
1208
1209 rc = get_memory_error_data(mci, m->addr, daddr, msg);
1210 if (rc)
1211 goto address_error;
1212
1213 snprintf(msg, sizeof(msg),
1214 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
1215 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
1216 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);
1217
1218 edac_dbg(0, "%s\n", msg);
1219
1220 /* Call the helper to output message */
1221 edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
1222 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);
1223
1224 return;
1225
1226 address_error:
1227 edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
1228 }
1229
apl_get_dimm_config(struct mem_ctl_info * mci)1230 static void apl_get_dimm_config(struct mem_ctl_info *mci)
1231 {
1232 struct pnd2_pvt *pvt = mci->pvt_info;
1233 struct dimm_info *dimm;
1234 struct d_cr_drp0 *d;
1235 u64 capacity;
1236 int i, g;
1237
1238 for (i = 0; i < APL_NUM_CHANNELS; i++) {
1239 if (!(chan_mask & BIT(i)))
1240 continue;
1241
1242 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
1243 if (!dimm) {
1244 edac_dbg(0, "No allocated DIMM for channel %d\n", i);
1245 continue;
1246 }
1247
1248 d = &drp0[i];
1249 for (g = 0; g < ARRAY_SIZE(dimms); g++)
1250 if (dimms[g].addrdec == d->addrdec &&
1251 dimms[g].dden == d->dden &&
1252 dimms[g].dwid == d->dwid)
1253 break;
1254
1255 if (g == ARRAY_SIZE(dimms)) {
1256 edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
1257 continue;
1258 }
1259
1260 pvt->dimm_geom[i] = g;
1261 capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
1262 (1ul << dimms[g].colbits);
1263 edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
1264 dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1265 dimm->grain = 32;
1266 dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
1267 dimm->mtype = MEM_DDR3;
1268 dimm->edac_mode = EDAC_SECDED;
1269 snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
1270 }
1271 }
1272
/*
 * Map the dunit "dimmdwid" device-width encoding onto EDAC device
 * widths (indexed by d->dimmdwid0/1 in dnv_get_dimm_config()).
 */
static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};
1276
dnv_get_dimm_config(struct mem_ctl_info * mci)1277 static void dnv_get_dimm_config(struct mem_ctl_info *mci)
1278 {
1279 int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
1280 struct dimm_info *dimm;
1281 struct d_cr_drp *d;
1282 u64 capacity;
1283
1284 if (dsch.ddr4en) {
1285 memtype = MEM_DDR4;
1286 banks = 16;
1287 colbits = 10;
1288 } else {
1289 memtype = MEM_DDR3;
1290 banks = 8;
1291 }
1292
1293 for (i = 0; i < DNV_NUM_CHANNELS; i++) {
1294 if (dmap4[i].row14 == 31)
1295 rowbits = 14;
1296 else if (dmap4[i].row15 == 31)
1297 rowbits = 15;
1298 else if (dmap4[i].row16 == 31)
1299 rowbits = 16;
1300 else if (dmap4[i].row17 == 31)
1301 rowbits = 17;
1302 else
1303 rowbits = 18;
1304
1305 if (memtype == MEM_DDR3) {
1306 if (dmap1[i].ca11 != 0x3f)
1307 colbits = 12;
1308 else
1309 colbits = 10;
1310 }
1311
1312 d = &drp[i];
1313 /* DIMM0 is present if rank0 and/or rank1 is enabled */
1314 ranks_of_dimm[0] = d->rken0 + d->rken1;
1315 /* DIMM1 is present if rank2 and/or rank3 is enabled */
1316 ranks_of_dimm[1] = d->rken2 + d->rken3;
1317
1318 for (j = 0; j < DNV_MAX_DIMMS; j++) {
1319 if (!ranks_of_dimm[j])
1320 continue;
1321
1322 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
1323 if (!dimm) {
1324 edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
1325 continue;
1326 }
1327
1328 capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
1329 edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
1330 dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1331 dimm->grain = 32;
1332 dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
1333 dimm->mtype = memtype;
1334 dimm->edac_mode = EDAC_SECDED;
1335 snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
1336 }
1337 }
1338 }
1339
pnd2_register_mci(struct mem_ctl_info ** ppmci)1340 static int pnd2_register_mci(struct mem_ctl_info **ppmci)
1341 {
1342 struct edac_mc_layer layers[2];
1343 struct mem_ctl_info *mci;
1344 struct pnd2_pvt *pvt;
1345 int rc;
1346
1347 rc = ops->check_ecc();
1348 if (rc < 0)
1349 return rc;
1350
1351 /* Allocate a new MC control structure */
1352 layers[0].type = EDAC_MC_LAYER_CHANNEL;
1353 layers[0].size = ops->channels;
1354 layers[0].is_virt_csrow = false;
1355 layers[1].type = EDAC_MC_LAYER_SLOT;
1356 layers[1].size = ops->dimms_per_channel;
1357 layers[1].is_virt_csrow = true;
1358 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1359 if (!mci)
1360 return -ENOMEM;
1361
1362 pvt = mci->pvt_info;
1363 memset(pvt, 0, sizeof(*pvt));
1364
1365 mci->mod_name = EDAC_MOD_STR;
1366 mci->dev_name = ops->name;
1367 mci->ctl_name = "Pondicherry2";
1368
1369 /* Get dimm basic config and the memory layout */
1370 ops->get_dimm_config(mci);
1371
1372 if (edac_mc_add_mc(mci)) {
1373 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1374 edac_mc_free(mci);
1375 return -EINVAL;
1376 }
1377
1378 *ppmci = mci;
1379
1380 return 0;
1381 }
1382
pnd2_unregister_mci(struct mem_ctl_info * mci)1383 static void pnd2_unregister_mci(struct mem_ctl_info *mci)
1384 {
1385 if (unlikely(!mci || !mci->pvt_info)) {
1386 pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
1387 return;
1388 }
1389
1390 /* Remove MC sysfs nodes */
1391 edac_mc_del_mc(NULL);
1392 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
1393 edac_mc_free(mci);
1394 }
1395
1396 /*
1397 * Callback function registered with core kernel mce code.
1398 * Called once for each logged error.
1399 */
pnd2_mce_check_error(struct notifier_block * nb,unsigned long val,void * data)1400 static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
1401 {
1402 struct mce *mce = (struct mce *)data;
1403 struct mem_ctl_info *mci;
1404 struct dram_addr daddr;
1405 char *type;
1406
1407 if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
1408 return NOTIFY_DONE;
1409
1410 mci = pnd2_mci;
1411 if (!mci)
1412 return NOTIFY_DONE;
1413
1414 /*
1415 * Just let mcelog handle it if the error is
1416 * outside the memory controller. A memory error
1417 * is indicated by bit 7 = 1 and bits = 8-11,13-15 = 0.
1418 * bit 12 has an special meaning.
1419 */
1420 if ((mce->status & 0xefff) >> 7 != 1)
1421 return NOTIFY_DONE;
1422
1423 if (mce->mcgstatus & MCG_STATUS_MCIP)
1424 type = "Exception";
1425 else
1426 type = "Event";
1427
1428 pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
1429 pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
1430 mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
1431 pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
1432 pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
1433 pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
1434 pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
1435 mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);
1436
1437 pnd2_mce_output_error(mci, mce, &daddr);
1438
1439 /* Advice mcelog that the error were handled */
1440 return NOTIFY_STOP;
1441 }
1442
/* Hook into the kernel's machine-check decode chain */
static struct notifier_block pnd2_mce_dec = {
	.notifier_call = pnd2_mce_check_error,
};
1446
1447 #ifdef CONFIG_EDAC_DEBUG
1448 /*
1449 * Write an address to this file to exercise the address decode
1450 * logic in this driver.
1451 */
/* Fake system address written via debugfs to drive the decoder */
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
/* Human-readable result of the last fake decode */
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};
1460
debugfs_u64_set(void * data,u64 val)1461 static int debugfs_u64_set(void *data, u64 val)
1462 {
1463 struct dram_addr daddr;
1464 struct mce m;
1465
1466 *(u64 *)data = val;
1467 m.mcgstatus = 0;
1468 /* ADDRV + MemRd + Unknown channel */
1469 m.status = MCI_STATUS_ADDRV + 0x9f;
1470 m.addr = val;
1471 pnd2_mce_output_error(pnd2_mci, &m, &daddr);
1472 snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
1473 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1474 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
1475 pnd2_blob.size = strlen(pnd2_blob.data);
1476
1477 return 0;
1478 }
1479 DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
1480
/* Create debugfs nodes: write an address to ..._addr, read ..._results */
static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
				 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}
1488
/* Remove the pnd2_test debugfs directory and everything under it */
static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
#else
/* No-op stubs when EDAC debugfs support is compiled out */
static void setup_pnd2_debug(void) {}
static void teardown_pnd2_debug(void) {}
#endif /* CONFIG_EDAC_DEBUG */
1497
1498
pnd2_probe(void)1499 static int pnd2_probe(void)
1500 {
1501 int rc;
1502
1503 edac_dbg(2, "\n");
1504 rc = get_registers();
1505 if (rc)
1506 return rc;
1507
1508 return pnd2_register_mci(&pnd2_mci);
1509 }
1510
/* Undo pnd2_probe(): unregister and free the MC device */
static void pnd2_remove(void)
{
	edac_dbg(0, "\n");
	pnd2_unregister_mci(pnd2_mci);
}
1516
/*
 * Apollo Lake dunit: 4 channels, 1 DIMM each. PMI addresses carry a
 * granularity shift; the PMI channel index maps 1:1 to the DIMM channel.
 */
static struct dunit_ops apl_ops = {
	.name = "pnd2/apl",
	.type = APL,
	.pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY,
	.pmiidx_shift = 0,
	.channels = APL_NUM_CHANNELS,
	.dimms_per_channel = 1,
	.rd_reg = apl_rd_reg,
	.get_registers = apl_get_registers,
	.check_ecc = apl_check_ecc_active,
	.mk_region = apl_mk_region,
	.get_dimm_config = apl_get_dimm_config,
	.pmi2mem = apl_pmi2mem,
};
1531
/*
 * Denverton dunit: 2 channels, up to 2 DIMMs each. The PMI index is
 * shifted down by 1 to get the DIMM channel (all traffic goes through
 * PMI CH0 on each slice — see the enum type comment at the top).
 */
static struct dunit_ops dnv_ops = {
	.name = "pnd2/dnv",
	.type = DNV,
	.pmiaddr_shift = 0,
	.pmiidx_shift = 1,
	.channels = DNV_NUM_CHANNELS,
	.dimms_per_channel = 2,
	.rd_reg = dnv_rd_reg,
	.get_registers = dnv_get_registers,
	.check_ecc = dnv_check_ecc_active,
	.mk_region = dnv_mk_region,
	.get_dimm_config = dnv_get_dimm_config,
	.pmi2mem = dnv_pmi2mem,
};
1546
/* Bind the APL/DNV dunit ops to the matching Atom CPU models */
static const struct x86_cpu_id pnd2_cpuids[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X, 0, (kernel_ulong_t)&dnv_ops },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
1553
pnd2_init(void)1554 static int __init pnd2_init(void)
1555 {
1556 const struct x86_cpu_id *id;
1557 const char *owner;
1558 int rc;
1559
1560 edac_dbg(2, "\n");
1561
1562 owner = edac_get_owner();
1563 if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
1564 return -EBUSY;
1565
1566 id = x86_match_cpu(pnd2_cpuids);
1567 if (!id)
1568 return -ENODEV;
1569
1570 ops = (struct dunit_ops *)id->driver_data;
1571
1572 if (ops->type == APL) {
1573 p2sb_bus = pci_find_bus(0, 0);
1574 if (!p2sb_bus)
1575 return -ENODEV;
1576 }
1577
1578 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1579 opstate_init();
1580
1581 rc = pnd2_probe();
1582 if (rc < 0) {
1583 pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
1584 return rc;
1585 }
1586
1587 if (!pnd2_mci)
1588 return -ENODEV;
1589
1590 mce_register_decode_chain(&pnd2_mce_dec);
1591 setup_pnd2_debug();
1592
1593 return 0;
1594 }
1595
/* Module exit: unhook in reverse order of pnd2_init() */
static void __exit pnd2_exit(void)
{
	edac_dbg(2, "\n");
	teardown_pnd2_debug();
	/* Stop receiving MCEs before tearing down the MC device */
	mce_unregister_decode_chain(&pnd2_mce_dec);
	pnd2_remove();
}
1603
/* Module registration and metadata */
module_init(pnd2_init);
module_exit(pnd2_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");
1613