
/*
 * edac_device.c
 * (C) 2007 www.douglaskthompson.com
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Doug Thompson <norsk5@xmission.com>
 *
 * edac_device API implementation
 * 19 Jan 2007
 */

#include <asm/page.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/timer.h>

#include "edac_device.h"
#include "edac_module.h"

/* The 'edac_device_list' below is protected by the 'device_ctls_mutex' lock */
static DEFINE_MUTEX(device_ctls_mutex);
static LIST_HEAD(edac_device_list);

/* Default workqueue processing interval on this instance, in msecs */
#define DEFAULT_POLL_INTERVAL 1000

#ifdef CONFIG_EDAC_DEBUG
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
	edac_dbg(3, "\tedac_dev = %p dev_idx=%d\n",
		 edac_dev, edac_dev->dev_idx);
	edac_dbg(4, "\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
	edac_dbg(3, "\tdev = %p\n", edac_dev->dev);
	edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
		 edac_dev->mod_name, edac_dev->ctl_name);
	edac_dbg(3, "\tpvt_info = %p\n\n", edac_dev->pvt_info);
}
#endif				/* CONFIG_EDAC_DEBUG */

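/*
 * Usage sketch (illustrative only, not taken from an in-tree driver):
 * an edac_device driver typically allocates and registers a control
 * structure from its probe routine roughly as below.  The names
 * my_edac_probe(), my_edac_check(), "l2cache" and nr_l2_banks are
 * hypothetical.
 *
 *	static int my_edac_probe(struct platform_device *pdev)
 *	{
 *		struct edac_device_ctl_info *edac_dev;
 *
 *		edac_dev = edac_device_alloc_ctl_info(0, "cpu", 1,
 *						"l2cache", nr_l2_banks, 0,
 *						NULL, 0,
 *						edac_device_alloc_index());
 *		if (!edac_dev)
 *			return -ENOMEM;
 *
 *		edac_dev->dev = &pdev->dev;
 *		edac_dev->mod_name = "my_edac";
 *		edac_dev->ctl_name = "l2cache_err";
 *		edac_dev->dev_name = dev_name(&pdev->dev);
 *		edac_dev->edac_check = my_edac_check;	(polled check routine)
 *
 *		if (edac_device_add_device(edac_dev)) {
 *			edac_device_free_ctl_info(edac_dev);
 *			return -ENODEV;
 *		}
 *		return 0;
 *	}
 */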
struct edac_device_ctl_info *edac_device_alloc_ctl_info(
	unsigned sz_private,
	char *edac_device_name, unsigned nr_instances,
	char *edac_block_name, unsigned nr_blocks,
	unsigned offset_value,		/* zero, 1, or other based offset */
	struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib,
	int device_index)
{
	struct edac_device_ctl_info *dev_ctl;
	struct edac_device_instance *dev_inst, *inst;
	struct edac_device_block *dev_blk, *blk_p, *blk;
	struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
	unsigned total_size;
	unsigned count;
	unsigned instance, block, attr;
	void *pvt, *p;
	int err;

	edac_dbg(4, "instances=%d blocks=%d\n", nr_instances, nr_blocks);

	/* Calculate the size of memory we need to allocate AND
	 * determine the offsets of the various item arrays
	 * (instance, block, attrib) from the start of an allocated structure.
	 * We want the alignment of each item (instance, block, attrib)
	 * to be at least as stringent as what the compiler would
	 * provide if we could simply hardcode everything into a single struct.
	 */
	p = NULL;
	dev_ctl = edac_align_ptr(&p, sizeof(*dev_ctl), 1);

	/* Calc the 'end' offset past end of ONE ctl_info structure
	 * which will become the start of the 'instance' array
	 */
	dev_inst = edac_align_ptr(&p, sizeof(*dev_inst), nr_instances);

	/* Calc the 'end' offset past the instance array within the ctl_info
	 * which will become the start of the block array
	 */
	count = nr_instances * nr_blocks;
	dev_blk = edac_align_ptr(&p, sizeof(*dev_blk), count);

	/* Calc the 'end' offset past the dev_blk array
	 * which will become the start of the attrib array, if any.
	 */
	/* calc how many nr_attrib we need */
	if (nr_attrib > 0)
		count *= nr_attrib;
	dev_attrib = edac_align_ptr(&p, sizeof(*dev_attrib), count);

	/* Calc the 'end' offset past the attributes array */
	pvt = edac_align_ptr(&p, sz_private, 1);

	/* 'pvt' now points to where the private data area is.
	 * At this point 'pvt' (like dev_inst, dev_blk and dev_attrib)
	 * is baselined at ZERO
	 */
	total_size = ((unsigned long)pvt) + sz_private;
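
	/*
	 * Layout sketch (purely illustrative): for e.g. one instance with
	 * four blocks, the zero-based offsets computed above describe one
	 * contiguous region of total_size bytes laid out roughly as
	 *
	 *	[ ctl_info ][ instances ][ blocks ][ attribs ][ private ]
	 *	^0          ^dev_inst    ^dev_blk  ^dev_attrib ^pvt
	 *
	 * with alignment padding inserted by edac_align_ptr() between
	 * the arrays as needed.
	 */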

	/* Allocate the amount of memory for the set of control structures */
	dev_ctl = kzalloc(total_size, GFP_KERNEL);
	if (dev_ctl == NULL)
		return NULL;

	/* Adjust pointers so they point within the actual memory we
	 * just allocated rather than an imaginary chunk of memory
	 * located at address 0.
	 * 'dev_ctl' points to REAL memory, while the others are
	 * ZERO based and thus need to be adjusted to point within
	 * the allocated memory.
	 */
	dev_inst = (struct edac_device_instance *)
		(((char *)dev_ctl) + ((unsigned long)dev_inst));
	dev_blk = (struct edac_device_block *)
		(((char *)dev_ctl) + ((unsigned long)dev_blk));
	dev_attrib = (struct edac_dev_sysfs_block_attribute *)
		(((char *)dev_ctl) + ((unsigned long)dev_attrib));
	pvt = sz_private ? (((char *)dev_ctl) + ((unsigned long)pvt)) : NULL;

	/* Begin storing the information into the control info structure */
	dev_ctl->dev_idx = device_index;
	dev_ctl->nr_instances = nr_instances;
	dev_ctl->instances = dev_inst;
	dev_ctl->pvt_info = pvt;

	/* Default logging of CEs and UEs */
	dev_ctl->log_ce = 1;
	dev_ctl->log_ue = 1;

	/* Name of this edac device */
	snprintf(dev_ctl->name, sizeof(dev_ctl->name), "%s", edac_device_name);

	edac_dbg(4, "edac_dev=%p next after end=%p\n",
		 dev_ctl, pvt + sz_private);

	/* Initialize every Instance */
	for (instance = 0; instance < nr_instances; instance++) {
		inst = &dev_inst[instance];
		inst->ctl = dev_ctl;
		inst->nr_blocks = nr_blocks;
		blk_p = &dev_blk[instance * nr_blocks];
		inst->blocks = blk_p;

		/* name of this instance */
		snprintf(inst->name, sizeof(inst->name),
			 "%s%u", edac_device_name, instance);

		/* Initialize every block in each instance */
		for (block = 0; block < nr_blocks; block++) {
			blk = &blk_p[block];
			blk->instance = inst;
			snprintf(blk->name, sizeof(blk->name),
				 "%s%d", edac_block_name, block + offset_value);

			edac_dbg(4, "instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n",
				 instance, inst, block, blk, blk->name);

			/* if there are NO attributes OR no attribute pointer
			 * then continue on to next block iteration
			 */
			if ((nr_attrib == 0) || (attrib_spec == NULL))
				continue;

			/* setup the attribute array for this block */
			blk->nr_attribs = nr_attrib;
			attrib_p = &dev_attrib[block * nr_instances * nr_attrib];
			blk->block_attributes = attrib_p;

			edac_dbg(4, "THIS BLOCK_ATTRIB=%p\n",
				 blk->block_attributes);

			/* Initialize every user specified attribute in this
			 * block with the data the caller passed in
			 * Each block gets its own copy of pointers,
			 * and its unique 'value'
			 */
			for (attr = 0; attr < nr_attrib; attr++) {
				attrib = &attrib_p[attr];

				/* populate the unique per attrib
				 * with the code pointers and info
				 */
				attrib->attr = attrib_spec[attr].attr;
				attrib->show = attrib_spec[attr].show;
				attrib->store = attrib_spec[attr].store;

				attrib->block = blk;	/* up link */

				edac_dbg(4, "alloc-attrib=%p attrib_name='%s' attrib-spec=%p spec-name=%s\n",
					 attrib, attrib->attr.name,
					 &attrib_spec[attr],
					 attrib_spec[attr].attr.name
					);
			}
		}
	}

	/* Mark this instance as merely ALLOCATED */
	dev_ctl->op_state = OP_ALLOC;

	/*
	 * Initialize the 'root' kobj for the edac_device controller
	 */
	err = edac_device_register_sysfs_main_kobj(dev_ctl);
	if (err) {
		kfree(dev_ctl);
		return NULL;
	}

	/* At this point the root kobj is valid; to free the object,
	 * edac_device_unregister_sysfs_main_kobj() must be called.
	 * It performs the kobj unregistration, and the actual free
	 * happens later, in the kobject release callback.
	 */

	return dev_ctl;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);

void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
{
	edac_device_unregister_sysfs_main_kobj(ctl_info);
}
EXPORT_SYMBOL_GPL(edac_device_free_ctl_info);

/*
 * find_edac_device_by_dev
 *	scans the edac_device list for a specific 'struct device *'
 *
 *	lock to be held prior to call:	device_ctls_mutex
 *
 *	Return:
 *		pointer to control structure managing 'dev'
 *		NULL if not found on list
 */
static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
{
	struct edac_device_ctl_info *edac_dev;
	struct list_head *item;

	edac_dbg(0, "\n");

	list_for_each(item, &edac_device_list) {
		edac_dev = list_entry(item, struct edac_device_ctl_info, link);

		if (edac_dev->dev == dev)
			return edac_dev;
	}

	return NULL;
}

/*
 * add_edac_dev_to_global_list
 *	Before calling this function, caller must
 *	assign a unique value to edac_dev->dev_idx.
 *
 *	lock to be held prior to call:	device_ctls_mutex
 *
 *	Return:
 *		0 on success
 *		1 on failure.
 */
static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
{
	struct list_head *item, *insert_before;
	struct edac_device_ctl_info *rover;

	insert_before = &edac_device_list;

	/* Determine if already on the list */
	rover = find_edac_device_by_dev(edac_dev->dev);
	if (unlikely(rover != NULL))
		goto fail0;

	/* Insert in ascending order by 'dev_idx', so find position */
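	/* (For example, if devices with dev_idx 0 and 2 are already on the
	 * list, a new device with dev_idx 1 is inserted in front of the
	 * idx-2 entry; an already-used dev_idx is rejected below.)
	 */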
	list_for_each(item, &edac_device_list) {
		rover = list_entry(item, struct edac_device_ctl_info, link);

		if (rover->dev_idx >= edac_dev->dev_idx) {
			if (unlikely(rover->dev_idx == edac_dev->dev_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&edac_dev->link, insert_before);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
			"%s (%s) %s %s already assigned %d\n",
			dev_name(rover->dev), edac_dev_name(rover),
			rover->mod_name, rover->ctl_name, rover->dev_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
			"bug in low-level driver: attempt to assign\n"
			"    duplicate dev_idx %d in %s()\n", rover->dev_idx,
			__func__);
	return 1;
}

/*
 * del_edac_device_from_global_list
 */
static void del_edac_device_from_global_list(struct edac_device_ctl_info
						*edac_device)
{
	list_del_rcu(&edac_device->link);

	/* these are for safe removal of devices from global list while
	 * NMI handlers may be traversing list
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&edac_device->link);
}

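/*
 * As the comment in del_edac_device_from_global_list() notes, lockless
 * readers (e.g. NMI handlers) may be walking the list concurrently.  Such
 * a reader would be expected to traverse it under RCU, roughly (sketch,
 * not code from this file):
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(edac_dev, &edac_device_list, link)
 *		do_something(edac_dev);
 *	rcu_read_unlock();
 *
 * which is what the list_del_rcu()/synchronize_rcu() pairing above
 * protects against.
 */
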
/*
 * edac_device_workq_function
 *	performs the operation scheduled by a workq request
 *
 *	this workq is embedded within an edac_device_ctl_info
 *	structure that needs to be polled for possible error events.
 *
 *	This operation is to acquire the list mutex lock
 *	(thus preventing insertion or deletion)
 *	and then call the device's poll function IFF this device is
 *	running polled and there is a poll function defined.
 */
static void edac_device_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);

	mutex_lock(&device_ctls_mutex);

	/* If we are being removed, bail out immediately */
	if (edac_dev->op_state == OP_OFFLINE) {
		mutex_unlock(&device_ctls_mutex);
		return;
	}

	/* Only poll controllers that are running polled and have a check */
	if ((edac_dev->op_state == OP_RUNNING_POLL) &&
	    (edac_dev->edac_check != NULL))
		edac_dev->edac_check(edac_dev);

	mutex_unlock(&device_ctls_mutex);

	/* Reschedule the workq for the next time period.  If the poll
	 * interval is one second, round to the next whole second so
	 * that the timers of all polled devices fire together instead
	 * of being scattered across the period between integral seconds.
	 */
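	/* (e.g. with the default 1000 ms interval and a hypothetical HZ
	 * of 250, 'delay' is 250 jiffies and round_jiffies_relative()
	 * aligns the expiry to a whole-second boundary, so the polling
	 * work of many devices tends to fire together)
	 */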
	if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
		edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
	else
		edac_queue_work(&edac_dev->work, edac_dev->delay);
}

/*
 * edac_device_workq_setup
 *	initialize a workq item for this edac_device instance
 *	passing in the new delay period in msec
 */
static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
				    unsigned msec)
{
	edac_dbg(0, "\n");

	/* Store the arg 'msec' in the control structure for use in the
	 * time period calculation, then compute the number of jiffies
	 * it represents.
	 */
	edac_dev->poll_msec = msec;
	edac_dev->delay = msecs_to_jiffies(msec);

	INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);

	/* Optimize here for the 1 second case, which will be the normal
	 * value, to fire ON the 1 second time event.  This helps reduce
	 * all sorts of timers firing on a sub-second basis, while they
	 * are happy to fire together right on the 1 second boundary.
	 */
	if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
		edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
	else
		edac_queue_work(&edac_dev->work, edac_dev->delay);
}

/*
 * edac_device_workq_teardown
 *	stop the workq processing on this edac_dev
 */
static void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
{
	if (!edac_dev->edac_check)
		return;

	edac_dev->op_state = OP_OFFLINE;

	edac_stop_work(&edac_dev->work);
}

/*
 * edac_device_reset_delay_period
 *
 *	stop any outstanding workq request queued up at this time,
 *	since we are resetting the sleep time,
 *	then restart the workq with the new delay period
 */
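/*
 * Usage sketch (illustrative): a caller that wants to slow polling down
 * to, say, once every five seconds would do
 *
 *	edac_device_reset_delay_period(edac_dev, 5000);
 *
 * (the 5000 ms value is arbitrary and only for illustration)
 */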
void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
				    unsigned long msec)
{
	edac_dev->poll_msec = msec;
	edac_dev->delay	    = msecs_to_jiffies(msec);

	/* See comment in edac_device_workq_setup() above */
	if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
		edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
	else
		edac_mod_work(&edac_dev->work, edac_dev->delay);
}

int edac_device_alloc_index(void)
{
	static atomic_t device_indexes = ATOMIC_INIT(0);

	return atomic_inc_return(&device_indexes) - 1;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_index);

int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
{
	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_device_dump_device(edac_dev);
#endif
	mutex_lock(&device_ctls_mutex);

	if (add_edac_dev_to_global_list(edac_dev))
		goto fail0;

	/* set load time so that error rate can be tracked */
	edac_dev->start_time = jiffies;

	/* create this instance's sysfs entries */
	if (edac_device_create_sysfs(edac_dev)) {
		edac_device_printk(edac_dev, KERN_WARNING,
					"failed to create sysfs device\n");
		goto fail1;
	}

	/* If there IS a check routine, then we are running POLLED */
	if (edac_dev->edac_check != NULL) {
		/* This instance is NOW RUNNING */
		edac_dev->op_state = OP_RUNNING_POLL;

		edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL);
	} else {
		edac_dev->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_device_printk(edac_dev, KERN_INFO,
		"Giving out device to module %s controller %s: DEV %s (%s)\n",
		edac_dev->mod_name, edac_dev->ctl_name, edac_dev->dev_name,
		edac_op_state_to_string(edac_dev->op_state));

	mutex_unlock(&device_ctls_mutex);
	return 0;

fail1:
	/* Some error, so remove the entry from the list */
	del_edac_device_from_global_list(edac_dev);

fail0:
	mutex_unlock(&device_ctls_mutex);
	return 1;
}
EXPORT_SYMBOL_GPL(edac_device_add_device);

struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
{
	struct edac_device_ctl_info *edac_dev;

	edac_dbg(0, "\n");

	mutex_lock(&device_ctls_mutex);

	/* Find the structure on the list, if not there, then leave */
	edac_dev = find_edac_device_by_dev(dev);
	if (edac_dev == NULL) {
		mutex_unlock(&device_ctls_mutex);
		return NULL;
	}

	/* mark this instance as OFFLINE */
	edac_dev->op_state = OP_OFFLINE;

	/* deregister from global list */
	del_edac_device_from_global_list(edac_dev);

	mutex_unlock(&device_ctls_mutex);

	/* clear workq processing on this instance */
	edac_device_workq_teardown(edac_dev);

	/* Tear down the sysfs entries for this instance */
	edac_device_remove_sysfs(edac_dev);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n",
		edac_dev->dev_idx,
		edac_dev->mod_name, edac_dev->ctl_name, edac_dev_name(edac_dev));

	return edac_dev;
}
EXPORT_SYMBOL_GPL(edac_device_del_device);

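/*
 * Teardown sketch (illustrative): the remove path of the hypothetical
 * driver shown above edac_device_alloc_ctl_info() would undo the
 * registration and then release the control structure:
 *
 *	static int my_edac_remove(struct platform_device *pdev)
 *	{
 *		struct edac_device_ctl_info *edac_dev;
 *
 *		edac_dev = edac_device_del_device(&pdev->dev);
 *		if (edac_dev)
 *			edac_device_free_ctl_info(edac_dev);
 *		return 0;
 *	}
 */
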
static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev)
{
	return edac_dev->log_ce;
}

static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev)
{
	return edac_dev->log_ue;
}

static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
					*edac_dev)
{
	return edac_dev->panic_on_ue;
}

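/*
 * Reporting sketch (illustrative): a driver's polled check routine, such
 * as the hypothetical my_edac_check() referenced earlier, reads its error
 * status and reports per instance/block.  MY_CE_BIT, MY_UE_BIT and
 * my_read_error_status() are made-up names:
 *
 *	static void my_edac_check(struct edac_device_ctl_info *edac_dev)
 *	{
 *		u32 status = my_read_error_status();
 *
 *		if (status & MY_CE_BIT)
 *			edac_device_handle_ce(edac_dev, 0, 0,
 *						"single-bit error");
 *		if (status & MY_UE_BIT)
 *			edac_device_handle_ue(edac_dev, 0, 0,
 *						"double-bit error");
 *	}
 */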
void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
			int inst_nr, int block_nr, const char *msg)
{
	struct edac_device_instance *instance;
	struct edac_device_block *block = NULL;

	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: 'instance' out of range "
				"(%d >= %d)\n", inst_nr,
				edac_dev->nr_instances);
		return;
	}

	instance = edac_dev->instances + inst_nr;

	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: instance %d 'block' "
				"out of range (%d >= %d)\n",
				inst_nr, block_nr,
				instance->nr_blocks);
		return;
	}

	if (instance->nr_blocks > 0) {
		block = instance->blocks + block_nr;
		block->counters.ce_count++;
	}

	/* Propagate the count up the 'totals' tree */
	instance->counters.ce_count++;
	edac_dev->counters.ce_count++;

	if (edac_device_get_log_ce(edac_dev))
		edac_device_printk(edac_dev, KERN_WARNING,
				"CE: %s instance: %s block: %s '%s'\n",
				edac_dev->ctl_name, instance->name,
				block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ce);

void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
			int inst_nr, int block_nr, const char *msg)
{
	struct edac_device_instance *instance;
	struct edac_device_block *block = NULL;

	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: 'instance' out of range "
				"(%d >= %d)\n", inst_nr,
				edac_dev->nr_instances);
		return;
	}

	instance = edac_dev->instances + inst_nr;

	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: instance %d 'block' "
				"out of range (%d >= %d)\n",
				inst_nr, block_nr,
				instance->nr_blocks);
		return;
	}

	if (instance->nr_blocks > 0) {
		block = instance->blocks + block_nr;
		block->counters.ue_count++;
	}

	/* Propagate the count up the 'totals' tree */
	instance->counters.ue_count++;
	edac_dev->counters.ue_count++;

	if (edac_device_get_log_ue(edac_dev))
		edac_device_printk(edac_dev, KERN_EMERG,
				"UE: %s instance: %s block: %s '%s'\n",
				edac_dev->ctl_name, instance->name,
				block ? block->name : "N/A", msg);

	if (edac_device_get_panic_on_ue(edac_dev))
		panic("EDAC %s: UE instance: %s block %s '%s'\n",
			edac_dev->ctl_name, instance->name,
			block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ue);