This patch fixes some remnant spaces inserted by the use of Lindent.
It seems Lindent adds spaces where it shouldn't; these have been fixed.
In addition, goto targets were left indented by a leading space; these
have been moved back to column zero in this patch.
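Both fixes are mechanical: continuation lines that Lindent padded with
extra spaces are re-aligned, and error labels regain their usual kernel
placement. A minimal sketch of the intended label form, taken from the
amd76x probe path in the hunks below:

	fail:				/* label starts at column zero */
		edac_mc_free(mci);
		return -ENODEV;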
Signed-off-by: Douglas Thompson <dougthompson@xmission.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
static const struct amd76x_dev_info amd76x_devs[] = {
[AMD761] = {
- .ctl_name = "AMD761"},
+ .ctl_name = "AMD761"},
[AMD762] = {
- .ctl_name = "AMD762"},
+ .ctl_name = "AMD762"},
};
static struct edac_pci_ctl_info *amd76x_pci;
* on the chip so that further errors will be reported
*/
static void amd76x_get_error_info(struct mem_ctl_info *mci,
- struct amd76x_error_info *info)
+ struct amd76x_error_info *info)
{
struct pci_dev *pdev;
pdev = to_pci_dev(mci->dev);
pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS,
- &info->ecc_mode_status);
+ &info->ecc_mode_status);
if (info->ecc_mode_status & BIT(8))
pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS,
* then attempt to handle and clean up after the error
*/
static int amd76x_process_error_info(struct mem_ctl_info *mci,
- struct amd76x_error_info *info,
- int handle_errors)
+ struct amd76x_error_info *info,
+ int handle_errors)
{
int error_found;
u32 row;
if (handle_errors) {
row = (info->ecc_mode_status >> 4) & 0xf;
edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0,
- row, mci->ctl_name);
+ row, mci->ctl_name);
}
}
if (handle_errors) {
row = info->ecc_mode_status & 0xf;
edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0,
- 0, row, 0, mci->ctl_name);
+ 0, row, 0, mci->ctl_name);
}
}
}
static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
- enum edac_type edac_mode)
+ enum edac_type edac_mode)
{
struct csrow_info *csrow;
u32 mba, mba_base, mba_mask, dms;
/* find the DRAM Chip Select Base address and mask */
pci_read_config_dword(pdev,
- AMD76X_MEM_BASE_ADDR + (index * 4), &mba);
+ AMD76X_MEM_BASE_ADDR + (index * 4), &mba);
if (!(mba & BIT(0)))
continue;
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
mci->edac_cap = ems_mode ?
- (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
+ (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = AMD76X_REVISION;
mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
debugf3("%s(): success\n", __func__);
return 0;
- fail:
+fail:
edac_mc_free(mci);
return -ENODEV;
}
/* returns count (>= 0), or negative on error */
static int __devinit amd76x_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
debugf0("%s()\n", __func__);
static const struct e752x_dev_info e752x_devs[] = {
[E7520] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
- .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
- .ctl_name = "E7520"},
+ .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
+ .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
+ .ctl_name = "E7520"},
[E7525] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
- .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
- .ctl_name = "E7525"},
+ .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
+ .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
+ .ctl_name = "E7525"},
[E7320] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
- .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
- .ctl_name = "E7320"},
+ .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
+ .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
+ .ctl_name = "E7320"},
};
static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
- unsigned long page)
+ unsigned long page)
{
u32 remap;
struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
}
static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
- u32 sec1_add, u16 sec1_syndrome)
+ u32 sec1_add, u16 sec1_syndrome)
{
u32 page;
int row;
/* chip select are bits 14 & 13 */
row = ((page >> 1) & 3);
e752x_printk(KERN_WARNING,
- "Test row %d Table %d %d %d %d %d %d %d %d\n", row,
- pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
- pvt->map[4], pvt->map[5], pvt->map[6],
- pvt->map[7]);
+ "Test row %d Table %d %d %d %d %d %d %d %d\n", row,
+ pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
+ pvt->map[4], pvt->map[5], pvt->map[6],
+ pvt->map[7]);
/* test for channel remapping */
for (i = 0; i < 8; i++) {
/* e752x mc reads 34:6 of the DRAM linear address */
edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
- sec1_syndrome, row, channel, "e752x CE");
+ sec1_syndrome, row, channel, "e752x CE");
}
static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
- u32 sec1_add, u16 sec1_syndrome, int *error_found,
- int handle_error)
+ u32 sec1_add, u16 sec1_syndrome, int *error_found,
+ int handle_error)
{
*error_found = 1;
}
static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
- u32 ded_add, u32 scrb_add)
+ u32 ded_add, u32 scrb_add)
{
u32 error_2b, block_page;
int row;
block_page = error_2b >> (PAGE_SHIFT - 4);
row = pvt->mc_symmetric ?
- /* chip select are bits 14 & 13 */
- ((block_page >> 1) & 3) :
- edac_mc_find_csrow_by_page(mci, block_page);
+ /* chip select are bits 14 & 13 */
+ ((block_page >> 1) & 3) :
+ edac_mc_find_csrow_by_page(mci, block_page);
/* e752x mc reads 34:6 of the DRAM linear address */
edac_mc_handle_ue(mci, block_page,
- offset_in_page(error_2b << 4),
- row, "e752x UE from Read");
+ offset_in_page(error_2b << 4),
+ row, "e752x UE from Read");
}
if (error_one & 0x0404) {
error_2b = scrb_add;
block_page = error_2b >> (PAGE_SHIFT - 4);
row = pvt->mc_symmetric ?
- /* chip select are bits 14 & 13 */
- ((block_page >> 1) & 3) :
- edac_mc_find_csrow_by_page(mci, block_page);
+ /* chip select are bits 14 & 13 */
+ ((block_page >> 1) & 3) :
+ edac_mc_find_csrow_by_page(mci, block_page);
/* e752x mc reads 34:6 of the DRAM linear address */
edac_mc_handle_ue(mci, block_page,
- offset_in_page(error_2b << 4),
- row, "e752x UE from Scruber");
+ offset_in_page(error_2b << 4),
+ row, "e752x UE from Scruber");
}
}
static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
- u32 ded_add, u32 scrb_add, int *error_found,
- int handle_error)
+ u32 ded_add, u32 scrb_add, int *error_found,
+ int handle_error)
{
*error_found = 1;
error_1b = retry_add;
page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
row = pvt->mc_symmetric ? ((page >> 1) & 3) : /* chip select are bits 14 & 13 */
- edac_mc_find_csrow_by_page(mci, page);
+ edac_mc_find_csrow_by_page(mci, page);
e752x_mc_printk(mci, KERN_WARNING,
"CE page 0x%lx, row %d : Memory read retry\n",
(long unsigned int)page, row);
}
static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
- u32 retry_add, int *error_found,
- int handle_error)
+ u32 retry_add, int *error_found,
+ int handle_error)
{
*error_found = 1;
for (i = 0; i < 11; i++) {
if (errors & (1 << i))
e752x_printk(KERN_WARNING, "%sError %s\n",
- fatal_message[fatal], global_message[i]);
+ fatal_message[fatal], global_message[i]);
}
}
for (i = 0; i < 7; i++) {
if (errors & (1 << i))
e752x_printk(KERN_WARNING, "%sError %s\n",
- fatal_message[fatal], hub_message[i]);
+ fatal_message[fatal], hub_message[i]);
}
}
static inline void hub_error(int fatal, u8 errors, int *error_found,
- int handle_error)
+ int handle_error)
{
*error_found = 1;
for (i = 0; i < 4; i++) {
if (errors & (1 << i))
e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
- membuf_message[i]);
+ membuf_message[i]);
}
}
for (i = 0; i < 10; i++) {
if (errors & (1 << i))
e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
- fatal_message[fatal], sysbus_message[i]);
+ fatal_message[fatal], sysbus_message[i]);
}
}
}
static void e752x_check_hub_interface(struct e752x_error_info *info,
- int *error_found, int handle_error)
+ int *error_found, int handle_error)
{
u8 stat8;
}
static void e752x_check_sysbus(struct e752x_error_info *info,
- int *error_found, int handle_error)
+ int *error_found, int handle_error)
{
u32 stat32, error32;
}
static void e752x_check_membuf(struct e752x_error_info *info,
- int *error_found, int handle_error)
+ int *error_found, int handle_error)
{
u8 stat8;
}
static void e752x_check_dram(struct mem_ctl_info *mci,
- struct e752x_error_info *info, int *error_found,
- int handle_error)
+ struct e752x_error_info *info, int *error_found,
+ int handle_error)
{
u16 error_one, error_next;
/* decode and report errors */
if (error_one & 0x0101) /* check first error correctable */
process_ce(mci, error_one, info->dram_sec1_add,
- info->dram_sec1_syndrome, error_found, handle_error);
+ info->dram_sec1_syndrome, error_found, handle_error);
if (error_next & 0x0101) /* check next error correctable */
process_ce(mci, error_next, info->dram_sec2_add,
- info->dram_sec2_syndrome, error_found, handle_error);
+ info->dram_sec2_syndrome, error_found, handle_error);
if (error_one & 0x4040)
process_ue_no_info_wr(mci, error_found, handle_error);
if (error_one & 0x2020)
process_ded_retry(mci, error_one, info->dram_retr_add,
- error_found, handle_error);
+ error_found, handle_error);
if (error_next & 0x2020)
process_ded_retry(mci, error_next, info->dram_retr_add,
- error_found, handle_error);
+ error_found, handle_error);
if (error_one & 0x0808)
process_threshold_ce(mci, error_one, error_found, handle_error);
if (error_next & 0x0808)
process_threshold_ce(mci, error_next, error_found,
- handle_error);
+ handle_error);
if (error_one & 0x0606)
process_ue(mci, error_one, info->dram_ded_add,
- info->dram_scrb_add, error_found, handle_error);
+ info->dram_scrb_add, error_found, handle_error);
if (error_next & 0x0606)
process_ue(mci, error_next, info->dram_ded_add,
- info->dram_scrb_add, error_found, handle_error);
+ info->dram_scrb_add, error_found, handle_error);
}
static void e752x_get_error_info(struct mem_ctl_info *mci,
if (info->ferr_global) {
pci_read_config_byte(dev, E752X_HI_FERR, &info->hi_ferr);
pci_read_config_word(dev, E752X_SYSBUS_FERR,
- &info->sysbus_ferr);
+ &info->sysbus_ferr);
pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
- &info->dram_sec1_add);
+ &info->dram_sec1_add);
pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
- &info->dram_sec1_syndrome);
+ &info->dram_sec1_syndrome);
pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
- &info->dram_ded_add);
+ &info->dram_ded_add);
pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
- &info->dram_scrb_add);
+ &info->dram_scrb_add);
pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
- &info->dram_retr_add);
+ &info->dram_retr_add);
if (info->hi_ferr & 0x7f)
pci_write_config_byte(dev, E752X_HI_FERR,
- info->hi_ferr);
+ info->hi_ferr);
if (info->sysbus_ferr)
pci_write_config_word(dev, E752X_SYSBUS_FERR,
- info->sysbus_ferr);
+ info->sysbus_ferr);
if (info->buf_ferr & 0x0f)
pci_write_config_byte(dev, E752X_BUF_FERR,
- info->buf_ferr);
+ info->buf_ferr);
if (info->dram_ferr)
pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
info->dram_ferr, info->dram_ferr);
pci_write_config_dword(dev, E752X_FERR_GLOBAL,
- info->ferr_global);
+ info->ferr_global);
}
pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
if (info->nerr_global) {
pci_read_config_byte(dev, E752X_HI_NERR, &info->hi_nerr);
pci_read_config_word(dev, E752X_SYSBUS_NERR,
- &info->sysbus_nerr);
+ &info->sysbus_nerr);
pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
- &info->dram_sec2_add);
+ &info->dram_sec2_add);
pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
- &info->dram_sec2_syndrome);
+ &info->dram_sec2_syndrome);
if (info->hi_nerr & 0x7f)
pci_write_config_byte(dev, E752X_HI_NERR,
- info->hi_nerr);
+ info->hi_nerr);
if (info->sysbus_nerr)
pci_write_config_word(dev, E752X_SYSBUS_NERR,
- info->sysbus_nerr);
+ info->sysbus_nerr);
if (info->buf_nerr & 0x0f)
pci_write_config_byte(dev, E752X_BUF_NERR,
- info->buf_nerr);
+ info->buf_nerr);
if (info->dram_nerr)
pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
info->dram_nerr, info->dram_nerr);
pci_write_config_dword(dev, E752X_NERR_GLOBAL,
- info->nerr_global);
+ info->nerr_global);
}
}
static int e752x_process_error_info(struct mem_ctl_info *mci,
- struct e752x_error_info *info,
- int handle_errors)
+ struct e752x_error_info *info,
+ int handle_errors)
{
u32 error32, stat32;
int error_found;
}
static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
- u16 ddrcsr)
+ u16 ddrcsr)
{
struct csrow_info *csrow;
unsigned long last_cumul_size;
}
static void e752x_init_mem_map_table(struct pci_dev *pdev,
- struct e752x_pvt *pvt)
+ struct e752x_pvt *pvt)
{
int index;
u8 value, last, row, stat8;
* sided
*/
pci_read_config_byte(pdev, E752X_DRB + index + 1,
- &value);
- pvt->map[index + 1] = (value == last) ? 0xff : /* the dimm is single sided,
- so flag as empty */
- row; /* this is a double sided dimm
- to save the next row # */
+ &value);
+
+ /* the dimm is single sided, so flag as empty */
+ /* this is a double sided dimm to save the next row #*/
+ pvt->map[index + 1] = (value == last) ? 0xff : row;
row++;
last = value;
}
/* Return 0 on success or 1 on failure. */
static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
- struct e752x_pvt *pvt)
+ struct e752x_pvt *pvt)
{
struct pci_dev *dev;
if (pvt->bridge_ck == NULL) {
e752x_printk(KERN_ERR, "error reporting device not found:"
- "vendor %x device 0x%x (broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
+ "vendor %x device 0x%x (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
return 1;
}
dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev,
- NULL);
+ NULL);
if (dev == NULL)
goto fail;
return 0;
- fail:
+fail:
pci_dev_put(pvt->bridge_ck);
return 1;
}
pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
if (!force_function_unhide && !(stat8 & (1 << 5))) {
printk(KERN_INFO "Contact your BIOS vendor to see if the "
- "E752x error registers can be safely un-hidden\n");
+ "E752x error registers can be safely un-hidden\n");
return -ENOMEM;
}
stat8 |= (1 << 5);
debugf3("%s(): init mci\n", __func__);
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
- EDAC_FLAG_S4ECD4ED;
+ EDAC_FLAG_S4ECD4ED;
/* FIXME - what if different memory types are in different csrows? */
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = E752X_REVISION;
pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
pvt->remaplimit = ((u32) pci_data) << 14;
e752x_printk(KERN_INFO,
- "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
- pvt->remapbase, pvt->remaplimit);
+ "tolm = %x, remapbase = %x, remaplimit = %x\n",
+ pvt->tolm, pvt->remapbase, pvt->remaplimit);
/* Here we assume that we will never see multiple instances of this
* type of memory controller. The ID is therefore hardcoded to 0.
e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
if (!e752x_pci) {
printk(KERN_WARNING
- "%s(): Unable to create PCI control\n", __func__);
+ "%s(): Unable to create PCI control\n", __func__);
printk(KERN_WARNING
- "%s(): PCI error report via EDAC not setup\n", __func__);
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
}
/* get this far and it's successful */
debugf3("%s(): success\n", __func__);
return 0;
- fail:
+fail:
pci_dev_put(pvt->dev_d0f0);
pci_dev_put(pvt->dev_d0f1);
pci_dev_put(pvt->bridge_ck);
/* returns count (>= 0), or negative on error */
static int __devinit e752x_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
debugf0("%s()\n", __func__);
static const struct e7xxx_dev_info e7xxx_devs[] = {
[E7500] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
- .ctl_name = "E7500"},
+ .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
+ .ctl_name = "E7500"},
[E7501] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
- .ctl_name = "E7501"},
+ .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
+ .ctl_name = "E7501"},
[E7505] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
- .ctl_name = "E7505"},
+ .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
+ .ctl_name = "E7505"},
[E7205] = {
- .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
- .ctl_name = "E7205"},
+ .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
+ .ctl_name = "E7205"},
};
/* FIXME - is this valid for both SECDED and S4ECD4ED? */
}
static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
- unsigned long page)
+ unsigned long page)
{
u32 remap;
struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info;
debugf3("%s()\n", __func__);
if ((page < pvt->tolm) ||
- ((page >= 0x100000) && (page < pvt->remapbase)))
+ ((page >= 0x100000) && (page < pvt->remapbase)))
return page;
remap = (page - pvt->tolm) + pvt->remapbase;
if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
- &info->dram_celog_add);
+ &info->dram_celog_add);
pci_read_config_word(pvt->bridge_ck,
- E7XXX_DRAM_CELOG_SYNDROME,
- &info->dram_celog_syndrome);
+ E7XXX_DRAM_CELOG_SYNDROME,
+ &info->dram_celog_syndrome);
}
if ((info->dram_ferr & 2) || (info->dram_nerr & 2))
pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD,
- &info->dram_uelog_add);
+ &info->dram_uelog_add);
if (info->dram_ferr & 3)
pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);
}
static int e7xxx_process_error_info(struct mem_ctl_info *mci,
- struct e7xxx_error_info *info,
- int handle_errors)
+ struct e7xxx_error_info *info,
+ int handle_errors)
{
int error_found;
}
static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
- int dev_idx, u32 drc)
+ int dev_idx, u32 drc)
{
unsigned long last_cumul_size;
int index;
debugf3("%s(): init mci\n", __func__);
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
- EDAC_FLAG_S4ECD4ED;
+ EDAC_FLAG_S4ECD4ED;
/* FIXME - what if different memory types are in different csrows? */
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = E7XXX_REVISION;
if (!pvt->bridge_ck) {
e7xxx_printk(KERN_ERR, "error reporting device not found:"
- "vendor %x device 0x%x (broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
+ "vendor %x device 0x%x (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
goto fail0;
}
pci_read_config_word(pdev, E7XXX_REMAPLIMIT, &pci_data);
pvt->remaplimit = ((u32) pci_data) << 14;
e7xxx_printk(KERN_INFO,
- "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
- pvt->remapbase, pvt->remaplimit);
+ "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
+ pvt->remapbase, pvt->remaplimit);
/* clear any pending errors, or initial state bits */
e7xxx_get_error_info(mci, &discard);
debugf3("%s(): success\n", __func__);
return 0;
- fail1:
+fail1:
pci_dev_put(pvt->bridge_ck);
- fail0:
+fail0:
edac_mc_free(mci);
return -ENODEV;
/* returns count (>= 0), or negative on error */
static int __devinit e7xxx_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
debugf0("%s()\n", __func__);
/* wake up and enable device */
return pci_enable_device(pdev) ?
- -EIO : e7xxx_probe1(pdev, ent->driver_data);
+ -EIO : e7xxx_probe1(pdev, ent->driver_data);
}
static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
- "Based on.work by Dan Hollis et al");
+ "Based on.work by Dan Hollis et al");
MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
/* Calc the 'end' offset past the ctl_info structure */
dev_inst = (struct edac_device_instance *)
- edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));
+ edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));
/* Calc the 'end' offset past the instance array */
dev_blk = (struct edac_device_block *)
- edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));
+ edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));
/* Calc the 'end' offset past the dev_blk array */
count = nr_instances * nr_blocks;
dev_attrib = (struct edac_attrib *)
- edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));
+ edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));
/* Check for case of NO attributes specified */
if (nr_attribs > 0)
* rather than an imaginary chunk of memory located at address 0.
*/
dev_inst = (struct edac_device_instance *)
- (((char *)dev_ctl) + ((unsigned long)dev_inst));
+ (((char *)dev_ctl) + ((unsigned long)dev_inst));
dev_blk = (struct edac_device_block *)
- (((char *)dev_ctl) + ((unsigned long)dev_blk));
+ (((char *)dev_ctl) + ((unsigned long)dev_blk));
dev_attrib = (struct edac_attrib *)
- (((char *)dev_ctl) + ((unsigned long)dev_attrib));
+ (((char *)dev_ctl) + ((unsigned long)dev_attrib));
pvt = sz_private ? (((char *)dev_ctl) + ((unsigned long)pvt)) : NULL;
memset(dev_ctl, 0, total_size); /* clear all fields */
list_add_tail_rcu(&edac_dev->link, insert_before);
return 0;
- fail0:
+fail0:
edac_printk(KERN_WARNING, EDAC_MC,
- "%s (%s) %s %s already assigned %d\n",
- rover->dev->bus_id, dev_name(rover),
- rover->mod_name, rover->ctl_name, rover->dev_idx);
+ "%s (%s) %s %s already assigned %d\n",
+ rover->dev->bus_id, dev_name(rover),
+ rover->mod_name, rover->ctl_name, rover->dev_idx);
return 1;
- fail1:
+fail1:
edac_printk(KERN_WARNING, EDAC_MC,
- "bug in low-level driver: attempt to assign\n"
- " duplicate dev_idx %d in %s()\n", rover->dev_idx,
- __func__);
+ "bug in low-level driver: attempt to assign\n"
+ " duplicate dev_idx %d in %s()\n", rover->dev_idx,
+ __func__);
return 1;
}
* del_edac_device_from_global_list
*/
static void del_edac_device_from_global_list(struct edac_device_ctl_info
- *edac_device)
+ *edac_device)
{
list_del_rcu(&edac_device->link);
init_completion(&edac_device->complete);
/* Only poll controllers that are running polled and have a check */
if ((edac_dev->op_state == OP_RUNNING_POLL) &&
- (edac_dev->edac_check != NULL)) {
- edac_dev->edac_check(edac_dev);
+ (edac_dev->edac_check != NULL)) {
+ edac_dev->edac_check(edac_dev);
}
unlock_device_list();
* passing in the new delay period in msec
*/
void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
- unsigned msec)
+ unsigned msec)
{
debugf0("%s()\n", __func__);
*/
void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
- unsigned long value)
+ unsigned long value)
{
lock_device_list();
/* create this instance's sysfs entries */
if (edac_device_create_sysfs(edac_dev)) {
edac_device_printk(edac_dev, KERN_WARNING,
- "failed to create sysfs device\n");
+ "failed to create sysfs device\n");
goto fail1;
}
/* Report action taken */
edac_device_printk(edac_dev, KERN_INFO,
- "Giving out device to module '%s' controller '%s': DEV '%s' (%s)\n",
- edac_dev->mod_name,
- edac_dev->ctl_name,
- dev_name(edac_dev),
- edac_op_state_toString(edac_dev->op_state)
- );
+ "Giving out device to module '%s' controller "
+ "'%s': DEV '%s' (%s)\n",
+ edac_dev->mod_name,
+ edac_dev->ctl_name,
+ dev_name(edac_dev),
+ edac_op_state_toString(edac_dev->op_state));
unlock_device_list();
return 0;
- fail1:
+fail1:
/* Some error, so remove the entry from the lsit */
del_edac_device_from_global_list(edac_dev);
- fail0:
+fail0:
unlock_device_list();
return 1;
}
unlock_device_list();
edac_printk(KERN_INFO, EDAC_MC,
- "Removed device %d for %s %s: DEV %s\n",
- edac_dev->dev_idx,
- edac_dev->mod_name, edac_dev->ctl_name, dev_name(edac_dev));
+ "Removed device %d for %s %s: DEV %s\n",
+ edac_dev->dev_idx,
+ edac_dev->mod_name, edac_dev->ctl_name, dev_name(edac_dev));
return edac_dev;
}
}
static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
- *edac_dev)
+ *edac_dev)
{
return edac_dev->panic_on_ue;
}
* perform a common output and handling of an 'edac_dev' CE event
*/
void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
- int inst_nr, int block_nr, const char *msg)
+ int inst_nr, int block_nr, const char *msg)
{
struct edac_device_instance *instance;
struct edac_device_block *block = NULL;
if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
edac_device_printk(edac_dev, KERN_ERR,
- "INTERNAL ERROR: 'instance' out of range "
- "(%d >= %d)\n", inst_nr,
- edac_dev->nr_instances);
+ "INTERNAL ERROR: 'instance' out of range "
+ "(%d >= %d)\n", inst_nr,
+ edac_dev->nr_instances);
return;
}
if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
edac_device_printk(edac_dev, KERN_ERR,
- "INTERNAL ERROR: instance %d 'block' out of range "
- "(%d >= %d)\n", inst_nr, block_nr,
- instance->nr_blocks);
+ "INTERNAL ERROR: instance %d 'block' "
+ "out of range (%d >= %d)\n",
+ inst_nr, block_nr,
+ instance->nr_blocks);
return;
}
if (edac_device_get_log_ce(edac_dev))
edac_device_printk(edac_dev, KERN_WARNING,
- "CE: %s instance: %s block: %s '%s'\n",
- edac_dev->ctl_name, instance->name,
- block ? block->name : "N/A", msg);
+ "CE: %s instance: %s block: %s '%s'\n",
+ edac_dev->ctl_name, instance->name,
+ block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ce);
* perform a common output and handling of an 'edac_dev' UE event
*/
void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
- int inst_nr, int block_nr, const char *msg)
+ int inst_nr, int block_nr, const char *msg)
{
struct edac_device_instance *instance;
struct edac_device_block *block = NULL;
if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
edac_device_printk(edac_dev, KERN_ERR,
- "INTERNAL ERROR: 'instance' out of range "
- "(%d >= %d)\n", inst_nr,
- edac_dev->nr_instances);
+ "INTERNAL ERROR: 'instance' out of range "
+ "(%d >= %d)\n", inst_nr,
+ edac_dev->nr_instances);
return;
}
if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
edac_device_printk(edac_dev, KERN_ERR,
- "INTERNAL ERROR: instance %d 'block' out of range "
- "(%d >= %d)\n", inst_nr, block_nr,
- instance->nr_blocks);
+ "INTERNAL ERROR: instance %d 'block' "
+ "out of range (%d >= %d)\n",
+ inst_nr, block_nr,
+ instance->nr_blocks);
return;
}
if (edac_device_get_log_ue(edac_dev))
edac_device_printk(edac_dev, KERN_EMERG,
- "UE: %s instance: %s block: %s '%s'\n",
- edac_dev->ctl_name, instance->name,
- block ? block->name : "N/A", msg);
+ "UE: %s instance: %s block: %s '%s'\n",
+ edac_dev->ctl_name, instance->name,
+ block ? block->name : "N/A", msg);
if (edac_device_get_panic_on_ue(edac_dev))
panic("EDAC %s: UE instance: %s block %s '%s'\n",
- edac_dev->ctl_name, instance->name,
- block ? block->name : "N/A", msg);
+ edac_dev->ctl_name, instance->name,
+ block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ue);
/* 'log_ue' */
static ssize_t edac_device_ctl_log_ue_show(struct edac_device_ctl_info
- *ctl_info, char *data)
+ *ctl_info, char *data)
{
return sprintf(data, "%u\n", ctl_info->log_ue);
}
static ssize_t edac_device_ctl_log_ue_store(struct edac_device_ctl_info
- *ctl_info, const char *data,
- size_t count)
+ *ctl_info, const char *data,
+ size_t count)
{
/* if parameter is zero, turn off flag, if non-zero turn on flag */
ctl_info->log_ue = (simple_strtoul(data, NULL, 0) != 0);
/* 'log_ce' */
static ssize_t edac_device_ctl_log_ce_show(struct edac_device_ctl_info
- *ctl_info, char *data)
+ *ctl_info, char *data)
{
return sprintf(data, "%u\n", ctl_info->log_ce);
}
static ssize_t edac_device_ctl_log_ce_store(struct edac_device_ctl_info
- *ctl_info, const char *data,
- size_t count)
+ *ctl_info, const char *data,
+ size_t count)
{
/* if parameter is zero, turn off flag, if non-zero turn on flag */
ctl_info->log_ce = (simple_strtoul(data, NULL, 0) != 0);
/* 'poll_msec' show and store functions*/
static ssize_t edac_device_ctl_poll_msec_show(struct edac_device_ctl_info
- *ctl_info, char *data)
+ *ctl_info, char *data)
{
return sprintf(data, "%u\n", ctl_info->poll_msec);
}
static ssize_t edac_device_ctl_poll_msec_store(struct edac_device_ctl_info
- *ctl_info, const char *data,
- size_t count)
+ *ctl_info, const char *data,
+ size_t count)
{
unsigned long value;
/* Function to 'show' fields from the edac_dev 'ctl_info' structure */
static ssize_t edac_dev_ctl_info_show(struct kobject *kobj,
- struct attribute *attr, char *buffer)
+ struct attribute *attr, char *buffer)
{
struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj);
struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr);
/* Function to 'store' fields into the edac_dev 'ctl_info' structure */
static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer, size_t count)
+ struct attribute *attr,
+ const char *buffer, size_t count)
{
struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj);
struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr);
#define CTL_INFO_ATTR(_name,_mode,_show,_store) \
static struct ctl_info_attribute attr_ctl_info_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .show = _show, \
- .store = _store, \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
};
/* Declare the various ctl_info attributes here and their respective ops */
CTL_INFO_ATTR(log_ue, S_IRUGO | S_IWUSR,
- edac_device_ctl_log_ue_show, edac_device_ctl_log_ue_store);
+ edac_device_ctl_log_ue_show, edac_device_ctl_log_ue_store);
CTL_INFO_ATTR(log_ce, S_IRUGO | S_IWUSR,
- edac_device_ctl_log_ce_show, edac_device_ctl_log_ce_store);
+ edac_device_ctl_log_ce_show, edac_device_ctl_log_ce_store);
CTL_INFO_ATTR(panic_on_ue, S_IRUGO | S_IWUSR,
- edac_device_ctl_panic_on_ue_show,
- edac_device_ctl_panic_on_ue_store);
+ edac_device_ctl_panic_on_ue_show,
+ edac_device_ctl_panic_on_ue_store);
CTL_INFO_ATTR(poll_msec, S_IRUGO | S_IWUSR,
- edac_device_ctl_poll_msec_show, edac_device_ctl_poll_msec_store);
+ edac_device_ctl_poll_msec_show, edac_device_ctl_poll_msec_store);
/* Base Attributes of the EDAC_DEVICE ECC object */
static struct ctl_info_attribute *device_ctrl_attr[] = {
* the '..../edac/<name>' kobject
*/
static void edac_device_unregister_main_kobj(struct edac_device_ctl_info
- *edac_dev)
+ *edac_dev)
{
debugf0("%s()\n", __func__);
debugf1("%s() name of kobject is: %s\n",
* Set of low-level instance attribute show functions
*/
static ssize_t instance_ue_count_show(struct edac_device_instance *instance,
- char *data)
+ char *data)
{
return sprintf(data, "%u\n", instance->counters.ue_count);
}
static ssize_t instance_ce_count_show(struct edac_device_instance *instance,
- char *data)
+ char *data)
{
return sprintf(data, "%u\n", instance->counters.ce_count);
}
/* Function to 'show' fields from the edac_dev 'instance' structure */
static ssize_t edac_dev_instance_show(struct kobject *kobj,
- struct attribute *attr, char *buffer)
+ struct attribute *attr, char *buffer)
{
struct edac_device_instance *instance = to_instance(kobj);
struct instance_attribute *instance_attr = to_instance_attr(attr);
/* Function to 'store' fields into the edac_dev 'instance' structure */
static ssize_t edac_dev_instance_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer, size_t count)
+ struct attribute *attr,
+ const char *buffer, size_t count)
{
struct edac_device_instance *instance = to_instance(kobj);
struct instance_attribute *instance_attr = to_instance_attr(attr);
#define INSTANCE_ATTR(_name,_mode,_show,_store) \
static struct instance_attribute attr_instance_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .show = _show, \
- .store = _store, \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
};
/*
/* Function to 'show' fields from the edac_dev 'block' structure */
static ssize_t edac_dev_block_show(struct kobject *kobj,
- struct attribute *attr, char *buffer)
+ struct attribute *attr, char *buffer)
{
struct edac_device_block *block = to_block(kobj);
struct block_attribute *block_attr = to_block_attr(attr);
/* Function to 'store' fields into the edac_dev 'block' structure */
static ssize_t edac_dev_block_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer, size_t count)
+ struct attribute *attr,
+ const char *buffer, size_t count)
{
struct edac_device_block *block = to_block(kobj);
struct block_attribute *block_attr = to_block_attr(attr);
#define BLOCK_ATTR(_name,_mode,_show,_store) \
static struct block_attribute attr_block_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .show = _show, \
- .store = _store, \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
};
BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show, NULL);
* edac_device_create_block
*/
static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
- struct edac_device_instance *instance,
- int idx)
+ struct edac_device_instance *instance,
+ int idx)
{
int err;
struct edac_device_block *block;
* edac_device_delete_block(edac_dev,j);
*/
static void edac_device_delete_block(struct edac_device_ctl_info *edac_dev,
- struct edac_device_instance *instance,
- int idx)
+ struct edac_device_instance *instance,
+ int idx)
{
struct edac_device_block *block;
* create just one instance of an edac_device 'instance'
*/
static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
- int idx)
+ int idx)
{
int i, j;
int err;
struct edac_device_ctl_info *edac_dev)
{
int err;
- struct edac_dev_sysfs_attribute *sysfs_attrib;
-
- /* point to the start of the array and iterate over it
- * adding each attribute listed to this mci instance's kobject
- */
- sysfs_attrib = edac_dev->sysfs_attributes;
-
- while (sysfs_attrib->attr.name != NULL) {
- err = sysfs_create_file(&edac_dev->kobj,
- (struct attribute*) sysfs_attrib);
- if (err) {
- return err;
- }
-
- sysfs_attrib++;
- }
+ struct edac_dev_sysfs_attribute *sysfs_attrib;
+
+ /* point to the start of the array and iterate over it
+ * adding each attribute listed to this mci instance's kobject
+ */
+ sysfs_attrib = edac_dev->sysfs_attributes;
+
+ while (sysfs_attrib->attr.name != NULL) {
+ err = sysfs_create_file(&edac_dev->kobj,
+ (struct attribute*) sysfs_attrib);
+ if (err) {
+ return err;
+ }
+
+ sysfs_attrib++;
+ }
return 0;
}
* struct mem_ctl_info pointer
*/
struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
- unsigned nr_chans)
+ unsigned nr_chans)
{
struct mem_ctl_info *mci;
struct csrow_info *csi, *csrow;
mci = (struct mem_ctl_info *)0;
csi = (struct csrow_info *)edac_align_ptr(&mci[1], sizeof(*csi));
chi = (struct channel_info *)
- edac_align_ptr(&csi[nr_csrows], sizeof(*chi));
+ edac_align_ptr(&csi[nr_csrows], sizeof(*chi));
pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
size = ((unsigned long)pvt) + sz_pvt;
/* Reschedule */
queue_delayed_work(edac_workqueue, &mci->work,
- msecs_to_jiffies(edac_mc_get_poll_msec()));
+ msecs_to_jiffies(edac_mc_get_poll_msec()));
}
/*
atomic_inc(&edac_handlers);
return 0;
- fail0:
+fail0:
edac_printk(KERN_WARNING, EDAC_MC,
- "%s (%s) %s %s already assigned %d\n", p->dev->bus_id,
- dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
+ "%s (%s) %s %s already assigned %d\n", p->dev->bus_id,
+ dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
return 1;
- fail1:
+fail1:
edac_printk(KERN_WARNING, EDAC_MC,
- "bug in low-level driver: attempt to assign\n"
- " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
+ "bug in low-level driver: attempt to assign\n"
+ " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
return 1;
}
edac_mc_dump_csrow(&mci->csrows[i]);
for (j = 0; j < mci->csrows[i].nr_channels; j++)
edac_mc_dump_channel(&mci->csrows[i].
- channels[j]);
+ channels[j]);
}
}
#endif
if (edac_create_sysfs_mci_device(mci)) {
edac_mc_printk(mci, KERN_WARNING,
- "failed to create sysfs device\n");
+ "failed to create sysfs device\n");
goto fail1;
}
/* Report action taken */
edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: DEV %s\n",
- mci->mod_name, mci->ctl_name, dev_name(mci));
+ mci->mod_name, mci->ctl_name, dev_name(mci));
mutex_unlock(&mem_ctls_mutex);
return 0;
- fail1:
+fail1:
del_mc_from_global_list(mci);
- fail0:
+fail0:
mutex_unlock(&mem_ctls_mutex);
return 1;
}
del_mc_from_global_list(mci);
mutex_unlock(&mem_ctls_mutex);
edac_printk(KERN_INFO, EDAC_MC,
- "Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
- mci->mod_name, mci->ctl_name, dev_name(mci));
+ "Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
+ mci->mod_name, mci->ctl_name, dev_name(mci));
return mci;
}
if (row == -1)
edac_mc_printk(mci, KERN_ERR,
- "could not look up page error address %lx\n",
- (unsigned long)page);
+ "could not look up page error address %lx\n",
+ (unsigned long)page);
return row;
}
/* FIXME - setable log (warning/emerg) levels */
/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
void edac_mc_handle_ce(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, unsigned long syndrome,
- int row, int channel, const char *msg)
+ unsigned long page_frame_number,
+ unsigned long offset_in_page, unsigned long syndrome,
+ int row, int channel, const char *msg)
{
unsigned long remapped_page;
if (row >= mci->nr_csrows || row < 0) {
/* something is wrong */
edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range "
- "(%d >= %d)\n", row, mci->nr_csrows);
+ "INTERNAL ERROR: row out of range "
+ "(%d >= %d)\n", row, mci->nr_csrows);
edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
return;
}
if (channel >= mci->csrows[row].nr_channels || channel < 0) {
/* something is wrong */
edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel out of range "
- "(%d >= %d)\n", channel,
- mci->csrows[row].nr_channels);
+ "INTERNAL ERROR: channel out of range "
+ "(%d >= %d)\n", channel,
+ mci->csrows[row].nr_channels);
edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
return;
}
if (edac_mc_get_log_ce())
/* FIXME - put in DIMM location */
edac_mc_printk(mci, KERN_WARNING,
- "CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
- "0x%lx, row %d, channel %d, label \"%s\": %s\n",
- page_frame_number, offset_in_page,
- mci->csrows[row].grain, syndrome, row, channel,
- mci->csrows[row].channels[channel].label, msg);
+ "CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
+ "0x%lx, row %d, channel %d, label \"%s\": %s\n",
+ page_frame_number, offset_in_page,
+ mci->csrows[row].grain, syndrome, row, channel,
+ mci->csrows[row].channels[channel].label, msg);
mci->ce_count++;
mci->csrows[row].ce_count++;
* page - which can then be scrubbed.
*/
remapped_page = mci->ctl_page_to_phys ?
- mci->ctl_page_to_phys(mci, page_frame_number) :
- page_frame_number;
+ mci->ctl_page_to_phys(mci, page_frame_number) :
+ page_frame_number;
edac_mc_scrub_block(remapped_page, offset_in_page,
- mci->csrows[row].grain);
+ mci->csrows[row].grain);
}
}
{
if (edac_mc_get_log_ce())
edac_mc_printk(mci, KERN_WARNING,
- "CE - no information available: %s\n", msg);
+ "CE - no information available: %s\n", msg);
mci->ce_noinfo_count++;
mci->ce_count++;
EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
void edac_mc_handle_ue(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, int row, const char *msg)
+ unsigned long page_frame_number,
+ unsigned long offset_in_page, int row, const char *msg)
{
int len = EDAC_MC_LABEL_LEN * 4;
char labels[len + 1];
if (row >= mci->nr_csrows || row < 0) {
/* something is wrong */
edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range "
- "(%d >= %d)\n", row, mci->nr_csrows);
+ "INTERNAL ERROR: row out of range "
+ "(%d >= %d)\n", row, mci->nr_csrows);
edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
return;
}
pos += chars;
for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
- chan++) {
+ chan++) {
chars = snprintf(pos, len + 1, ":%s",
mci->csrows[row].channels[chan].label);
len -= chars;
if (edac_mc_get_log_ue())
edac_mc_printk(mci, KERN_EMERG,
- "UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
- "labels \"%s\": %s\n", page_frame_number,
- offset_in_page, mci->csrows[row].grain, row,
- labels, msg);
+ "UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
+ "labels \"%s\": %s\n", page_frame_number,
+ offset_in_page, mci->csrows[row].grain, row,
+ labels, msg);
if (edac_mc_get_panic_on_ue())
panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
- "row %d, labels \"%s\": %s\n", mci->mc_idx,
- page_frame_number, offset_in_page,
- mci->csrows[row].grain, row, labels, msg);
+ "row %d, labels \"%s\": %s\n", mci->mc_idx,
+ page_frame_number, offset_in_page,
+ mci->csrows[row].grain, row, labels, msg);
mci->ue_count++;
mci->csrows[row].ue_count++;
if (edac_mc_get_log_ue())
edac_mc_printk(mci, KERN_WARNING,
- "UE - no information available: %s\n", msg);
+ "UE - no information available: %s\n", msg);
mci->ue_noinfo_count++;
mci->ue_count++;
}
* called to process UE events
*/
void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
- unsigned int csrow,
- unsigned int channela,
- unsigned int channelb, char *msg)
+ unsigned int csrow,
+ unsigned int channela,
+ unsigned int channelb, char *msg)
{
int len = EDAC_MC_LABEL_LEN * 4;
char labels[len + 1];
if (csrow >= mci->nr_csrows) {
/* something is wrong */
edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range (%d >= %d)\n",
- csrow, mci->nr_csrows);
+ "INTERNAL ERROR: row out of range (%d >= %d)\n",
+ csrow, mci->nr_csrows);
edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
return;
}
if (channela >= mci->csrows[csrow].nr_channels) {
/* something is wrong */
edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel-a out of range "
- "(%d >= %d)\n",
- channela, mci->csrows[csrow].nr_channels);
+ "INTERNAL ERROR: channel-a out of range "
+ "(%d >= %d)\n",
+ channela, mci->csrows[csrow].nr_channels);
edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
return;
}
if (channelb >= mci->csrows[csrow].nr_channels) {
/* something is wrong */
edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel-b out of range "
- "(%d >= %d)\n",
- channelb, mci->csrows[csrow].nr_channels);
+ "INTERNAL ERROR: channel-b out of range "
+ "(%d >= %d)\n",
+ channelb, mci->csrows[csrow].nr_channels);
edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
return;
}
if (edac_mc_get_log_ue())
edac_mc_printk(mci, KERN_EMERG,
- "UE row %d, channel-a= %d channel-b= %d "
- "labels \"%s\": %s\n", csrow, channela, channelb,
- labels, msg);
+ "UE row %d, channel-a= %d channel-b= %d "
+ "labels \"%s\": %s\n", csrow, channela, channelb,
+ labels, msg);
if (edac_mc_get_panic_on_ue())
panic("UE row %d, channel-a= %d channel-b= %d "
- "labels \"%s\": %s\n", csrow, channela,
- channelb, labels, msg);
+ "labels \"%s\": %s\n", csrow, channela,
+ channelb, labels, msg);
}
EXPORT_SYMBOL(edac_mc_handle_fbd_ue);
* called to process CE events
*/
void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
- unsigned int csrow, unsigned int channel, char *msg)
+ unsigned int csrow, unsigned int channel, char *msg)
{
/* Ensure boundary values */
if (csrow >= mci->nr_csrows) {
/* something is wrong */
edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range (%d >= %d)\n",
- csrow, mci->nr_csrows);
+ "INTERNAL ERROR: row out of range (%d >= %d)\n",
+ csrow, mci->nr_csrows);
edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
return;
}
if (channel >= mci->csrows[csrow].nr_channels) {
/* something is wrong */
edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel out of range (%d >= %d)\n",
- channel, mci->csrows[csrow].nr_channels);
+ "INTERNAL ERROR: channel out of range (%d >= %d)\n",
+ channel, mci->csrows[csrow].nr_channels);
edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
return;
}
if (edac_mc_get_log_ce())
/* FIXME - put in DIMM location */
edac_mc_printk(mci, KERN_WARNING,
- "CE row %d, channel %d, label \"%s\": %s\n",
- csrow, channel,
- mci->csrows[csrow].channels[channel].label, msg);
+ "CE row %d, channel %d, label \"%s\": %s\n",
+ csrow, channel,
+ mci->csrows[csrow].channels[channel].label, msg);
mci->ce_count++;
mci->csrows[csrow].ce_count++;
/* csrow<id> control files */
MEMCTRL_ATTR(edac_mc_panic_on_ue,
- S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
+ S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
MEMCTRL_ATTR(edac_mc_log_ue,
- S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
+ S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
MEMCTRL_ATTR(edac_mc_log_ce,
- S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
+ S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
MEMCTRL_ATTR(edac_mc_poll_msec,
- S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
+ S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store);
/* Base Attributes of the memory ECC object */
static struct memctrl_dev_attribute *memctrl_attr[] = {
/* Set of more default csrow<id> attribute show/store functions */
static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data,
- int private)
+ int private)
{
return sprintf(data, "%u\n", csrow->ue_count);
}
static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data,
- int private)
+ int private)
{
return sprintf(data, "%u\n", csrow->ce_count);
}
static ssize_t csrow_size_show(struct csrow_info *csrow, char *data,
- int private)
+ int private)
{
return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
}
static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data,
- int private)
+ int private)
{
return sprintf(data, "%s\n", mem_types[csrow->mtype]);
}
static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data,
- int private)
+ int private)
{
return sprintf(data, "%s\n", dev_types[csrow->dtype]);
}
static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
- int private)
+ int private)
{
return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]);
}
/* show/store functions for DIMM Label attributes */
static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
- char *data, int channel)
+ char *data, int channel)
{
return snprintf(data, EDAC_MC_LABEL_LEN, "%s",
csrow->channels[channel].label);
/* show function for dynamic chX_ce_count attribute */
static ssize_t channel_ce_count_show(struct csrow_info *csrow,
- char *data, int channel)
+ char *data, int channel)
{
return sprintf(data, "%u\n", csrow->channels[channel].ce_count);
}
/* Set of show/store higher level functions for default csrow attributes */
static ssize_t csrowdev_show(struct kobject *kobj,
- struct attribute *attr, char *buffer)
+ struct attribute *attr, char *buffer)
{
struct csrow_info *csrow = to_csrow(kobj);
struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
if (csrowdev_attr->show)
return csrowdev_attr->show(csrow,
- buffer, csrowdev_attr->private);
+ buffer, csrowdev_attr->private);
return -EIO;
}
static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
+ const char *buffer, size_t count)
{
struct csrow_info *csrow = to_csrow(kobj);
struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
if (csrowdev_attr->store)
return csrowdev_attr->store(csrow,
- buffer,
- count, csrowdev_attr->private);
+ buffer,
+ count, csrowdev_attr->private);
return -EIO;
}
/* possible dynamic channel DIMM Label attribute files */
CSROWDEV_ATTR(ch0_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 0);
+ channel_dimm_label_show, channel_dimm_label_store, 0);
CSROWDEV_ATTR(ch1_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 1);
+ channel_dimm_label_show, channel_dimm_label_store, 1);
CSROWDEV_ATTR(ch2_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 2);
+ channel_dimm_label_show, channel_dimm_label_store, 2);
CSROWDEV_ATTR(ch3_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 3);
+ channel_dimm_label_show, channel_dimm_label_store, 3);
CSROWDEV_ATTR(ch4_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 4);
+ channel_dimm_label_show, channel_dimm_label_store, 4);
CSROWDEV_ATTR(ch5_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 5);
+ channel_dimm_label_show, channel_dimm_label_store, 5);
/* Total possible dynamic DIMM Label attribute file table */
static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = {
/* Create a CSROW object under specifed edac_mc_device */
static int edac_create_csrow_object(struct kobject *edac_mci_kobj,
- struct csrow_info *csrow, int index)
+ struct csrow_info *csrow, int index)
{
int err = 0;
int chan;
}
}
- error_exit:
+error_exit:
return err;
}
/* memory scrubbing */
static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
+ const char *data, size_t count)
{
u32 bandwidth = -1;
if (!(*mci->set_sdram_scrub_rate) (mci, &bandwidth)) {
edac_printk(KERN_DEBUG, EDAC_MC,
- "Scrub rate set successfully, applied: %d\n",
- bandwidth);
+ "Scrub rate set successfully, applied: %d\n",
+ bandwidth);
} else {
/* FIXME: error codes maybe? */
edac_printk(KERN_DEBUG, EDAC_MC,
- "Scrub rate set FAILED, could not apply: %d\n",
- bandwidth);
+ "Scrub rate set FAILED, could not apply: %d\n",
+ bandwidth);
}
} else {
/* FIXME: produce "not implemented" ERROR for user-side. */
edac_printk(KERN_WARNING, EDAC_MC,
- "Memory scrubbing 'set'control is not implemented!\n");
+ "Memory scrubbing 'set'control is not implemented!\n");
}
return count;
}
if (mci->get_sdram_scrub_rate) {
if (!(*mci->get_sdram_scrub_rate) (mci, &bandwidth)) {
edac_printk(KERN_DEBUG, EDAC_MC,
- "Scrub rate successfully, fetched: %d\n",
- bandwidth);
+ "Scrub rate successfully, fetched: %d\n",
+ bandwidth);
} else {
/* FIXME: error codes maybe? */
edac_printk(KERN_DEBUG, EDAC_MC,
- "Scrub rate fetch FAILED, got: %d\n",
- bandwidth);
+ "Scrub rate fetch FAILED, got: %d\n",
+ bandwidth);
}
} else {
/* FIXME: produce "not implemented" ERROR for user-side. */
edac_printk(KERN_WARNING, EDAC_MC,
- "Memory scrubbing 'get' control is not implemented\n");
+ "Memory scrubbing 'get' control is not implemented\n");
}
return sprintf(data, "%d\n", bandwidth);
}
int total_pages, csrow_idx;
for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
- csrow_idx++) {
+ csrow_idx++) {
struct csrow_info *csrow = &mci->csrows[csrow_idx];
if (!csrow->nr_pages)
/* MCI show/store functions for top most object */
static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
- char *buffer)
+ char *buffer)
{
struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
}
static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
+ const char *buffer, size_t count)
{
struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
/* memory scrubber attribute file */
MCIDEV_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show,
- mci_sdram_scrub_rate_store);
+ mci_sdram_scrub_rate_store);
static struct mcidev_sysfs_attribute *mci_attr[] = {
&mci_attr_reset_counters,
return 0;
/* CSROW error: backout what has already been registered, */
- fail1:
+fail1:
for (i--; i >= 0; i--) {
if (csrow->nr_pages > 0) {
init_completion(&csrow->kobj_complete);
}
}
- fail0:
+fail0:
init_completion(&mci->kobj_complete);
kobject_unregister(edac_mci_kobj);
wait_for_completion(&mci->kobj_complete);
*/
if (edac_register_sysfs_edac_name()) {
edac_printk(KERN_ERR, EDAC_MC,
- "Error initializing 'edac' kobject\n");
+ "Error initializing 'edac' kobject\n");
err = -ENODEV;
goto error;
}
*/
if (edac_sysfs_memctrl_setup()) {
edac_printk(KERN_ERR, EDAC_MC,
- "Error initializing sysfs code\n");
+ "Error initializing sysfs code\n");
err = -ENODEV;
goto error_sysfs;
}
return 0;
/* Error teardown stack */
- error_mem:
+error_mem:
edac_sysfs_memctrl_teardown();
- error_sysfs:
+error_sysfs:
edac_unregister_sysfs_edac_name();
- error:
+error:
return err;
}
* edac_pci it is going to control/register with the EDAC CORE.
*/
struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
- const char *edac_pci_name)
+ const char *edac_pci_name)
{
struct edac_pci_ctl_info *pci;
void *pvt;
list_add_tail_rcu(&pci->link, insert_before);
return 0;
- fail0:
+fail0:
edac_printk(KERN_WARNING, EDAC_PCI,
- "%s (%s) %s %s already assigned %d\n",
- rover->dev->bus_id, dev_name(rover),
- rover->mod_name, rover->ctl_name, rover->pci_idx);
+ "%s (%s) %s %s already assigned %d\n",
+ rover->dev->bus_id, dev_name(rover),
+ rover->mod_name, rover->ctl_name, rover->pci_idx);
return 1;
- fail1:
+fail1:
edac_printk(KERN_WARNING, EDAC_PCI,
- "but in low-level driver: attempt to assign\n"
- "\tduplicate pci_idx %d in %s()\n", rover->pci_idx,
- __func__);
+ "but in low-level driver: attempt to assign\n"
+ "\tduplicate pci_idx %d in %s()\n", rover->pci_idx,
+ __func__);
return 1;
}
edac_lock_pci_list();
if ((pci->op_state == OP_RUNNING_POLL) &&
- (pci->edac_check != NULL) && (edac_pci_get_check_errors()))
+ (pci->edac_check != NULL) && (edac_pci_get_check_errors()))
pci->edac_check(pci);
edac_unlock_pci_list();
/* Reschedule */
queue_delayed_work(edac_workqueue, &pci->work,
- msecs_to_jiffies(edac_pci_get_poll_msec()));
+ msecs_to_jiffies(edac_pci_get_poll_msec()));
}
/*
INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
queue_delayed_work(edac_workqueue, &pci->work,
- msecs_to_jiffies(edac_pci_get_poll_msec()));
+ msecs_to_jiffies(edac_pci_get_poll_msec()));
}
/*
edac_unlock_pci_list();
return 0;
- fail1:
+fail1:
del_edac_pci_from_global_list(pci);
- fail0:
+fail0:
edac_unlock_pci_list();
return 1;
}
edac_unlock_pci_list();
edac_printk(KERN_INFO, EDAC_PCI,
- "Removed device %d for %s %s: DEV %s\n",
- pci->pci_idx, pci->mod_name, pci->ctl_name, dev_name(pci));
+ "Removed device %d for %s %s: DEV %s\n",
+ pci->pci_idx, pci->mod_name, pci->ctl_name, dev_name(pci));
return pci;
}
};
struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
- const char *mod_name)
+ const char *mod_name)
{
struct edac_pci_ctl_info *pci;
struct edac_pci_gen_data *pdata;
}
static ssize_t instance_npe_count_show(struct edac_pci_ctl_info *pci,
- char *data)
+ char *data)
{
return sprintf(data, "%u\n", atomic_read(&pci->counters.npe_count));
}
/* Function to 'show' fields from the edac_pci 'instance' structure */
static ssize_t edac_pci_instance_show(struct kobject *kobj,
- struct attribute *attr, char *buffer)
+ struct attribute *attr, char *buffer)
{
struct edac_pci_ctl_info *pci = to_instance(kobj);
struct instance_attribute *instance_attr = to_instance_attr(attr);
/* Function to 'store' fields into the edac_pci 'instance' structure */
static ssize_t edac_pci_instance_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer, size_t count)
+ struct attribute *attr,
+ const char *buffer, size_t count)
{
struct edac_pci_ctl_info *pci = to_instance(kobj);
struct instance_attribute *instance_attr = to_instance_attr(attr);
}
static ssize_t edac_pci_dev_store(struct kobject *kobj,
- struct attribute *attr, const char *buffer,
- size_t count)
+ struct attribute *attr, const char *buffer,
+ size_t count)
{
struct edac_pci_dev_attribute *edac_pci_dev;
edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
/* PCI Parity control files */
EDAC_PCI_ATTR(check_pci_errors, S_IRUGO | S_IWUSR, edac_pci_int_show,
- edac_pci_int_store);
+ edac_pci_int_store);
EDAC_PCI_ATTR(edac_pci_log_pe, S_IRUGO | S_IWUSR, edac_pci_int_show,
- edac_pci_int_store);
+ edac_pci_int_store);
EDAC_PCI_ATTR(edac_pci_log_npe, S_IRUGO | S_IWUSR, edac_pci_int_show,
- edac_pci_int_store);
+ edac_pci_int_store);
EDAC_PCI_ATTR(edac_pci_panic_on_pe, S_IRUGO | S_IWUSR, edac_pci_int_show,
- edac_pci_int_store);
+ edac_pci_int_store);
EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL);
EDAC_PCI_ATTR(pci_nonparity_count, S_IRUGO, edac_pci_int_show, NULL);
}
status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
- PCI_STATUS_PARITY;
+ PCI_STATUS_PARITY;
if (status)
/* reset only the bits we are interested in */
if (status) {
if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
edac_printk(KERN_CRIT, EDAC_PCI,
- "Signaled System Error on %s\n",
- pci_name(dev));
+ "Signaled System Error on %s\n",
+ pci_name(dev));
atomic_inc(&pci_nonparity_count);
}
if (status & (PCI_STATUS_PARITY)) {
edac_printk(KERN_CRIT, EDAC_PCI,
- "Master Data Parity Error on %s\n",
- pci_name(dev));
+ "Master Data Parity Error on %s\n",
+ pci_name(dev));
atomic_inc(&pci_parity_count);
}
if (status & (PCI_STATUS_DETECTED_PARITY)) {
edac_printk(KERN_CRIT, EDAC_PCI,
- "Detected Parity Error on %s\n",
- pci_name(dev));
+ "Detected Parity Error on %s\n",
+ pci_name(dev));
atomic_inc(&pci_parity_count);
}
if (status) {
if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
- "Signaled System Error on %s\n",
- pci_name(dev));
+ "Signaled System Error on %s\n",
+ pci_name(dev));
atomic_inc(&pci_nonparity_count);
}
if (status & (PCI_STATUS_PARITY)) {
edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
- "Master Data Parity Error on "
- "%s\n", pci_name(dev));
+ "Master Data Parity Error on "
+ "%s\n", pci_name(dev));
atomic_inc(&pci_parity_count);
}
if (status & (PCI_STATUS_DETECTED_PARITY)) {
edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
- "Detected Parity Error on %s\n",
- pci_name(dev));
+ "Detected Parity Error on %s\n",
+ pci_name(dev));
atomic_inc(&pci_parity_count);
}
static const struct i3000_dev_info i3000_devs[] = {
[I3000] = {
- .ctl_name = "i3000"},
+ .ctl_name = "i3000"},
};
static struct pci_dev *mci_pdev = NULL;
}
static int i3000_process_error_info(struct mem_ctl_info *mci,
- struct i3000_error_info *info,
- int handle_errors)
+ struct i3000_error_info *info,
+ int handle_errors)
{
int row, multi_chan;
int pfn, offset, channel;
edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE");
else
edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row,
- multi_chan ? channel : 0, "i3000 CE");
+ multi_chan ? channel : 0, "i3000 CE");
return 1;
}
*/
for (i = 0; i < I3000_RANKS_PER_CHANNEL / 2; i++)
if (ODD_RANK_ATTRIB(c0dra[i]) != ODD_RANK_ATTRIB(c1dra[i]) ||
- EVEN_RANK_ATTRIB(c0dra[i]) != EVEN_RANK_ATTRIB(c1dra[i]))
+ EVEN_RANK_ATTRIB(c0dra[i]) !=
+ EVEN_RANK_ATTRIB(c1dra[i]))
return 0;
/* If the rank boundaries for the two channels are different
window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE);
if (!window) {
printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n",
- mchbar);
+ mchbar);
return -ENODEV;
}
/* returns count (>= 0), or negative on error */
static int __devinit i3000_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
int rc;
if (mci_pdev == NULL) {
i3000_registered = 0;
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_3000_HB, NULL);
+ PCI_DEVICE_ID_INTEL_3000_HB, NULL);
if (!mci_pdev) {
debugf0("i3000 pci_get_device fail\n");
pci_rc = -ENODEV;
return 0;
- fail1:
+fail1:
pci_unregister_driver(&i3000_driver);
- fail0:
+fail0:
if (mci_pdev)
pci_dev_put(mci_pdev);
#define FERR_FAT_M3ERR 0x00000004
#define FERR_FAT_M2ERR 0x00000002
#define FERR_FAT_M1ERR 0x00000001
-#define FERR_FAT_MASK (FERR_FAT_M1ERR | \
+#define FERR_FAT_MASK (FERR_FAT_M1ERR | \
FERR_FAT_M2ERR | \
FERR_FAT_M3ERR)
#define FERR_NF_UNCORRECTABLE (FERR_NF_M12ERR | \
FERR_NF_M11ERR | \
FERR_NF_M10ERR | \
- FERR_NF_M8ERR | \
+ FERR_NF_M8ERR | \
FERR_NF_M7ERR | \
FERR_NF_M6ERR | \
FERR_NF_M5ERR | \
#define FERR_NF_DIMM_SPARE (FERR_NF_M27ERR | \
FERR_NF_M28ERR)
#define FERR_NF_THERMAL (FERR_NF_M26ERR | \
- FERR_NF_M25ERR | \
+ FERR_NF_M25ERR | \
FERR_NF_M24ERR | \
FERR_NF_M23ERR)
#define FERR_NF_SPD_PROTOCOL (FERR_NF_M22ERR)
/* Table of devices attributes supported by this driver */
static const struct i5000_dev_info i5000_devs[] = {
[I5000P] = {
- .ctl_name = "I5000",
- .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I5000_DEV16,
- },
+ .ctl_name = "I5000",
+ .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I5000_DEV16,
+ },
};
struct i5000_dimm_info {
/* harvest the various error data we need */
pci_read_config_dword(pvt->branchmap_werrors,
- NERR_FAT_FBD, &info->nerr_fat_fbd);
+ NERR_FAT_FBD, &info->nerr_fat_fbd);
pci_read_config_word(pvt->branchmap_werrors,
- NRECMEMA, &info->nrecmema);
+ NRECMEMA, &info->nrecmema);
pci_read_config_word(pvt->branchmap_werrors,
- NRECMEMB, &info->nrecmemb);
+ NRECMEMB, &info->nrecmemb);
/* Clear the error bits, by writing them back */
pci_write_config_dword(pvt->branchmap_werrors,
- FERR_FAT_FBD, value);
+ FERR_FAT_FBD, value);
} else {
info->ferr_fat_fbd = 0;
info->nerr_fat_fbd = 0;
/* harvest the various error data we need */
pci_read_config_dword(pvt->branchmap_werrors,
- NERR_NF_FBD, &info->nerr_nf_fbd);
+ NERR_NF_FBD, &info->nerr_nf_fbd);
pci_read_config_word(pvt->branchmap_werrors,
- RECMEMA, &info->recmema);
+ RECMEMA, &info->recmema);
pci_read_config_dword(pvt->branchmap_werrors,
- RECMEMB, &info->recmemb);
+ RECMEMB, &info->recmemb);
pci_read_config_dword(pvt->branchmap_werrors,
- REDMEMB, &info->redmemb);
+ REDMEMB, &info->redmemb);
/* Clear the error bits, by writing them back */
pci_write_config_dword(pvt->branchmap_werrors,
- FERR_NF_FBD, value);
+ FERR_NF_FBD, value);
} else {
info->ferr_nf_fbd = 0;
info->nerr_nf_fbd = 0;
* handle the Intel FATAL errors, if any
*/
static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
- struct i5000_error_info * info,
- int handle_errors)
+ struct i5000_error_info * info,
+ int handle_errors)
{
char msg[EDAC_MC_LABEL_LEN + 1 + 90];
u32 allErrors;
/******************************************************************************
* i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
- * struct i5000_error_info *info,
- * int handle_errors);
+ * struct i5000_error_info *info,
+ * int handle_errors);
*
* handle the Intel NON-FATAL errors, if any
*/
static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
- struct i5000_error_info * info,
- int handle_errors)
+ struct i5000_error_info * info,
+ int handle_errors)
{
char msg[EDAC_MC_LABEL_LEN + 1 + 90];
u32 allErrors;
cas = NREC_CAS(info->nrecmemb);
debugf0
- ("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
- "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, channel + 1, branch >> 1, bank,
- rdwr ? "Write" : "Read", ras, cas);
+ ("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
+ "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, channel + 1, branch >> 1, bank,
+ rdwr ? "Write" : "Read", ras, cas);
/* Form out message */
snprintf(msg, sizeof(msg),
misc_errors = allErrors & FERR_NF_THERMAL;
if (misc_errors) {
i5000_printk(KERN_WARNING, "\tTHERMAL Error, bits= 0x%x\n",
- misc_errors);
+ misc_errors);
}
/* See if any of the non-retry errors have fired */
misc_errors = allErrors & FERR_NF_NON_RETRY;
if (misc_errors) {
i5000_printk(KERN_WARNING, "\tNON-Retry Errors, bits= 0x%x\n",
- misc_errors);
+ misc_errors);
}
/* See if any of the northbound CRC errors have fired */
misc_errors = allErrors & FERR_NF_NORTH_CRC;
if (misc_errors) {
i5000_printk(KERN_WARNING,
- "\tNORTHBOUND CRC Error, bits= 0x%x\n",
- misc_errors);
+ "\tNORTHBOUND CRC Error, bits= 0x%x\n",
+ misc_errors);
}
/* See if any of the SPD protocol errors have fired */
misc_errors = allErrors & FERR_NF_SPD_PROTOCOL;
if (misc_errors) {
i5000_printk(KERN_WARNING,
- "\tSPD Protocol Error, bits= 0x%x\n",
- misc_errors);
+ "\tSPD Protocol Error, bits= 0x%x\n",
+ misc_errors);
}
/* See if any of the DIMM-spare errors have fired */
misc_errors = allErrors & FERR_NF_DIMM_SPARE;
if (misc_errors) {
i5000_printk(KERN_WARNING, "\tDIMM-Spare Error, bits= 0x%x\n",
- misc_errors);
+ misc_errors);
}
}
* in the 'info' structure, previously retrieved from hardware
*/
static void i5000_process_error_info(struct mem_ctl_info *mci,
- struct i5000_error_info * info,
- int handle_errors)
+ struct i5000_error_info * info,
+ int handle_errors)
{
/* First handle any fatal errors that occurred */
i5000_process_fatal_error_info(mci, info, handle_errors);
pdev = NULL;
while (1) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);
+ PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);
/* End of list, leave */
if (pdev == NULL) {
i5000_printk(KERN_ERR,
- "'system address,Process Bus' "
- "device not found:"
- "vendor 0x%x device 0x%x FUNC 1 "
- "(broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I5000_DEV16);
+ "'system address,Process Bus' "
+ "device not found:"
+ "vendor 0x%x device 0x%x FUNC 1 "
+ "(broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I5000_DEV16);
return 1;
}
pdev = NULL;
while (1) {
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);
+ PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev);
if (pdev == NULL) {
i5000_printk(KERN_ERR,
- "MC: 'branchmap,control,errors' "
- "device not found:"
- "vendor 0x%x device 0x%x Func 2 "
- "(broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I5000_DEV16);
+ "MC: 'branchmap,control,errors' "
+ "device not found:"
+ "vendor 0x%x device 0x%x Func 2 "
+ "(broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I5000_DEV16);
pci_dev_put(pvt->branchmap_werrors);
return 1;
pdev = NULL;
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_I5000_BRANCH_0, pdev);
+ PCI_DEVICE_ID_I5000_BRANCH_0, pdev);
if (pdev == NULL) {
i5000_printk(KERN_ERR,
- "MC: 'BRANCH 0' device not found:"
- "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_I5000_BRANCH_0);
+ "MC: 'BRANCH 0' device not found:"
+ "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_I5000_BRANCH_0);
pci_dev_put(pvt->branchmap_werrors);
pci_dev_put(pvt->fsb_error_regs);
if (pvt->maxch >= CHANNELS_PER_BRANCH) {
pdev = NULL;
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_I5000_BRANCH_1, pdev);
+ PCI_DEVICE_ID_I5000_BRANCH_1, pdev);
if (pdev == NULL) {
i5000_printk(KERN_ERR,
- "MC: 'BRANCH 1' device not found:"
- "vendor 0x%x device 0x%x Func 0 "
- "(broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_I5000_BRANCH_1);
+ "MC: 'BRANCH 1' device not found:"
+ "vendor 0x%x device 0x%x Func 0 "
+ "(broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_I5000_BRANCH_1);
pci_dev_put(pvt->branchmap_werrors);
pci_dev_put(pvt->fsb_error_regs);
}
static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
- struct i5000_dimm_info *dinfo)
+ struct i5000_dimm_info *dinfo)
{
int mtr;
int amb_present_reg;
dinfo->dual_rank = MTR_DIMM_RANK(mtr);
if (!((dinfo->dual_rank == 0) &&
- ((csrow & 0x1) == 0x1))) {
+ ((csrow & 0x1) == 0x1))) {
/* Start with the number of bits for a Bank
* on the DRAM */
addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
mem_buffer = p = kmalloc(space, GFP_KERNEL);
if (p == NULL) {
i5000_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n",
- __FILE__, __func__);
+ __FILE__, __func__);
return;
}
* then reset the message buffer */
if (csrow & 0x1) {
n = snprintf(p, space, "---------------------------"
- "--------------------------------");
+ "--------------------------------");
p += n;
space -= n;
debugf2("%s\n", mem_buffer);
/* Output the last bottom 'boundary' marker */
n = snprintf(p, space, "---------------------------"
- "--------------------------------\n");
+ "--------------------------------\n");
p += n;
space -= n;
pvt = (struct i5000_pvt *)mci->pvt_info;
pci_read_config_dword(pvt->system_address, AMBASE,
- (u32 *) & pvt->ambase);
+ (u32 *) & pvt->ambase);
pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
- ((u32 *) & pvt->ambase) + sizeof(u32));
+ ((u32 *) & pvt->ambase) + sizeof(u32));
maxdimmperch = pvt->maxdimmperch;
maxch = pvt->maxch;
int where = MTR0 + (slot_row * sizeof(u32));
pci_read_config_word(pvt->branch_0, where,
- &pvt->b0_mtr[slot_row]);
+ &pvt->b0_mtr[slot_row]);
debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where,
pvt->b0_mtr[slot_row]);
if (pvt->maxch >= CHANNELS_PER_BRANCH) {
pci_read_config_word(pvt->branch_1, where,
- &pvt->b1_mtr[slot_row]);
+ &pvt->b1_mtr[slot_row]);
debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row,
where, pvt->b1_mtr[slot_row]);
} else {
decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
}
pci_read_config_word(pvt->branch_0, AMB_PRESENT_0,
- &pvt->b0_ambpresent0);
+ &pvt->b0_ambpresent0);
debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
pci_read_config_word(pvt->branch_0, AMB_PRESENT_1,
- &pvt->b0_ambpresent1);
+ &pvt->b0_ambpresent1);
debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
/* Only if we have 2 branches (4 channels) */
decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
}
pci_read_config_word(pvt->branch_1, AMB_PRESENT_0,
- &pvt->b1_ambpresent0);
+ &pvt->b1_ambpresent0);
debugf2("\t\tAMB-Branch 1-present0 0x%x:\n",
pvt->b1_ambpresent0);
pci_read_config_word(pvt->branch_1, AMB_PRESENT_1,
- &pvt->b1_ambpresent1);
+ &pvt->b1_ambpresent1);
debugf2("\t\tAMB-Branch 1-present1 0x%x:\n",
pvt->b1_ambpresent1);
}
/* Read the FBD Error Mask Register */
pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD,
- &fbd_error_mask);
+ &fbd_error_mask);
/* Enable with a '0' */
fbd_error_mask &= ~(ENABLE_EMASK_ALL);
pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD,
- fbd_error_mask);
+ fbd_error_mask);
}
/******************************************************************************
* as well
*/
static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
- int *num_dimms_per_channel,
- int *num_channels)
+ int *num_dimms_per_channel,
+ int *num_channels)
{
u8 value;
* some fancy mobo determination.
*/
i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
- &num_channels);
+ &num_channels);
num_csrows = num_dimms_per_channel * 2;
debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
return 0;
/* Error exit unwinding stack */
- fail1:
+fail1:
i5000_put_devices(mci);
- fail0:
+fail0:
edac_mc_free(mci);
return -ENODEV;
}
* count (>= 0)
*/
static int __devinit i5000_init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+ const struct pci_device_id *id)
{
int rc;
MODULE_AUTHOR
("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>");
MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - "
- I5000_REVISION);
+ I5000_REVISION);
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
static struct edac_pci_ctl_info *i82443bxgx_pci;
static void i82443bxgx_edacmc_get_error_info(struct mem_ctl_info *mci,
- struct i82443bxgx_edacmc_error_info
- *info)
+ struct i82443bxgx_edacmc_error_info
+ *info)
{
struct pci_dev *pdev;
pdev = to_pci_dev(mci->dev);
error_found = 1;
if (handle_errors)
edac_mc_handle_ce(mci, page, pageoffset,
- /* 440BX/GX don't make syndrome information available */
- 0, edac_mc_find_csrow_by_page(mci, page), 0, /* channel */
- mci->ctl_name);
+ /* 440BX/GX don't make syndrome information
+ * available */
+ 0, edac_mc_find_csrow_by_page(mci, page), 0,
+ mci->ctl_name);
}
if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
error_found = 1;
if (handle_errors)
edac_mc_handle_ue(mci, page, pageoffset,
- edac_mc_find_csrow_by_page(mci, page),
- mci->ctl_name);
+ edac_mc_find_csrow_by_page(mci, page),
+ mci->ctl_name);
}
return error_found;
}
static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
- struct pci_dev *pdev,
- enum edac_type edac_mode,
- enum mem_type mtype)
+ struct pci_dev *pdev,
+ enum edac_type edac_mode,
+ enum mem_type mtype)
{
struct csrow_info *csrow;
int index;
debugf0("MC: " __FILE__ ": %s()\n", __func__);
/* Something is really hosed if PCI config space reads from
- the MC aren't working. */
+ * the MC aren't working.
+ */
if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg))
return -EIO;
mtype = MEM_RDR;
break;
default:
- debugf0
- ("Unknown/reserved DRAM type value in DRAMC register!\n");
+ debugf0("Unknown/reserved DRAM type value "
+ "in DRAMC register!\n");
mtype = -MEM_UNKNOWN;
}
mci->scrub_cap = SCRUB_FLAG_HW_SRC;
pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg);
ecc_mode = ((nbxcfg >> I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY) &
- (BIT(0) | BIT(1)));
+ (BIT(0) | BIT(1)));
mci->scrub_mode = (ecc_mode == I82443BXGX_NBXCFG_INTEGRITY_SCRUB)
- ? SCRUB_HW_SRC : SCRUB_NONE;
+ ? SCRUB_HW_SRC : SCRUB_NONE;
switch (ecc_mode) {
case I82443BXGX_NBXCFG_INTEGRITY_NONE:
edac_mode = EDAC_SECDED;
break;
default:
- debugf0
- ("%s(): Unknown/reserved ECC state in NBXCFG register!\n",
- __func__);
+ debugf0("%s(): Unknown/reserved ECC state "
+ "in NBXCFG register!\n", __func__);
edac_mode = EDAC_UNKNOWN;
break;
}
* here, or we get "phantom" errors occurring at module-load
* time. */
pci_write_bits32(pdev, I82443BXGX_EAP,
- (I82443BXGX_EAP_OFFSET_SBE |
- I82443BXGX_EAP_OFFSET_MBE),
- (I82443BXGX_EAP_OFFSET_SBE |
- I82443BXGX_EAP_OFFSET_MBE));
+ (I82443BXGX_EAP_OFFSET_SBE |
+ I82443BXGX_EAP_OFFSET_MBE),
+ (I82443BXGX_EAP_OFFSET_SBE |
+ I82443BXGX_EAP_OFFSET_MBE));
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = I82443_REVISION;
debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
return 0;
- fail:
+fail:
edac_mc_free(mci);
return -ENODEV;
}
static const struct i82860_dev_info i82860_devs[] = {
[I82860] = {
- .ctl_name = "i82860"},
+ .ctl_name = "i82860"},
};
static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code
static struct edac_pci_ctl_info *i82860_pci;
static void i82860_get_error_info(struct mem_ctl_info *mci,
- struct i82860_error_info *info)
+ struct i82860_error_info *info)
{
struct pci_dev *pdev;
}
static int i82860_process_error_info(struct mem_ctl_info *mci,
- struct i82860_error_info *info,
- int handle_errors)
+ struct i82860_error_info *info,
+ int handle_errors)
{
int row;
edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
else
edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0,
- "i82860 UE");
+ "i82860 UE");
return 1;
}
csrow = &mci->csrows[index];
pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
cumul_size = (value & I82860_GBA_MASK) <<
- (I82860_GBA_SHIFT - PAGE_SHIFT);
+ (I82860_GBA_SHIFT - PAGE_SHIFT);
debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
cumul_size);
return 0;
- fail:
+fail:
edac_mc_free(mci);
return -ENODEV;
}
/* returns count (>= 0), or negative on error */
static int __devinit i82860_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
int rc;
if (!mci_pdev) {
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_82860_0, NULL);
+ PCI_DEVICE_ID_INTEL_82860_0, NULL);
if (mci_pdev == NULL) {
debugf0("860 pci_get_device fail\n");
return 0;
- fail1:
+fail1:
pci_unregister_driver(&i82860_driver);
- fail0:
+fail0:
if (mci_pdev != NULL)
pci_dev_put(mci_pdev);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) "
- "Ben Woodard <woodard@redhat.com>");
+ "Ben Woodard <woodard@redhat.com>");
MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
static const struct i82875p_dev_info i82875p_devs[] = {
[I82875P] = {
- .ctl_name = "i82875p"},
+ .ctl_name = "i82875p"},
};
static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code has
static struct edac_pci_ctl_info *i82875p_pci;
static void i82875p_get_error_info(struct mem_ctl_info *mci,
- struct i82875p_error_info *info)
+ struct i82875p_error_info *info)
{
struct pci_dev *pdev;
}
static int i82875p_process_error_info(struct mem_ctl_info *mci,
- struct i82875p_error_info *info,
- int handle_errors)
+ struct i82875p_error_info *info,
+ int handle_errors)
{
int row, multi_chan;
edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
else
edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
- multi_chan ? (info->des & 0x1) : 0,
- "i82875p CE");
+ multi_chan ? (info->des & 0x1) : 0,
+ "i82875p CE");
return 1;
}
/* Return 0 on success or 1 on failure. */
static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
- struct pci_dev **ovrfl_pdev,
- void __iomem ** ovrfl_window)
+ struct pci_dev **ovrfl_pdev,
+ void __iomem **ovrfl_window)
{
struct pci_dev *dev;
void __iomem *window;
if (pci_enable_device(dev)) {
i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow "
- "device\n", __func__);
+ "device\n", __func__);
return 1;
}
if (window == NULL) {
i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n",
- __func__);
+ __func__);
goto fail1;
}
*ovrfl_window = window;
return 0;
- fail1:
+fail1:
pci_release_regions(dev);
#ifdef CORRECT_BIOS
- fail0:
+fail0:
pci_disable_device(dev);
#endif
/* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
drc = readl(ovrfl_window + I82875P_DRC);
nr_chans = dual_channel_active(drc) + 1;
mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
- nr_chans);
+ nr_chans);
if (!mci) {
rc = -ENOMEM;
debugf3("%s(): success\n", __func__);
return 0;
- fail1:
+fail1:
edac_mc_free(mci);
- fail0:
+fail0:
iounmap(ovrfl_window);
pci_release_regions(ovrfl_pdev);
/* returns count (>= 0), or negative on error */
static int __devinit i82875p_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
int rc;
if (mci_pdev == NULL) {
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_82875_0, NULL);
+ PCI_DEVICE_ID_INTEL_82875_0, NULL);
if (!mci_pdev) {
debugf0("875p pci_get_device fail\n");
return 0;
- fail1:
+fail1:
pci_unregister_driver(&i82875p_driver);
- fail0:
+fail0:
if (mci_pdev != NULL)
pci_dev_put(mci_pdev);
static struct edac_pci_ctl_info *r82600_pci;
static void r82600_get_error_info(struct mem_ctl_info *mci,
- struct r82600_error_info *info)
+ struct r82600_error_info *info)
{
struct pci_dev *pdev;
}
static int r82600_process_error_info(struct mem_ctl_info *mci,
- struct r82600_error_info *info,
- int handle_errors)
+ struct r82600_error_info *info,
+ int handle_errors)
{
int error_found;
u32 eapaddr, page;
if (handle_errors)
edac_mc_handle_ce(mci, page, 0, /* not avail */
- syndrome, edac_mc_find_csrow_by_page(mci, page), 0, /* channel */
- mci->ctl_name);
+ syndrome,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, mci->ctl_name);
}
if (info->eapr & BIT(1)) { /* UE? */
if (handle_errors)
/* 82600 doesn't give enough info */
edac_mc_handle_ue(mci, page, 0,
- edac_mc_find_csrow_by_page(mci, page),
- mci->ctl_name);
+ edac_mc_find_csrow_by_page(mci, page),
+ mci->ctl_name);
}
return error_found;
}
static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
- u8 dramcr)
+ u8 dramcr)
{
struct csrow_info *csrow;
int index;
debugf3("%s(): success\n", __func__);
return 0;
- fail:
+fail:
edac_mc_free(mci);
return -ENODEV;
}
/* returns count (>= 0), or negative on error */
static int __devinit r82600_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+ const struct pci_device_id *ent)
{
debugf0("%s()\n", __func__);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
- "on behalf of EADS Astrium");
+ "on behalf of EADS Astrium");
MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
module_param(disable_hardware_scrub, bool, 0644);