if (ret) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&wait_q, &wait);
- printk (KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] "
- "on \"%s\" failed\n",
- (unsigned long long)erase.addr, (unsigned long long)erase.len, mtd->name);
+ printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
+ (unsigned long long)erase.addr,
+ (unsigned long long)erase.len, mtd->name);
return ret;
}
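For context, the failure path above is the tail of the usual synchronous-erase idiom: the caller parks itself on a local wait queue, submits the erase with a completion callback, and sleeps until the callback wakes it up. Below is a minimal sketch of how such a helper typically looks, assuming the old function-pointer MTD interface (mtd->erase plus erase_info.callback); only the error path in the middle corresponds to the hunk above, the callback name and the surrounding locals are illustrative assumptions.

/* Sketch only; needs <linux/mtd/mtd.h>, <linux/sched.h> and <linux/wait.h>. */
static void mtdoops_erase_callback(struct erase_info *done)
{
	wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;

	wake_up(wait_q);
}

static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
{
	struct erase_info erase;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	int ret;

	init_waitqueue_head(&wait_q);
	erase.mtd = mtd;
	erase.callback = mtdoops_erase_callback;
	erase.addr = offset;
	erase.len = mtd->erasesize;
	erase.priv = (u_long)&wait_q;

	/* Queue ourselves before submitting so the wake-up cannot be missed. */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&wait_q, &wait);

	ret = mtd->erase(mtd, &erase);
	if (ret) {
		/* This is the error path shown in the hunk above. */
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wait_q, &wait);
		return ret;
	}

	/* Sleep until mtdoops_erase_callback() reports completion. */
	schedule();
	remove_wait_queue(&wait_q, &wait);

	return 0;
}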
ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
&retlen, (u_char *) &count);
- if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
- printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
- ", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
- retlen, ret);
+ if (retlen != 4 || (ret < 0 && ret != -EUCLEAN)) {
+ printk(KERN_ERR "mtdoops: read failure at %d (%td of 4 read), err %d\n",
+ cxt->nextpage * OOPS_PAGE_SIZE, retlen, ret);
schedule_work(&cxt->work_erase);
return;
}
return;
}
- printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
- cxt->nextpage, cxt->nextcount);
+ printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
+ cxt->nextpage, cxt->nextcount);
cxt->ready = 1;
}
if (!ret)
break;
if (ret < 0) {
- printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n");
+ printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
return;
}
badblock:
- printk(KERN_WARNING "mtdoops: Bad block at %08x\n",
- cxt->nextpage * OOPS_PAGE_SIZE);
+ printk(KERN_WARNING "mtdoops: bad block at %08x\n",
+ cxt->nextpage * OOPS_PAGE_SIZE);
i++;
cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE);
if (cxt->nextpage >= cxt->oops_pages)
cxt->nextpage = 0;
- if (i == (cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE))) {
- printk(KERN_ERR "mtdoops: All blocks bad!\n");
+ if (i == cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE)) {
+ printk(KERN_ERR "mtdoops: all blocks bad!\n");
return;
}
}
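To make the skip arithmetic above concrete: each bad eraseblock advances cxt->nextpage by erasesize / OOPS_PAGE_SIZE record slots, wrapping to 0 at oops_pages, and the loop gives up once i reaches the number of eraseblocks in the partition. Assuming, say, a 64 KiB eraseblock and a 4 KiB OOPS_PAGE_SIZE (illustrative values, not given in this excerpt), one bad block skips 16 slots; a 1 MiB partition holds 256 slots in 16 eraseblocks, so after 16 consecutive bad blocks every block has been tried and the "all blocks bad" bail-out fires.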
ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
if (ret >= 0) {
- printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
+ printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
+ cxt->nextpage, cxt->nextcount);
cxt->ready = 1;
return;
}
- if (mtd->block_markbad && (ret == -EIO)) {
+ if (mtd->block_markbad && ret == -EIO) {
ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
if (ret < 0) {
- printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n");
+ printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
return;
}
}
cxt->writecount = 0;
- if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
- printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
- cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
+ if (retlen != OOPS_PAGE_SIZE || ret < 0)
+ printk(KERN_ERR "mtdoops: write failure at %d (%td of %d written), error %d\n",
+ cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
mtdoops_inc_counter(cxt);
}
struct mtdoops_context *cxt = container_of(work, struct mtdoops_context, work_write);
mtdoops_write(cxt, 0);
}
static void find_next_position(struct mtdoops_context *cxt)
{
for (page = 0; page < cxt->oops_pages; page++) {
ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 8, &retlen, (u_char *) &count[0]);
- if ((retlen != 8) || ((ret < 0) && (ret != -EUCLEAN))) {
- printk(KERN_ERR "mtdoops: Read failure at %d (%td of 8 read)"
- ", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret);
+ if (retlen != 8 || (ret < 0 && ret != -EUCLEAN)) {
+ printk(KERN_ERR "mtdoops: read failure at %d (%td of 8 read), err %d\n",
+ page * OOPS_PAGE_SIZE, retlen, ret);
continue;
}
if (maxcount == 0xffffffff) {
maxcount = count[0];
maxpos = page;
- } else if ((count[0] < 0x40000000) && (maxcount > 0xc0000000)) {
+ } else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
maxcount = count[0];
maxpos = page;
- } else if ((count[0] > maxcount) && (count[0] < 0xc0000000)) {
+ } else if (count[0] > maxcount && count[0] < 0xc0000000) {
maxcount = count[0];
maxpos = page;
- } else if ((count[0] > maxcount) && (count[0] > 0xc0000000)
- && (maxcount > 0x80000000)) {
+ } else if (count[0] > maxcount && count[0] > 0xc0000000
+ && maxcount > 0x80000000) {
maxcount = count[0];
maxpos = page;
}
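The comparison chain above treats the saved counter as a wrapping 32-bit sequence number rather than a plain maximum: a small value (below 0x40000000) beats a very large one (above 0xc0000000), since a tiny count sitting next to near-overflow counts means the counter has just wrapped around. For example, if the scan finds pages stamped 0xfffffffd, 0xfffffffe, 0x00000000 and 0x00000001, the page stamped 0x00000001 is selected as the newest even though it is numerically the smallest. The initial maxcount of 0xffffffff simply means "no page found yet", so the first page that reads back successfully always becomes the starting candidate.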
if (cxt->name && !strcmp(mtd->name, cxt->name))
cxt->mtd_index = mtd->index;
- if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
+ if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
return;
- if (mtd->size < (mtd->erasesize * 2)) {
- printk(KERN_ERR "MTD partition %d not big enough for mtdoops\n",
- mtd->index);
+ if (mtd->size < mtd->erasesize * 2) {
+ printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
+ mtd->index);
return;
}
if (mtd->erasesize < OOPS_PAGE_SIZE) {
- printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n",
- mtd->index);
+ printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
+ mtd->index);
return;
}
{
struct mtdoops_context *cxt = &oops_cxt;
- if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
+ if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
return;
cxt->mtd = NULL;
if (!cxt->ready || !mtd || cxt->writecount == 0)
return;
- /*
- * Once ready is 0 and we've held the lock no further writes to the
+ /*
+ * Once ready is 0 and we've held the lock no further writes to the
* buffer will happen
*/
spin_lock_irqsave(&cxt->writecount_lock, flags);
cxt->writecount = 8;
}
- if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
+ if (count + cxt->writecount > OOPS_PAGE_SIZE)
count = OOPS_PAGE_SIZE - cxt->writecount;
memcpy(cxt->oops_buf + cxt->writecount, s, count);
cxt->mtd_index = -1;
cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
- spin_lock_init(&cxt->writecount_lock);
-
if (!cxt->oops_buf) {
- printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n");
+ printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
return -ENOMEM;
}
+ spin_lock_init(&cxt->writecount_lock);
INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);