int busy = iop_chan_is_busy(iop_chan);
int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
- dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+ dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
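This patch is a mechanical cleanup: every __FUNCTION__, a GCC-specific spelling kept only for backward compatibility, becomes __func__, the predefined identifier standardized in C99. A minimal standalone illustration (plain userspace C; the function name is made up):

#include <stdio.h>

static void show(void)
{
	/* __func__ expands to the enclosing function's name,
	 * "show"; unlike __FUNCTION__ it is standard C99 */
	printf("%s\n", __func__);
}

int main(void)
{
	show();
	return 0;
}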
/* free completed slots from the chain starting with
* the oldest descriptor
*/
spin_unlock_bh(&iop_chan->lock);
dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
- __FUNCTION__, sw_desc->async_tx.cookie, sw_desc->idx);
+ __func__, sw_desc->async_tx.cookie, sw_desc->idx);
return cookie;
}
struct iop_adma_desc_slot *sw_desc, *grp_start;
int slot_cnt, slots_per_op;
- dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+ dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
spin_lock_bh(&iop_chan->lock);
slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
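Every prep routine in this file repeats the same shape: take the channel lock with the _bh variant (the cleanup tasklet takes the same lock from softirq context), ask a helper how many descriptor slots the operation needs, then carve them from the pool. A condensed sketch of that skeleton, assuming the pool allocator is iop_adma_alloc_slots() and the group_head field as used elsewhere in this driver; error handling is elided:

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		/* ... program grp_start for the requested operation ... */
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;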
BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
- __FUNCTION__, len);
+ __func__, len);
spin_lock_bh(&iop_chan->lock);
slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
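The BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)) guards enforce the engine's per-operation byte limit, so a caller with a larger transfer has to split it before submission. A hedged sketch of such splitting; issue_copy() is a hypothetical stand-in for whatever submits one bounded transfer, not part of this driver:

	/* hypothetical caller-side chunking */
	while (len) {
		size_t xfer = min_t(size_t, len, IOP_ADMA_MAX_BYTE_COUNT);
		int err = issue_copy(chan, dst, src, xfer);

		if (err)
			return err;
		dst += xfer;
		src += xfer;
		len -= xfer;
	}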
BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
- __FUNCTION__, len);
+ __func__, len);
spin_lock_bh(&iop_chan->lock);
slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
dev_dbg(iop_chan->device->common.dev,
"%s src_cnt: %d len: %u flags: %lx\n",
- __FUNCTION__, src_cnt, len, flags);
+ __func__, src_cnt, len, flags);
spin_lock_bh(&iop_chan->lock);
slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
return NULL;
dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
- __FUNCTION__, src_cnt, len);
+ __func__, src_cnt, len);
spin_lock_bh(&iop_chan->lock);
slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
iop_desc_set_zero_sum_byte_count(grp_start, len);
grp_start->xor_check_result = result;
pr_debug("\t%s: grp_start->xor_check_result: %p\n",
- __FUNCTION__, grp_start->xor_check_result);
+ __func__, grp_start->xor_check_result);
sw_desc->unmap_src_cnt = src_cnt;
sw_desc->unmap_len = len;
while (src_cnt--)
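For the zero-sum (XOR-validate) operation the descriptor stores a result pointer, xor_check_result; on completion it holds zero when the sources XOR to zero and a nonzero value otherwise. A hedged caller-side sketch; prep_zero_sum() and wait_for_tx() are hypothetical stand-ins for the real prep and completion-wait paths:

	/* hypothetical consumer of the zero-sum result */
	u32 result = ~0;
	struct dma_async_tx_descriptor *tx;

	tx = prep_zero_sum(chan, srcs, src_cnt, len, &result);
	wait_for_tx(tx);
	if (result == 0)
		pr_debug("sources XOR to zero (parity intact)\n");
	else
		pr_debug("nonzero XOR sum (parity mismatch)\n");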
iop_chan->last_used = NULL;
dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
- __FUNCTION__, iop_chan->slots_allocated);
+ __func__, iop_chan->slots_allocated);
spin_unlock_bh(&iop_chan->lock);
/* one is ok since we left it on there on purpose */
{
struct iop_adma_chan *chan = data;
- dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+ dev_dbg(chan->device->common.dev, "%s\n", __func__);
tasklet_schedule(&chan->irq_tasklet);
{
struct iop_adma_chan *chan = data;
- dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
+ dev_dbg(chan->device->common.dev, "%s\n", __func__);
tasklet_schedule(&chan->irq_tasklet);
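Both interrupt handlers stay minimal: log, schedule the channel tasklet, return. Descriptor cleanup then runs in softirq context rather than hard-irq context. A minimal sketch of that split; my_adma_irq() and my_adma_tasklet() are illustrative names, not the driver's:

#include <linux/device.h>
#include <linux/interrupt.h>

static void my_adma_tasklet(unsigned long data)
{
	struct iop_adma_chan *chan = (struct iop_adma_chan *) data;

	/* softirq context: safe place to walk the descriptor
	 * chain and retire completed entries (elided) */
	dev_dbg(chan->device->common.dev, "%s\n", __func__);
}

static irqreturn_t my_adma_irq(int irq, void *data)
{
	struct iop_adma_chan *chan = data;

	/* hard-irq context: defer everything non-trivial */
	tasklet_schedule(&chan->irq_tasklet);
	return IRQ_HANDLED;
}

The tasklet would be wired up once at channel setup, e.g. tasklet_init(&chan->irq_tasklet, my_adma_tasklet, (unsigned long) chan).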
int err = 0;
struct iop_adma_chan *iop_chan;
- dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
+ dev_dbg(device->common.dev, "%s\n", __func__);
src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);
if (!src)
int err = 0;
struct iop_adma_chan *iop_chan;
- dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
+ dev_dbg(device->common.dev, "%s\n", __func__);
for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
}
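The self-tests build their buffers up front: a kzalloc'd linear buffer for the memcpy test and an array of freshly allocated pages for the xor test. A sketch of the xor-source setup with the error unwinding such a loop needs; NUM_SRC stands in for IOP_ADMA_NUM_SRC_TEST and the fill pattern is illustrative:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

#define NUM_SRC 4 /* stand-in for IOP_ADMA_NUM_SRC_TEST */

static int alloc_xor_srcs(struct page **xor_srcs)
{
	int i;

	for (i = 0; i < NUM_SRC; i++) {
		xor_srcs[i] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[i]) {
			/* unwind pages allocated so far */
			while (--i >= 0)
				__free_page(xor_srcs[i]);
			return -ENOMEM;
		}
		/* give each source a distinct, checkable pattern */
		memset(page_address(xor_srcs[i]), 1 << i, PAGE_SIZE);
	}
	return 0;
}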
dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
- __FUNCTION__, adev->dma_desc_pool_virt,
+ __func__, adev->dma_desc_pool_virt,
(void *) adev->dma_desc_pool);
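The virt/phys pair logged here is characteristic of a coherent DMA allocation: one call yields a CPU virtual address for the driver and a bus address for the engine. A minimal sketch of obtaining such a pool; POOL_SIZE is a hypothetical size, not the driver's actual value:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define POOL_SIZE PAGE_SIZE /* hypothetical pool size */

static void *alloc_desc_pool(struct device *dev, dma_addr_t *phys)
{
	/* coherent memory: descriptor writes are visible to the
	 * hardware without explicit cache maintenance */
	return dma_alloc_coherent(dev, POOL_SIZE, phys, GFP_KERNEL);
}

The matching release is dma_free_coherent(dev, POOL_SIZE, virt, phys).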
adev->id = plat_data->hw_id;
dma_cookie_t cookie;
int slot_cnt, slots_per_op;
- dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+ dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
spin_lock_bh(&iop_chan->lock);
slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
dma_cookie_t cookie;
int slot_cnt, slots_per_op;
- dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
+ dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
spin_lock_bh(&iop_chan->lock);
slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);