After updates to checkpatch, new warnings pop up; this patch fixes them.
Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
Acked-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
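The warnings fixed below fall into a few recurring checkpatch classes: a missing blank line after local variable declarations, open-coded size multiplication in kmalloc()/kzalloc() calls (replaced with kmalloc_array()/kcalloc()), "long unsigned int" instead of the canonical "unsigned long", stray spaces and missing newlines in pr_err() format strings, a redundant return at the end of a void function, and trailing semicolons in statement-like macro definitions. The following minimal sketch shows the preferred shape of each fix; example_alloc, EXAMPLE_BASE and EXAMPLE_SLOT are made-up names for illustration only and do not come from this driver:

#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Statement-like macro: no trailing ';' -- the call site supplies it. */
#define WRITE_EXAMPLE_REG(csr_addr, index, value) \
	ADF_CSR_WR(csr_addr, EXAMPLE_BASE + (EXAMPLE_SLOT * (index)), value)

static int example_alloc(unsigned long n)
{
	uint64_t *buf;

	/* checkpatch wants a blank line after the declaration block */
	buf = kcalloc(n, sizeof(*buf), GFP_KERNEL);
	if (!buf) {
		/* format string is '\n'-terminated, with no stray space */
		pr_err("QAT: example allocation failed\n");
		return -ENOMEM;
	}
	kfree(buf);
	return 0;
}

kcalloc() (and kmalloc_array() for non-zeroed memory) checks the n * size multiplication for overflow, which the open-coded form does not; that is why checkpatch flags it.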
pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
if (pending) {
int ctr = 0;
+
do {
msleep(100);
pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos)
{
struct adf_cfg_device_data *dev_cfg = sfile->private;
+
mutex_lock(&qat_cfg_read_lock);
return seq_list_start(&dev_cfg->sec_list, *pos);
}
static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos)
{
struct adf_cfg_device_data *dev_cfg = sfile->private;
+
return seq_list_next(v, &dev_cfg->sec_list, pos);
}
if (!ret) {
struct seq_file *seq_f = file->private_data;
+
seq_f->private = inode->i_private;
}
return ret;
{
uint32_t div = data >> shift;
uint32_t mult = div << shift;
+
return data - mult;
}
static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
{
int i = ADF_MIN_RING_SIZE;
+
for (; i <= ADF_MAX_RING_SIZE; i++)
if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
return i;
static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
{
struct adf_etr_ring_data *ring = sfile->private;
+
mutex_lock(&ring_read_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
if (!ret) {
struct seq_file *seq_f = file->private_data;
+
seq_f->private = inode->i_private;
}
return ret;
if (!ret) {
struct seq_file *seq_f = file->private_data;
+
seq_f->private = inode->i_private;
}
return ret;
if (blp != blpout) {
/* For out-of-place operation, dma unmap only the data */
int bufless = bufs - blout->num_mapped_bufs;
+
for (i = bufless; i < bufs; i++) {
dma_unmap_single(dev, blout->bufers[i].addr,
blout->bufers[i].len,
for_each_sg(sgl, sg, n, i) {
int y = i + bufs;
+
bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
DMA_BIDIRECTIONAL);
}
for_each_sg(sglout, sg, n, i) {
int y = i + bufs;
+
bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
DMA_BIDIRECTIONAL);
if (sgl != sglout && buflout) {
for_each_sg(sglout, sg, n, i) {
int y = i + bufs;
+
if (!dma_mapping_error(dev, buflout->bufers[y].addr))
dma_unmap_single(dev, buflout->bufers[y].addr,
buflout->bufers[y].len,
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
{
int i;
- long unsigned int bank;
- long unsigned int num_inst, num_msg_sym, num_msg_asym;
+ unsigned long bank;
+ unsigned long num_inst, num_msg_sym, num_msg_asym;
int msg_size;
struct qat_crypto_instance *inst;
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
unsigned short reg_num)
{
unsigned short reg_addr;
+
switch (type) {
case ICP_GPA_ABS:
case ICP_GPB_ABS:
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
for (i = 0; i < words_num; i++) {
unsigned int uwrd_lo, uwrd_hi, tmp;
+
uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
((data[i] & 0xff00) << 2) |
(0x3 << 8) | (data[i] & 0xff);
return -EFAULT;
if (endpc) {
unsigned int ctx_status;
+
qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT,
&ctx_status);
*endpc = ctx_status & handle->hal_handle->upc_mask;
alloc_inst_size = lm_init_header->size;
if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
alloc_inst_size = handle->hal_handle->max_ustore;
- micro_inst_arry = kmalloc(alloc_inst_size * sizeof(uint64_t),
- GFP_KERNEL);
+ micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t),
+ GFP_KERNEL);
if (!micro_inst_arry)
return -ENOMEM;
micro_inst_num = 0;
num_in_bytes -= 4;
ptr += 4;
}
- return;
}
static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
umem_init = umem_init_header->next;
while (umem_init) {
unsigned int addr, *value, size;
+
ae = umem_init->ae;
addr = umem_init->addr;
value = umem_init->value;
umem_init = *base;
while (umem_init) {
struct icp_qat_uof_batch_init *pre;
+
pre = umem_init;
umem_init = umem_init->next;
kfree(pre);
uint64_t *fill_data;
uof_image = image->img_ptr;
- fill_data = kzalloc(ICP_QAT_UCLO_MAX_USTORE * sizeof(uint64_t),
+ fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
GFP_KERNEL);
if (!fill_data)
return -EFAULT;
obj_hdr->file_buff, tab_name, NULL);
if (chunk_hdr) {
int hdr_size;
+
memcpy(&str_table->table_len, obj_hdr->file_buff +
chunk_hdr->offset, sizeof(str_table->table_len));
hdr_size = (char *)&str_table->strings - (char *)str_table;
for (i = 0; i < encap_ae->init_regsym_num; i++) {
unsigned int exp_res;
+
init_regsym = &encap_ae->init_regsym[i];
exp_res = init_regsym->value;
switch (init_regsym->init_type) {
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
unsigned int ae;
- obj_handle->uword_buf = kzalloc(UWORD_CPYBUF_SIZE * sizeof(uint64_t),
+ obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
GFP_KERNEL);
if (!obj_handle->uword_buf)
return -ENOMEM;
obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
obj_handle->obj_hdr->file_buff;
obj_handle->encap_uof_obj.chunk_hdr = (struct icp_qat_uof_chunkhdr *)
- obj_handle->obj_hdr->file_buff + sizeof(struct icp_qat_uof_objhdr);
+ (obj_handle->obj_hdr->file_buff + sizeof(struct icp_qat_uof_objhdr));
obj_handle->uword_in_bytes = 6;
obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE;
obj_handle->prod_rev = PID_MAJOR_REV |
(PID_MINOR_REV & handle->hal_handle->revision_id);
if (qat_uclo_check_uof_compat(obj_handle)) {
- pr_err("QAT: uof incompatible\n ");
+ pr_err("QAT: uof incompatible\n");
return -EINVAL;
}
obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
if (!(obj_handle->obj_hdr->file_buff) ||
!(qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
&(obj_handle->str_table)))) {
- pr_err("QAT: uof doesn't have effective images");
+ pr_err("QAT: uof doesn't have effective images\n");
goto out_err;
}
obj_handle->uimage_num =
if (!obj_handle->uimage_num)
goto out_err;
if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
- pr_err("QAT: Bad object\n ");
+ pr_err("QAT: Bad object\n");
goto out_check_uof_aemask_err;
}
qat_uclo_init_uword_num(handle);
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
- (ADF_ARB_REG_SLOT * index), value);
+ (ADF_ARB_REG_SLOT * index), value)
#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \
ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
- ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value);
+ ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \
ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \
- (ADF_ARB_REG_SIZE * index), value);
+ (ADF_ARB_REG_SIZE * index), value)
#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \
ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \
- (ADF_ARB_REG_SIZE * index), value);
+ (ADF_ARB_REG_SIZE * index), value)
#define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \
ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
ADF_ARB_WRK_2_SER_MAP_OFFSET) + \
- (ADF_ARB_REG_SIZE * index), value);
+ (ADF_ARB_REG_SIZE * index), value)
#define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \
ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
- ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value);
+ ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
int adf_init_arb(struct adf_accel_dev *accel_dev)
{
if (!entries)
return -ENOMEM;
- names = kzalloc(msix_num_entries * sizeof(char *), GFP_KERNEL);
+ names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
if (!names) {
kfree(entries);
return -ENOMEM;