*/
#include "ehca_tools.h"
+#include "ehca_iverbs.h"
#include "hcp_if.h"
int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
ib_device);
struct hipz_query_hca *rblock;
- rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
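+ /* common allocator keeps the block 4K aligned on 64K-page kernels too */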
+ rblock = ehca_alloc_fw_ctrlblock();
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
return -ENOMEM;
= min_t(int, rblock->max_total_mcast_qp_attach, INT_MAX);
query_device1:
- kfree(rblock);
+ ehca_free_fw_ctrlblock(rblock);
return ret;
}
ib_device);
struct hipz_query_port *rblock;
- rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ rblock = ehca_alloc_fw_ctrlblock();
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
return -ENOMEM;
props->active_speed = 0x1;
query_port1:
- kfree(rblock);
+ ehca_free_fw_ctrlblock(rblock);
return ret;
}
return -EINVAL;
}
- rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ rblock = ehca_alloc_fw_ctrlblock();
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
return -ENOMEM;
memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16));
query_pkey1:
- kfree(rblock);
+ ehca_free_fw_ctrlblock(rblock);
return ret;
}
return -EINVAL;
}
- rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ rblock = ehca_alloc_fw_ctrlblock();
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
return -ENOMEM;
memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));
query_gid1:
- kfree(rblock);
+ ehca_free_fw_ctrlblock(rblock);
return ret;
}
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hipz_fns.h"
+#include "ipz_pt_fn.h"
#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1)
#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM(8,31)
u64 *rblock;
unsigned long block_count;
- rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ rblock = ehca_alloc_fw_ctrlblock();
if (!rblock) {
ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
ret = -ENOMEM;
goto error_data1;
}
+ /* rblock must be 4K aligned and should be 4K large */
ret = hipz_h_error_data(shca->ipz_hca_handle,
resource,
rblock,
&block_count);
- if (ret == H_R_STATE) {
+ if (ret == H_R_STATE)
ehca_err(&shca->ib_device,
"No error data is available: %lx.", resource);
- }
else if (ret == H_SUCCESS) {
int length;
length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);
- if (length > PAGE_SIZE)
- length = PAGE_SIZE;
+ if (length > EHCA_PAGESIZE)
+ length = EHCA_PAGESIZE;
print_error_data(shca, data, rblock, length);
- }
- else {
+ } else
ehca_err(&shca->ib_device,
"Error data could not be fetched: %lx", resource);
- }
- kfree(rblock);
+ ehca_free_fw_ctrlblock(rblock);
error_data1:
return ret;
int ehca_munmap(unsigned long addr, size_t len);
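+
+/*
+ * Allocation helpers for firmware control blocks.  These blocks must be
+ * 4K aligned and should be 4K large (see the error data path in
+ * ehca_irq.c).  With 4K kernel pages a zeroed page satisfies this; with
+ * CONFIG_PPC_64K_PAGES a dedicated slab cache set up in ehca_main.c
+ * provides suitably aligned blocks instead.
+ */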
+#ifdef CONFIG_PPC_64K_PAGES
+void *ehca_alloc_fw_ctrlblock(void);
+void ehca_free_fw_ctrlblock(void *ptr);
+#else
+#define ehca_alloc_fw_ctrlblock() ((void *) get_zeroed_page(GFP_KERNEL))
+#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
+#endif
+
#endif
* POSSIBILITY OF SUCH DAMAGE.
*/
+#ifdef CONFIG_PPC_64K_PAGES
+#include <linux/slab.h>
+#endif
#include "ehca_classes.h"
#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0017");
+MODULE_VERSION("SVNEHCA_0018");
int ehca_open_aqp1 = 0;
int ehca_debug_level = 0;
DEFINE_IDR(ehca_qp_idr);
DEFINE_IDR(ehca_cq_idr);
+
static struct list_head shca_list; /* list of all registered ehcas */
static spinlock_t shca_list_lock;
static struct timer_list poll_eqs_timer;
+#ifdef CONFIG_PPC_64K_PAGES
+static struct kmem_cache *ctblk_cache;
+
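+/* on 64K-page kernels, allocate control blocks from a slab cache that
+ * guarantees the alignment (H_CB_ALIGNMENT) the firmware interface expects */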
+void *ehca_alloc_fw_ctrlblock(void)
+{
+ void *ret = kmem_cache_zalloc(ctblk_cache, GFP_KERNEL);
+ if (!ret)
+ ehca_gen_err("Out of memory for ctblk");
+ return ret;
+}
+
+void ehca_free_fw_ctrlblock(void *ptr)
+{
+ if (ptr)
+ kmem_cache_free(ctblk_cache, ptr);
+}
+#endif
+
static int ehca_create_slab_caches(void)
{
int ret;
goto create_slab_caches5;
}
+#ifdef CONFIG_PPC_64K_PAGES
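+ /* objects of EHCA_PAGESIZE bytes, aligned to H_CB_ALIGNMENT, so that
+ * control blocks keep their required alignment on 64K-page kernels */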
+ ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
+ EHCA_PAGESIZE, H_CB_ALIGNMENT,
+ SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (!ctblk_cache) {
+ ehca_gen_err("Cannot create ctblk SLAB cache.");
+ ehca_cleanup_mrmw_cache();
+ goto create_slab_caches5;
+ }
+#endif
return 0;
create_slab_caches5:
ehca_cleanup_qp_cache();
ehca_cleanup_cq_cache();
ehca_cleanup_pd_cache();
+#ifdef CONFIG_PPC_64K_PAGES
+ if (ctblk_cache)
+ kmem_cache_destroy(ctblk_cache);
+#endif
}
#define EHCA_HCAAVER EHCA_BMASK_IBM(32,39)
u64 h_ret;
struct hipz_query_hca *rblock;
- rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ rblock = ehca_alloc_fw_ctrlblock();
if (!rblock) {
ehca_gen_err("Cannot allocate rblock memory.");
return -ENOMEM;
shca->sport[1].rate = IB_RATE_30_GBPS;
num_ports1:
- kfree(rblock);
+ ehca_free_fw_ctrlblock(rblock);
return ret;
}
int ret = 0;
struct hipz_query_hca *rblock;
- rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ rblock = ehca_alloc_fw_ctrlblock();
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
return -ENOMEM;
memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64));
init_node_guid1:
- kfree(rblock);
+ ehca_free_fw_ctrlblock(rblock);
return ret;
}
\
shca = dev->driver_data; \
\
- rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); \
+ rblock = ehca_alloc_fw_ctrlblock(); \
if (!rblock) { \
dev_err(dev, "Can't allocate rblock memory."); \
return 0; \
\
if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
dev_err(dev, "Can't query device properties"); \
- kfree(rblock); \
+ ehca_free_fw_ctrlblock(rblock); \
return 0; \
} \
\
data = rblock->name; \
- kfree(rblock); \
+ ehca_free_fw_ctrlblock(rblock); \
\
if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1)) \
return snprintf(buf, 256, "1\n"); \
int ret;
printk(KERN_INFO "eHCA Infiniband Device Driver "
- "(Rel.: SVNEHCA_0017)\n");
+ "(Rel.: SVNEHCA_0018)\n");
idr_init(&ehca_qp_idr);
idr_init(&ehca_cq_idr);
spin_lock_init(&ehca_qp_idr_lock);
u32 i;
u64 *kpage;
- kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
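+ /* the page list handed to the firmware is a control block, so it
+ * uses the aligned allocator as well */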
+ kpage = ehca_alloc_fw_ctrlblock();
if (!kpage) {
ehca_err(&shca->ib_device, "kpage alloc failed");
ret = -ENOMEM;
ehca_reg_mr_rpages_exit1:
- kfree(kpage);
+ ehca_free_fw_ctrlblock(kpage);
ehca_reg_mr_rpages_exit0:
if (ret)
ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
ehca_mrmw_map_acl(acl, &hipz_acl);
ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
- kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+ kpage = ehca_alloc_fw_ctrlblock();
if (!kpage) {
ehca_err(&shca->ib_device, "kpage alloc failed");
ret = -ENOMEM;
}
ehca_rereg_mr_rereg1_exit1:
- kfree(kpage);
+ ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
if ( ret && (ret != -EAGAIN) )
ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
unsigned long spl_flags = 0;
/* do query_qp to obtain current attr values */
- mqpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
- if (mqpcb == NULL) {
+ mqpcb = ehca_alloc_fw_ctrlblock();
+ if (!mqpcb) {
ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
"ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
return -ENOMEM;
}
modify_qp_exit1:
- kfree(mqpcb);
+ ehca_free_fw_ctrlblock(mqpcb);
return ret;
}
return -EINVAL;
}
- qpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL );
+ qpcb = ehca_alloc_fw_ctrlblock();
if (!qpcb) {
ehca_err(qp->device,"Out of memory for qpcb "
"ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
query_qp_exit1:
- kfree(qpcb);
+ ehca_free_fw_ctrlblock(qpcb);
return ret;
}