{
struct hfi1_ctxtdata *rcd;
unsigned long flags;
- unsigned i;
+ u16 i;
for (i = 0; i < dd->first_dyn_alloc_ctxt; i++) {
rcd = dd->rcd[i];
{
struct hfi1_ctxtdata *rcd;
unsigned long flags;
- unsigned i;
+ u16 i;
aspm_enable(dd);
static inline void aspm_init(struct hfi1_devdata *dd)
{
- unsigned i;
+ u16 i;
spin_lock_init(&dd->aspm_lock);
dd->aspm_supported = aspm_hw_l1_supported(dd);
static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
{
u32 rcvmask;
- int i;
+ u16 i;
/* enable all kernel contexts */
for (i = 0; i < dd->num_rcv_contexts; i++) {
return 0x1; /* if invalid, go with the minimum size */
}
-void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
+void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, u16 ctxt)
{
struct hfi1_ctxtdata *rcd;
u64 rcvctrl, reg;
write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
}
-int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
+int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, u16 ctxt, u16 jkey)
{
struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
unsigned sctxt;
return ret;
}
-int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
+int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, u16 ctxt)
{
struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
unsigned sctxt;
return ret;
}
-int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
+int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, u16 ctxt, u16 pkey)
{
struct hfi1_ctxtdata *rcd;
unsigned sctxt;
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
u32 type, unsigned long pa, u16 order);
void hfi1_quiet_serdes(struct hfi1_pportdata *ppd);
-void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt);
+void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, u16 ctxt);
u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp);
u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp);
int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which);
int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val);
-int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey);
-int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt);
-int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey);
+int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, u16 ctxt, u16 jkey);
+int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, u16 ctxt);
+int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, u16 ctxt, u16 pkey);
int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt);
void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality);
void hfi1_init_vnic_rsm(struct hfi1_devdata *dd);
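The rationale behind the prototype changes above is type consistency: context counts on this hardware are small (well under 2^16), so indexing contexts with a u16 keeps the index the same width as the ctxt parameters it feeds, with no implicit narrowing at the call sites. A minimal, hypothetical userspace sketch of that pattern (demo_devdata and demo_set_ctxt_jkey are made-up stand-ins, not driver symbols; the typedef mimics the kernel's u16):

  #include <stdint.h>
  #include <stdio.h>

  typedef uint16_t u16;   /* stand-in for the kernel typedef */

  /* Hypothetical miniature of a device struct: contexts are counted in 16 bits. */
  struct demo_devdata {
          u16 num_rcv_contexts;
  };

  /* Mirrors the converted prototypes above: the context index is u16 as well. */
  static int demo_set_ctxt_jkey(struct demo_devdata *dd, u16 ctxt, u16 jkey)
  {
          (void)dd;
          printf("ctxt %hu jkey 0x%hx\n", ctxt, jkey);
          return 0;
  }

  int main(void)
  {
          struct demo_devdata dd = { .num_rcv_contexts = 8 };
          u16 i;

          /*
           * Index and bound have the same width and signedness, so neither the
           * comparison nor the call involves a narrowing conversion; with an
           * int or unsigned index, GCC's -Wconversion would typically flag an
           * implicit truncation to u16 at the call.
           */
          for (i = 0; i < dd.num_rcv_contexts; i++)
                  demo_set_ctxt_jkey(&dd, i, 0x1234);

          return 0;
  }

The design point is simply that the index now matches the width of both the count it is compared against and the ctxt parameter it is passed to, so nothing can be silently truncated on the way in.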
return last;
}
-static inline void set_nodma_rtail(struct hfi1_devdata *dd, u8 ctxt)
+static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
{
- int i;
+ u16 i;
/*
* For dynamically allocated kernel contexts (like vnic) switch
&handle_receive_interrupt_nodma_rtail;
}
-static inline void set_dma_rtail(struct hfi1_devdata *dd, u8 ctxt)
+static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt)
{
- int i;
+ u16 i;
/*
* For dynamically allocated kernel contexts (like vnic) switch
void set_all_slowpath(struct hfi1_devdata *dd)
{
- int i;
+ u16 i;
/* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
linkstate_active_work);
struct hfi1_devdata *dd = ppd->dd;
- int i;
+ u16 i;
/* Received non-SC15 packet implies neighbor_normal */
ppd->neighbor_normal = 1;
*/
int hfi1_reset_device(int unit)
{
- int ret, i;
+ int ret;
+ u16 i;
struct hfi1_devdata *dd = hfi1_lookup(unit);
struct hfi1_pportdata *ppd;
unsigned long flags;
static int find_sub_ctxt(struct hfi1_filedata *fd,
const struct hfi1_user_info *uinfo)
{
- int i;
+ u16 i;
struct hfi1_devdata *dd = fd->dd;
u16 subctxt;
struct hfi1_user_info *uinfo)
{
struct hfi1_ctxtdata *uctxt;
- unsigned int ctxt;
+ u16 ctxt;
int ret, numa;
if (dd->flags & HFI1_FROZEN) {
{
struct hfi1_ctxtdata *uctxt;
struct hfi1_devdata *dd = ppd->dd;
- unsigned ctxt;
+ u16 ctxt;
int ret = 0;
unsigned long flags;
struct kref kref;
/* Device context index */
- unsigned ctxt;
+ u16 ctxt;
/*
* non-zero if ctxt can be shared, and defines the maximum number of
* sub-contexts for this device context.
int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd);
int hfi1_create_ctxts(struct hfi1_devdata *dd);
-struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
+struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u16 ctxt,
int numa);
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
*/
int hfi1_create_ctxts(struct hfi1_devdata *dd)
{
- unsigned i;
+ u16 i;
int ret;
/* Control context has to be always 0 */
/*
* Common code for user and kernel context setup.
*/
-struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
+struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u16 ctxt,
int numa)
{
struct hfi1_devdata *dd = ppd->dd;
static void enable_chip(struct hfi1_devdata *dd)
{
u32 rcvmask;
- u32 i;
+ u16 i;
/* enable PIO send */
pio_send_control(dd, PSC_GLOBAL_ENABLE);
int hfi1_init(struct hfi1_devdata *dd, int reinit)
{
int ret = 0, pidx, lastfail = 0;
- unsigned i, len;
+ unsigned long len;
+ u16 i;
struct hfi1_ctxtdata *rcd;
struct hfi1_pportdata *ppd;
);
TRACE_EVENT(hfi1_receive_interrupt,
- TP_PROTO(struct hfi1_devdata *dd, u32 ctxt),
+ TP_PROTO(struct hfi1_devdata *dd, u16 ctxt),
TP_ARGS(dd, ctxt),
TP_STRUCT__entry(DD_DEV_ENTRY(dd)
__field(u32, ctxt)
extern uint extended_psn;
struct hfi1_user_sdma_pkt_q {
- unsigned ctxt;
+ u16 ctxt;
u16 subctxt;
u16 n_max_reqs;
atomic_t n_reqs;
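One caveat on narrowing the loop counters themselves, not specific to any single hunk above: a u16 counter is only safe because the bounds it runs to (per-device context counts) stay far below 65535. If a counter narrower than its bound ever met a bound above 65535, the comparison could never become false. A small hypothetical sketch of that failure mode, with made-up values unrelated to any real driver field:

  #include <stdint.h>
  #include <stdio.h>

  typedef uint16_t u16;

  int main(void)
  {
          unsigned int wide_bound = 70000;  /* hypothetical, wider than 16 bits */
          u16 small_bound = 160;            /* context counts stay tiny */
          u16 i;
          unsigned long visits = 0;

          /* Safe: the bound fits in 16 bits, so i can always reach it. */
          for (i = 0; i < small_bound; i++)
                  visits++;
          printf("visited %lu contexts\n", visits);

          /*
           * Unsafe pattern, shown only as a comment: i would wrap from 65535
           * back to 0 and never reach 70000, so this loop would not terminate.
           *
           *      for (i = 0; i < wide_bound; i++)
           *              ;
           */
          (void)wide_bound;
          return 0;
  }

The loops touched in the hunks above iterate per-device context counts, so they do not have that shape; the u16 conversion there is a pure consistency change.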
struct hfi1_ctxtdata **vnic_ctxt)
{
struct hfi1_ctxtdata *uctxt;
- unsigned int ctxt;
+ u16 ctxt;
int ret;
if (dd->flags & HFI1_FROZEN)