return FLUSH_COMPLETE;
}
-static int wait_completion(struct bau_desc *bau_desc, struct bau_control *bcp, long try)
-{
- if (bcp->uvhub_version == UV_BAU_V1)
- return uv1_wait_completion(bau_desc, bcp, try);
- else
- return uv2_3_wait_completion(bau_desc, bcp, try);
-}
-
/*
* Our retries are blocked by all destination sw ack resources being
* in use, and a timeout is pending. In that case hardware immediately
write_mmr_activation(index);
try++;
- completion_stat = wait_completion(bau_desc, bcp, try);
+ completion_stat = ops.wait_completion(bau_desc, bcp, try);
handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
return 1;
}
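
The hunk above is the core of the change: instead of the removed wait_completion() dispatcher branching on bcp->uvhub_version for every broadcast, the send path now calls through the per-hub ops table that is filled in once at init time. The matching header change is not part of this excerpt; below is a minimal sketch of the new bau_operations member, assuming only the prototype already implied by the removed dispatcher and the ops.wait_completion() call site above:

/* Forward declarations so the sketch stands alone; the real types come
 * from the UV BAU header.
 */
struct bau_desc;
struct bau_control;

struct bau_operations {
	/* ... existing MMR accessor hooks elided ... */
	/* New hook: per-hub completion wait, same prototype as the
	 * old wait_completion() dispatcher removed above.
	 */
	int (*wait_completion)(struct bau_desc *bau_desc,
			       struct bau_control *bcp, long try);
};

Because uv1_wait_completion() and uv2_3_wait_completion() already take (bau_desc, bcp, try) and return int, they can be plugged into the tables below unchanged.
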
-static const struct bau_operations uv123_bau_ops __initconst = {
+static const struct bau_operations uv1_bau_ops __initconst = {
+ .bau_gpa_to_offset = uv_gpa_to_offset,
+ .read_l_sw_ack = read_mmr_sw_ack,
+ .read_g_sw_ack = read_gmmr_sw_ack,
+ .write_l_sw_ack = write_mmr_sw_ack,
+ .write_g_sw_ack = write_gmmr_sw_ack,
+ .write_payload_first = write_mmr_payload_first,
+ .write_payload_last = write_mmr_payload_last,
+ .wait_completion = uv1_wait_completion,
+};
+
+static const struct bau_operations uv2_3_bau_ops __initconst = {
.bau_gpa_to_offset = uv_gpa_to_offset,
.read_l_sw_ack = read_mmr_sw_ack,
 .read_g_sw_ack = read_gmmr_sw_ack,
 .write_l_sw_ack = write_mmr_sw_ack,
 .write_g_sw_ack = write_gmmr_sw_ack,
.write_payload_first = write_mmr_payload_first,
.write_payload_last = write_mmr_payload_last,
+ .wait_completion = uv2_3_wait_completion,
};
static const struct bau_operations uv4_bau_ops __initconst = {
.write_g_sw_ack = write_gmmr_proc_sw_ack,
.write_payload_first = write_mmr_proc_payload_first,
.write_payload_last = write_mmr_proc_payload_last,
+ .wait_completion = uv2_3_wait_completion,
};
if (is_uv4_hub())
ops = uv4_bau_ops;
else if (is_uv3_hub())
- ops = uv123_bau_ops;
+ ops = uv2_3_bau_ops;
else if (is_uv2_hub())
- ops = uv123_bau_ops;
+ ops = uv2_3_bau_ops;
else if (is_uv1_hub())
- ops = uv123_bau_ops;
+ ops = uv1_bau_ops;
for_each_possible_cpu(cur_cpu) {
mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);