void *cpu, dma_addr_t dvma)
{
struct iommu *iommu;
- iopte_t *iopte;
unsigned long flags, order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
- iopte = iommu->page_table +
- ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
spin_lock_irqsave(&iommu->lock, flags);
static irqreturn_t ldc_rx(int irq, void *dev_id)
{
struct ldc_channel *lp = dev_id;
- unsigned long orig_state, hv_err, flags;
+ unsigned long orig_state, flags;
unsigned int event_mask;
spin_lock_irqsave(&lp->lock, flags);
orig_state = lp->chan_state;
- hv_err = sun4v_ldc_rx_get_state(lp->id,
- &lp->rx_head,
- &lp->rx_tail,
- &lp->chan_state);
+
+ /* We should probably check for hypervisor errors here and
+ * reset the LDC channel if we get one.
+ */
+ sun4v_ldc_rx_get_state(lp->id,
+ &lp->rx_head,
+ &lp->rx_tail,
+ &lp->chan_state);
ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);
static irqreturn_t ldc_tx(int irq, void *dev_id)
{
struct ldc_channel *lp = dev_id;
- unsigned long flags, hv_err, orig_state;
+ unsigned long flags, orig_state;
unsigned int event_mask = 0;
spin_lock_irqsave(&lp->lock, flags);
orig_state = lp->chan_state;
- hv_err = sun4v_ldc_tx_get_state(lp->id,
- &lp->tx_head,
- &lp->tx_tail,
- &lp->chan_state);
+
+ /* We should probably check for hypervisor errors here and
+ * reset the LDC channel if we get one.
+ */
+ sun4v_ldc_tx_get_state(lp->id,
+ &lp->tx_head,
+ &lp->tx_tail,
+ &lp->chan_state);
ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);
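For both the RX and TX interrupt paths above, the new comments suggest but do not implement hypervisor error handling. A minimal sketch of what that could look like follows; checking the hv_err return of sun4v_ldc_rx_get_state() is real hypervisor-API usage, but ldc_reset() is only an illustrative name for whatever channel-reset helper would be used, not something this patch adds.

	unsigned long hv_err;

	hv_err = sun4v_ldc_rx_get_state(lp->id,
					&lp->rx_head,
					&lp->rx_tail,
					&lp->chan_state);
	if (hv_err) {
		/* Hypothetical: the hypervisor call failed, so tear the
		 * channel down and signal a reset to the client.
		 */
		ldc_reset(lp);		/* assumed helper, for illustration */
	}

The same pattern would apply to the TX path with sun4v_ldc_tx_get_state().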
* humanoid.
*/
err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
+ (void) err;
}
list_for_each_entry(child_bus, &bus->children, node)
pci_bus_register_of_sysfs(child_bus);
unsigned int bus = bus_dev->number;
unsigned int device = PCI_SLOT(devfn);
unsigned int func = PCI_FUNC(devfn);
- unsigned long ret;
if (config_out_of_range(pbm, bus, devfn, where)) {
/* Do nothing. */
} else {
- ret = pci_sun4v_config_put(devhandle,
- HV_PCI_DEVICE_BUILD(bus, device, func),
- where, size, value);
+ /* We don't check for hypervisor errors here, but perhaps
+ * we should, and set our return value based on what kind
+ * of error the hypervisor returns.
+ */
+ pci_sun4v_config_put(devhandle,
+ HV_PCI_DEVICE_BUILD(bus, device, func),
+ where, size, value);
}
return PCIBIOS_SUCCESSFUL;
}
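Similarly, the config-space write above discards the hypervisor status. If it were propagated, one way to fold it into the PCI return value might be the sketch below; the particular HV_* to PCIBIOS_* mapping is an assumption for illustration, not something this patch implements.

	unsigned long hv_err;

	hv_err = pci_sun4v_config_put(devhandle,
				      HV_PCI_DEVICE_BUILD(bus, device, func),
				      where, size, value);
	switch (hv_err) {
	case HV_EOK:
		return PCIBIOS_SUCCESSFUL;
	case HV_ENOACCESS:		/* illustrative mapping */
		return PCIBIOS_DEVICE_NOT_FOUND;
	default:
		return PCIBIOS_SET_FAILED;
	}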
static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
- unsigned long msiqid;
u64 val;
val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
- msiqid = (val & MSI_MAP_EQNUM);
val &= ~MSI_MAP_VALID;
const struct linux_prom64_registers *regs;
struct device_node *dp = op->dev.of_node;
const char *chipset_name;
- int is_pbm_a, err;
+ int err;
switch (chip_type) {
case PBM_CHIP_TYPE_TOMATILLO:
*/
regs = of_get_property(dp, "reg", NULL);
- is_pbm_a = ((regs[0].phys_addr & 0x00700000) == 0x00600000);
-
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
{
static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
struct iommu *iommu = pbm->iommu;
- unsigned long num_tsb_entries, sz, tsbsize;
+ unsigned long num_tsb_entries, sz;
u32 dma_mask, dma_offset;
const u32 *vdma;
dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
- tsbsize = num_tsb_entries * sizeof(iopte_t);
dma_offset = vdma[0];
unsigned long ret;
ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
- if (val != HV_EOK)
+ if (ret != HV_EOK)
write_pcr(val);
}
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
+#ifdef CONFIG_AUDITSYSCALL
if (unlikely(current->audit_context)) {
unsigned long tstate = regs->tstate;
int result = AUDITSC_SUCCESS;
audit_syscall_exit(result, regs->u_regs[UREG_I0]);
}
-
+#endif
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->u_regs[UREG_G1]);
void smp_synchronize_tick_client(void)
{
long i, delta, adj, adjust_latency = 0, done = 0;
- unsigned long flags, rt, master_time_stamp, bound;
+ unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
struct {
long rt; /* roundtrip time */
{
for (i = 0; i < NUM_ROUNDS; i++) {
delta = get_delta(&rt, &master_time_stamp);
- if (delta == 0) {
+ if (delta == 0)
done = 1; /* let's lock on to this... */
- bound = rt;
- }
if (!done) {
if (i > 0) {
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
void *pg_addr;
- int this_cpu;
u64 data0;
if (tlb_type == hypervisor)
return;
- this_cpu = get_cpu();
+ preempt_disable();
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes);
}
__local_flush_dcache_page(page);
- put_cpu();
+ preempt_enable();
}
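The get_cpu()/put_cpu() to preempt_disable()/preempt_enable() change is behaviour-preserving once the CPU number is unused: get_cpu() is essentially preempt_disable() followed by smp_processor_id(), as in the definitions below (include/linux/smp.h).

#define get_cpu()	({ preempt_disable(); smp_processor_id(); })
#define put_cpu()	preempt_enable()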
void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
- unsigned long fp, thread_base, ksp;
+ unsigned long fp, ksp;
struct thread_info *tp;
int count = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
flushw_all();
fp = ksp + STACK_BIAS;
- thread_base = (unsigned long) tp;
printk("Call Trace:\n");
do {