void dma_free_seg_table(unsigned long);
unsigned long *dma_alloc_cpu_table(void);
void dma_cleanup_tables(unsigned long *);
-void dma_update_cpu_trans(unsigned long *, void *, dma_addr_t, int);
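+/* helpers to walk the CPU translation tables and update a single entry */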
+unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr);
+void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags);
+
#endif
return pto;
}
-static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
+unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
unsigned long *sto, *pto;
unsigned int rtx, sx, px;
return &pto[px];
}
-void dma_update_cpu_trans(unsigned long *dma_table, void *page_addr,
- dma_addr_t dma_addr, int flags)
+void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
{
- unsigned long *entry;
-
- entry = dma_walk_cpu_trans(dma_table, dma_addr);
- if (!entry) {
- WARN_ON_ONCE(1);
- return;
- }
-
if (flags & ZPCI_PTE_INVALID) {
invalidate_pt_entry(entry);
} else {
u8 *page_addr = (u8 *) (pa & PAGE_MASK);
dma_addr_t start_dma_addr = dma_addr;
unsigned long irq_flags;
+ unsigned long *entry;
int i, rc = 0;
if (!nr_pages)
return -EINVAL;
spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
- if (!zdev->dma_table)
+ if (!zdev->dma_table) {
+ rc = -EINVAL;
goto no_refresh;
+ }
for (i = 0; i < nr_pages; i++) {
- dma_update_cpu_trans(zdev->dma_table, page_addr, dma_addr,
- flags);
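+		/*
+		 * The table walk may fail, e.g. when a required translation
+		 * table cannot be allocated; in that case undo whatever was
+		 * mapped so far.
+		 */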
+ entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+ if (!entry) {
+ rc = -ENOMEM;
+ goto undo_cpu_trans;
+ }
+ dma_update_cpu_trans(entry, page_addr, flags);
page_addr += PAGE_SIZE;
dma_addr += PAGE_SIZE;
}
rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
nr_pages * PAGE_SIZE);
+undo_cpu_trans:
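+	/*
+	 * If the tables could not be updated or refreshed while establishing
+	 * a new mapping, invalidate the entries set up so far so that no
+	 * partially valid mapping is left behind.
+	 */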
+ if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
+ flags = ZPCI_PTE_INVALID;
+ while (i-- > 0) {
+ page_addr -= PAGE_SIZE;
+ dma_addr -= PAGE_SIZE;
+ entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+ if (!entry)
+ break;
+ dma_update_cpu_trans(entry, page_addr, flags);
+ }
+ }
no_refresh:
spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
u8 *page_addr = (u8 *) (pa & PAGE_MASK);
dma_addr_t start_dma_addr = dma_addr;
unsigned long irq_flags, nr_pages, i;
+ unsigned long *entry;
int rc = 0;
if (dma_addr < s390_domain->domain.geometry.aperture_start ||
spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
for (i = 0; i < nr_pages; i++) {
- dma_update_cpu_trans(s390_domain->dma_table, page_addr,
- dma_addr, flags);
+ entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
+ if (!entry) {
+ rc = -ENOMEM;
+ goto undo_cpu_trans;
+ }
+ dma_update_cpu_trans(entry, page_addr, flags);
page_addr += PAGE_SIZE;
dma_addr += PAGE_SIZE;
}
break;
}
spin_unlock(&s390_domain->list_lock);
+
+undo_cpu_trans:
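+	/* revert the entries established above if the update failed */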
+ if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
+ flags = ZPCI_PTE_INVALID;
+ while (i-- > 0) {
+ page_addr -= PAGE_SIZE;
+ dma_addr -= PAGE_SIZE;
+ entry = dma_walk_cpu_trans(s390_domain->dma_table,
+ dma_addr);
+ if (!entry)
+ break;
+ dma_update_cpu_trans(entry, page_addr, flags);
+ }
+ }
spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);
return rc;