NVKM_ENGINE_SEC,
NVKM_ENGINE_MSPDEC,
- NVKM_SUBDEV_NR,
+ NVKM_SUBDEV_NR
+};
+
+enum nvkm_device_type {
+ NVKM_DEVICE_PCI,
+ NVKM_DEVICE_AGP,
+ NVKM_DEVICE_PCIE,
+ NVKM_DEVICE_TEGRA,
};
struct nvkm_device {
const struct nvkm_device_func *func;
const struct nvkm_device_quirk *quirk;
struct device *dev;
+ enum nvkm_device_type type;
u64 handle;
const char *name;
const char *cfgopt;
struct mutex mutex;
int refcount;
- struct pci_dev *pdev;
- struct platform_device *platformdev;
-
void __iomem *pri;
struct nvkm_event event;
struct nvkm_device_func {
struct nvkm_device_pci *(*pci)(struct nvkm_device *);
void (*fini)(struct nvkm_device *, bool suspend);
resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
+ bool cpu_coherent;
};
struct nvkm_device_quirk {
_temp; \
})
-static inline bool
-nv_device_is_pci(struct nvkm_device *device)
-{
- return device->pdev != NULL;
-}
-
-static inline bool
-nv_device_is_cpu_coherent(struct nvkm_device *device)
-{
- return (!IS_ENABLED(CONFIG_ARM) && nv_device_is_pci(device));
-}
-
-static inline struct device *
-nv_device_base(struct nvkm_device *device)
-{
- return nv_device_is_pci(device) ? &device->pdev->dev :
- &device->platformdev->dev;
-}
-
-struct platform_device;
-
-enum nv_bus_type {
- NVKM_BUS_PCI,
- NVKM_BUS_PLATFORM,
-};
-
void nvkm_device_del(struct nvkm_device **);
struct nvkm_device_oclass {
getparam->value = device->info.chipset;
break;
case NOUVEAU_GETPARAM_PCI_VENDOR:
- if (nv_device_is_pci(nvxx_device(device)))
+ if (nvxx_device(device)->func->pci)
getparam->value = dev->pdev->vendor;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_PCI_DEVICE:
- if (nv_device_is_pci(nvxx_device(device)))
+ if (nvxx_device(device)->func->pci)
getparam->value = dev->pdev->device;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_BUS_TYPE:
- if (!nv_device_is_pci(nvxx_device(device)))
+ if (!nvxx_device(device)->func->pci)
getparam->value = 3;
else
if (drm_pci_device_is_agp(dev))
return len;
}
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
+bool nouveau_acpi_rom_supported(struct device *dev)
{
acpi_status status;
acpi_handle dhandle, rom_handle;
- dhandle = ACPI_HANDLE(&pdev->dev);
+ dhandle = ACPI_HANDLE(dev);
if (!dhandle)
return false;
void nouveau_unregister_dsm_handler(void);
void nouveau_switcheroo_optimus_dsm(void);
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+bool nouveau_acpi_rom_supported(struct device *);
void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
#else
static inline bool nouveau_is_optimus(void) { return false; };
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
static inline void nouveau_switcheroo_optimus_dsm(void) {}
-static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
+static inline bool nouveau_acpi_rom_supported(struct device *dev) { return false; }
static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
#endif
#include "nouveau_agp.h"
#include "nouveau_reg.h"
+#include <core/pci.h>
+
#if __OS_HAS_AGP
MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
static int nouveau_agpmode = -1;
get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
{
struct nvif_device *device = &drm->device;
+ struct pci_dev *pdev = nvxx_device(device)->func->pci(nvxx_device(device))->pdev;
struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list;
int agpmode = nouveau_agpmode;
unsigned long mode = info->mode;
while (agpmode == -1 && quirk->hostbridge_vendor) {
if (info->id_vendor == quirk->hostbridge_vendor &&
info->id_device == quirk->hostbridge_device &&
- nvxx_device(device)->pdev->vendor == quirk->chip_vendor &&
- nvxx_device(device)->pdev->device == quirk->chip_device) {
+ pdev->vendor == quirk->chip_vendor &&
+ pdev->device == quirk->chip_device) {
agpmode = quirk->mode;
NV_INFO(drm, "Forcing agp mode to %dX. Use agpmode to override.\n",
agpmode);
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &drm->ttm.bdev;
- if (!nv_device_is_cpu_coherent(nvxx_device(&drm->device)))
+ if (!nvxx_device(&drm->device)->func->cpu_coherent)
nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
nvbo->page_shift = 12;
return;
for (i = 0; i < ttm_dma->ttm.num_pages; i++)
- dma_sync_single_for_device(nv_device_base(device),
- ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE);
+ dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
+ PAGE_SIZE, DMA_TO_DEVICE);
}
void
return;
for (i = 0; i < ttm_dma->ttm.num_pages; i++)
- dma_sync_single_for_cpu(nv_device_base(device),
- ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
+ PAGE_SIZE, DMA_FROM_DEVICE);
}
int
drm = nouveau_bdev(ttm->bdev);
device = nvxx_device(&drm->device);
dev = drm->dev;
- pdev = nv_device_base(device);
+ pdev = device->dev;
/*
* Objects matching this condition have been marked as force_coherent,
* so use the DMA API for them.
*/
- if (!nv_device_is_cpu_coherent(device) &&
+ if (!nvxx_device(&drm->device)->func->cpu_coherent &&
ttm->caching_state == tt_uncached)
return ttm_dma_populate(ttm_dma, dev->dev);
drm = nouveau_bdev(ttm->bdev);
device = nvxx_device(&drm->device);
dev = drm->dev;
- pdev = nv_device_base(device);
+ pdev = device->dev;
/*
* Objects matching this condition have been marked as force_coherent,
* so use the DMA API for them.
*/
- if (!nv_device_is_cpu_coherent(device) &&
+ if (!nvxx_device(&drm->device)->func->cpu_coherent &&
ttm->caching_state == tt_uncached) {
ttm_dma_unpopulate(ttm_dma, dev->dev);
return;
struct nvif_device *device = &drm->device;
if (sysfs && sysfs->ctrl.priv) {
- device_remove_file(nv_device_base(nvxx_device(device)), &dev_attr_pstate);
+ device_remove_file(nvxx_device(device)->dev, &dev_attr_pstate);
nvif_object_fini(&sysfs->ctrl);
}
NVIF_IOCTL_NEW_V0_CONTROL, NULL, 0,
&sysfs->ctrl);
if (ret == 0)
- device_create_file(nv_device_base(nvxx_device(device)), &dev_attr_pstate);
+ device_create_file(nvxx_device(device)->dev, &dev_attr_pstate);
return 0;
}
int ret;
bits = nvxx_mmu(&drm->device)->dma_bits;
- if (nv_device_is_pci(nvxx_device(&drm->device))) {
+ if (nvxx_device(&drm->device)->func->pci) {
if (drm->agp.stat == ENABLED ||
!pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
bits = 32;
nv50_chan_destroy(&dmac->base);
if (dmac->ptr) {
- struct pci_dev *pdev = nvxx_device(device)->pdev;
- pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
+ struct device *dev = nvxx_device(device)->dev;
+ dma_free_coherent(dev, PAGE_SIZE, dmac->ptr, dmac->handle);
}
}
mutex_init(&dmac->lock);
- dmac->ptr = pci_alloc_consistent(nvxx_device(device)->pdev,
- PAGE_SIZE, &dmac->handle);
+ dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
+ &dmac->handle, GFP_KERNEL);
if (!dmac->ptr)
return -ENOMEM;
int
nvkm_device_ctor(const struct nvkm_device_func *func,
const struct nvkm_device_quirk *quirk,
- void *dev, enum nv_bus_type type, u64 handle,
+ struct device *dev, enum nvkm_device_type type, u64 handle,
const char *name, const char *cfg, const char *dbg,
bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device *device)
device->func = func;
device->quirk = quirk;
- switch (type) {
- case NVKM_BUS_PCI:
- device->pdev = dev;
- device->dev = &device->pdev->dev;
- break;
- case NVKM_BUS_PLATFORM:
- device->platformdev = dev;
- device->dev = &device->platformdev->dev;
- break;
- }
+ device->dev = dev;
+ device->type = type;
device->handle = handle;
device->cfgopt = cfg;
device->dbgopt = dbg;
.fini = nvkm_device_pci_fini,
.resource_addr = nvkm_device_pci_resource_addr,
.resource_size = nvkm_device_pci_resource_size,
+ .cpu_coherent = !IS_ENABLED(CONFIG_ARM),
};
int
*pdevice = &pdev->device;
pdev->pdev = pci_dev;
- return nvkm_device_ctor(&nvkm_device_pci_func, quirk,
- pci_dev, NVKM_BUS_PCI,
+ return nvkm_device_ctor(&nvkm_device_pci_func, quirk, &pci_dev->dev,
+ pci_is_pcie(pci_dev) ? NVKM_DEVICE_PCIE :
+ pci_find_capability(pci_dev, PCI_CAP_ID_AGP) ?
+ NVKM_DEVICE_AGP : NVKM_DEVICE_PCI,
(u64)pci_domain_nr(pci_dev->bus) << 32 |
pci_dev->bus->number << 16 |
PCI_SLOT(pci_dev->devfn) << 8 |
int nvkm_device_ctor(const struct nvkm_device_func *,
const struct nvkm_device_quirk *,
- void *, enum nv_bus_type type, u64 handle,
+ struct device *, enum nvkm_device_type, u64 handle,
const char *name, const char *cfg, const char *dbg,
bool detect, bool mmio, u64 subdev_mask,
struct nvkm_device *);
.fini = nvkm_device_tegra_fini,
.resource_addr = nvkm_device_tegra_resource_addr,
.resource_size = nvkm_device_tegra_resource_size,
+ .cpu_coherent = false,
};
int
tdev->pdev = pdev;
tdev->irq = -1;
- return nvkm_device_ctor(&nvkm_device_tegra_func, NULL, pdev,
- NVKM_BUS_PLATFORM, pdev->id, NULL,
+ return nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
+ NVKM_DEVICE_TEGRA, pdev->id, NULL,
cfg, dbg, detect, mmio, subdev_mask,
&tdev->device);
}
args->v0.platform = NV_DEVICE_INFO_V0_IGP;
break;
default:
- if (device->pdev) {
- if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP))
- args->v0.platform = NV_DEVICE_INFO_V0_AGP;
- else
- if (pci_is_pcie(device->pdev))
- args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
- else
- args->v0.platform = NV_DEVICE_INFO_V0_PCI;
- } else {
+ switch (device->type) {
+ case NVKM_DEVICE_PCI:
+ args->v0.platform = NV_DEVICE_INFO_V0_PCI;
+ break;
+ case NVKM_DEVICE_AGP:
+ args->v0.platform = NV_DEVICE_INFO_V0_AGP;
+ break;
+ case NVKM_DEVICE_PCIE:
+ args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
+ break;
+ case NVKM_DEVICE_TEGRA:
args->v0.platform = NV_DEVICE_INFO_V0_SOC;
+ break;
+ default:
+ WARN_ON(1);
+ break;
}
break;
}
snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
device->chipset, falcon->addr >> 12);
- ret = request_firmware(&fw, name, nv_device_base(device));
+ ret = request_firmware(&fw, name, device->dev);
if (ret == 0) {
falcon->code.data = vmemdup(fw->data, fw->size);
falcon->code.size = fw->size;
snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
device->chipset, falcon->addr >> 12);
- ret = request_firmware(&fw, name, nv_device_base(device));
+ ret = request_firmware(&fw, name, device->dev);
if (ret) {
nvkm_error(subdev, "unable to load firmware data\n");
return -ENODEV;
snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
device->chipset, falcon->addr >> 12);
- ret = request_firmware(&fw, name, nv_device_base(device));
+ ret = request_firmware(&fw, name, device->dev);
if (ret) {
nvkm_error(subdev, "unable to load firmware code\n");
return -ENODEV;
}
snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
- ret = request_firmware(&fw, f, nv_device_base(device));
+ ret = request_firmware(&fw, f, device->dev);
if (ret) {
nvkm_error(subdev, "failed to load %s\n", fwname);
return ret;
snprintf(name, sizeof(name), "nouveau/nv84_xuc%03x",
xtensa->addr >> 12);
- ret = request_firmware(&fw, name, nv_device_base(device));
+ ret = request_firmware(&fw, name, device->dev);
if (ret) {
nvkm_warn(subdev, "unable to load firmware %s\n", name);
return ret;
static void *
shadow_fw_init(struct nvkm_bios *bios, const char *name)
{
- struct device *dev = &bios->subdev.device->pdev->dev;
+ struct device *dev = bios->subdev.device->dev;
const struct firmware *fw;
int ret = request_firmware(&fw, name, dev);
if (ret)
#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+bool nouveau_acpi_rom_supported(struct device *);
#else
static inline bool
-nouveau_acpi_rom_supported(struct pci_dev *pdev)
+nouveau_acpi_rom_supported(struct device *dev)
{
return false;
}
static void *
acpi_init(struct nvkm_bios *bios, const char *name)
{
- if (!nouveau_acpi_rom_supported(bios->subdev.device->pdev))
+ if (!nouveau_acpi_rom_supported(bios->subdev.device->dev))
return ERR_PTR(-ENODEV);
return NULL;
}
*/
#include "priv.h"
+#include <core/pci.h>
+
struct priv {
struct pci_dev *pdev;
void __iomem *rom;
static void *
pcirom_init(struct nvkm_bios *bios, const char *name)
{
- struct pci_dev *pdev = bios->subdev.device->pdev;
+ struct nvkm_device *device = bios->subdev.device;
struct priv *priv = NULL;
+ struct pci_dev *pdev;
int ret;
+ if (device->func->pci)
+ pdev = device->func->pci(device)->pdev;
+ else
+ return ERR_PTR(-ENODEV);
+
if (!(ret = pci_enable_rom(pdev))) {
if (ret = -ENOMEM,
(priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
static void *
platform_init(struct nvkm_bios *bios, const char *name)
{
- struct pci_dev *pdev = bios->subdev.device->pdev;
+ struct nvkm_device *device = bios->subdev.device;
+ struct pci_dev *pdev;
struct priv *priv;
int ret = -ENOMEM;
+ if (device->func->pci)
+ pdev = device->func->pci(device)->pdev;
+ else
+ return ERR_PTR(-ENODEV);
+
if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
if (ret = -ENODEV,
(priv->rom = pci_platform_rom(pdev, &priv->size)))
struct nvkm_device *device = fb->base.subdev.device;
if (fb->r100c10_page) {
- dma_unmap_page(nv_device_base(device), fb->r100c10, PAGE_SIZE,
+ dma_unmap_page(device->dev, fb->r100c10, PAGE_SIZE,
DMA_BIDIRECTIONAL);
__free_page(fb->r100c10_page);
}
fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (fb->r100c10_page) {
- fb->r100c10 = dma_map_page(nv_device_base(device),
- fb->r100c10_page, 0, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(nv_device_base(device), fb->r100c10))
+ fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device->dev, fb->r100c10))
return -EFAULT;
}
struct nvkm_device *device = fb->base.subdev.device;
if (fb->r100c08_page) {
- dma_unmap_page(nv_device_base(device), fb->r100c08, PAGE_SIZE,
+ dma_unmap_page(device->dev, fb->r100c08, PAGE_SIZE,
DMA_BIDIRECTIONAL);
__free_page(fb->r100c08_page);
}
fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (fb->r100c08_page) {
- fb->r100c08 = dma_map_page(nv_device_base(device),
- fb->r100c08_page, 0, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(nv_device_base(device), fb->r100c08))
+ fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device->dev, fb->r100c08))
return -EFAULT;
} else {
nvkm_warn(&fb->base.subdev, "failed 100c08 page alloc\n");
{
struct gk20a_instobj_dma *node = (void *)_node;
struct gk20a_instmem *imem = _node->imem;
- struct device *dev = nv_device_base(imem->base.subdev.device);
+ struct device *dev = imem->base.subdev.device->dev;
if (unlikely(!node->cpuaddr))
return;
nvkm_vm_ref(NULL, &mmu->vm, NULL);
}
if (mmu->nullp) {
- pci_free_consistent(device->pdev, 16 * 1024,
- mmu->nullp, mmu->null);
+ dma_free_coherent(device->dev, 16 * 1024,
+ mmu->nullp, mmu->null);
}
return mmu;
}
int
nv41_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
{
- if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
+ if (device->type == NVKM_DEVICE_AGP ||
!nvkm_boolopt(device->cfgopt, "NvPCIE", true))
return nv04_mmu_new(device, index, pmmu);
struct nvkm_device *device = mmu->base.subdev.device;
int ret;
- mmu->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &mmu->null);
+ mmu->nullp = dma_alloc_coherent(device->dev, 16 * 1024,
+ &mmu->null, GFP_KERNEL);
if (!mmu->nullp) {
nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n");
mmu->null = 0;
int
nv44_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
{
- if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP) ||
+ if (device->type == NVKM_DEVICE_AGP ||
!nvkm_boolopt(device->cfgopt, "NvPCIE", true))
return nv04_mmu_new(device, index, pmmu);
acpi_handle handle;
int rev;
- handle = ACPI_HANDLE(nv_device_base(device));
+ handle = ACPI_HANDLE(device->dev);
if (!handle)
return false;