/*////////////////////////////////////////////////////////////////////////// */
/* declarations for individual HPI entry points */
-hpi_handler_func HPI_1000;
hpi_handler_func HPI_6000;
hpi_handler_func HPI_6205;
-hpi_handler_func HPI_COMMON;
#endif /* _HPI_INTERNAL_H_ */
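
Note for readers of the hunk above: hpi_handler_func is a function type, so each line declares an adapter-family entry point with an identical signature. A minimal sketch of the pattern, assuming a typedef shaped like the driver's; the dispatch helper is hypothetical, for illustration only.

struct hpi_message;
struct hpi_response;

/* One function type keeps every family entry point signature-compatible. */
typedef void hpi_handler_func(struct hpi_message *phm,
	struct hpi_response *phr);

hpi_handler_func HPI_6000;	/* equivalent to: void HPI_6000(phm, phr); */
hpi_handler_func HPI_6205;

/* Hypothetical dispatcher: pick a family handler and call it. */
static void dispatch_sketch(int family, struct hpi_message *phm,
	struct hpi_response *phr)
{
	hpi_handler_func *fn = (family == 6205) ? HPI_6205 : HPI_6000;

	fn(phm, phr);
}
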
		if (info->control_type) {
			pC->p_info[info->control_index] = info;
			cached++;
-		} else	/* dummy cache entry */
+		} else {	/* dummy cache entry */
			pC->p_info[info->control_index] = NULL;
+		}
		byte_count += info->size_in32bit_words * 4;
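
Two details in this hunk are easy to miss: the NULL slot is deliberate (a lookup that lands on it is treated as a cache miss and falls through to a real DSP message), and size_in32bit_words is what advances the walk over the packed, variable-length records. A sketch of that walk; the info struct here is an illustrative subset (u8/u16/u32 from <linux/types.h>), not the driver's exact layout.

struct hpi_control_cache_info {	/* illustrative subset of the record header */
	u8 control_type;
	u8 size_in32bit_words;
	u16 control_index;
};

/* Each record begins with an info header whose size_in32bit_words covers
 * the whole record, so the byte stride is size * 4. */
static u32 cache_walk_sketch(u8 *p, u32 size_in_bytes)
{
	u32 byte_count = 0;

	while (byte_count < size_in_bytes) {
		struct hpi_control_cache_info *info =
			(struct hpi_control_cache_info *)p;
		u32 record_bytes = info->size_in32bit_words * 4;

		/* ...validate and index the record, as in the hunk above... */
		byte_count += record_bytes;
		p += record_bytes;
	}
	return byte_count;
}
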
	unsigned int field_size;
};
-static struct pad_ofs_size pad_desc[] = {
+static const struct pad_ofs_size pad_desc[] = {
	HPICMN_PAD_OFS_AND_SIZE(c_channel),	/* HPI_PAD_CHANNEL_NAME */
	HPICMN_PAD_OFS_AND_SIZE(c_artist),	/* HPI_PAD_ARTIST */
	HPICMN_PAD_OFS_AND_SIZE(c_title),	/* HPI_PAD_TITLE */
}
}
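
The const qualifier added above is safe because every initializer is a compile-time constant built from offsetof/sizeof. A self-contained sketch of that macro pattern; the pad struct below is illustrative (real field sizes differ), only the technique is the point.

#include <stddef.h>	/* offsetof */

struct hpi_control_cache_pad {	/* illustrative layout only */
	char c_channel[40];
	char c_artist[100];
	char c_title[100];
};

struct pad_ofs_size {
	unsigned int offset;
	unsigned int field_size;
};

/* Describe a member by offset and size so one generic copy routine can
 * handle every PAD string field without per-field code. */
#define HPICMN_PAD_OFS_AND_SIZE(m) { \
	offsetof(struct hpi_control_cache_pad, m), \
	sizeof(((struct hpi_control_cache_pad *)0)->m) }

static const struct pad_ofs_size pad_desc_sketch[] = {
	HPICMN_PAD_OFS_AND_SIZE(c_channel),
	HPICMN_PAD_OFS_AND_SIZE(c_artist),
	HPICMN_PAD_OFS_AND_SIZE(c_title),
};
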
+/** Allocate control cache.
+
+\return Cache pointer, or NULL if allocation fails.
+*/
struct hpi_control_cache *hpi_alloc_control_cache(const u32 control_count,
	const u32 size_in_bytes, u8 *p_dsp_control_buffer)
{
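
The \return contract documented above matches the usual two-step allocation pattern: allocate the cache object, then the per-control index, and unwind on failure. A minimal sketch of that shape using kzalloc/kcalloc/kfree from <linux/slab.h>; the struct layout and field names beyond p_info and control_count are assumptions, not the driver's exact body.

struct hpi_control_cache {	/* illustrative subset */
	u32 control_count;
	u32 size_in_bytes;	/* assumed field name */
	u8 *p_buffer;		/* assumed field name */
	struct hpi_control_cache_info **p_info;
};

static struct hpi_control_cache *alloc_cache_sketch(u32 control_count,
	u32 size_in_bytes, u8 *p_dsp_control_buffer)
{
	struct hpi_control_cache *pC = kzalloc(sizeof(*pC), GFP_KERNEL);

	if (!pC)
		return NULL;

	pC->p_info = kcalloc(control_count, sizeof(*pC->p_info),
		GFP_KERNEL);
	if (!pC->p_info) {
		kfree(pC);	/* unwind the partial allocation, never leak */
		return NULL;
	}

	pC->control_count = control_count;
	pC->size_in_bytes = size_in_bytes;
	pC->p_buffer = p_dsp_control_buffer;
	return pC;
}
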
	struct hpi_message *phm, struct hpi_response *phr);
u16 hpi_validate_response(struct hpi_message *phm, struct hpi_response *phr);
+
+hpi_handler_func HPI_COMMON;
int __devinit asihpi_adapter_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *pci_id)
{
-	int err, idx, nm;
+	int idx, nm;
	unsigned int memlen;
	struct hpi_message hm;
	struct hpi_response hr;
	nm = HPI_MAX_ADAPTER_MEM_SPACES;
	for (idx = 0; idx < nm; idx++) {
-		HPI_DEBUG_LOG(INFO, "resource %d %s %08llx-%08llx %04llx\n",
-			idx, pci_dev->resource[idx].name,
-			(unsigned long long)pci_resource_start(pci_dev, idx),
-			(unsigned long long)pci_resource_end(pci_dev, idx),
-			(unsigned long long)pci_resource_flags(pci_dev, idx));
+		HPI_DEBUG_LOG(INFO, "resource %d %pR\n", idx,
+			&pci_dev->resource[idx]);
		if (pci_resource_flags(pci_dev, idx) & IORESOURCE_MEM) {
			memlen = pci_resource_len(pci_dev, idx);
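
The replacement leans on printk's %pR extension, which formats a struct resource (start, end, type, and flags) in one token and cannot get the casts or width specifiers wrong. A standalone sketch of the same idiom using plain dev_info(); PCI_NUM_RESOURCES bounds the loop here instead of the driver's HPI_MAX_ADAPTER_MEM_SPACES.

#include <linux/pci.h>

/* Logs e.g. "resource 0 [mem 0xf0000000-0xf07fffff 64bit]" per BAR. */
static void log_resources_sketch(struct pci_dev *pci_dev)
{
	int idx;

	for (idx = 0; idx < PCI_NUM_RESOURCES; idx++)
		dev_info(&pci_dev->dev, "resource %d %pR\n", idx,
			&pci_dev->resource[idx]);
}
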