Commit | Line | Data |
---|---|---|
a14eddda VK |
1 | /* |
2 | * User-space DMA and UIO based Redrapids Pocket Change CardBus driver | |
3 | * | |
4 | * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org> | |
5 | * | |
6 | * Licensed under GPL version 2 only. | |
7 | */ | |
8 | ||
9 | #include <linux/device.h> | |
10 | #include <linux/module.h> | |
11 | #include <linux/pci.h> | |
12 | #include <linux/uio_driver.h> | |
13 | #include <linux/spinlock.h> | |
14 | #include <linux/cdev.h> | |
15 | #include <linux/delay.h> | |
16 | #include <linux/sysfs.h> | |
17 | #include <linux/poll.h> | |
18 | #include <linux/idr.h> | |
19 | #include <linux/interrupt.h> | |
20 | #include <linux/init.h> | |
21 | #include <linux/ioctl.h> | |
22 | #include <linux/io.h> | |
8037cdad | 23 | #include <linux/sched.h> |
5a0e3ad6 | 24 | #include <linux/slab.h> |
a14eddda VK |
25 | |
26 | #include "poch.h" | |
27 | ||
28 | #include <asm/cacheflush.h> | |
29 | ||
30 | #ifndef PCI_VENDOR_ID_RRAPIDS | |
31 | #define PCI_VENDOR_ID_RRAPIDS 0x17D2 | |
32 | #endif | |
33 | ||
34 | #ifndef PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE | |
35 | #define PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE 0x0351 | |
36 | #endif | |
37 | ||
38 | #define POCH_NCHANNELS 2 | |
39 | ||
40 | #define MAX_POCH_CARDS 8 | |
41 | #define MAX_POCH_DEVICES (MAX_POCH_CARDS * POCH_NCHANNELS) | |
42 | ||
43 | #define DRV_NAME "poch" | |
44 | #define PFX DRV_NAME ": " | |
45 | ||
46 | /* | |
47 | * BAR0 Bridge Register Definitions | |
48 | */ | |
49 | ||
50 | #define BRIDGE_REV_REG 0x0 | |
51 | #define BRIDGE_INT_MASK_REG 0x4 | |
52 | #define BRIDGE_INT_STAT_REG 0x8 | |
53 | ||
54 | #define BRIDGE_INT_ACTIVE (0x1 << 31) | |
55 | #define BRIDGE_INT_FPGA (0x1 << 2) | |
56 | #define BRIDGE_INT_TEMP_FAIL (0x1 << 1) | |
57 | #define BRIDGE_INT_TEMP_WARN (0x1 << 0) | |
58 | ||
59 | #define BRIDGE_FPGA_RESET_REG 0xC | |
60 | ||
61 | #define BRIDGE_CARD_POWER_REG 0x10 | |
62 | #define BRIDGE_CARD_POWER_EN (0x1 << 0) | |
63 | #define BRIDGE_CARD_POWER_PROG_DONE (0x1 << 31) | |
64 | ||
65 | #define BRIDGE_JTAG_REG 0x14 | |
66 | #define BRIDGE_DMA_GO_REG 0x18 | |
67 | #define BRIDGE_STAT_0_REG 0x1C | |
68 | #define BRIDGE_STAT_1_REG 0x20 | |
69 | #define BRIDGE_STAT_2_REG 0x24 | |
70 | #define BRIDGE_STAT_3_REG 0x28 | |
71 | #define BRIDGE_TEMP_STAT_REG 0x2C | |
72 | #define BRIDGE_TEMP_THRESH_REG 0x30 | |
73 | #define BRIDGE_EEPROM_REVSEL_REG 0x34 | |
74 | #define BRIDGE_CIS_STRUCT_REG 0x100 | |
75 | #define BRIDGE_BOARDREV_REG 0x124 | |
76 | ||
77 | /* | |
78 | * BAR1 FPGA Register Definitions | |
79 | */ | |
80 | ||
81 | #define FPGA_IFACE_REV_REG 0x0 | |
82 | #define FPGA_RX_BLOCK_SIZE_REG 0x8 | |
83 | #define FPGA_TX_BLOCK_SIZE_REG 0xC | |
84 | #define FPGA_RX_BLOCK_COUNT_REG 0x10 | |
85 | #define FPGA_TX_BLOCK_COUNT_REG 0x14 | |
86 | #define FPGA_RX_CURR_DMA_BLOCK_REG 0x18 | |
87 | #define FPGA_TX_CURR_DMA_BLOCK_REG 0x1C | |
88 | #define FPGA_RX_GROUP_COUNT_REG 0x20 | |
89 | #define FPGA_TX_GROUP_COUNT_REG 0x24 | |
90 | #define FPGA_RX_CURR_GROUP_REG 0x28 | |
91 | #define FPGA_TX_CURR_GROUP_REG 0x2C | |
92 | #define FPGA_RX_CURR_PCI_REG 0x38 | |
93 | #define FPGA_TX_CURR_PCI_REG 0x3C | |
94 | #define FPGA_RX_GROUP0_START_REG 0x40 | |
95 | #define FPGA_TX_GROUP0_START_REG 0xC0 | |
96 | #define FPGA_DMA_DESC_1_REG 0x140 | |
97 | #define FPGA_DMA_DESC_2_REG 0x144 | |
98 | #define FPGA_DMA_DESC_3_REG 0x148 | |
99 | #define FPGA_DMA_DESC_4_REG 0x14C | |
100 | ||
101 | #define FPGA_DMA_INT_STAT_REG 0x150 | |
102 | #define FPGA_DMA_INT_MASK_REG 0x154 | |
103 | #define FPGA_DMA_INT_RX (1 << 0) | |
104 | #define FPGA_DMA_INT_TX (1 << 1) | |
105 | ||
106 | #define FPGA_RX_GROUPS_PER_INT_REG 0x158 | |
107 | #define FPGA_TX_GROUPS_PER_INT_REG 0x15C | |
108 | #define FPGA_DMA_ADR_PAGE_REG 0x160 | |
109 | #define FPGA_FPGA_REV_REG 0x200 | |
110 | ||
111 | #define FPGA_ADC_CLOCK_CTL_REG 0x204 | |
112 | #define FPGA_ADC_CLOCK_CTL_OSC_EN (0x1 << 3) | |
113 | #define FPGA_ADC_CLOCK_LOCAL_CLK (0x1 | FPGA_ADC_CLOCK_CTL_OSC_EN) | |
114 | #define FPGA_ADC_CLOCK_EXT_SAMP_CLK 0X0 | |
115 | ||
116 | #define FPGA_ADC_DAC_EN_REG 0x208 | |
117 | #define FPGA_ADC_DAC_EN_DAC_OFF (0x1 << 1) | |
118 | #define FPGA_ADC_DAC_EN_ADC_OFF (0x1 << 0) | |
119 | ||
120 | #define FPGA_INT_STAT_REG 0x20C | |
121 | #define FPGA_INT_MASK_REG 0x210 | |
122 | #define FPGA_INT_PLL_UNLOCKED (0x1 << 9) | |
123 | #define FPGA_INT_DMA_CORE (0x1 << 8) | |
124 | #define FPGA_INT_TX_FF_EMPTY (0x1 << 7) | |
125 | #define FPGA_INT_RX_FF_EMPTY (0x1 << 6) | |
126 | #define FPGA_INT_TX_FF_OVRFLW (0x1 << 3) | |
127 | #define FPGA_INT_RX_FF_OVRFLW (0x1 << 2) | |
128 | #define FPGA_INT_TX_ACQ_DONE (0x1 << 1) | |
129 | #define FPGA_INT_RX_ACQ_DONE (0x1) | |
130 | ||
ca219995 VK |
131 | #define FPGA_RX_CTL_REG 0x214 |
132 | #define FPGA_RX_CTL_FIFO_FLUSH (0x1 << 9) | |
133 | #define FPGA_RX_CTL_SYNTH_DATA (0x1 << 8) | |
134 | #define FPGA_RX_CTL_CONT_CAP (0x0 << 1) | |
135 | #define FPGA_RX_CTL_SNAP_CAP (0x1 << 1) | |
a14eddda VK |
136 | |
137 | #define FPGA_RX_ARM_REG 0x21C | |
138 | ||
139 | #define FPGA_DOM_REG 0x224 | |
140 | #define FPGA_DOM_DCM_RESET (0x1 << 5) | |
141 | #define FPGA_DOM_SOFT_RESET (0x1 << 4) | |
142 | #define FPGA_DOM_DUAL_M_SG_DMA (0x0) | |
143 | #define FPGA_DOM_TARGET_ACCESS (0x1) | |
144 | ||
145 | #define FPGA_TX_CTL_REG 0x228 | |
146 | #define FPGA_TX_CTL_FIFO_FLUSH (0x1 << 9) | |
147 | #define FPGA_TX_CTL_OUTPUT_ZERO (0x0 << 2) | |
148 | #define FPGA_TX_CTL_OUTPUT_CARDBUS (0x1 << 2) | |
149 | #define FPGA_TX_CTL_OUTPUT_ADC (0x2 << 2) | |
150 | #define FPGA_TX_CTL_OUTPUT_SNAPSHOT (0x3 << 2) | |
151 | #define FPGA_TX_CTL_LOOPBACK (0x1 << 0) | |
152 | ||
153 | #define FPGA_ENDIAN_MODE_REG 0x22C | |
154 | #define FPGA_RX_FIFO_COUNT_REG 0x28C | |
155 | #define FPGA_TX_ENABLE_REG 0x298 | |
156 | #define FPGA_TX_TRIGGER_REG 0x29C | |
157 | #define FPGA_TX_DATAMEM_COUNT_REG 0x2A8 | |
158 | #define FPGA_CAP_FIFO_REG 0x300 | |
159 | #define FPGA_TX_SNAPSHOT_REG 0x8000 | |
160 | ||
161 | /* | |
162 | * Channel Index Definitions | |
163 | */ | |
164 | ||
enum {
	CHNO_RX_CHANNEL,	/* receive channel (minor % nchannels == 0, see poch_open()) */
	CHNO_TX_CHANNEL,	/* transmit channel */
};

struct poch_dev;	/* forward declaration; defined below */

enum channel_dir {
	CHANNEL_DIR_RX,		/* data flows card -> host */
	CHANNEL_DIR_TX,		/* data flows host -> card */
};
176 | ||
/* Bookkeeping for one DMA group buffer (a contiguous page allocation). */
struct poch_group_info {
	struct page *pg;		/* first page of the group's allocation */
	dma_addr_t dma_addr;		/* address programmed into the FPGA group register */
	unsigned long user_offset;	/* byte offset of this group within the user mmap */
};
182 | ||
/*
 * Per-channel state.  Each card exposes one RX and one TX channel,
 * each with its own character device minor.
 */
struct channel_info {
	unsigned int chno;	/* CHNO_RX_CHANNEL or CHNO_TX_CHANNEL */

	/* Transfer geometry requested through sysfs; latched into the
	 * unsigned long fields below at open time (channel_latch_attr()).
	 */
	atomic_t sys_block_size;
	atomic_t sys_group_size;
	atomic_t sys_group_count;

	enum channel_dir dir;

	unsigned long block_size;
	unsigned long group_size;
	unsigned long group_count;

	/* Contains the DMA address and VM offset of each group. */
	struct poch_group_info *groups;

	/* Contains the header and circular buffer exported to userspace. */
	spinlock_t group_offsets_lock;

	/* Last group consumed by user space. */
	unsigned int consumed;
	/* Last group indicated as 'complete' to user space. */
	unsigned int transfer;

	/* Pollers sleep here; see poch_poll(). */
	wait_queue_head_t wq;

	/* Meaning depends on direction: data (RX) vs. free space (TX). */
	union {
		unsigned int data_available;
		unsigned int space_available;
	};

	/* Register mappings and lock shared with the owning poch_dev. */
	void __iomem *bridge_iomem;
	void __iomem *fpga_iomem;
	spinlock_t *iomem_lock;

	atomic_t free;		/* 1 while the channel is not open (exclusive open) */
	atomic_t inited;	/* incremented once open() fully succeeded */

	/* Error counters */
	struct poch_counters counters;
	spinlock_t counters_lock;

	struct device *dev;
};
227 | ||
/* Per-card state: one CardBus function carrying both channels. */
struct poch_dev {
	struct uio_info uio;
	struct pci_dev *pci_dev;
	unsigned int nchannels;
	struct channel_info channels[POCH_NCHANNELS];
	struct cdev cdev;

	/* Counts the no. of channels that have been opened. On first
	 * open, the card is powered on. On last channel close, the
	 * card is powered off.
	 */
	atomic_t usage;

	/* BAR0 (bridge) and BAR1 (FPGA) register mappings. */
	void __iomem *bridge_iomem;
	void __iomem *fpga_iomem;
	/* Serializes access to registers shared by both channels,
	 * e.g. the DMA address page register.
	 */
	spinlock_t iomem_lock;

	struct device *dev;
};
247 | ||
16fbf4cb VK |
248 | static int synth_rx; |
249 | module_param(synth_rx, bool, 0600); | |
250 | MODULE_PARM_DESC(synth_rx, | |
251 | "Synthesize received values using a counter. Default: No"); | |
252 | ||
dad17401 VK |
253 | static int loopback; |
254 | module_param(loopback, bool, 0600); | |
255 | MODULE_PARM_DESC(loopback, | |
256 | "Enable hardware loopback of trasnmitted data. Default: No"); | |
257 | ||
a14eddda VK |
/* Driver-global state: base dev_t, sysfs class, and minor-number IDR. */
static dev_t poch_first_dev;
static struct class *poch_cls;
static DEFINE_IDR(poch_ids);
261 | ||
262 | static ssize_t store_block_size(struct device *dev, | |
263 | struct device_attribute *attr, | |
264 | const char *buf, size_t count) | |
265 | { | |
266 | struct channel_info *channel = dev_get_drvdata(dev); | |
267 | unsigned long block_size; | |
268 | ||
269 | sscanf(buf, "%lu", &block_size); | |
270 | atomic_set(&channel->sys_block_size, block_size); | |
271 | ||
272 | return count; | |
273 | } | |
274 | static DEVICE_ATTR(block_size, S_IWUSR|S_IWGRP, NULL, store_block_size); | |
275 | ||
276 | static ssize_t store_group_size(struct device *dev, | |
277 | struct device_attribute *attr, | |
278 | const char *buf, size_t count) | |
279 | { | |
280 | struct channel_info *channel = dev_get_drvdata(dev); | |
281 | unsigned long group_size; | |
282 | ||
283 | sscanf(buf, "%lu", &group_size); | |
284 | atomic_set(&channel->sys_group_size, group_size); | |
285 | ||
286 | return count; | |
287 | } | |
288 | static DEVICE_ATTR(group_size, S_IWUSR|S_IWGRP, NULL, store_group_size); | |
289 | ||
290 | static ssize_t store_group_count(struct device *dev, | |
291 | struct device_attribute *attr, | |
292 | const char *buf, size_t count) | |
293 | { | |
294 | struct channel_info *channel = dev_get_drvdata(dev); | |
295 | unsigned long group_count; | |
296 | ||
297 | sscanf(buf, "%lu", &group_count); | |
298 | atomic_set(&channel->sys_group_count, group_count); | |
299 | ||
300 | return count; | |
301 | } | |
302 | static DEVICE_ATTR(group_count, S_IWUSR|S_IWGRP, NULL, store_group_count); | |
303 | ||
304 | static ssize_t show_direction(struct device *dev, | |
305 | struct device_attribute *attr, char *buf) | |
306 | { | |
307 | struct channel_info *channel = dev_get_drvdata(dev); | |
308 | int len; | |
309 | ||
310 | len = sprintf(buf, "%s\n", (channel->dir ? "tx" : "rx")); | |
311 | return len; | |
312 | } | |
313 | static DEVICE_ATTR(dir, S_IRUSR|S_IRGRP, show_direction, NULL); | |
314 | ||
ea0a337f VK |
315 | static unsigned long npages(unsigned long bytes) |
316 | { | |
317 | if (bytes % PAGE_SIZE == 0) | |
318 | return bytes / PAGE_SIZE; | |
319 | else | |
320 | return (bytes / PAGE_SIZE) + 1; | |
321 | } | |
322 | ||
a14eddda VK |
323 | static ssize_t show_mmap_size(struct device *dev, |
324 | struct device_attribute *attr, char *buf) | |
325 | { | |
326 | struct channel_info *channel = dev_get_drvdata(dev); | |
327 | int len; | |
328 | unsigned long mmap_size; | |
329 | unsigned long group_pages; | |
a14eddda VK |
330 | unsigned long total_group_pages; |
331 | ||
ea0a337f | 332 | group_pages = npages(channel->group_size); |
a14eddda VK |
333 | total_group_pages = group_pages * channel->group_count; |
334 | ||
92d45033 | 335 | mmap_size = total_group_pages * PAGE_SIZE; |
a14eddda VK |
336 | len = sprintf(buf, "%lu\n", mmap_size); |
337 | return len; | |
338 | } | |
339 | static DEVICE_ATTR(mmap_size, S_IRUSR|S_IRGRP, show_mmap_size, NULL); | |
340 | ||
/* sysfs attributes exported on each per-channel device. */
static struct device_attribute *poch_class_attrs[] = {
	&dev_attr_block_size,
	&dev_attr_group_size,
	&dev_attr_group_count,
	&dev_attr_dir,
	&dev_attr_mmap_size,
};
348 | ||
349 | static void poch_channel_free_groups(struct channel_info *channel) | |
350 | { | |
351 | unsigned long i; | |
352 | ||
353 | for (i = 0; i < channel->group_count; i++) { | |
354 | struct poch_group_info *group; | |
355 | unsigned int order; | |
356 | ||
357 | group = &channel->groups[i]; | |
358 | order = get_order(channel->group_size); | |
359 | if (group->pg) | |
360 | __free_pages(group->pg, order); | |
361 | } | |
362 | } | |
363 | ||
364 | static int poch_channel_alloc_groups(struct channel_info *channel) | |
365 | { | |
366 | unsigned long i; | |
367 | unsigned long group_pages; | |
a14eddda | 368 | |
ea0a337f | 369 | group_pages = npages(channel->group_size); |
a14eddda VK |
370 | |
371 | for (i = 0; i < channel->group_count; i++) { | |
372 | struct poch_group_info *group; | |
373 | unsigned int order; | |
374 | gfp_t gfp_mask; | |
375 | ||
376 | group = &channel->groups[i]; | |
377 | order = get_order(channel->group_size); | |
378 | ||
379 | /* | |
380 | * __GFP_COMP is required here since we are going to | |
381 | * perform non-linear mapping to userspace. For more | |
382 | * information read the vm_insert_page() function | |
383 | * comments. | |
384 | */ | |
385 | ||
386 | gfp_mask = GFP_KERNEL | GFP_DMA32 | __GFP_ZERO; | |
387 | group->pg = alloc_pages(gfp_mask, order); | |
388 | if (!group->pg) { | |
389 | poch_channel_free_groups(channel); | |
390 | return -ENOMEM; | |
391 | } | |
392 | ||
393 | /* FIXME: This is the physical address not the bus | |
394 | * address! This won't work in architectures that | |
395 | * have an IOMMU. Can we use pci_map_single() for | |
396 | * this? | |
397 | */ | |
398 | group->dma_addr = page_to_pfn(group->pg) * PAGE_SIZE; | |
92d45033 | 399 | group->user_offset = (i * group_pages) * PAGE_SIZE; |
a14eddda | 400 | |
3ca67c1b VK |
401 | printk(KERN_INFO PFX "%ld: user_offset: 0x%lx\n", i, |
402 | group->user_offset); | |
a14eddda VK |
403 | } |
404 | ||
405 | return 0; | |
406 | } | |
407 | ||
7dadbbcf | 408 | static int channel_latch_attr(struct channel_info *channel) |
a14eddda VK |
409 | { |
410 | channel->group_count = atomic_read(&channel->sys_group_count); | |
411 | channel->group_size = atomic_read(&channel->sys_group_size); | |
412 | channel->block_size = atomic_read(&channel->sys_block_size); | |
7dadbbcf VK |
413 | |
414 | if (channel->group_count == 0) { | |
415 | printk(KERN_ERR PFX "invalid group count %lu", | |
416 | channel->group_count); | |
417 | return -EINVAL; | |
418 | } | |
419 | ||
420 | if (channel->group_size == 0 || | |
421 | channel->group_size < channel->block_size) { | |
422 | printk(KERN_ERR PFX "invalid group size %lu", | |
423 | channel->group_size); | |
424 | return -EINVAL; | |
425 | } | |
426 | ||
427 | if (channel->block_size == 0 || (channel->block_size % 8) != 0) { | |
428 | printk(KERN_ERR PFX "invalid block size %lu", | |
429 | channel->block_size); | |
430 | return -EINVAL; | |
431 | } | |
432 | ||
433 | if (channel->group_size % channel->block_size != 0) { | |
434 | printk(KERN_ERR PFX | |
435 | "group size should be multiple of block size"); | |
436 | return -EINVAL; | |
437 | } | |
438 | ||
439 | return 0; | |
a14eddda VK |
440 | } |
441 | ||
/*
 * Configure DMA group registers
 *
 * Programs the FPGA with this channel's block/group geometry and the
 * DMA address of every group buffer.  Accesses involving the shared
 * DMA address-page register are serialized against the other channel
 * with channel->iomem_lock.
 */
static void channel_dma_init(struct channel_info *channel)
{
	void __iomem *fpga = channel->fpga_iomem;
	u32 group_regs_base;
	u32 group_reg;
	unsigned int page;
	unsigned int group_in_page;
	unsigned long i;
	u32 block_size_reg;
	u32 block_count_reg;
	u32 group_count_reg;
	u32 groups_per_int_reg;
	u32 curr_pci_reg;

	/* Pick the RX or TX bank of the (structurally identical) registers. */
	if (channel->chno == CHNO_RX_CHANNEL) {
		group_regs_base = FPGA_RX_GROUP0_START_REG;
		block_size_reg = FPGA_RX_BLOCK_SIZE_REG;
		block_count_reg = FPGA_RX_BLOCK_COUNT_REG;
		group_count_reg = FPGA_RX_GROUP_COUNT_REG;
		groups_per_int_reg = FPGA_RX_GROUPS_PER_INT_REG;
		curr_pci_reg = FPGA_RX_CURR_PCI_REG;
	} else {
		group_regs_base = FPGA_TX_GROUP0_START_REG;
		block_size_reg = FPGA_TX_BLOCK_SIZE_REG;
		block_count_reg = FPGA_TX_BLOCK_COUNT_REG;
		group_count_reg = FPGA_TX_GROUP_COUNT_REG;
		groups_per_int_reg = FPGA_TX_GROUPS_PER_INT_REG;
		curr_pci_reg = FPGA_TX_CURR_PCI_REG;
	}

	printk(KERN_WARNING "block_size, group_size, group_count\n");
	/*
	 * Block size is represented in no. of 64 bit transfers.
	 */
	iowrite32(channel->block_size / 8, fpga + block_size_reg);
	iowrite32(channel->group_size / channel->block_size,
		  fpga + block_count_reg);
	iowrite32(channel->group_count, fpga + group_count_reg);
	/* FIXME: Hardcoded groups per int. Get it from sysfs? */
	iowrite32(16, fpga + groups_per_int_reg);

	/* Unlock PCI address? Not defined in the data sheet, but used
	 * in the reference code by Redrapids.
	 */
	iowrite32(0x1, fpga + curr_pci_reg);

	/* The DMA address page register is shared between the RX and
	 * TX channels, so acquire lock.
	 */
	for (i = 0; i < channel->group_count; i++) {
		/* 32 group registers fit in one address page. */
		page = i / 32;
		group_in_page = i % 32;

		group_reg = group_regs_base + (group_in_page * 4);

		spin_lock(channel->iomem_lock);
		iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
		iowrite32(channel->groups[i].dma_addr, fpga + group_reg);
		spin_unlock(channel->iomem_lock);
	}

	/* Read the programmed addresses back — debugging aid only. */
	for (i = 0; i < channel->group_count; i++) {
		page = i / 32;
		group_in_page = i % 32;

		group_reg = group_regs_base + (group_in_page * 4);

		spin_lock(channel->iomem_lock);
		iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
		printk(KERN_INFO PFX "%ld: read dma_addr: 0x%x\n", i,
		       ioread32(fpga + group_reg));
		spin_unlock(channel->iomem_lock);
	}

}
520 | ||
a14eddda VK |
521 | static void __poch_channel_clear_counters(struct channel_info *channel) |
522 | { | |
523 | channel->counters.pll_unlock = 0; | |
524 | channel->counters.fifo_empty = 0; | |
525 | channel->counters.fifo_overflow = 0; | |
526 | } | |
527 | ||
/*
 * Per-open channel setup: latch the sysfs parameters, allocate group
 * bookkeeping and DMA buffers, and wire up the register mappings and
 * lock shared with the parent device.
 *
 * Returns 0 on success or a negative errno; partial allocations are
 * released on failure.
 */
static int poch_channel_init(struct channel_info *channel,
			     struct poch_dev *poch_dev)
{
	struct pci_dev *pdev = poch_dev->pci_dev;
	struct device *dev = &pdev->dev;
	unsigned long alloc_size;
	int ret;

	printk(KERN_WARNING "channel_latch_attr\n");

	ret = channel_latch_attr(channel);
	if (ret != 0)
		goto out;

	/* Fresh ring state: nothing transferred, nothing consumed. */
	channel->consumed = 0;
	channel->transfer = 0;

	/* Allocate memory to hold group information. */
	alloc_size = channel->group_count * sizeof(struct poch_group_info);
	channel->groups = kzalloc(alloc_size, GFP_KERNEL);
	if (!channel->groups) {
		dev_err(dev, "error allocating memory for group info\n");
		ret = -ENOMEM;
		goto out;
	}

	printk(KERN_WARNING "poch_channel_alloc_groups\n");

	ret = poch_channel_alloc_groups(channel);
	if (ret) {
		dev_err(dev, "error allocating groups of order %d\n",
			get_order(channel->group_size));
		goto out_free_group_info;
	}

	/* Share the parent device's register mappings and iomem lock. */
	channel->fpga_iomem = poch_dev->fpga_iomem;
	channel->bridge_iomem = poch_dev->bridge_iomem;
	channel->iomem_lock = &poch_dev->iomem_lock;
	spin_lock_init(&channel->counters_lock);

	__poch_channel_clear_counters(channel);

	return 0;

out_free_group_info:
	kfree(channel->groups);
out:
	return ret;
}
577 | ||
578 | static int poch_wait_fpga_prog(void __iomem *bridge) | |
579 | { | |
580 | unsigned long total_wait; | |
581 | const unsigned long wait_period = 100; | |
582 | /* FIXME: Get the actual timeout */ | |
583 | const unsigned long prog_timeo = 10000; /* 10 Seconds */ | |
584 | u32 card_power; | |
585 | ||
586 | printk(KERN_WARNING "poch_wait_fpg_prog\n"); | |
587 | ||
588 | printk(KERN_INFO PFX "programming fpga ...\n"); | |
589 | total_wait = 0; | |
590 | while (1) { | |
591 | msleep(wait_period); | |
592 | total_wait += wait_period; | |
593 | ||
594 | card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG); | |
595 | if (card_power & BRIDGE_CARD_POWER_PROG_DONE) { | |
596 | printk(KERN_INFO PFX "programming done\n"); | |
597 | return 0; | |
598 | } | |
599 | if (total_wait > prog_timeo) { | |
600 | printk(KERN_ERR PFX | |
601 | "timed out while programming FPGA\n"); | |
602 | return -EIO; | |
603 | } | |
604 | } | |
605 | } | |
606 | ||
/* Quiesce the bridge (mask interrupts, stop DMA) and cut card power. */
static void poch_card_power_off(struct poch_dev *poch_dev)
{
	void __iomem *bridge = poch_dev->bridge_iomem;
	u32 card_power;

	/* Silence interrupts and halt DMA before removing power. */
	iowrite32(0, bridge + BRIDGE_INT_MASK_REG);
	iowrite32(0, bridge + BRIDGE_DMA_GO_REG);

	/* Clear only the power-enable bit; preserve the other bits. */
	card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
	iowrite32(card_power & ~BRIDGE_CARD_POWER_EN,
		  bridge + BRIDGE_CARD_POWER_REG);
}
619 | ||
/* ADC sampling clock sources selectable in poch_card_clock_on(). */
enum clk_src {
	CLK_SRC_ON_BOARD,
	CLK_SRC_EXTERNAL
};
624 | ||
625 | static void poch_card_clock_on(void __iomem *fpga) | |
626 | { | |
627 | /* FIXME: Get this data through sysfs? */ | |
628 | enum clk_src clk_src = CLK_SRC_ON_BOARD; | |
629 | ||
630 | if (clk_src == CLK_SRC_ON_BOARD) { | |
631 | iowrite32(FPGA_ADC_CLOCK_LOCAL_CLK | FPGA_ADC_CLOCK_CTL_OSC_EN, | |
632 | fpga + FPGA_ADC_CLOCK_CTL_REG); | |
633 | } else if (clk_src == CLK_SRC_EXTERNAL) { | |
634 | iowrite32(FPGA_ADC_CLOCK_EXT_SAMP_CLK, | |
635 | fpga + FPGA_ADC_CLOCK_CTL_REG); | |
636 | } | |
637 | } | |
638 | ||
/*
 * Power the card up, wait for the FPGA to finish loading its bitstream,
 * start the sampling clock, then reset the FPGA state machines into
 * dual-master scatter/gather DMA mode.  Returns 0 on success or -EIO
 * on programming timeout (the card is powered back off in that case).
 */
static int poch_card_power_on(struct poch_dev *poch_dev)
{
	void __iomem *bridge = poch_dev->bridge_iomem;
	void __iomem *fpga = poch_dev->fpga_iomem;

	iowrite32(BRIDGE_CARD_POWER_EN, bridge + BRIDGE_CARD_POWER_REG);

	if (poch_wait_fpga_prog(bridge) != 0) {
		poch_card_power_off(poch_dev);
		return -EIO;
	}

	poch_card_clock_on(fpga);

	/* Sync to new clock, reset state machines, set DMA mode. */
	iowrite32(FPGA_DOM_DCM_RESET | FPGA_DOM_SOFT_RESET
		  | FPGA_DOM_DUAL_M_SG_DMA, fpga + FPGA_DOM_REG);

	/* FIXME: The time required for sync. needs to be tuned. */
	msleep(1000);

	return 0;
}
662 | ||
663 | static void poch_channel_analog_on(struct channel_info *channel) | |
664 | { | |
665 | void __iomem *fpga = channel->fpga_iomem; | |
666 | u32 adc_dac_en; | |
667 | ||
668 | spin_lock(channel->iomem_lock); | |
669 | adc_dac_en = ioread32(fpga + FPGA_ADC_DAC_EN_REG); | |
670 | switch (channel->chno) { | |
671 | case CHNO_RX_CHANNEL: | |
672 | iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_ADC_OFF, | |
673 | fpga + FPGA_ADC_DAC_EN_REG); | |
674 | break; | |
675 | case CHNO_TX_CHANNEL: | |
676 | iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_DAC_OFF, | |
677 | fpga + FPGA_ADC_DAC_EN_REG); | |
678 | break; | |
679 | } | |
680 | spin_unlock(channel->iomem_lock); | |
681 | } | |
682 | ||
/*
 * Open one channel of the card.
 *
 * Each channel allows exactly one opener (guarded by channel->free).
 * The first open of either channel powers the card on and programs the
 * shared DMA/interrupt machinery; every open latches the sysfs transfer
 * parameters, allocates the DMA groups and arms the channel's FIFO and
 * control registers.
 */
static int poch_open(struct inode *inode, struct file *filp)
{
	struct poch_dev *poch_dev;
	struct channel_info *channel;
	void __iomem *bridge;
	void __iomem *fpga;
	int chno;
	int usage;
	int ret;

	poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
	bridge = poch_dev->bridge_iomem;
	fpga = poch_dev->fpga_iomem;

	/* Map the minor number onto this card's RX/TX channel pair. */
	chno = iminor(inode) % poch_dev->nchannels;
	channel = &poch_dev->channels[chno];

	/* Exclusive open: 'free' is 1 only while nobody holds the channel. */
	if (!atomic_dec_and_test(&channel->free)) {
		atomic_inc(&channel->free);
		ret = -EBUSY;
		goto out;
	}

	usage = atomic_inc_return(&poch_dev->usage);

	printk(KERN_WARNING "poch_card_power_on\n");

	/* First opener of the card powers it up. */
	if (usage == 1) {
		ret = poch_card_power_on(poch_dev);
		if (ret)
			goto out_dec_usage;
	}

	printk(KERN_INFO "CardBus Bridge Revision: %x\n",
	       ioread32(bridge + BRIDGE_REV_REG));
	printk(KERN_INFO "CardBus Interface Revision: %x\n",
	       ioread32(fpga + FPGA_IFACE_REV_REG));

	channel->chno = chno;
	filp->private_data = channel;

	printk(KERN_WARNING "poch_channel_init\n");

	ret = poch_channel_init(channel, poch_dev);
	if (ret)
		goto out_power_off;

	poch_channel_analog_on(channel);

	printk(KERN_WARNING "channel_dma_init\n");

	channel_dma_init(channel);

	/* NOTE(review): this trace fires after poch_channel_analog_on()
	 * has already run — the debug labels are out of order.
	 */
	printk(KERN_WARNING "poch_channel_analog_on\n");

	if (usage == 1) {
		printk(KERN_WARNING "setting up DMA\n");

		/* Initialize DMA Controller. */
		iowrite32(FPGA_CAP_FIFO_REG, bridge + BRIDGE_STAT_2_REG);
		iowrite32(FPGA_DMA_DESC_1_REG, bridge + BRIDGE_STAT_3_REG);

		/* NOTE(review): these discarded reads presumably clear
		 * stale interrupt status — confirm against data sheet.
		 */
		ioread32(fpga + FPGA_DMA_INT_STAT_REG);
		ioread32(fpga + FPGA_INT_STAT_REG);
		ioread32(bridge + BRIDGE_INT_STAT_REG);

		/* Initialize Interrupts. FIXME: Enable temperature
		 * handling We are enabling both Tx and Rx channel
		 * interrupts here. Do we need to enable interrupts
		 * only for the current channel? Anyways we won't get
		 * the interrupt unless the DMA is activated.
		 */
		iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
		iowrite32(FPGA_INT_DMA_CORE
			  | FPGA_INT_PLL_UNLOCKED
			  | FPGA_INT_TX_FF_EMPTY
			  | FPGA_INT_RX_FF_EMPTY
			  | FPGA_INT_TX_FF_OVRFLW
			  | FPGA_INT_RX_FF_OVRFLW,
			  fpga + FPGA_INT_MASK_REG);
		iowrite32(FPGA_DMA_INT_RX | FPGA_DMA_INT_TX,
			  fpga + FPGA_DMA_INT_MASK_REG);
	}

	if (channel->dir == CHANNEL_DIR_TX) {
		/* Flush TX FIFO and output data from cardbus. */
		u32 ctl_val = 0;

		ctl_val |= FPGA_TX_CTL_FIFO_FLUSH;
		ctl_val |= FPGA_TX_CTL_OUTPUT_CARDBUS;
		if (loopback)
			ctl_val |= FPGA_TX_CTL_LOOPBACK;

		iowrite32(ctl_val, fpga + FPGA_TX_CTL_REG);
	} else {
		/* Flush RX FIFO and output data to cardbus. */
		u32 ctl_val = FPGA_RX_CTL_CONT_CAP | FPGA_RX_CTL_FIFO_FLUSH;
		if (synth_rx)
			ctl_val |= FPGA_RX_CTL_SYNTH_DATA;

		iowrite32(ctl_val, fpga + FPGA_RX_CTL_REG);
	}

	atomic_inc(&channel->inited);

	return 0;

out_power_off:
	if (usage == 1)
		poch_card_power_off(poch_dev);
out_dec_usage:
	atomic_dec(&poch_dev->usage);
	atomic_inc(&channel->free);
out:
	return ret;
}
799 | ||
/*
 * Release a channel: power the card down on the last close, free the
 * DMA group buffers and mark the channel available for the next open.
 */
static int poch_release(struct inode *inode, struct file *filp)
{
	struct channel_info *channel = filp->private_data;
	struct poch_dev *poch_dev;
	int usage;

	poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);

	/* Last user of the card gone: cut the power. */
	usage = atomic_dec_return(&poch_dev->usage);
	if (usage == 0) {
		printk(KERN_WARNING "poch_card_power_off\n");
		poch_card_power_off(poch_dev);
	}

	atomic_dec(&channel->inited);
	poch_channel_free_groups(channel);
	kfree(channel->groups);
	/* Release the exclusive-open guard taken in poch_open(). */
	atomic_inc(&channel->free);

	return 0;
}
821 | ||
822 | /* | |
92d45033 | 823 | * Map the the group buffers, to user space. |
a14eddda VK |
824 | */ |
825 | static int poch_mmap(struct file *filp, struct vm_area_struct *vma) | |
826 | { | |
827 | struct channel_info *channel = filp->private_data; | |
828 | ||
829 | unsigned long start; | |
830 | unsigned long size; | |
831 | ||
832 | unsigned long group_pages; | |
a14eddda VK |
833 | unsigned long total_group_pages; |
834 | ||
835 | int pg_num; | |
836 | struct page *pg; | |
837 | ||
838 | int i; | |
839 | int ret; | |
840 | ||
841 | printk(KERN_WARNING "poch_mmap\n"); | |
842 | ||
843 | if (vma->vm_pgoff) { | |
844 | printk(KERN_WARNING PFX "page offset: %lu\n", vma->vm_pgoff); | |
845 | return -EINVAL; | |
846 | } | |
847 | ||
ea0a337f | 848 | group_pages = npages(channel->group_size); |
a14eddda VK |
849 | total_group_pages = group_pages * channel->group_count; |
850 | ||
851 | size = vma->vm_end - vma->vm_start; | |
92d45033 | 852 | if (size != total_group_pages * PAGE_SIZE) { |
a14eddda VK |
853 | printk(KERN_WARNING PFX "required %lu bytes\n", size); |
854 | return -EINVAL; | |
855 | } | |
856 | ||
857 | start = vma->vm_start; | |
858 | ||
a14eddda VK |
859 | for (i = 0; i < channel->group_count; i++) { |
860 | pg = channel->groups[i].pg; | |
861 | for (pg_num = 0; pg_num < group_pages; pg_num++, pg++) { | |
862 | printk(KERN_DEBUG PFX "%d: group %d: 0x%lx\n", | |
863 | pg_num, i, start); | |
864 | ret = vm_insert_page(vma, start, pg); | |
865 | if (ret) { | |
866 | printk(KERN_DEBUG PFX | |
867 | "vm_insert 2 failed at %d\n", pg_num); | |
868 | return ret; | |
869 | } | |
870 | start += PAGE_SIZE; | |
871 | } | |
872 | } | |
873 | ||
874 | return 0; | |
875 | } | |
876 | ||
877 | /* | |
878 | * Check whether there is some group that the user space has not | |
879 | * consumed yet. When the user space consumes a group, it sets it to | |
880 | * -1. Cosuming could be reading data in case of RX and filling a | |
881 | * buffer in case of TX. | |
882 | */ | |
883 | static int poch_channel_available(struct channel_info *channel) | |
884 | { | |
92d45033 | 885 | int available = 0; |
a14eddda VK |
886 | |
887 | spin_lock_irq(&channel->group_offsets_lock); | |
888 | ||
92d45033 VK |
889 | if (channel->consumed != channel->transfer) |
890 | available = 1; | |
a14eddda VK |
891 | |
892 | spin_unlock_irq(&channel->group_offsets_lock); | |
893 | ||
92d45033 | 894 | return available; |
a14eddda VK |
895 | } |
896 | ||
897 | static unsigned int poch_poll(struct file *filp, poll_table *pt) | |
898 | { | |
899 | struct channel_info *channel = filp->private_data; | |
900 | unsigned int ret = 0; | |
901 | ||
902 | poll_wait(filp, &channel->wq, pt); | |
903 | ||
904 | if (poch_channel_available(channel)) { | |
905 | if (channel->dir == CHANNEL_DIR_RX) | |
906 | ret = POLLIN | POLLRDNORM; | |
907 | else | |
908 | ret = POLLOUT | POLLWRNORM; | |
909 | } | |
910 | ||
911 | return ret; | |
912 | } | |
913 | ||
914 | static int poch_ioctl(struct inode *inode, struct file *filp, | |
915 | unsigned int cmd, unsigned long arg) | |
916 | { | |
917 | struct channel_info *channel = filp->private_data; | |
918 | void __iomem *fpga = channel->fpga_iomem; | |
919 | void __iomem *bridge = channel->bridge_iomem; | |
920 | void __user *argp = (void __user *)arg; | |
921 | struct vm_area_struct *vms; | |
922 | struct poch_counters counters; | |
923 | int ret; | |
924 | ||
925 | switch (cmd) { | |
926 | case POCH_IOC_TRANSFER_START: | |
927 | switch (channel->chno) { | |
928 | case CHNO_TX_CHANNEL: | |
929 | printk(KERN_INFO PFX "ioctl: Tx start\n"); | |
930 | iowrite32(0x1, fpga + FPGA_TX_TRIGGER_REG); | |
931 | iowrite32(0x1, fpga + FPGA_TX_ENABLE_REG); | |
932 | ||
933 | /* FIXME: Does it make sense to do a DMA GO | |
934 | * twice, once in Tx and once in Rx. | |
935 | */ | |
936 | iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG); | |
937 | break; | |
938 | case CHNO_RX_CHANNEL: | |
939 | printk(KERN_INFO PFX "ioctl: Rx start\n"); | |
940 | iowrite32(0x1, fpga + FPGA_RX_ARM_REG); | |
941 | iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG); | |
942 | break; | |
943 | } | |
944 | break; | |
945 | case POCH_IOC_TRANSFER_STOP: | |
946 | switch (channel->chno) { | |
947 | case CHNO_TX_CHANNEL: | |
948 | printk(KERN_INFO PFX "ioctl: Tx stop\n"); | |
949 | iowrite32(0x0, fpga + FPGA_TX_ENABLE_REG); | |
950 | iowrite32(0x0, fpga + FPGA_TX_TRIGGER_REG); | |
951 | iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG); | |
952 | break; | |
953 | case CHNO_RX_CHANNEL: | |
954 | printk(KERN_INFO PFX "ioctl: Rx stop\n"); | |
955 | iowrite32(0x0, fpga + FPGA_RX_ARM_REG); | |
956 | iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG); | |
957 | break; | |
958 | } | |
959 | break; | |
b01faf05 VK |
960 | case POCH_IOC_CONSUME: |
961 | { | |
962 | int available; | |
963 | int nfetch; | |
964 | unsigned int from; | |
965 | unsigned int count; | |
966 | unsigned int i, j; | |
967 | struct poch_consume consume; | |
968 | struct poch_consume *uconsume; | |
969 | ||
970 | uconsume = argp; | |
971 | ret = copy_from_user(&consume, uconsume, sizeof(consume)); | |
972 | if (ret) | |
973 | return ret; | |
974 | ||
975 | spin_lock_irq(&channel->group_offsets_lock); | |
976 | ||
977 | channel->consumed += consume.nflush; | |
978 | channel->consumed %= channel->group_count; | |
979 | ||
980 | available = channel->transfer - channel->consumed; | |
981 | if (available < 0) | |
982 | available += channel->group_count; | |
983 | ||
984 | from = channel->consumed; | |
985 | ||
986 | spin_unlock_irq(&channel->group_offsets_lock); | |
987 | ||
988 | nfetch = consume.nfetch; | |
989 | count = min(available, nfetch); | |
990 | ||
991 | for (i = 0; i < count; i++) { | |
992 | j = (from + i) % channel->group_count; | |
993 | ret = put_user(channel->groups[j].user_offset, | |
994 | &consume.offsets[i]); | |
995 | if (ret) | |
996 | return -EFAULT; | |
997 | } | |
998 | ||
999 | ret = put_user(count, &uconsume->nfetch); | |
1000 | if (ret) | |
1001 | return -EFAULT; | |
1002 | ||
1003 | break; | |
1004 | } | |
a14eddda | 1005 | case POCH_IOC_GET_COUNTERS: |
dcbbcefb | 1006 | if (!access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters))) |
a14eddda VK |
1007 | return -EFAULT; |
1008 | ||
1009 | spin_lock_irq(&channel->counters_lock); | |
1010 | counters = channel->counters; | |
1011 | __poch_channel_clear_counters(channel); | |
1012 | spin_unlock_irq(&channel->counters_lock); | |
1013 | ||
1014 | ret = copy_to_user(argp, &counters, | |
1015 | sizeof(struct poch_counters)); | |
1016 | if (ret) | |
1017 | return ret; | |
1018 | ||
1019 | break; | |
1020 | case POCH_IOC_SYNC_GROUP_FOR_USER: | |
1021 | case POCH_IOC_SYNC_GROUP_FOR_DEVICE: | |
1022 | vms = find_vma(current->mm, arg); | |
1023 | if (!vms) | |
1024 | /* Address not mapped. */ | |
1025 | return -EINVAL; | |
1026 | if (vms->vm_file != filp) | |
1027 | /* Address mapped from different device/file. */ | |
1028 | return -EINVAL; | |
1029 | ||
1030 | flush_cache_range(vms, arg, arg + channel->group_size); | |
1031 | break; | |
1032 | } | |
1033 | return 0; | |
1034 | } | |
1035 | ||
1036 | static struct file_operations poch_fops = { | |
1037 | .owner = THIS_MODULE, | |
1038 | .open = poch_open, | |
1039 | .release = poch_release, | |
1040 | .ioctl = poch_ioctl, | |
1041 | .poll = poch_poll, | |
1042 | .mmap = poch_mmap | |
1043 | }; | |
1044 | ||
1045 | static void poch_irq_dma(struct channel_info *channel) | |
1046 | { | |
1047 | u32 prev_transfer; | |
1048 | u32 curr_transfer; | |
1049 | long groups_done; | |
1050 | unsigned long i, j; | |
1051 | struct poch_group_info *groups; | |
a14eddda VK |
1052 | u32 curr_group_reg; |
1053 | ||
1054 | if (!atomic_read(&channel->inited)) | |
1055 | return; | |
1056 | ||
1057 | prev_transfer = channel->transfer; | |
1058 | ||
1059 | if (channel->chno == CHNO_RX_CHANNEL) | |
1060 | curr_group_reg = FPGA_RX_CURR_GROUP_REG; | |
1061 | else | |
1062 | curr_group_reg = FPGA_TX_CURR_GROUP_REG; | |
1063 | ||
1064 | curr_transfer = ioread32(channel->fpga_iomem + curr_group_reg); | |
1065 | ||
1066 | groups_done = curr_transfer - prev_transfer; | |
1067 | /* Check wrap over, and handle it. */ | |
1068 | if (groups_done <= 0) | |
1069 | groups_done += channel->group_count; | |
1070 | ||
a14eddda VK |
1071 | groups = channel->groups; |
1072 | ||
1073 | spin_lock(&channel->group_offsets_lock); | |
1074 | ||
1075 | for (i = 0; i < groups_done; i++) { | |
1076 | j = (prev_transfer + i) % channel->group_count; | |
b01faf05 VK |
1077 | |
1078 | channel->transfer += 1; | |
1079 | channel->transfer %= channel->group_count; | |
1080 | ||
1081 | if (channel->transfer == channel->consumed) { | |
1082 | channel->consumed += 1; | |
1083 | channel->consumed %= channel->group_count; | |
1084 | } | |
a14eddda VK |
1085 | } |
1086 | ||
1087 | spin_unlock(&channel->group_offsets_lock); | |
1088 | ||
a14eddda VK |
1089 | wake_up_interruptible(&channel->wq); |
1090 | } | |
1091 | ||
/*
 * Shared interrupt handler for the card.  Reads the bridge and FPGA
 * status registers, dispatches DMA completions to the per-channel
 * handler, and accounts PLL/FIFO error events in the channel
 * counters.  Returns IRQ_HANDLED only when the bridge reports an
 * FPGA interrupt; IRQ_NONE otherwise (the line is shared).
 */
static irqreturn_t poch_irq_handler(int irq, void *p)
{
	struct poch_dev *poch_dev = p;
	void __iomem *bridge = poch_dev->bridge_iomem;
	void __iomem *fpga = poch_dev->fpga_iomem;
	struct channel_info *channel_rx = &poch_dev->channels[CHNO_RX_CHANNEL];
	struct channel_info *channel_tx = &poch_dev->channels[CHNO_TX_CHANNEL];
	u32 bridge_stat;
	u32 fpga_stat;
	u32 dma_stat;

	/* Latch all three status registers before acting on them. */
	bridge_stat = ioread32(bridge + BRIDGE_INT_STAT_REG);
	fpga_stat = ioread32(fpga + FPGA_INT_STAT_REG);
	dma_stat = ioread32(fpga + FPGA_DMA_INT_STAT_REG);

	/* Second read of each status register, innermost first -
	 * presumably acknowledges/clears the interrupt sources; the
	 * datasheet semantics are not visible here - TODO confirm. */
	ioread32(fpga + FPGA_DMA_INT_STAT_REG);
	ioread32(fpga + FPGA_INT_STAT_REG);
	ioread32(bridge + BRIDGE_INT_STAT_REG);

	if (bridge_stat & BRIDGE_INT_FPGA) {
		/* DMA completions: hand off to per-channel accounting. */
		if (fpga_stat & FPGA_INT_DMA_CORE) {
			if (dma_stat & FPGA_DMA_INT_RX)
				poch_irq_dma(channel_rx);
			if (dma_stat & FPGA_DMA_INT_TX)
				poch_irq_dma(channel_tx);
		}
		/* Error events: bump counters for user space to read
		 * via POCH_IOC_GET_COUNTERS. */
		if (fpga_stat & FPGA_INT_PLL_UNLOCKED) {
			channel_tx->counters.pll_unlock++;
			channel_rx->counters.pll_unlock++;
			if (printk_ratelimit())
				printk(KERN_WARNING PFX "PLL unlocked\n");
		}
		if (fpga_stat & FPGA_INT_TX_FF_EMPTY)
			channel_tx->counters.fifo_empty++;
		if (fpga_stat & FPGA_INT_TX_FF_OVRFLW)
			channel_tx->counters.fifo_overflow++;
		if (fpga_stat & FPGA_INT_RX_FF_EMPTY)
			channel_rx->counters.fifo_empty++;
		if (fpga_stat & FPGA_INT_RX_FF_OVRFLW)
			channel_rx->counters.fifo_overflow++;

		/*
		 * FIXME: These errors should be notified through the
		 * poll interface as POLLERR.
		 */

		/* Re-enable interrupts. */
		iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
1146 | ||
1147 | static void poch_class_dev_unregister(struct poch_dev *poch_dev, int id) | |
1148 | { | |
1149 | int i, j; | |
1150 | int nattrs; | |
1151 | struct channel_info *channel; | |
1152 | dev_t devno; | |
1153 | ||
1154 | if (poch_dev->dev == NULL) | |
1155 | return; | |
1156 | ||
1157 | for (i = 0; i < poch_dev->nchannels; i++) { | |
1158 | channel = &poch_dev->channels[i]; | |
1159 | devno = poch_first_dev + (id * poch_dev->nchannels) + i; | |
1160 | ||
1161 | if (!channel->dev) | |
1162 | continue; | |
1163 | ||
1164 | nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]); | |
1165 | for (j = 0; j < nattrs; j++) | |
1166 | device_remove_file(channel->dev, poch_class_attrs[j]); | |
1167 | ||
1168 | device_unregister(channel->dev); | |
1169 | } | |
1170 | ||
1171 | device_unregister(poch_dev->dev); | |
1172 | } | |
1173 | ||
1174 | static int __devinit poch_class_dev_register(struct poch_dev *poch_dev, | |
1175 | int id) | |
1176 | { | |
1177 | struct device *dev = &poch_dev->pci_dev->dev; | |
1178 | int i, j; | |
1179 | int nattrs; | |
1180 | int ret; | |
1181 | struct channel_info *channel; | |
1182 | dev_t devno; | |
1183 | ||
1184 | poch_dev->dev = device_create(poch_cls, &poch_dev->pci_dev->dev, | |
1185 | MKDEV(0, 0), NULL, "poch%d", id); | |
1186 | if (IS_ERR(poch_dev->dev)) { | |
1187 | dev_err(dev, "error creating parent class device"); | |
1188 | ret = PTR_ERR(poch_dev->dev); | |
1189 | poch_dev->dev = NULL; | |
1190 | return ret; | |
1191 | } | |
1192 | ||
1193 | for (i = 0; i < poch_dev->nchannels; i++) { | |
1194 | channel = &poch_dev->channels[i]; | |
1195 | ||
1196 | devno = poch_first_dev + (id * poch_dev->nchannels) + i; | |
1197 | channel->dev = device_create(poch_cls, poch_dev->dev, devno, | |
1198 | NULL, "ch%d", i); | |
1199 | if (IS_ERR(channel->dev)) { | |
1200 | dev_err(dev, "error creating channel class device"); | |
1201 | ret = PTR_ERR(channel->dev); | |
1202 | channel->dev = NULL; | |
1203 | poch_class_dev_unregister(poch_dev, id); | |
1204 | return ret; | |
1205 | } | |
1206 | ||
1207 | dev_set_drvdata(channel->dev, channel); | |
1208 | nattrs = sizeof(poch_class_attrs)/sizeof(poch_class_attrs[0]); | |
1209 | for (j = 0; j < nattrs; j++) { | |
1210 | ret = device_create_file(channel->dev, | |
1211 | poch_class_attrs[j]); | |
1212 | if (ret) { | |
1213 | dev_err(dev, "error creating attribute file"); | |
1214 | poch_class_dev_unregister(poch_dev, id); | |
1215 | return ret; | |
1216 | } | |
1217 | } | |
1218 | } | |
1219 | ||
1220 | return 0; | |
1221 | } | |
1222 | ||
1223 | static int __devinit poch_pci_probe(struct pci_dev *pdev, | |
1224 | const struct pci_device_id *pci_id) | |
1225 | { | |
1226 | struct device *dev = &pdev->dev; | |
1227 | struct poch_dev *poch_dev; | |
1228 | struct uio_info *uio; | |
1229 | int ret; | |
1230 | int id; | |
1231 | int i; | |
1232 | ||
1233 | poch_dev = kzalloc(sizeof(struct poch_dev), GFP_KERNEL); | |
1234 | if (!poch_dev) { | |
1235 | dev_err(dev, "error allocating priv. data memory\n"); | |
1236 | return -ENOMEM; | |
1237 | } | |
1238 | ||
1239 | poch_dev->pci_dev = pdev; | |
1240 | uio = &poch_dev->uio; | |
1241 | ||
1242 | pci_set_drvdata(pdev, poch_dev); | |
1243 | ||
1244 | spin_lock_init(&poch_dev->iomem_lock); | |
1245 | ||
1246 | poch_dev->nchannels = POCH_NCHANNELS; | |
1247 | poch_dev->channels[CHNO_RX_CHANNEL].dir = CHANNEL_DIR_RX; | |
1248 | poch_dev->channels[CHNO_TX_CHANNEL].dir = CHANNEL_DIR_TX; | |
1249 | ||
1250 | for (i = 0; i < poch_dev->nchannels; i++) { | |
1251 | init_waitqueue_head(&poch_dev->channels[i].wq); | |
1252 | atomic_set(&poch_dev->channels[i].free, 1); | |
1253 | atomic_set(&poch_dev->channels[i].inited, 0); | |
1254 | } | |
1255 | ||
1256 | ret = pci_enable_device(pdev); | |
1257 | if (ret) { | |
1258 | dev_err(dev, "error enabling device\n"); | |
1259 | goto out_free; | |
1260 | } | |
1261 | ||
1262 | ret = pci_request_regions(pdev, "poch"); | |
1263 | if (ret) { | |
1264 | dev_err(dev, "error requesting resources\n"); | |
1265 | goto out_disable; | |
1266 | } | |
1267 | ||
1268 | uio->mem[0].addr = pci_resource_start(pdev, 1); | |
1269 | if (!uio->mem[0].addr) { | |
1270 | dev_err(dev, "invalid BAR1\n"); | |
1271 | ret = -ENODEV; | |
1272 | goto out_release; | |
1273 | } | |
1274 | ||
1275 | uio->mem[0].size = pci_resource_len(pdev, 1); | |
1276 | uio->mem[0].memtype = UIO_MEM_PHYS; | |
1277 | ||
1278 | uio->name = "poch"; | |
1279 | uio->version = "0.0.1"; | |
1280 | uio->irq = -1; | |
1281 | ret = uio_register_device(dev, uio); | |
1282 | if (ret) { | |
1283 | dev_err(dev, "error register UIO device: %d\n", ret); | |
1284 | goto out_release; | |
1285 | } | |
1286 | ||
1287 | poch_dev->bridge_iomem = ioremap(pci_resource_start(pdev, 0), | |
1288 | pci_resource_len(pdev, 0)); | |
1289 | if (poch_dev->bridge_iomem == NULL) { | |
1290 | dev_err(dev, "error mapping bridge (bar0) registers\n"); | |
1291 | ret = -ENOMEM; | |
1292 | goto out_uio_unreg; | |
1293 | } | |
1294 | ||
1295 | poch_dev->fpga_iomem = ioremap(pci_resource_start(pdev, 1), | |
1296 | pci_resource_len(pdev, 1)); | |
1297 | if (poch_dev->fpga_iomem == NULL) { | |
1298 | dev_err(dev, "error mapping fpga (bar1) registers\n"); | |
1299 | ret = -ENOMEM; | |
1300 | goto out_bar0_unmap; | |
1301 | } | |
1302 | ||
1303 | ret = request_irq(pdev->irq, poch_irq_handler, IRQF_SHARED, | |
e9133972 | 1304 | dev_name(dev), poch_dev); |
a14eddda VK |
1305 | if (ret) { |
1306 | dev_err(dev, "error requesting IRQ %u\n", pdev->irq); | |
1307 | ret = -ENOMEM; | |
1308 | goto out_bar1_unmap; | |
1309 | } | |
1310 | ||
1311 | if (!idr_pre_get(&poch_ids, GFP_KERNEL)) { | |
1312 | dev_err(dev, "error allocating memory ids\n"); | |
1313 | ret = -ENOMEM; | |
1314 | goto out_free_irq; | |
1315 | } | |
1316 | ||
1317 | idr_get_new(&poch_ids, poch_dev, &id); | |
1318 | if (id >= MAX_POCH_CARDS) { | |
1319 | dev_err(dev, "minors exhausted\n"); | |
1320 | ret = -EBUSY; | |
1321 | goto out_free_irq; | |
1322 | } | |
1323 | ||
1324 | cdev_init(&poch_dev->cdev, &poch_fops); | |
1325 | poch_dev->cdev.owner = THIS_MODULE; | |
1326 | ret = cdev_add(&poch_dev->cdev, | |
1327 | poch_first_dev + (id * poch_dev->nchannels), | |
1328 | poch_dev->nchannels); | |
1329 | if (ret) { | |
1330 | dev_err(dev, "error register character device\n"); | |
1331 | goto out_idr_remove; | |
1332 | } | |
1333 | ||
1334 | ret = poch_class_dev_register(poch_dev, id); | |
1335 | if (ret) | |
1336 | goto out_cdev_del; | |
1337 | ||
1338 | return 0; | |
1339 | ||
1340 | out_cdev_del: | |
1341 | cdev_del(&poch_dev->cdev); | |
1342 | out_idr_remove: | |
1343 | idr_remove(&poch_ids, id); | |
1344 | out_free_irq: | |
1345 | free_irq(pdev->irq, poch_dev); | |
1346 | out_bar1_unmap: | |
1347 | iounmap(poch_dev->fpga_iomem); | |
1348 | out_bar0_unmap: | |
1349 | iounmap(poch_dev->bridge_iomem); | |
1350 | out_uio_unreg: | |
1351 | uio_unregister_device(uio); | |
1352 | out_release: | |
1353 | pci_release_regions(pdev); | |
1354 | out_disable: | |
1355 | pci_disable_device(pdev); | |
1356 | out_free: | |
1357 | kfree(poch_dev); | |
1358 | return ret; | |
1359 | } | |
1360 | ||
1361 | /* | |
1362 | * FIXME: We are yet to handle the hot unplug case. | |
1363 | */ | |
1364 | static void poch_pci_remove(struct pci_dev *pdev) | |
1365 | { | |
1366 | struct poch_dev *poch_dev = pci_get_drvdata(pdev); | |
1367 | struct uio_info *uio = &poch_dev->uio; | |
1368 | unsigned int minor = MINOR(poch_dev->cdev.dev); | |
1369 | unsigned int id = minor / poch_dev->nchannels; | |
1370 | ||
a14eddda VK |
1371 | poch_class_dev_unregister(poch_dev, id); |
1372 | cdev_del(&poch_dev->cdev); | |
1373 | idr_remove(&poch_ids, id); | |
1374 | free_irq(pdev->irq, poch_dev); | |
7dadbbcf VK |
1375 | iounmap(poch_dev->fpga_iomem); |
1376 | iounmap(poch_dev->bridge_iomem); | |
a14eddda VK |
1377 | uio_unregister_device(uio); |
1378 | pci_release_regions(pdev); | |
1379 | pci_disable_device(pdev); | |
1380 | pci_set_drvdata(pdev, NULL); | |
1381 | iounmap(uio->mem[0].internal_addr); | |
1382 | ||
1383 | kfree(poch_dev); | |
1384 | } | |
1385 | ||
/* PCI IDs handled by this driver: the Redrapids Pocket Change
 * CardBus card only. */
static const struct pci_device_id poch_pci_ids[] /* __devinitconst */ = {
	{ PCI_DEVICE(PCI_VENDOR_ID_RRAPIDS,
		     PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE) },
	{ 0, }
};

/* PCI driver glue; probe/remove do all the real setup/teardown. */
static struct pci_driver poch_pci_driver = {
	.name = DRV_NAME,
	.id_table = poch_pci_ids,
	.probe = poch_pci_probe,
	.remove = poch_pci_remove,
};
1398 | ||
1399 | static int __init poch_init_module(void) | |
1400 | { | |
1401 | int ret = 0; | |
1402 | ||
1403 | ret = alloc_chrdev_region(&poch_first_dev, 0, | |
1404 | MAX_POCH_DEVICES, DRV_NAME); | |
1405 | if (ret) { | |
1406 | printk(KERN_ERR PFX "error allocating device no."); | |
1407 | return ret; | |
1408 | } | |
1409 | ||
1410 | poch_cls = class_create(THIS_MODULE, "pocketchange"); | |
1411 | if (IS_ERR(poch_cls)) { | |
1412 | ret = PTR_ERR(poch_cls); | |
1413 | goto out_unreg_chrdev; | |
1414 | } | |
1415 | ||
1416 | ret = pci_register_driver(&poch_pci_driver); | |
1417 | if (ret) { | |
1418 | printk(KERN_ERR PFX "error register PCI device"); | |
1419 | goto out_class_destroy; | |
1420 | } | |
1421 | ||
1422 | return 0; | |
1423 | ||
1424 | out_class_destroy: | |
1425 | class_destroy(poch_cls); | |
1426 | ||
1427 | out_unreg_chrdev: | |
1428 | unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES); | |
1429 | ||
1430 | return ret; | |
1431 | } | |
1432 | ||
/*
 * Module exit: unregister the PCI driver first (so no new devices
 * can appear and all instances are removed), then destroy the class
 * and release the char-dev region - the exact reverse of
 * poch_init_module().
 */
static void __exit poch_exit_module(void)
{
	pci_unregister_driver(&poch_pci_driver);
	class_destroy(poch_cls);
	unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
}
1439 | ||
/* Module entry/exit points and license declaration. */
module_init(poch_init_module);
module_exit(poch_exit_module);

MODULE_LICENSE("GPL v2");