/*
 * driver for Earthsoft PT1
 *
 * Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
 *
 * based on pt1dvr - http://pt1dvr.sourceforge.jp/
 *	by Tomoaki Ishikawa <tomy@users.sourceforge.jp>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include "dvbdev.h"
#include "dvb_demux.h"
#include "dmxdev.h"
#include "dvb_net.h"
#include "dvb_frontend.h"

#include "va1j5jf8007t.h"
#include "va1j5jf8007s.h"

#define DRIVER_NAME "earth-pt1"

#define PT1_PAGE_SHIFT 12
#define PT1_PAGE_SIZE (1 << PT1_PAGE_SHIFT)
#define PT1_NR_UPACKETS 1024
#define PT1_NR_BUFS 511

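/*
 * DMA layout as inferred from the structures below and their use in
 * pt1_filter()/pt1_init_tables() (no hardware documentation consulted):
 * the board follows a circular chain of table pages, each listing the page
 * frame number of the next table and of PT1_NR_BUFS buffer pages.  Every
 * buffer page is filled with PT1_NR_UPACKETS little-endian 32-bit
 * "upackets", which the driver later unpacks into 188-byte TS packets.
 */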
struct pt1_buffer_page {
	__le32 upackets[PT1_NR_UPACKETS];
};

struct pt1_table_page {
	__le32 next_pfn;
	__le32 buf_pfns[PT1_NR_BUFS];
};

struct pt1_buffer {
	struct pt1_buffer_page *page;
	dma_addr_t addr;
};

struct pt1_table {
	struct pt1_table_page *page;
	dma_addr_t addr;
	struct pt1_buffer bufs[PT1_NR_BUFS];
};

#define PT1_NR_ADAPS 4

struct pt1_adapter;

struct pt1 {
	struct pci_dev *pdev;
	void __iomem *regs;
	struct i2c_adapter i2c_adap;
	int i2c_running;
	struct pt1_adapter *adaps[PT1_NR_ADAPS];
	struct pt1_table *tables;
	struct task_struct *kthread;
};

struct pt1_adapter {
	struct pt1 *pt1;
	int index;

	u8 *buf;
	int upacket_count;
	int packet_count;

	struct dvb_adapter adap;
	struct dvb_demux demux;
	int users;
	struct dmxdev dmxdev;
	struct dvb_net net;
	struct dvb_frontend *fe;
	int (*orig_set_voltage)(struct dvb_frontend *fe,
				fe_sec_voltage_t voltage);
};

#define pt1_printk(level, pt1, format, arg...)	\
	dev_printk(level, &(pt1)->pdev->dev, format, ##arg)

static void pt1_write_reg(struct pt1 *pt1, int reg, u32 data)
{
	writel(data, pt1->regs + reg * 4);
}

static u32 pt1_read_reg(struct pt1 *pt1, int reg)
{
	return readl(pt1->regs + reg * 4);
}

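/*
 * Number of DMA table pages to allocate, tunable via the "nr_tables"
 * module parameter.  Judging from the structures above, each table spans
 * PT1_NR_BUFS 4 KiB buffer pages, so the default of 64 tables works out
 * to roughly 64 * 511 * 4 KiB (about 128 MiB) of coherent DMA memory.
 */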
static int pt1_nr_tables = 64;
module_param_named(nr_tables, pt1_nr_tables, int, 0);

static void pt1_increment_table_count(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x00000020);
}

static void pt1_init_table_count(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x00000010);
}

static void pt1_register_tables(struct pt1 *pt1, u32 first_pfn)
{
	pt1_write_reg(pt1, 5, first_pfn);
	pt1_write_reg(pt1, 0, 0x0c000040);
}

static void pt1_unregister_tables(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x08080000);
}

static int pt1_sync(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 57; i++) {
		if (pt1_read_reg(pt1, 0) & 0x20000000)
			return 0;
		pt1_write_reg(pt1, 0, 0x00000008);
	}
	pt1_printk(KERN_ERR, pt1, "could not sync\n");
	return -EIO;
}

static u64 pt1_identify(struct pt1 *pt1)
{
	int i;
	u64 id;
	id = 0;
	for (i = 0; i < 57; i++) {
		id |= (u64)(pt1_read_reg(pt1, 0) >> 30 & 1) << i;
		pt1_write_reg(pt1, 0, 0x00000008);
	}
	return id;
}

static int pt1_unlock(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x00000008);
	for (i = 0; i < 3; i++) {
		if (pt1_read_reg(pt1, 0) & 0x80000000)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	pt1_printk(KERN_ERR, pt1, "could not unlock\n");
	return -EIO;
}

static int pt1_reset_pci(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x01010000);
	pt1_write_reg(pt1, 0, 0x01000000);
	for (i = 0; i < 10; i++) {
		if (pt1_read_reg(pt1, 0) & 0x00000001)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	pt1_printk(KERN_ERR, pt1, "could not reset PCI\n");
	return -EIO;
}

static int pt1_reset_ram(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x02020000);
	pt1_write_reg(pt1, 0, 0x02000000);
	for (i = 0; i < 10; i++) {
		if (pt1_read_reg(pt1, 0) & 0x00000002)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	pt1_printk(KERN_ERR, pt1, "could not reset RAM\n");
	return -EIO;
}

static int pt1_do_enable_ram(struct pt1 *pt1)
{
	int i, j;
	u32 status;
	status = pt1_read_reg(pt1, 0) & 0x00000004;
	pt1_write_reg(pt1, 0, 0x00000002);
	for (i = 0; i < 10; i++) {
		for (j = 0; j < 1024; j++) {
			if ((pt1_read_reg(pt1, 0) & 0x00000004) != status)
				return 0;
		}
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	pt1_printk(KERN_ERR, pt1, "could not enable RAM\n");
	return -EIO;
}

static int pt1_enable_ram(struct pt1 *pt1)
{
	int i, ret;
	schedule_timeout_uninterruptible((HZ + 999) / 1000);
	for (i = 0; i < 10; i++) {
		ret = pt1_do_enable_ram(pt1);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static void pt1_disable_ram(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x0b0b0000);
}

static void pt1_set_stream(struct pt1 *pt1, int index, int enabled)
{
	pt1_write_reg(pt1, 2, 1 << (index + 8) | enabled << index);
}

static void pt1_init_streams(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_set_stream(pt1, i, 0);
}

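/*
 * Demultiplex one buffer page.  As far as can be read from the code, each
 * upacket carries a 1-based stream index in bits 31-29, a start-of-packet
 * flag in bit 25 and three payload bytes in bits 23-0; the 63rd upacket of
 * a TS packet contributes only two bytes (62 * 3 + 2 = 188).  Once 21
 * packets have accumulated for an adapter, they are handed to the software
 * demux in one call.
 */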
static int pt1_filter(struct pt1 *pt1, struct pt1_buffer_page *page)
{
	u32 upacket;
	int i;
	int index;
	struct pt1_adapter *adap;
	int offset;
	u8 *buf;

	if (!page->upackets[PT1_NR_UPACKETS - 1])
		return 0;

	for (i = 0; i < PT1_NR_UPACKETS; i++) {
		upacket = le32_to_cpu(page->upackets[i]);
		index = (upacket >> 29) - 1;
		if (index < 0 || index >= PT1_NR_ADAPS)
			continue;

		adap = pt1->adaps[index];
		if (upacket >> 25 & 1)
			adap->upacket_count = 0;
		else if (!adap->upacket_count)
			continue;

		buf = adap->buf;
		offset = adap->packet_count * 188 + adap->upacket_count * 3;
		buf[offset] = upacket >> 16;
		buf[offset + 1] = upacket >> 8;
		if (adap->upacket_count != 62)
			buf[offset + 2] = upacket;

		if (++adap->upacket_count >= 63) {
			adap->upacket_count = 0;
			if (++adap->packet_count >= 21) {
				dvb_dmx_swfilter_packets(&adap->demux, buf, 21);
				adap->packet_count = 0;
			}
		}
	}

	page->upackets[PT1_NR_UPACKETS - 1] = 0;
	return 1;
}

static int pt1_thread(void *data)
{
	struct pt1 *pt1;
	int table_index;
	int buf_index;
	struct pt1_buffer_page *page;

	pt1 = data;
	set_freezable();

	table_index = 0;
	buf_index = 0;

	while (!kthread_should_stop()) {
		try_to_freeze();

		page = pt1->tables[table_index].bufs[buf_index].page;
		if (!pt1_filter(pt1, page)) {
			schedule_timeout_interruptible((HZ + 999) / 1000);
			continue;
		}

		if (++buf_index >= PT1_NR_BUFS) {
			pt1_increment_table_count(pt1);
			buf_index = 0;
			if (++table_index >= pt1_nr_tables)
				table_index = 0;
		}
	}

	return 0;
}

static void pt1_free_page(struct pt1 *pt1, void *page, dma_addr_t addr)
{
	dma_free_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, page, addr);
}

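/*
 * Allocate one naturally aligned 4 KiB DMA page and also report its page
 * frame number.  The BUG_ON()s below reflect the apparent hardware
 * constraint that buffers are addressed by 32-bit PFN, i.e. the bus
 * address must fit in 44 bits.
 */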
static void *pt1_alloc_page(struct pt1 *pt1, dma_addr_t *addrp, u32 *pfnp)
{
	void *page;
	dma_addr_t addr;

	page = dma_alloc_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, &addr,
				  GFP_KERNEL);
	if (page == NULL)
		return NULL;

	BUG_ON(addr & (PT1_PAGE_SIZE - 1));
	BUG_ON(addr >> PT1_PAGE_SHIFT >> 31 >> 1);

	*addrp = addr;
	*pfnp = addr >> PT1_PAGE_SHIFT;
	return page;
}

static void pt1_cleanup_buffer(struct pt1 *pt1, struct pt1_buffer *buf)
{
	pt1_free_page(pt1, buf->page, buf->addr);
}

static int
pt1_init_buffer(struct pt1 *pt1, struct pt1_buffer *buf, u32 *pfnp)
{
	struct pt1_buffer_page *page;
	dma_addr_t addr;

	page = pt1_alloc_page(pt1, &addr, pfnp);
	if (page == NULL)
		return -ENOMEM;

	page->upackets[PT1_NR_UPACKETS - 1] = 0;

	buf->page = page;
	buf->addr = addr;
	return 0;
}

static void pt1_cleanup_table(struct pt1 *pt1, struct pt1_table *table)
{
	int i;

	for (i = 0; i < PT1_NR_BUFS; i++)
		pt1_cleanup_buffer(pt1, &table->bufs[i]);

	pt1_free_page(pt1, table->page, table->addr);
}

static int
pt1_init_table(struct pt1 *pt1, struct pt1_table *table, u32 *pfnp)
{
	struct pt1_table_page *page;
	dma_addr_t addr;
	int i, ret;
	u32 buf_pfn;

	page = pt1_alloc_page(pt1, &addr, pfnp);
	if (page == NULL)
		return -ENOMEM;

	for (i = 0; i < PT1_NR_BUFS; i++) {
		ret = pt1_init_buffer(pt1, &table->bufs[i], &buf_pfn);
		if (ret < 0)
			goto err;

		page->buf_pfns[i] = cpu_to_le32(buf_pfn);
	}

	pt1_increment_table_count(pt1);
	table->page = page;
	table->addr = addr;
	return 0;

err:
	while (i--)
		pt1_cleanup_buffer(pt1, &table->bufs[i]);

	pt1_free_page(pt1, page, addr);
	return ret;
}

static void pt1_cleanup_tables(struct pt1 *pt1)
{
	struct pt1_table *tables;
	int i;

	tables = pt1->tables;
	pt1_unregister_tables(pt1);

	for (i = 0; i < pt1_nr_tables; i++)
		pt1_cleanup_table(pt1, &tables[i]);

	vfree(tables);
}

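/*
 * Build the table chain: each table page's next_pfn points at the next
 * table, the last one points back at the first, and the first PFN is
 * handed to the hardware through pt1_register_tables().  pt1_thread()
 * consumes buffers in the same order, wrapping to table 0 after
 * pt1_nr_tables tables.
 */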
static int pt1_init_tables(struct pt1 *pt1)
{
	struct pt1_table *tables;
	int i, ret;
	u32 first_pfn, pfn;

	tables = vmalloc(sizeof(struct pt1_table) * pt1_nr_tables);
	if (tables == NULL)
		return -ENOMEM;

	pt1_init_table_count(pt1);

	i = 0;
	if (pt1_nr_tables) {
		ret = pt1_init_table(pt1, &tables[0], &first_pfn);
		if (ret)
			goto err;
		i++;
	}

	while (i < pt1_nr_tables) {
		ret = pt1_init_table(pt1, &tables[i], &pfn);
		if (ret)
			goto err;
		tables[i - 1].page->next_pfn = cpu_to_le32(pfn);
		i++;
	}

	tables[pt1_nr_tables - 1].page->next_pfn = cpu_to_le32(first_pfn);

	pt1_register_tables(pt1, first_pfn);
	pt1->tables = tables;
	return 0;

err:
	while (i--)
		pt1_cleanup_table(pt1, &tables[i]);

	vfree(tables);
	return ret;
}

static int pt1_start_feed(struct dvb_demux_feed *feed)
{
	struct pt1_adapter *adap;
	adap = container_of(feed->demux, struct pt1_adapter, demux);
	if (!adap->users++)
		pt1_set_stream(adap->pt1, adap->index, 1);
	return 0;
}

static int pt1_stop_feed(struct dvb_demux_feed *feed)
{
	struct pt1_adapter *adap;
	adap = container_of(feed->demux, struct pt1_adapter, demux);
	if (!--adap->users)
		pt1_set_stream(adap->pt1, adap->index, 0);
	return 0;
}

static void
pt1_set_power(struct pt1 *pt1, int power, int lnb, int reset)
{
	pt1_write_reg(pt1, 1, power | lnb << 1 | !reset << 3);
}

static int pt1_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
	struct pt1_adapter *adap;
	int lnb;

	adap = container_of(fe->dvb, struct pt1_adapter, adap);

	switch (voltage) {
	case SEC_VOLTAGE_13: /* actually 11V */
		lnb = 2;
		break;
	case SEC_VOLTAGE_18: /* actually 15V */
		lnb = 3;
		break;
	case SEC_VOLTAGE_OFF:
		lnb = 0;
		break;
	default:
		return -EINVAL;
	}

	pt1_set_power(adap->pt1, 1, lnb, 0);

	if (adap->orig_set_voltage)
		return adap->orig_set_voltage(fe, voltage);
	else
		return 0;
}

static void pt1_free_adapter(struct pt1_adapter *adap)
{
	dvb_unregister_frontend(adap->fe);
	dvb_net_release(&adap->net);
	adap->demux.dmx.close(&adap->demux.dmx);
	dvb_dmxdev_release(&adap->dmxdev);
	dvb_dmx_release(&adap->demux);
	dvb_unregister_adapter(&adap->adap);
	free_page((unsigned long)adap->buf);
	kfree(adap);
}

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

static struct pt1_adapter *
pt1_alloc_adapter(struct pt1 *pt1, struct dvb_frontend *fe)
{
	struct pt1_adapter *adap;
	void *buf;
	struct dvb_adapter *dvb_adap;
	struct dvb_demux *demux;
	struct dmxdev *dmxdev;
	int ret;

	adap = kzalloc(sizeof(struct pt1_adapter), GFP_KERNEL);
	if (!adap) {
		ret = -ENOMEM;
		goto err;
	}

	adap->pt1 = pt1;

	adap->orig_set_voltage = fe->ops.set_voltage;
	fe->ops.set_voltage = pt1_set_voltage;

	buf = (u8 *)__get_free_page(GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	adap->buf = buf;
	adap->upacket_count = 0;
	adap->packet_count = 0;

	dvb_adap = &adap->adap;
	dvb_adap->priv = adap;
	ret = dvb_register_adapter(dvb_adap, DRIVER_NAME, THIS_MODULE,
				   &pt1->pdev->dev, adapter_nr);
	if (ret < 0)
		goto err_free_page;

	demux = &adap->demux;
	demux->dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING;
	demux->priv = adap;
	demux->feednum = 256;
	demux->filternum = 256;
	demux->start_feed = pt1_start_feed;
	demux->stop_feed = pt1_stop_feed;
	demux->write_to_decoder = NULL;
	ret = dvb_dmx_init(demux);
	if (ret < 0)
		goto err_unregister_adapter;

	dmxdev = &adap->dmxdev;
	dmxdev->filternum = 256;
	dmxdev->demux = &demux->dmx;
	dmxdev->capabilities = 0;
	ret = dvb_dmxdev_init(dmxdev, dvb_adap);
	if (ret < 0)
		goto err_dmx_release;

	dvb_net_init(dvb_adap, &adap->net, &demux->dmx);

	ret = dvb_register_frontend(dvb_adap, fe);
	if (ret < 0)
		goto err_net_release;
	adap->fe = fe;

	return adap;

err_net_release:
	dvb_net_release(&adap->net);
	adap->demux.dmx.close(&adap->demux.dmx);
	dvb_dmxdev_release(&adap->dmxdev);
err_dmx_release:
	dvb_dmx_release(demux);
err_unregister_adapter:
	dvb_unregister_adapter(dvb_adap);
err_free_page:
	free_page((unsigned long)buf);
err_kfree:
	kfree(adap);
err:
	return ERR_PTR(ret);
}

static void pt1_cleanup_adapters(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_free_adapter(pt1->adaps[i]);
}

struct pt1_config {
	struct va1j5jf8007s_config va1j5jf8007s_config;
	struct va1j5jf8007t_config va1j5jf8007t_config;
};

static const struct pt1_config pt1_configs[2] = {
	{
		{ .demod_address = 0x1b },
		{ .demod_address = 0x1a },
	}, {
		{ .demod_address = 0x19 },
		{ .demod_address = 0x18 },
	},
};

static int pt1_init_adapters(struct pt1 *pt1)
{
	int i, j;
	struct i2c_adapter *i2c_adap;
	const struct pt1_config *config;
	struct dvb_frontend *fe[4];
	struct pt1_adapter *adap;
	int ret;

	i = 0;
	j = 0;

	i2c_adap = &pt1->i2c_adap;
	do {
		config = &pt1_configs[i / 2];

		fe[i] = va1j5jf8007s_attach(&config->va1j5jf8007s_config,
					    i2c_adap);
		if (!fe[i]) {
			ret = -ENODEV; /* This does not sound nice... */
			goto err;
		}
		i++;

		fe[i] = va1j5jf8007t_attach(&config->va1j5jf8007t_config,
					    i2c_adap);
		if (!fe[i]) {
			ret = -ENODEV;
			goto err;
		}
		i++;

		ret = va1j5jf8007s_prepare(fe[i - 2]);
		if (ret < 0)
			goto err;

		ret = va1j5jf8007t_prepare(fe[i - 1]);
		if (ret < 0)
			goto err;

	} while (i < 4);

	do {
		adap = pt1_alloc_adapter(pt1, fe[j]);
		if (IS_ERR(adap)) {
			/* propagate the real error instead of the stale ret
			 * left over from the last successful prepare call */
			ret = PTR_ERR(adap);
			goto err;
		}
		adap->index = j;
		pt1->adaps[j] = adap;
	} while (++j < 4);

	return 0;

err:
	while (i-- > j)
		fe[i]->ops.release(fe[i]);

	while (j--)
		pt1_free_adapter(pt1->adaps[j]);

	return ret;
}

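/*
 * The I2C bus appears to be run by a small sequencer in the FPGA rather
 * than bit-banged directly: each pt1_i2c_emit() call stores one step
 * (SCL/SDA levels, a read-enable flag and the index of the next step)
 * through register 4, and pt1_i2c_end() kicks off execution and polls
 * register 0 until the sequencer goes idle.
 */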
static void pt1_i2c_emit(struct pt1 *pt1, int addr, int busy, int read_enable,
			 int clock, int data, int next_addr)
{
	pt1_write_reg(pt1, 4, addr << 18 | busy << 13 | read_enable << 12 |
		      !clock << 11 | !data << 10 | next_addr);
}

static void pt1_i2c_write_bit(struct pt1 *pt1, int addr, int *addrp, int data)
{
	pt1_i2c_emit(pt1, addr, 1, 0, 0, data, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, data, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, data, addr + 3);
	*addrp = addr + 3;
}

static void pt1_i2c_read_bit(struct pt1 *pt1, int addr, int *addrp)
{
	pt1_i2c_emit(pt1, addr, 1, 0, 0, 1, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 1, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 1, 1, 1, addr + 3);
	pt1_i2c_emit(pt1, addr + 3, 1, 0, 0, 1, addr + 4);
	*addrp = addr + 4;
}

static void pt1_i2c_write_byte(struct pt1 *pt1, int addr, int *addrp, int data)
{
	int i;
	for (i = 0; i < 8; i++)
		pt1_i2c_write_bit(pt1, addr, &addr, data >> (7 - i) & 1);
	pt1_i2c_write_bit(pt1, addr, &addr, 1);
	*addrp = addr;
}

static void pt1_i2c_read_byte(struct pt1 *pt1, int addr, int *addrp, int last)
{
	int i;
	for (i = 0; i < 8; i++)
		pt1_i2c_read_bit(pt1, addr, &addr);
	pt1_i2c_write_bit(pt1, addr, &addr, last);
	*addrp = addr;
}

static void pt1_i2c_prepare(struct pt1 *pt1, int addr, int *addrp)
{
	pt1_i2c_emit(pt1, addr, 1, 0, 1, 1, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, 0, addr + 3);
	*addrp = addr + 3;
}

static void
pt1_i2c_write_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
	int i;
	pt1_i2c_prepare(pt1, addr, &addr);
	pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1);
	for (i = 0; i < msg->len; i++)
		pt1_i2c_write_byte(pt1, addr, &addr, msg->buf[i]);
	*addrp = addr;
}

static void
pt1_i2c_read_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
	int i;
	pt1_i2c_prepare(pt1, addr, &addr);
	pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1 | 1);
	for (i = 0; i < msg->len; i++)
		pt1_i2c_read_byte(pt1, addr, &addr, i == msg->len - 1);
	*addrp = addr;
}

static int pt1_i2c_end(struct pt1 *pt1, int addr)
{
	pt1_i2c_emit(pt1, addr, 1, 0, 0, 0, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 1, 1, 0);

	pt1_write_reg(pt1, 0, 0x00000004);
	do {
		if (signal_pending(current))
			return -EINTR;
		schedule_timeout_interruptible((HZ + 999) / 1000);
	} while (pt1_read_reg(pt1, 0) & 0x00000080);
	return 0;
}

static void pt1_i2c_begin(struct pt1 *pt1, int *addrp)
{
	int addr;
	addr = 0;

	pt1_i2c_emit(pt1, addr, 0, 0, 1, 1, addr /* itself */);
	addr = addr + 1;

	if (!pt1->i2c_running) {
		pt1_i2c_emit(pt1, addr, 1, 0, 1, 1, addr + 1);
		pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
		addr = addr + 2;
		pt1->i2c_running = 1;
	}
	*addrp = addr;
}

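/*
 * master_xfer implementation.  Only two message shapes are handled: a
 * plain write, and a write immediately followed by a read of at most four
 * bytes, whose data is collected from register 2 once the sequencer has
 * finished.  A read that is not preceded by a write is rejected with
 * -ENOTSUPP.
 */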
static int pt1_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct pt1 *pt1;
	int i;
	struct i2c_msg *msg, *next_msg;
	int addr, ret;
	u16 len;
	u32 word;

	pt1 = i2c_get_adapdata(adap);

	for (i = 0; i < num; i++) {
		msg = &msgs[i];
		if (msg->flags & I2C_M_RD)
			return -ENOTSUPP;

		if (i + 1 < num)
			next_msg = &msgs[i + 1];
		else
			next_msg = NULL;

		if (next_msg && next_msg->flags & I2C_M_RD) {
			i++;

			len = next_msg->len;
			if (len > 4)
				return -ENOTSUPP;

			pt1_i2c_begin(pt1, &addr);
			pt1_i2c_write_msg(pt1, addr, &addr, msg);
			pt1_i2c_read_msg(pt1, addr, &addr, next_msg);
			ret = pt1_i2c_end(pt1, addr);
			if (ret < 0)
				return ret;

			word = pt1_read_reg(pt1, 2);
			while (len--) {
				next_msg->buf[len] = word;
				word >>= 8;
			}
		} else {
			pt1_i2c_begin(pt1, &addr);
			pt1_i2c_write_msg(pt1, addr, &addr, msg);
			ret = pt1_i2c_end(pt1, addr);
			if (ret < 0)
				return ret;
		}
	}

	return num;
}

static u32 pt1_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C;
}

static const struct i2c_algorithm pt1_i2c_algo = {
	.master_xfer = pt1_i2c_xfer,
	.functionality = pt1_i2c_func,
};

static void pt1_i2c_wait(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 128; i++)
		pt1_i2c_emit(pt1, 0, 0, 0, 1, 1, 0);
}

static void pt1_i2c_init(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 1024; i++)
		pt1_i2c_emit(pt1, i, 0, 0, 1, 1, 0);
}

static void __devexit pt1_remove(struct pci_dev *pdev)
{
	struct pt1 *pt1;
	void __iomem *regs;

	pt1 = pci_get_drvdata(pdev);
	regs = pt1->regs;

	kthread_stop(pt1->kthread);
	pt1_cleanup_tables(pt1);
	pt1_cleanup_adapters(pt1);
	pt1_disable_ram(pt1);
	pt1_set_power(pt1, 0, 0, 1);
	i2c_del_adapter(&pt1->i2c_adap);
	pci_set_drvdata(pdev, NULL);
	kfree(pt1);
	pci_iounmap(pdev, regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int __devinit
pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;
	void __iomem *regs;
	struct pt1 *pt1;
	struct i2c_adapter *i2c_adap;
	struct task_struct *kthread;

	ret = pci_enable_device(pdev);
	if (ret < 0)
		goto err;

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret < 0)
		goto err_pci_disable_device;

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRIVER_NAME);
	if (ret < 0)
		goto err_pci_disable_device;

	regs = pci_iomap(pdev, 0, 0);
	if (!regs) {
		ret = -EIO;
		goto err_pci_release_regions;
	}

	pt1 = kzalloc(sizeof(struct pt1), GFP_KERNEL);
	if (!pt1) {
		ret = -ENOMEM;
		goto err_pci_iounmap;
	}

	pt1->pdev = pdev;
	pt1->regs = regs;
	pci_set_drvdata(pdev, pt1);

	i2c_adap = &pt1->i2c_adap;
	i2c_adap->class = I2C_CLASS_TV_DIGITAL;
	i2c_adap->algo = &pt1_i2c_algo;
	i2c_adap->algo_data = NULL;
	i2c_adap->dev.parent = &pdev->dev;
	i2c_set_adapdata(i2c_adap, pt1);
	ret = i2c_add_adapter(i2c_adap);
	if (ret < 0)
		goto err_kfree;

	pt1_set_power(pt1, 0, 0, 1);

	pt1_i2c_init(pt1);
	pt1_i2c_wait(pt1);

	ret = pt1_sync(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	pt1_identify(pt1);

	ret = pt1_unlock(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_reset_pci(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_reset_ram(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_enable_ram(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	pt1_init_streams(pt1);

	pt1_set_power(pt1, 1, 0, 1);
	schedule_timeout_uninterruptible((HZ + 49) / 50);

	pt1_set_power(pt1, 1, 0, 0);
	schedule_timeout_uninterruptible((HZ + 999) / 1000);

	ret = pt1_init_adapters(pt1);
	if (ret < 0)
		goto err_pt1_disable_ram;

	ret = pt1_init_tables(pt1);
	if (ret < 0)
		goto err_pt1_cleanup_adapters;

	kthread = kthread_run(pt1_thread, pt1, "pt1");
	if (IS_ERR(kthread)) {
		ret = PTR_ERR(kthread);
		goto err_pt1_cleanup_tables;
	}

	pt1->kthread = kthread;
	return 0;

err_pt1_cleanup_tables:
	pt1_cleanup_tables(pt1);
err_pt1_cleanup_adapters:
	pt1_cleanup_adapters(pt1);
err_pt1_disable_ram:
	pt1_disable_ram(pt1);
	pt1_set_power(pt1, 0, 0, 1);
err_i2c_del_adapter:
	i2c_del_adapter(i2c_adap);
err_kfree:
	pci_set_drvdata(pdev, NULL);
	kfree(pt1);
err_pci_iounmap:
	pci_iounmap(pdev, regs);
err_pci_release_regions:
	pci_release_regions(pdev);
err_pci_disable_device:
	pci_disable_device(pdev);
err:
	return ret;
}

static struct pci_device_id pt1_id_table[] = {
	{ PCI_DEVICE(0x10ee, 0x211a) },
	{ },
};
MODULE_DEVICE_TABLE(pci, pt1_id_table);

static struct pci_driver pt1_driver = {
	.name		= DRIVER_NAME,
	.probe		= pt1_probe,
	.remove		= __devexit_p(pt1_remove),
	.id_table	= pt1_id_table,
};

static int __init pt1_init(void)
{
	return pci_register_driver(&pt1_driver);
}

static void __exit pt1_cleanup(void)
{
	pci_unregister_driver(&pt1_driver);
}

module_init(pt1_init);
module_exit(pt1_cleanup);

MODULE_AUTHOR("Takahito HIRANO <hiranotaka@zng.info>");
MODULE_DESCRIPTION("Earthsoft PT1 Driver");
MODULE_LICENSE("GPL");