drivers/connector/cn_queue.c
/*
 * cn_queue.c
 *
 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/connector.h>
#include <linux/delay.h>

/*
 * This job is sent to the kevent workqueue.
 * Until the first event is delivered to a callback, the connector
 * workqueue is not created, to avoid a uselessly idle kernel task.
 * Once the first event is received, we create this dedicated workqueue:
 * it is necessary because the flow of data can be high and we don't
 * want to encumber keventd with that.
 */
static void cn_queue_create(struct work_struct *work)
{
	struct cn_queue_dev *dev;

	dev = container_of(work, struct cn_queue_dev, wq_creation);

	dev->cn_queue = create_singlethread_workqueue(dev->name);
	/* If we fail, we will use keventd for all following connector jobs */
	WARN_ON(!dev->cn_queue);
}

/*
 * Queue data to be delivered to a callback.
 * If the connector workqueue has already been created, we queue the job
 * on it. Otherwise, we queue the job to kevent and also schedule the
 * creation of the connector workqueue.
 */
int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
{
	struct cn_queue_dev *pdev = cbq->pdev;

	if (likely(pdev->cn_queue))
		return queue_work(pdev->cn_queue, work);

	/* Don't create the connector workqueue twice */
	if (atomic_inc_return(&pdev->wq_requested) == 1)
		schedule_work(&pdev->wq_creation);
	else
		atomic_dec(&pdev->wq_requested);

	return schedule_work(work);
}

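/*
 * Note on the counter above: atomic_inc_return() lets exactly one
 * caller (the one that raises wq_requested from 0 to 1) schedule the
 * workqueue creation; every later caller undoes its increment, so
 * cn_queue_create() is scheduled only once per device.
 */

/*
 * Work function wrapped around every callback entry: it invokes the
 * registered callback with its private data, runs the sender's
 * destructor on the delivered data, and finally frees the scratch
 * buffer, if any, recorded in d->free.
 */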
void cn_queue_wrapper(struct work_struct *work)
{
	struct cn_callback_entry *cbq =
		container_of(work, struct cn_callback_entry, work);
	struct cn_callback_data *d = &cbq->data;

	d->callback(d->callback_priv);

	d->destruct_data(d->ddata);
	d->ddata = NULL;

	kfree(d->free);
}

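/*
 * Allocate and initialize a callback entry: record the name and cb_id
 * it will answer to, attach the callback, and prepare the work item
 * that cn_queue_wrapper() will run.
 */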
static struct cn_callback_entry *
cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
			      void (*callback)(void *))
{
	struct cn_callback_entry *cbq;

	cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
	if (!cbq) {
		printk(KERN_ERR "Failed to create new callback queue.\n");
		return NULL;
	}

	snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
	cbq->data.callback = callback;

	INIT_WORK(&cbq->work, &cn_queue_wrapper);
	return cbq;
}

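/*
 * Free a callback entry. Any jobs already queued for it are flushed
 * first, both on keventd and on the dedicated workqueue if it exists,
 * so the entry cannot still be in use when it is freed.
 */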
static void cn_queue_free_callback(struct cn_callback_entry *cbq)
{
	/* The first jobs have been sent to kevent, flush them too */
	flush_scheduled_work();
	if (cbq->pdev->cn_queue)
		flush_workqueue(cbq->pdev->cn_queue);

	kfree(cbq);
}

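/*
 * Two cb_ids are equal iff both the idx and val fields match, e.g.
 * { .idx = 1, .val = 2 } matches { .idx = 1, .val = 2 } but not
 * { .idx = 1, .val = 3 }.
 */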
int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
{
	return ((i1->idx == i2->idx) && (i1->val == i2->val));
}

int cn_queue_add_callback(struct cn_queue_dev *dev, char *name,
			  struct cb_id *id, void (*callback)(void *))
{
	struct cn_callback_entry *cbq, *__cbq;
	int found = 0;

	cbq = cn_queue_alloc_callback_entry(name, id, callback);
	if (!cbq)
		return -ENOMEM;

	atomic_inc(&dev->refcnt);
	cbq->pdev = dev;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&__cbq->id.id, id)) {
			found = 1;
			break;
		}
	}
	if (!found)
		list_add_tail(&cbq->callback_entry, &dev->queue_list);
	spin_unlock_bh(&dev->queue_lock);

	if (found) {
		cn_queue_free_callback(cbq);
		atomic_dec(&dev->refcnt);
		return -EINVAL;
	}

	cbq->seq = 0;
	cbq->group = cbq->id.id.idx;

	return 0;
}

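/*
 * A minimal usage sketch for the add/del pair. The names below
 * (my_cb_id, my_callback, "my_cn_user") are made up for illustration,
 * and the sketch assumes the cn_add_callback()/cn_del_callback()
 * wrappers exported by connector.c, which forward to the helpers in
 * this file:
 *
 *	static struct cb_id my_cb_id = { .idx = 0x123, .val = 0x456 };
 *
 *	static void my_callback(void *data)
 *	{
 *		struct cn_msg *msg = data;
 *		... process msg->data, msg->len ...
 *	}
 *
 *	err = cn_add_callback(&my_cb_id, "my_cn_user", my_callback);
 *	...
 *	cn_del_callback(&my_cb_id);
 */

/*
 * Unregister the callback matching @id: unlink it from the device list
 * and, if found, free the entry and drop the device reference taken at
 * registration time.
 */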
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
{
	struct cn_callback_entry *cbq, *n;
	int found = 0;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&cbq->id.id, id)) {
			list_del(&cbq->callback_entry);
			found = 1;
			break;
		}
	}
	spin_unlock_bh(&dev->queue_lock);

	if (found) {
		cn_queue_free_callback(cbq);
		atomic_dec(&dev->refcnt);
	}
}

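/*
 * Allocate a connector queue device bound to the given netlink socket.
 * The dedicated workqueue is not created here; it is only scheduled
 * for creation by queue_cn_work() when the first job arrives.
 */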
struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
{
	struct cn_queue_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	snprintf(dev->name, sizeof(dev->name), "%s", name);
	atomic_set(&dev->refcnt, 0);
	INIT_LIST_HEAD(&dev->queue_list);
	spin_lock_init(&dev->queue_lock);
	init_waitqueue_head(&dev->wq_created);

	dev->nls = nls;

	INIT_WORK(&dev->wq_creation, cn_queue_create);

	return dev;
}

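/*
 * Tear down a connector queue device: flush jobs still on keventd,
 * wait (up to two seconds) for a pending workqueue creation to finish,
 * drain and destroy the dedicated workqueue, unlink all remaining
 * callback entries, and only free the device once its refcount drops
 * to zero.
 */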
void cn_queue_free_dev(struct cn_queue_dev *dev)
{
	struct cn_callback_entry *cbq, *n;
	long timeout;
	DEFINE_WAIT(wait);

	/* Flush the first pending jobs queued on kevent */
	flush_scheduled_work();

	/* If the connector workqueue creation is still pending, wait for it */
	prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
	if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
		timeout = schedule_timeout(HZ * 2);
		if (!timeout && !dev->cn_queue)
			WARN_ON(1);
	}
	finish_wait(&dev->wq_created, &wait);

	if (dev->cn_queue) {
		flush_workqueue(dev->cn_queue);
		destroy_workqueue(dev->cn_queue);
	}

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
		list_del(&cbq->callback_entry);
	spin_unlock_bh(&dev->queue_lock);

	while (atomic_read(&dev->refcnt)) {
		printk(KERN_INFO "Waiting for %s to become free: refcnt=%d.\n",
		       dev->name, atomic_read(&dev->refcnt));
		msleep(1000);
	}

	kfree(dev);
	dev = NULL;
}