Merge branch 'omap-serial' of git://git.linaro.org/people/rmk/linux-arm
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / char / virtio_console.c
a23ea924
RR
1/*
2 * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
5084f893
AS
3 * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
4 * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
31610434
RR
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
fb08bd27 20#include <linux/cdev.h>
d99393ef 21#include <linux/debugfs.h>
5e38483b 22#include <linux/completion.h>
fb08bd27 23#include <linux/device.h>
31610434 24#include <linux/err.h>
a08fa92d 25#include <linux/freezer.h>
2030fa49 26#include <linux/fs.h>
eb5e89fc
MH
27#include <linux/splice.h>
28#include <linux/pagemap.h>
31610434 29#include <linux/init.h>
38edf58d 30#include <linux/list.h>
2030fa49
AS
31#include <linux/poll.h>
32#include <linux/sched.h>
5a0e3ad6 33#include <linux/slab.h>
38edf58d 34#include <linux/spinlock.h>
31610434
RR
35#include <linux/virtio.h>
36#include <linux/virtio_console.h>
2030fa49 37#include <linux/wait.h>
17634ba2 38#include <linux/workqueue.h>
c22405c9 39#include <linux/module.h>
51df0acc 40#include "../tty/hvc/hvc_console.h"
31610434 41
38edf58d
AS
42/*
43 * This is a global struct for storing common data for all the devices
44 * this driver handles.
45 *
46 * Mainly, it has a linked list for all the consoles in one place so
47 * that callbacks from hvc for get_chars(), put_chars() work properly
48 * across multiple devices and multiple ports per device.
49 */
50struct ports_driver_data {
fb08bd27
AS
51 /* Used for registering chardevs */
52 struct class *class;
53
d99393ef
AS
54 /* Used for exporting per-port information to debugfs */
55 struct dentry *debugfs_dir;
56
6bdf2afd
AS
57 /* List of all the devices we're handling */
58 struct list_head portdevs;
59
fb08bd27
AS
 60 /* Used to number the devices this driver handles (see drv_index) */
61 unsigned int index;
62
d8a02bd5
RR
63 /*
64 * This is used to keep track of the number of hvc consoles
65 * spawned by this driver. This number is given as the first
66 * argument to hvc_alloc(). To correctly map an initial
67 * console spawned via hvc_instantiate to the console being
68 * hooked up via hvc_alloc, we need to pass the same vtermno.
69 *
70 * We also just assume the first console being initialised was
71 * the first one that got used as the initial console.
72 */
73 unsigned int next_vtermno;
74
38edf58d
AS
75 /* All the console devices handled by this driver */
76 struct list_head consoles;
77};
78static struct ports_driver_data pdrvdata;
79
80DEFINE_SPINLOCK(pdrvdata_lock);
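/*
 * Completed once an early console port has been hooked up; with
 * multiport, virtcons_probe() waits on this so that the hvc_alloc()
 * matches the early hvc_instantiate().
 */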
5e38483b 81DECLARE_COMPLETION(early_console_added);
38edf58d 82
4f23c573
AS
83/* This struct holds information that's relevant only for console ports */
84struct console {
85 /* We'll place all consoles in a list in the pdrvdata struct */
86 struct list_head list;
87
88 /* The hvc device associated with this console port */
89 struct hvc_struct *hvc;
90
9778829c
AS
91 /* The size of the console */
92 struct winsize ws;
93
4f23c573
AS
94 /*
 95 * This is the number we used to register with hvc in
 96 * hvc_instantiate() and hvc_alloc(); the hvc callbacks pass
 97 * the same number back to us, which is how we tell apart the
 98 * various console ports handled by
 99 * this driver
100 */
101 u32 vtermno;
102};
103
fdb9a054
AS
104struct port_buffer {
105 char *buf;
106
107 /* size of the buffer in *buf above */
108 size_t size;
109
110 /* used length of the buffer */
111 size_t len;
112 /* offset in the buf from which to consume data */
113 size_t offset;
114};
115
17634ba2
AS
116/*
117 * This is a per-device struct that stores data common to all the
118 * ports for that device (vdev->priv).
119 */
120struct ports_device {
6bdf2afd
AS
121 /* Next portdev in the list, head is in the pdrvdata struct */
122 struct list_head list;
123
17634ba2
AS
124 /*
125 * Workqueue handlers where we process deferred work after
126 * notification
127 */
128 struct work_struct control_work;
129
130 struct list_head ports;
131
132 /* To protect the list of ports */
133 spinlock_t ports_lock;
134
135 /* To protect the vq operations for the control channel */
136 spinlock_t cvq_lock;
137
138 /* The current config space is stored here */
b99fa815 139 struct virtio_console_config config;
17634ba2
AS
140
141 /* The virtio device we're associated with */
142 struct virtio_device *vdev;
143
144 /*
145 * A couple of virtqueues for the control channel: one for
146 * guest->host transfers, one for host->guest transfers
147 */
148 struct virtqueue *c_ivq, *c_ovq;
149
150 /* Array of per-port IO virtqueues */
151 struct virtqueue **in_vqs, **out_vqs;
fb08bd27
AS
152
153 /* Used for numbering devices for sysfs and debugfs */
154 unsigned int drv_index;
155
156 /* Major number for this device. Ports will be created as minors. */
157 int chr_major;
17634ba2
AS
158};
159
17e5b4f2
AS
160struct port_stats {
161 unsigned long bytes_sent, bytes_received, bytes_discarded;
162};
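/*
 * The counters above are exported read-only through the per-port
 * debugfs file created in add_port() (see debugfs_read() below).
 */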
163
1c85bf35 164/* This struct holds the per-port data */
21206ede 165struct port {
17634ba2
AS
166 /* Next port in the list, head is in the ports_device */
167 struct list_head list;
168
1c85bf35
AS
169 /* Pointer to the parent virtio_console device */
170 struct ports_device *portdev;
fdb9a054
AS
171
172 /* The current buffer from which data has to be fed to readers */
173 struct port_buffer *inbuf;
21206ede 174
203baab8
AS
175 /*
176 * To protect the operations on the in_vq associated with this
177 * port. Has to be a spinlock because it can be called from
178 * interrupt context (get_char()).
179 */
180 spinlock_t inbuf_lock;
181
cdfadfc1
AS
182 /* Protect the operations on the out_vq. */
183 spinlock_t outvq_lock;
184
1c85bf35
AS
185 /* The IO vqs for this port */
186 struct virtqueue *in_vq, *out_vq;
187
d99393ef
AS
188 /* File in the debugfs directory that exposes this port's information */
189 struct dentry *debugfs_file;
190
17e5b4f2
AS
191 /*
192 * Keep count of the bytes sent, received and discarded for
193 * this port for accounting and debugging purposes. These
194 * counts are not reset across port open / close events.
195 */
196 struct port_stats stats;
197
4f23c573
AS
198 /*
199 * The entries in this struct will be valid if this port is
200 * hooked up to an hvc console
201 */
202 struct console cons;
17634ba2 203
fb08bd27 204 /* Each port associates with a separate char device */
d22a6989 205 struct cdev *cdev;
fb08bd27
AS
206 struct device *dev;
207
b353a6b8
AS
208 /* Reference-counting to handle port hot-unplugs and file operations */
209 struct kref kref;
210
2030fa49
AS
211 /* A waitqueue for poll() or blocking read operations */
212 wait_queue_head_t waitqueue;
213
431edb8a
AS
214 /* The 'name' of the port that we expose via sysfs properties */
215 char *name;
216
3eae0ade
AS
217 /* We can notify apps of host connect / disconnect events via SIGIO */
218 struct fasync_struct *async_queue;
219
17634ba2
AS
220 /* The 'id' to identify the port with the Host */
221 u32 id;
2030fa49 222
cdfadfc1
AS
223 bool outvq_full;
224
2030fa49
AS
225 /* Is the host device open */
226 bool host_connected;
3c7969cc
AS
227
228 /* We should allow only one process to open a port */
229 bool guest_connected;
21206ede 230};
31610434 231
971f3390
RR
232/* This is the very early arch-specified put chars function. */
233static int (*early_put_chars)(u32, const char *, int);
234
38edf58d
AS
235static struct port *find_port_by_vtermno(u32 vtermno)
236{
237 struct port *port;
4f23c573 238 struct console *cons;
38edf58d
AS
239 unsigned long flags;
240
241 spin_lock_irqsave(&pdrvdata_lock, flags);
4f23c573
AS
242 list_for_each_entry(cons, &pdrvdata.consoles, list) {
243 if (cons->vtermno == vtermno) {
244 port = container_of(cons, struct port, cons);
38edf58d 245 goto out;
4f23c573 246 }
38edf58d
AS
247 }
248 port = NULL;
249out:
250 spin_unlock_irqrestore(&pdrvdata_lock, flags);
251 return port;
252}
253
04950cdf
AS
254static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
255 dev_t dev)
256{
257 struct port *port;
258 unsigned long flags;
259
260 spin_lock_irqsave(&portdev->ports_lock, flags);
261 list_for_each_entry(port, &portdev->ports, list)
d22a6989 262 if (port->cdev->dev == dev)
04950cdf
AS
263 goto out;
264 port = NULL;
265out:
266 spin_unlock_irqrestore(&portdev->ports_lock, flags);
267
268 return port;
269}
270
271static struct port *find_port_by_devt(dev_t dev)
272{
273 struct ports_device *portdev;
274 struct port *port;
275 unsigned long flags;
276
277 spin_lock_irqsave(&pdrvdata_lock, flags);
278 list_for_each_entry(portdev, &pdrvdata.portdevs, list) {
279 port = find_port_by_devt_in_portdev(portdev, dev);
280 if (port)
281 goto out;
282 }
283 port = NULL;
284out:
285 spin_unlock_irqrestore(&pdrvdata_lock, flags);
286 return port;
287}
288
17634ba2
AS
289static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
290{
291 struct port *port;
292 unsigned long flags;
293
294 spin_lock_irqsave(&portdev->ports_lock, flags);
295 list_for_each_entry(port, &portdev->ports, list)
296 if (port->id == id)
297 goto out;
298 port = NULL;
299out:
300 spin_unlock_irqrestore(&portdev->ports_lock, flags);
301
302 return port;
303}
304
203baab8
AS
305static struct port *find_port_by_vq(struct ports_device *portdev,
306 struct virtqueue *vq)
307{
308 struct port *port;
203baab8
AS
309 unsigned long flags;
310
17634ba2
AS
311 spin_lock_irqsave(&portdev->ports_lock, flags);
312 list_for_each_entry(port, &portdev->ports, list)
203baab8
AS
313 if (port->in_vq == vq || port->out_vq == vq)
314 goto out;
203baab8
AS
315 port = NULL;
316out:
17634ba2 317 spin_unlock_irqrestore(&portdev->ports_lock, flags);
203baab8
AS
318 return port;
319}
320
17634ba2
AS
321static bool is_console_port(struct port *port)
322{
323 if (port->cons.hvc)
324 return true;
325 return false;
326}
327
328static inline bool use_multiport(struct ports_device *portdev)
329{
330 /*
331 * This condition can be true when put_chars is called from
332 * early_init
333 */
334 if (!portdev->vdev)
 335 return false;
336 return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
337}
338
fdb9a054
AS
339static void free_buf(struct port_buffer *buf)
340{
341 kfree(buf->buf);
342 kfree(buf);
343}
344
345static struct port_buffer *alloc_buf(size_t buf_size)
346{
347 struct port_buffer *buf;
348
349 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
350 if (!buf)
351 goto fail;
352 buf->buf = kzalloc(buf_size, GFP_KERNEL);
353 if (!buf->buf)
354 goto free_buf;
355 buf->len = 0;
356 buf->offset = 0;
357 buf->size = buf_size;
358 return buf;
359
360free_buf:
361 kfree(buf);
362fail:
363 return NULL;
364}
365
a3cde449 366/* Callers should take appropriate locks */
defde669 367static struct port_buffer *get_inbuf(struct port *port)
a3cde449
AS
368{
369 struct port_buffer *buf;
a3cde449
AS
370 unsigned int len;
371
d25a9dda
AS
372 if (port->inbuf)
373 return port->inbuf;
374
375 buf = virtqueue_get_buf(port->in_vq, &len);
a3cde449
AS
376 if (buf) {
377 buf->len = len;
378 buf->offset = 0;
17e5b4f2 379 port->stats.bytes_received += len;
a3cde449
AS
380 }
381 return buf;
382}
383
e27b5198
AS
384/*
385 * Create a scatter-gather list representing our input buffer and put
386 * it in the queue.
387 *
388 * Callers should take appropriate locks.
389 */
203baab8 390static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
e27b5198
AS
391{
392 struct scatterlist sg[1];
203baab8 393 int ret;
1c85bf35 394
e27b5198
AS
395 sg_init_one(sg, buf->buf, buf->size);
396
f96fde41 397 ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC);
505b0451 398 virtqueue_kick(vq);
203baab8
AS
399 return ret;
400}
401
88f251ac
AS
 402/* Discard any unread data this port has. Callers must hold the port's inbuf_lock. */
403static void discard_port_data(struct port *port)
404{
405 struct port_buffer *buf;
2d24cdaa 406 unsigned int err;
88f251ac 407
d7a62cd0
AS
408 if (!port->portdev) {
409 /* Device has been unplugged. vqs are already gone. */
410 return;
411 }
2d24cdaa 412 buf = get_inbuf(port);
88f251ac 413
ce072a0c 414 err = 0;
d6933561 415 while (buf) {
17e5b4f2 416 port->stats.bytes_discarded += buf->len - buf->offset;
2d24cdaa 417 if (add_inbuf(port->in_vq, buf) < 0) {
ce072a0c 418 err++;
d6933561
AS
419 free_buf(buf);
420 }
2d24cdaa
AS
421 port->inbuf = NULL;
422 buf = get_inbuf(port);
88f251ac 423 }
ce072a0c 424 if (err)
d6933561 425 dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
ce072a0c 426 err);
88f251ac
AS
427}
428
203baab8
AS
429static bool port_has_data(struct port *port)
430{
431 unsigned long flags;
432 bool ret;
433
d25a9dda 434 ret = false;
203baab8 435 spin_lock_irqsave(&port->inbuf_lock, flags);
d6933561 436 port->inbuf = get_inbuf(port);
d25a9dda 437 if (port->inbuf)
d6933561 438 ret = true;
d25a9dda 439
203baab8 440 spin_unlock_irqrestore(&port->inbuf_lock, flags);
203baab8
AS
441 return ret;
442}
443
3425e706
AS
444static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
445 unsigned int event, unsigned int value)
17634ba2
AS
446{
447 struct scatterlist sg[1];
448 struct virtio_console_control cpkt;
449 struct virtqueue *vq;
604b2ad7 450 unsigned int len;
17634ba2 451
3425e706 452 if (!use_multiport(portdev))
17634ba2
AS
453 return 0;
454
3425e706 455 cpkt.id = port_id;
17634ba2
AS
456 cpkt.event = event;
457 cpkt.value = value;
458
3425e706 459 vq = portdev->c_ovq;
17634ba2
AS
460
461 sg_init_one(sg, &cpkt, sizeof(cpkt));
f96fde41 462 if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) >= 0) {
505b0451
MT
463 virtqueue_kick(vq);
464 while (!virtqueue_get_buf(vq, &len))
17634ba2
AS
465 cpu_relax();
466 }
467 return 0;
468}
469
3425e706
AS
470static ssize_t send_control_msg(struct port *port, unsigned int event,
471 unsigned int value)
472{
84ec06c5
AS
473 /* Did the port get unplugged before userspace closed it? */
474 if (port->portdev)
475 return __send_control_msg(port->portdev, port->id, event, value);
476 return 0;
3425e706
AS
477}
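/*
 * For example, port_fops_open() below announces a guest-side open with
 * send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1), and
 * port_fops_release() sends the same event with value 0 on close.
 */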
478
eb5e89fc
MH
479struct buffer_token {
480 union {
481 void *buf;
482 struct scatterlist *sg;
483 } u;
8ca84a50
MH
484 /* If sgpages == 0 then buf is used, else sg is used */
485 unsigned int sgpages;
eb5e89fc
MH
486};
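/*
 * Tokens are allocated in send_buf()/send_pages(), handed to the out_vq
 * as the cookie for virtqueue_add_buf(), and freed (together with the
 * data they track) by reclaim_consumed_buffers() once the host is done.
 */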
487
8ca84a50 488static void reclaim_sg_pages(struct scatterlist *sg, unsigned int nrpages)
eb5e89fc
MH
489{
490 int i;
491 struct page *page;
492
8ca84a50 493 for (i = 0; i < nrpages; i++) {
eb5e89fc
MH
494 page = sg_page(&sg[i]);
495 if (!page)
496 break;
497 put_page(page);
498 }
499 kfree(sg);
500}
501
cdfadfc1
AS
502/* Callers must take the port->outvq_lock */
503static void reclaim_consumed_buffers(struct port *port)
504{
eb5e89fc 505 struct buffer_token *tok;
cdfadfc1
AS
506 unsigned int len;
507
d7a62cd0
AS
508 if (!port->portdev) {
509 /* Device has been unplugged. vqs are already gone. */
510 return;
511 }
eb5e89fc
MH
512 while ((tok = virtqueue_get_buf(port->out_vq, &len))) {
513 if (tok->sgpages)
8ca84a50 514 reclaim_sg_pages(tok->u.sg, tok->sgpages);
eb5e89fc
MH
515 else
516 kfree(tok->u.buf);
517 kfree(tok);
cdfadfc1
AS
518 port->outvq_full = false;
519 }
520}
521
eb5e89fc
MH
522static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
523 int nents, size_t in_count,
524 struct buffer_token *tok, bool nonblock)
f997f00b 525{
f997f00b
AS
526 struct virtqueue *out_vq;
527 ssize_t ret;
cdfadfc1 528 unsigned long flags;
f997f00b
AS
529 unsigned int len;
530
531 out_vq = port->out_vq;
532
cdfadfc1
AS
533 spin_lock_irqsave(&port->outvq_lock, flags);
534
535 reclaim_consumed_buffers(port);
536
eb5e89fc 537 ret = virtqueue_add_buf(out_vq, sg, nents, 0, tok, GFP_ATOMIC);
f997f00b
AS
538
539 /* Tell Host to go! */
505b0451 540 virtqueue_kick(out_vq);
f997f00b
AS
541
542 if (ret < 0) {
9ff4cfab 543 in_count = 0;
cdfadfc1 544 goto done;
f997f00b
AS
545 }
546
cdfadfc1
AS
547 if (ret == 0)
548 port->outvq_full = true;
549
550 if (nonblock)
551 goto done;
552
553 /*
554 * Wait till the host acknowledges it pushed out the data we
531295e6
AS
555 * sent. This is done for data from the hvc_console; the tty
556 * operations are performed with spinlocks held so we can't
557 * sleep here. An alternative would be to copy the data to a
558 * buffer and relax the spinning requirement. The downside is
559 * we need to kmalloc a GFP_ATOMIC buffer each time the
560 * console driver writes something out.
cdfadfc1 561 */
505b0451 562 while (!virtqueue_get_buf(out_vq, &len))
f997f00b 563 cpu_relax();
cdfadfc1
AS
564done:
565 spin_unlock_irqrestore(&port->outvq_lock, flags);
17e5b4f2
AS
566
567 port->stats.bytes_sent += in_count;
cdfadfc1
AS
568 /*
569 * We're expected to return the amount of data we wrote -- all
570 * of it
571 */
9ff4cfab 572 return in_count;
f997f00b
AS
573}
574
eb5e89fc
MH
575static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
576 bool nonblock)
577{
578 struct scatterlist sg[1];
579 struct buffer_token *tok;
580
581 tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
582 if (!tok)
583 return -ENOMEM;
8ca84a50 584 tok->sgpages = 0;
eb5e89fc
MH
585 tok->u.buf = in_buf;
586
587 sg_init_one(sg, in_buf, in_count);
588
589 return __send_to_port(port, sg, 1, in_count, tok, nonblock);
590}
591
592static ssize_t send_pages(struct port *port, struct scatterlist *sg, int nents,
593 size_t in_count, bool nonblock)
594{
595 struct buffer_token *tok;
596
597 tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
598 if (!tok)
599 return -ENOMEM;
8ca84a50 600 tok->sgpages = nents;
eb5e89fc
MH
601 tok->u.sg = sg;
602
603 return __send_to_port(port, sg, nents, in_count, tok, nonblock);
604}
605
203baab8
AS
606/*
607 * Give out the data that's requested from the buffer that we have
608 * queued up.
609 */
b766ceed
AS
610static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
611 bool to_user)
203baab8
AS
612{
613 struct port_buffer *buf;
614 unsigned long flags;
615
616 if (!out_count || !port_has_data(port))
617 return 0;
618
619 buf = port->inbuf;
b766ceed 620 out_count = min(out_count, buf->len - buf->offset);
203baab8 621
b766ceed
AS
622 if (to_user) {
623 ssize_t ret;
624
625 ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
626 if (ret)
627 return -EFAULT;
628 } else {
629 memcpy(out_buf, buf->buf + buf->offset, out_count);
630 }
203baab8 631
203baab8
AS
632 buf->offset += out_count;
633
634 if (buf->offset == buf->len) {
635 /*
636 * We're done using all the data in this buffer.
637 * Re-queue so that the Host can send us more data.
638 */
639 spin_lock_irqsave(&port->inbuf_lock, flags);
640 port->inbuf = NULL;
641
642 if (add_inbuf(port->in_vq, buf) < 0)
fb08bd27 643 dev_warn(port->dev, "failed add_buf\n");
203baab8
AS
644
645 spin_unlock_irqrestore(&port->inbuf_lock, flags);
646 }
b766ceed 647 /* Return the number of bytes actually copied */
203baab8 648 return out_count;
e27b5198
AS
649}
650
2030fa49 651/* The condition that must be true for polling to end */
60caacd3 652static bool will_read_block(struct port *port)
2030fa49 653{
3709ea7a
AS
654 if (!port->guest_connected) {
655 /* Port got hot-unplugged. Let's exit. */
656 return false;
657 }
60caacd3 658 return !port_has_data(port) && port->host_connected;
2030fa49
AS
659}
660
cdfadfc1
AS
661static bool will_write_block(struct port *port)
662{
663 bool ret;
664
60e5e0b8
AS
665 if (!port->guest_connected) {
666 /* Port got hot-unplugged. Let's exit. */
667 return false;
668 }
cdfadfc1
AS
669 if (!port->host_connected)
670 return true;
671
672 spin_lock_irq(&port->outvq_lock);
673 /*
674 * Check if the Host has consumed any buffers since we last
675 * sent data (this is only applicable for nonblocking ports).
676 */
677 reclaim_consumed_buffers(port);
678 ret = port->outvq_full;
679 spin_unlock_irq(&port->outvq_lock);
680
681 return ret;
682}
683
2030fa49
AS
684static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
685 size_t count, loff_t *offp)
686{
687 struct port *port;
688 ssize_t ret;
689
690 port = filp->private_data;
691
692 if (!port_has_data(port)) {
693 /*
 694 * If nothing's connected on the host, just return 0; this
 695 * tells the userspace app that there's no connection and
 696 * nothing to read
697 */
698 if (!port->host_connected)
699 return 0;
700 if (filp->f_flags & O_NONBLOCK)
701 return -EAGAIN;
702
a08fa92d
AS
703 ret = wait_event_freezable(port->waitqueue,
704 !will_read_block(port));
2030fa49
AS
705 if (ret < 0)
706 return ret;
707 }
b3dddb9e
AS
708 /* Port got hot-unplugged. */
709 if (!port->guest_connected)
710 return -ENODEV;
2030fa49
AS
711 /*
712 * We could've received a disconnection message while we were
713 * waiting for more data.
714 *
715 * This check is not clubbed in the if() statement above as we
716 * might receive some data as well as the host could get
717 * disconnected after we got woken up from our wait. So we
718 * really want to give off whatever data we have and only then
719 * check for host_connected.
720 */
721 if (!port_has_data(port) && !port->host_connected)
722 return 0;
723
724 return fill_readbuf(port, ubuf, count, true);
725}
726
efe75d24
MH
727static int wait_port_writable(struct port *port, bool nonblock)
728{
729 int ret;
730
731 if (will_write_block(port)) {
732 if (nonblock)
733 return -EAGAIN;
734
735 ret = wait_event_freezable(port->waitqueue,
736 !will_write_block(port));
737 if (ret < 0)
738 return ret;
739 }
740 /* Port got hot-unplugged. */
741 if (!port->guest_connected)
742 return -ENODEV;
743
744 return 0;
745}
746
2030fa49
AS
747static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
748 size_t count, loff_t *offp)
749{
750 struct port *port;
751 char *buf;
752 ssize_t ret;
cdfadfc1 753 bool nonblock;
2030fa49 754
65745422
AS
755 /* Userspace could be out to fool us */
756 if (!count)
757 return 0;
758
2030fa49
AS
759 port = filp->private_data;
760
cdfadfc1
AS
761 nonblock = filp->f_flags & O_NONBLOCK;
762
efe75d24
MH
763 ret = wait_port_writable(port, nonblock);
764 if (ret < 0)
765 return ret;
cdfadfc1 766
2030fa49
AS
767 count = min((size_t)(32 * 1024), count);
768
769 buf = kmalloc(count, GFP_KERNEL);
770 if (!buf)
771 return -ENOMEM;
772
773 ret = copy_from_user(buf, ubuf, count);
774 if (ret) {
775 ret = -EFAULT;
776 goto free_buf;
777 }
778
531295e6
AS
779 /*
780 * We now ask send_buf() to not spin for generic ports -- we
781 * can re-use the same code path that non-blocking file
782 * descriptors take for blocking file descriptors since the
783 * wait is already done and we're certain the write will go
784 * through to the host.
785 */
786 nonblock = true;
cdfadfc1
AS
787 ret = send_buf(port, buf, count, nonblock);
788
789 if (nonblock && ret > 0)
790 goto out;
791
2030fa49
AS
792free_buf:
793 kfree(buf);
cdfadfc1 794out:
2030fa49
AS
795 return ret;
796}
797
eb5e89fc
MH
798struct sg_list {
799 unsigned int n;
8ca84a50 800 unsigned int size;
eb5e89fc
MH
801 size_t len;
802 struct scatterlist *sg;
803};
804
805static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
806 struct splice_desc *sd)
807{
808 struct sg_list *sgl = sd->u.data;
ec8fc870 809 unsigned int offset, len;
eb5e89fc 810
8ca84a50 811 if (sgl->n == sgl->size)
eb5e89fc
MH
812 return 0;
813
814 /* Try lock this page */
815 if (buf->ops->steal(pipe, buf) == 0) {
816 /* Get reference and unlock page for moving */
817 get_page(buf->page);
818 unlock_page(buf->page);
819
820 len = min(buf->len, sd->len);
821 sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset);
ec8fc870
MH
822 } else {
 823 /* Fall back to copying the page */
824 struct page *page = alloc_page(GFP_KERNEL);
825 char *src = buf->ops->map(pipe, buf, 1);
826 char *dst;
827
828 if (!page)
829 return -ENOMEM;
830 dst = kmap(page);
831
832 offset = sd->pos & ~PAGE_MASK;
833
834 len = sd->len;
835 if (len + offset > PAGE_SIZE)
836 len = PAGE_SIZE - offset;
837
838 memcpy(dst + offset, src + buf->offset, len);
839
840 kunmap(page);
841 buf->ops->unmap(pipe, buf, src);
842
843 sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
eb5e89fc 844 }
ec8fc870
MH
845 sgl->n++;
846 sgl->len += len;
eb5e89fc
MH
847
848 return len;
849}
850
851/* Faster zero-copy write by splicing */
852static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
853 struct file *filp, loff_t *ppos,
854 size_t len, unsigned int flags)
855{
856 struct port *port = filp->private_data;
857 struct sg_list sgl;
858 ssize_t ret;
859 struct splice_desc sd = {
860 .total_len = len,
861 .flags = flags,
862 .pos = *ppos,
863 .u.data = &sgl,
864 };
865
efe75d24
MH
866 ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
867 if (ret < 0)
868 return ret;
869
eb5e89fc
MH
870 sgl.n = 0;
871 sgl.len = 0;
8ca84a50
MH
872 sgl.size = pipe->nrbufs;
873 sgl.sg = kmalloc(sizeof(struct scatterlist) * sgl.size, GFP_KERNEL);
eb5e89fc
MH
874 if (unlikely(!sgl.sg))
875 return -ENOMEM;
876
8ca84a50 877 sg_init_table(sgl.sg, sgl.size);
eb5e89fc
MH
878 ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
879 if (likely(ret > 0))
880 ret = send_pages(port, sgl.sg, sgl.n, sgl.len, true);
881
882 return ret;
883}
884
2030fa49
AS
885static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
886{
887 struct port *port;
888 unsigned int ret;
889
890 port = filp->private_data;
891 poll_wait(filp, &port->waitqueue, wait);
892
8529a504
AS
893 if (!port->guest_connected) {
894 /* Port got unplugged */
895 return POLLHUP;
896 }
2030fa49 897 ret = 0;
6df7aadc 898 if (!will_read_block(port))
2030fa49 899 ret |= POLLIN | POLLRDNORM;
cdfadfc1 900 if (!will_write_block(port))
2030fa49
AS
901 ret |= POLLOUT;
902 if (!port->host_connected)
903 ret |= POLLHUP;
904
905 return ret;
906}
907
b353a6b8
AS
908static void remove_port(struct kref *kref);
909
2030fa49
AS
910static int port_fops_release(struct inode *inode, struct file *filp)
911{
912 struct port *port;
913
914 port = filp->private_data;
915
916 /* Notify host of port being closed */
917 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
918
88f251ac 919 spin_lock_irq(&port->inbuf_lock);
3c7969cc
AS
920 port->guest_connected = false;
921
88f251ac
AS
922 discard_port_data(port);
923
924 spin_unlock_irq(&port->inbuf_lock);
925
cdfadfc1
AS
926 spin_lock_irq(&port->outvq_lock);
927 reclaim_consumed_buffers(port);
928 spin_unlock_irq(&port->outvq_lock);
929
b353a6b8
AS
930 /*
931 * Locks aren't necessary here as a port can't be opened after
932 * unplug, and if a port isn't unplugged, a kref would already
933 * exist for the port. Plus, taking ports_lock here would
934 * create a dependency on other locks taken by functions
935 * inside remove_port if we're the last holder of the port,
936 * creating many problems.
937 */
938 kref_put(&port->kref, remove_port);
939
2030fa49
AS
940 return 0;
941}
942
943static int port_fops_open(struct inode *inode, struct file *filp)
944{
945 struct cdev *cdev = inode->i_cdev;
946 struct port *port;
8ad37e83 947 int ret;
2030fa49 948
04950cdf 949 port = find_port_by_devt(cdev->dev);
2030fa49
AS
950 filp->private_data = port;
951
b353a6b8
AS
952 /* Prevent against a port getting hot-unplugged at the same time */
953 spin_lock_irq(&port->portdev->ports_lock);
954 kref_get(&port->kref);
955 spin_unlock_irq(&port->portdev->ports_lock);
956
2030fa49
AS
957 /*
958 * Don't allow opening of console port devices -- that's done
959 * via /dev/hvc
960 */
8ad37e83
AS
961 if (is_console_port(port)) {
962 ret = -ENXIO;
963 goto out;
964 }
2030fa49 965
3c7969cc
AS
966 /* Allow only one process to open a particular port at a time */
967 spin_lock_irq(&port->inbuf_lock);
968 if (port->guest_connected) {
969 spin_unlock_irq(&port->inbuf_lock);
8ad37e83
AS
970 ret = -EMFILE;
971 goto out;
3c7969cc
AS
972 }
973
974 port->guest_connected = true;
975 spin_unlock_irq(&port->inbuf_lock);
976
cdfadfc1
AS
977 spin_lock_irq(&port->outvq_lock);
978 /*
979 * There might be a chance that we missed reclaiming a few
980 * buffers in the window of the port getting previously closed
981 * and opening now.
982 */
983 reclaim_consumed_buffers(port);
984 spin_unlock_irq(&port->outvq_lock);
985
299fb61c
AS
986 nonseekable_open(inode, filp);
987
2030fa49
AS
988 /* Notify host of port being opened */
989 send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
990
991 return 0;
8ad37e83 992out:
b353a6b8 993 kref_put(&port->kref, remove_port);
8ad37e83 994 return ret;
2030fa49
AS
995}
996
3eae0ade
AS
997static int port_fops_fasync(int fd, struct file *filp, int mode)
998{
999 struct port *port;
1000
1001 port = filp->private_data;
1002 return fasync_helper(fd, filp, mode, &port->async_queue);
1003}
1004
2030fa49
AS
1005/*
1006 * The file operations that we support: programs in the guest can open
1007 * a console device, read from it, write to it, poll for data and
1008 * close it. The devices are at
1009 * /dev/vport<device number>p<port number>
1010 */
1011static const struct file_operations port_fops = {
1012 .owner = THIS_MODULE,
1013 .open = port_fops_open,
1014 .read = port_fops_read,
1015 .write = port_fops_write,
eb5e89fc 1016 .splice_write = port_fops_splice_write,
2030fa49
AS
1017 .poll = port_fops_poll,
1018 .release = port_fops_release,
3eae0ade 1019 .fasync = port_fops_fasync,
299fb61c 1020 .llseek = no_llseek,
2030fa49
AS
1021};
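/*
 * Rough userspace view of these ports (an illustrative sketch, not part
 * of the driver; the node name depends on the device and port numbers):
 *
 *	int fd = open("/dev/vport0p1", O_RDWR);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);		   - wait for data or POLLHUP
 *	n = read(fd, buf, sizeof(buf));	   - returns 0 if host disconnected
 *	n = write(fd, msg, strlen(msg));   - blocks unless O_NONBLOCK is set
 */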
1022
a23ea924
RR
1023/*
1024 * The put_chars() callback is pretty straightforward.
31610434 1025 *
a23ea924
RR
1026 * We turn the characters into a scatter-gather list, add it to the
1027 * output queue and then kick the Host. Then we sit here waiting for
1028 * it to finish: inefficient in theory, but in practice
1029 * implementations will do it immediately (lguest's Launcher does).
1030 */
31610434
RR
1031static int put_chars(u32 vtermno, const char *buf, int count)
1032{
21206ede 1033 struct port *port;
38edf58d 1034
162a689a
FD
1035 if (unlikely(early_put_chars))
1036 return early_put_chars(vtermno, buf, count);
1037
38edf58d
AS
1038 port = find_port_by_vtermno(vtermno);
1039 if (!port)
6dc69f97 1040 return -EPIPE;
31610434 1041
cdfadfc1 1042 return send_buf(port, (void *)buf, count, false);
31610434
RR
1043}
1044
a23ea924
RR
1045/*
1046 * get_chars() is the callback from the hvc_console infrastructure
1047 * when an interrupt is received.
31610434 1048 *
203baab8
AS
1049 * We call out to fill_readbuf that gets us the required data from the
1050 * buffers that are queued up.
a23ea924 1051 */
31610434
RR
1052static int get_chars(u32 vtermno, char *buf, int count)
1053{
21206ede
RR
1054 struct port *port;
1055
6dc69f97
AS
1056 /* If we've not set up the port yet, we have no input to give. */
1057 if (unlikely(early_put_chars))
1058 return 0;
1059
38edf58d
AS
1060 port = find_port_by_vtermno(vtermno);
1061 if (!port)
6dc69f97 1062 return -EPIPE;
21206ede 1063
31610434 1064 /* If we don't have an input queue yet, we can't get input. */
21206ede 1065 BUG_ON(!port->in_vq);
31610434 1066
b766ceed 1067 return fill_readbuf(port, buf, count, false);
31610434 1068}
31610434 1069
cb06e367 1070static void resize_console(struct port *port)
c2983458 1071{
cb06e367 1072 struct virtio_device *vdev;
c2983458 1073
2de16a49 1074 /* The port could have been hot-unplugged */
9778829c 1075 if (!port || !is_console_port(port))
2de16a49
AS
1076 return;
1077
cb06e367 1078 vdev = port->portdev->vdev;
9778829c
AS
1079 if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
1080 hvc_resize(port->cons.hvc, port->cons.ws);
c2983458
CB
1081}
1082
38edf58d 1083/* We set the configuration at this point, since we now have a tty */
91fcad19
CB
1084static int notifier_add_vio(struct hvc_struct *hp, int data)
1085{
38edf58d
AS
1086 struct port *port;
1087
1088 port = find_port_by_vtermno(hp->vtermno);
1089 if (!port)
1090 return -EINVAL;
1091
91fcad19 1092 hp->irq_requested = 1;
cb06e367 1093 resize_console(port);
c2983458 1094
91fcad19
CB
1095 return 0;
1096}
1097
1098static void notifier_del_vio(struct hvc_struct *hp, int data)
1099{
1100 hp->irq_requested = 0;
1101}
1102
17634ba2 1103/* The operations for console ports. */
1dff3996 1104static const struct hv_ops hv_ops = {
971f3390
RR
1105 .get_chars = get_chars,
1106 .put_chars = put_chars,
1107 .notifier_add = notifier_add_vio,
1108 .notifier_del = notifier_del_vio,
1109 .notifier_hangup = notifier_del_vio,
1110};
1111
1112/*
1113 * Console drivers are initialized very early so boot messages can go
1114 * out, so we do things slightly differently from the generic virtio
1115 * initialization of the net and block drivers.
1116 *
1117 * At this stage, the console is output-only. It's too early to set
1118 * up a virtqueue, so we let the drivers do some boutique early-output
1119 * thing.
1120 */
1121int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
1122{
1123 early_put_chars = put_chars;
1124 return hvc_instantiate(0, 0, &hv_ops);
1125}
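/*
 * Sketch of the intended use (the caller is hypothetical, not part of
 * this file): an arch or hypervisor early-console setup path does
 *
 *	static int my_early_put_chars(u32 vtermno, const char *buf, int n)
 *	{
 *		(write n bytes to the platform's early console)
 *		return n;
 *	}
 *	...
 *	virtio_cons_early_init(my_early_put_chars);
 *
 * put_chars() keeps using early_put_chars until init_port_console()
 * clears it when the first real console port comes up.
 */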
1126
17634ba2 1127static int init_port_console(struct port *port)
cfa6d379
AS
1128{
1129 int ret;
1130
1131 /*
1132 * The Host's telling us this port is a console port. Hook it
1133 * up with an hvc console.
1134 *
1135 * To set up and manage our virtual console, we call
1136 * hvc_alloc().
1137 *
1138 * The first argument of hvc_alloc() is the virtual console
1139 * number. The second argument is the parameter for the
1140 * notification mechanism (like irq number). We currently
1141 * leave this as zero, virtqueues have implicit notifications.
1142 *
1143 * The third argument is a "struct hv_ops" containing the
1144 * put_chars() get_chars(), notifier_add() and notifier_del()
1145 * pointers. The final argument is the output buffer size: we
1146 * can do any size, so we put PAGE_SIZE here.
1147 */
1148 port->cons.vtermno = pdrvdata.next_vtermno;
1149
1150 port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
1151 if (IS_ERR(port->cons.hvc)) {
1152 ret = PTR_ERR(port->cons.hvc);
298add72
AS
1153 dev_err(port->dev,
1154 "error %d allocating hvc for port\n", ret);
cfa6d379
AS
1155 port->cons.hvc = NULL;
1156 return ret;
1157 }
1158 spin_lock_irq(&pdrvdata_lock);
1159 pdrvdata.next_vtermno++;
1160 list_add_tail(&port->cons.list, &pdrvdata.consoles);
1161 spin_unlock_irq(&pdrvdata_lock);
3c7969cc 1162 port->guest_connected = true;
cfa6d379 1163
1d05160b
AS
1164 /*
1165 * Start using the new console output if this is the first
1166 * console to come up.
1167 */
1168 if (early_put_chars)
1169 early_put_chars = NULL;
1170
2030fa49
AS
1171 /* Notify host of port being opened */
1172 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
1173
cfa6d379
AS
1174 return 0;
1175}
1176
431edb8a
AS
1177static ssize_t show_port_name(struct device *dev,
1178 struct device_attribute *attr, char *buffer)
1179{
1180 struct port *port;
1181
1182 port = dev_get_drvdata(dev);
1183
1184 return sprintf(buffer, "%s\n", port->name);
1185}
1186
1187static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL);
1188
1189static struct attribute *port_sysfs_entries[] = {
1190 &dev_attr_name.attr,
1191 NULL
1192};
1193
1194static struct attribute_group port_attribute_group = {
1195 .name = NULL, /* put in device directory */
1196 .attrs = port_sysfs_entries,
1197};
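/*
 * The 'name' attribute appears in the port's sysfs device directory and
 * reports the name the host assigned via VIRTIO_CONSOLE_PORT_NAME;
 * handle_control_message() emits a KOBJ_CHANGE uevent when it is set so
 * udev rules can create friendlier symlinks to /dev/vport<x>p<y>.
 */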
1198
d99393ef
AS
1199static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
1200 size_t count, loff_t *offp)
1201{
1202 struct port *port;
1203 char *buf;
1204 ssize_t ret, out_offset, out_count;
1205
1206 out_count = 1024;
1207 buf = kmalloc(out_count, GFP_KERNEL);
1208 if (!buf)
1209 return -ENOMEM;
1210
1211 port = filp->private_data;
1212 out_offset = 0;
1213 out_offset += snprintf(buf + out_offset, out_count,
1214 "name: %s\n", port->name ? port->name : "");
1215 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1216 "guest_connected: %d\n", port->guest_connected);
1217 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1218 "host_connected: %d\n", port->host_connected);
cdfadfc1
AS
1219 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1220 "outvq_full: %d\n", port->outvq_full);
17e5b4f2
AS
1221 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1222 "bytes_sent: %lu\n", port->stats.bytes_sent);
1223 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1224 "bytes_received: %lu\n",
1225 port->stats.bytes_received);
1226 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1227 "bytes_discarded: %lu\n",
1228 port->stats.bytes_discarded);
d99393ef
AS
1229 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1230 "is_console: %s\n",
1231 is_console_port(port) ? "yes" : "no");
1232 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1233 "console_vtermno: %u\n", port->cons.vtermno);
1234
1235 ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
1236 kfree(buf);
1237 return ret;
1238}
1239
1240static const struct file_operations port_debugfs_ops = {
1241 .owner = THIS_MODULE,
234e3405 1242 .open = simple_open,
d99393ef
AS
1243 .read = debugfs_read,
1244};
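/*
 * One such read-only file per port, named vport<device>p<port>, is
 * created under the driver's debugfs directory by add_port() below.
 */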
1245
9778829c
AS
1246static void set_console_size(struct port *port, u16 rows, u16 cols)
1247{
1248 if (!port || !is_console_port(port))
1249 return;
1250
1251 port->cons.ws.ws_row = rows;
1252 port->cons.ws.ws_col = cols;
1253}
1254
c446f8fc
AS
1255static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
1256{
1257 struct port_buffer *buf;
1258 unsigned int nr_added_bufs;
1259 int ret;
1260
1261 nr_added_bufs = 0;
1262 do {
1263 buf = alloc_buf(PAGE_SIZE);
1264 if (!buf)
1265 break;
1266
1267 spin_lock_irq(lock);
1268 ret = add_inbuf(vq, buf);
1269 if (ret < 0) {
1270 spin_unlock_irq(lock);
1271 free_buf(buf);
1272 break;
1273 }
1274 nr_added_bufs++;
1275 spin_unlock_irq(lock);
1276 } while (ret > 0);
1277
1278 return nr_added_bufs;
1279}
1280
3eae0ade
AS
1281static void send_sigio_to_port(struct port *port)
1282{
1283 if (port->async_queue && port->guest_connected)
1284 kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
1285}
1286
c446f8fc
AS
1287static int add_port(struct ports_device *portdev, u32 id)
1288{
1289 char debugfs_name[16];
1290 struct port *port;
1291 struct port_buffer *buf;
1292 dev_t devt;
1293 unsigned int nr_added_bufs;
1294 int err;
1295
1296 port = kmalloc(sizeof(*port), GFP_KERNEL);
1297 if (!port) {
1298 err = -ENOMEM;
1299 goto fail;
1300 }
b353a6b8 1301 kref_init(&port->kref);
c446f8fc
AS
1302
1303 port->portdev = portdev;
1304 port->id = id;
1305
1306 port->name = NULL;
1307 port->inbuf = NULL;
1308 port->cons.hvc = NULL;
3eae0ade 1309 port->async_queue = NULL;
c446f8fc 1310
9778829c
AS
1311 port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
1312
c446f8fc 1313 port->host_connected = port->guest_connected = false;
17e5b4f2 1314 port->stats = (struct port_stats) { 0 };
c446f8fc 1315
cdfadfc1
AS
1316 port->outvq_full = false;
1317
c446f8fc
AS
1318 port->in_vq = portdev->in_vqs[port->id];
1319 port->out_vq = portdev->out_vqs[port->id];
1320
d22a6989
AS
1321 port->cdev = cdev_alloc();
1322 if (!port->cdev) {
1323 dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
1324 err = -ENOMEM;
1325 goto free_port;
1326 }
1327 port->cdev->ops = &port_fops;
c446f8fc
AS
1328
1329 devt = MKDEV(portdev->chr_major, id);
d22a6989 1330 err = cdev_add(port->cdev, devt, 1);
c446f8fc
AS
1331 if (err < 0) {
1332 dev_err(&port->portdev->vdev->dev,
1333 "Error %d adding cdev for port %u\n", err, id);
d22a6989 1334 goto free_cdev;
c446f8fc
AS
1335 }
1336 port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
1337 devt, port, "vport%up%u",
1338 port->portdev->drv_index, id);
1339 if (IS_ERR(port->dev)) {
1340 err = PTR_ERR(port->dev);
1341 dev_err(&port->portdev->vdev->dev,
1342 "Error %d creating device for port %u\n",
1343 err, id);
1344 goto free_cdev;
1345 }
1346
1347 spin_lock_init(&port->inbuf_lock);
cdfadfc1 1348 spin_lock_init(&port->outvq_lock);
c446f8fc
AS
1349 init_waitqueue_head(&port->waitqueue);
1350
1351 /* Fill the in_vq with buffers so the host can send us data. */
1352 nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
1353 if (!nr_added_bufs) {
1354 dev_err(port->dev, "Error allocating inbufs\n");
1355 err = -ENOMEM;
1356 goto free_device;
1357 }
1358
1359 /*
1360 * If we're not using multiport support, this has to be a console port
1361 */
1362 if (!use_multiport(port->portdev)) {
1363 err = init_port_console(port);
1364 if (err)
1365 goto free_inbufs;
1366 }
1367
1368 spin_lock_irq(&portdev->ports_lock);
1369 list_add_tail(&port->list, &port->portdev->ports);
1370 spin_unlock_irq(&portdev->ports_lock);
1371
1372 /*
1373 * Tell the Host we're set so that it can send us various
1374 * configuration parameters for this port (eg, port name,
1375 * caching, whether this is a console port, etc.)
1376 */
1377 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
1378
1379 if (pdrvdata.debugfs_dir) {
1380 /*
1381 * Finally, create the debugfs file that we can use to
1382 * inspect a port's state at any time
1383 */
1384 sprintf(debugfs_name, "vport%up%u",
1385 port->portdev->drv_index, id);
1386 port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
1387 pdrvdata.debugfs_dir,
1388 port,
1389 &port_debugfs_ops);
1390 }
1391 return 0;
1392
1393free_inbufs:
1394 while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1395 free_buf(buf);
1396free_device:
1397 device_destroy(pdrvdata.class, port->dev->devt);
1398free_cdev:
d22a6989 1399 cdev_del(port->cdev);
c446f8fc
AS
1400free_port:
1401 kfree(port);
1402fail:
1403 /* The host might want to notify management sw about port add failure */
0643e4c6 1404 __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
c446f8fc
AS
1405 return err;
1406}
1407
b353a6b8
AS
1408/* No users remain, remove all port-specific data. */
1409static void remove_port(struct kref *kref)
1410{
1411 struct port *port;
1412
1413 port = container_of(kref, struct port, kref);
1414
1415 sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
1416 device_destroy(pdrvdata.class, port->dev->devt);
1417 cdev_del(port->cdev);
1418
1419 kfree(port->name);
1420
1421 debugfs_remove(port->debugfs_file);
1422
1423 kfree(port);
1424}
1425
a0e2dbfc
AS
1426static void remove_port_data(struct port *port)
1427{
1428 struct port_buffer *buf;
1429
1430 /* Remove unused data this port might have received. */
1431 discard_port_data(port);
1432
1433 reclaim_consumed_buffers(port);
1434
1435 /* Remove buffers we queued up for the Host to send us data in. */
1436 while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1437 free_buf(buf);
1438}
1439
b353a6b8
AS
1440/*
1441 * Port got unplugged. Remove port from portdev's list and drop the
1442 * kref reference. If no userspace has this port opened, it will
1443 * result in the immediate removal of the port.
1444 */
1445static void unplug_port(struct port *port)
1f7aa42d 1446{
b353a6b8
AS
1447 spin_lock_irq(&port->portdev->ports_lock);
1448 list_del(&port->list);
1449 spin_unlock_irq(&port->portdev->ports_lock);
1450
0047634d
AS
1451 if (port->guest_connected) {
1452 port->guest_connected = false;
1453 port->host_connected = false;
1454 wake_up_interruptible(&port->waitqueue);
a461e11e
AS
1455
1456 /* Let the app know the port is going down. */
1457 send_sigio_to_port(port);
0047634d
AS
1458 }
1459
1f7aa42d
AS
1460 if (is_console_port(port)) {
1461 spin_lock_irq(&pdrvdata_lock);
1462 list_del(&port->cons.list);
1463 spin_unlock_irq(&pdrvdata_lock);
1464 hvc_remove(port->cons.hvc);
1465 }
1f7aa42d 1466
a0e2dbfc 1467 remove_port_data(port);
a9cdd485 1468
b353a6b8
AS
1469 /*
1470 * We should just assume the device itself has gone off --
1471 * else a close on an open port later will try to send out a
1472 * control message.
1473 */
1474 port->portdev = NULL;
d99393ef 1475
b353a6b8
AS
1476 /*
1477 * Locks around here are not necessary - a port can't be
1478 * opened after we removed the port struct from ports_list
1479 * above.
1480 */
1481 kref_put(&port->kref, remove_port);
1f7aa42d
AS
1482}
1483
17634ba2
AS
1484/* Any private messages that the Host and Guest want to share */
1485static void handle_control_message(struct ports_device *portdev,
1486 struct port_buffer *buf)
1487{
1488 struct virtio_console_control *cpkt;
1489 struct port *port;
431edb8a
AS
1490 size_t name_size;
1491 int err;
17634ba2
AS
1492
1493 cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
1494
1495 port = find_port_by_id(portdev, cpkt->id);
f909f850 1496 if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) {
17634ba2
AS
1497 /* No port with this id, and it isn't a PORT_ADD request. Drop it. */
1498 dev_dbg(&portdev->vdev->dev,
1499 "Invalid index %u in control packet\n", cpkt->id);
1500 return;
1501 }
1502
1503 switch (cpkt->event) {
f909f850
AS
1504 case VIRTIO_CONSOLE_PORT_ADD:
1505 if (port) {
1d05160b
AS
1506 dev_dbg(&portdev->vdev->dev,
1507 "Port %u already added\n", port->id);
f909f850
AS
1508 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
1509 break;
1510 }
1511 if (cpkt->id >= portdev->config.max_nr_ports) {
1512 dev_warn(&portdev->vdev->dev,
1513 "Request for adding port with out-of-bound id %u, max. supported id: %u\n",
1514 cpkt->id, portdev->config.max_nr_ports - 1);
1515 break;
1516 }
1517 add_port(portdev, cpkt->id);
1518 break;
1519 case VIRTIO_CONSOLE_PORT_REMOVE:
b353a6b8 1520 unplug_port(port);
f909f850 1521 break;
17634ba2
AS
1522 case VIRTIO_CONSOLE_CONSOLE_PORT:
1523 if (!cpkt->value)
1524 break;
1525 if (is_console_port(port))
1526 break;
1527
1528 init_port_console(port);
5e38483b 1529 complete(&early_console_added);
17634ba2
AS
1530 /*
1531 * Could remove the port here in case init fails - but
1532 * have to notify the host first.
1533 */
1534 break;
8345adbf
AS
1535 case VIRTIO_CONSOLE_RESIZE: {
1536 struct {
1537 __u16 rows;
1538 __u16 cols;
1539 } size;
1540
17634ba2
AS
1541 if (!is_console_port(port))
1542 break;
8345adbf
AS
1543
1544 memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
1545 sizeof(size));
1546 set_console_size(port, size.rows, size.cols);
1547
17634ba2
AS
1548 port->cons.hvc->irq_requested = 1;
1549 resize_console(port);
1550 break;
8345adbf 1551 }
2030fa49
AS
1552 case VIRTIO_CONSOLE_PORT_OPEN:
1553 port->host_connected = cpkt->value;
1554 wake_up_interruptible(&port->waitqueue);
cdfadfc1
AS
1555 /*
1556 * If the host port got closed and the host had any
1557 * unconsumed buffers, we'll be able to reclaim them
1558 * now.
1559 */
1560 spin_lock_irq(&port->outvq_lock);
1561 reclaim_consumed_buffers(port);
1562 spin_unlock_irq(&port->outvq_lock);
3eae0ade
AS
1563
1564 /*
1565 * If the guest is connected, it'll be interested in
1566 * knowing the host connection state changed.
1567 */
1568 send_sigio_to_port(port);
2030fa49 1569 break;
431edb8a 1570 case VIRTIO_CONSOLE_PORT_NAME:
291024ef
AS
1571 /*
1572 * If we woke up after hibernation, we can get this
1573 * again. Skip it in that case.
1574 */
1575 if (port->name)
1576 break;
1577
431edb8a
AS
1578 /*
1579 * Skip the size of the header and the cpkt to get the size
1580 * of the name that was sent
1581 */
1582 name_size = buf->len - buf->offset - sizeof(*cpkt) + 1;
1583
1584 port->name = kmalloc(name_size, GFP_KERNEL);
1585 if (!port->name) {
1586 dev_err(port->dev,
1587 "Not enough space to store port name\n");
1588 break;
1589 }
1590 strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
1591 name_size - 1);
1592 port->name[name_size - 1] = 0;
1593
1594 /*
1595 * Since we only have one sysfs attribute, 'name',
1596 * create it only if we have a name for the port.
1597 */
1598 err = sysfs_create_group(&port->dev->kobj,
1599 &port_attribute_group);
ec64213c 1600 if (err) {
431edb8a
AS
1601 dev_err(port->dev,
1602 "Error %d creating sysfs device attributes\n",
1603 err);
ec64213c
AS
1604 } else {
1605 /*
1606 * Generate a udev event so that appropriate
1607 * symlinks can be created based on udev
1608 * rules.
1609 */
1610 kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
1611 }
431edb8a 1612 break;
17634ba2
AS
1613 }
1614}
1615
1616static void control_work_handler(struct work_struct *work)
1617{
1618 struct ports_device *portdev;
1619 struct virtqueue *vq;
1620 struct port_buffer *buf;
1621 unsigned int len;
1622
1623 portdev = container_of(work, struct ports_device, control_work);
1624 vq = portdev->c_ivq;
1625
1626 spin_lock(&portdev->cvq_lock);
505b0451 1627 while ((buf = virtqueue_get_buf(vq, &len))) {
17634ba2
AS
1628 spin_unlock(&portdev->cvq_lock);
1629
1630 buf->len = len;
1631 buf->offset = 0;
1632
1633 handle_control_message(portdev, buf);
1634
1635 spin_lock(&portdev->cvq_lock);
1636 if (add_inbuf(portdev->c_ivq, buf) < 0) {
1637 dev_warn(&portdev->vdev->dev,
1638 "Error adding buffer to queue\n");
1639 free_buf(buf);
1640 }
1641 }
1642 spin_unlock(&portdev->cvq_lock);
1643}
1644
2770c5ea
AS
1645static void out_intr(struct virtqueue *vq)
1646{
1647 struct port *port;
1648
1649 port = find_port_by_vq(vq->vdev->priv, vq);
1650 if (!port)
1651 return;
1652
1653 wake_up_interruptible(&port->waitqueue);
1654}
1655
17634ba2
AS
1656static void in_intr(struct virtqueue *vq)
1657{
1658 struct port *port;
1659 unsigned long flags;
1660
1661 port = find_port_by_vq(vq->vdev->priv, vq);
1662 if (!port)
1663 return;
1664
1665 spin_lock_irqsave(&port->inbuf_lock, flags);
d25a9dda 1666 port->inbuf = get_inbuf(port);
17634ba2 1667
88f251ac
AS
1668 /*
1669 * Don't queue up data when port is closed. This condition
1670 * can be reached when a console port is not yet connected (no
1671 * tty is spawned) and the host sends out data to console
1672 * ports. For generic serial ports, the host won't
1673 * (shouldn't) send data till the guest is connected.
1674 */
1675 if (!port->guest_connected)
1676 discard_port_data(port);
1677
17634ba2
AS
1678 spin_unlock_irqrestore(&port->inbuf_lock, flags);
1679
2030fa49
AS
1680 wake_up_interruptible(&port->waitqueue);
1681
55f6bcce
AS
1682 /* Send a SIGIO indicating new data in case the process asked for it */
1683 send_sigio_to_port(port);
1684
17634ba2
AS
1685 if (is_console_port(port) && hvc_poll(port->cons.hvc))
1686 hvc_kick();
1687}
1688
1689static void control_intr(struct virtqueue *vq)
1690{
1691 struct ports_device *portdev;
1692
1693 portdev = vq->vdev->priv;
1694 schedule_work(&portdev->control_work);
1695}
1696
7f5d810d
AS
1697static void config_intr(struct virtio_device *vdev)
1698{
1699 struct ports_device *portdev;
1700
1701 portdev = vdev->priv;
99f905f8 1702
4038f5b7 1703 if (!use_multiport(portdev)) {
9778829c
AS
1704 struct port *port;
1705 u16 rows, cols;
1706
1707 vdev->config->get(vdev,
1708 offsetof(struct virtio_console_config, cols),
1709 &cols, sizeof(u16));
1710 vdev->config->get(vdev,
1711 offsetof(struct virtio_console_config, rows),
1712 &rows, sizeof(u16));
1713
1714 port = find_port_by_id(portdev, 0);
1715 set_console_size(port, rows, cols);
1716
4038f5b7
AS
1717 /*
1718 * We'll use this way of resizing only for legacy
1719 * support. For newer userspace
1720 * (VIRTIO_CONSOLE_F_MULTPORT+), use control messages
1721 * to indicate console size changes so that it can be
1722 * done per-port.
1723 */
9778829c 1724 resize_console(port);
4038f5b7 1725 }
7f5d810d
AS
1726}
1727
2658a79a
AS
1728static int init_vqs(struct ports_device *portdev)
1729{
1730 vq_callback_t **io_callbacks;
1731 char **io_names;
1732 struct virtqueue **vqs;
17634ba2 1733 u32 i, j, nr_ports, nr_queues;
2658a79a
AS
1734 int err;
1735
17634ba2
AS
1736 nr_ports = portdev->config.max_nr_ports;
1737 nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
2658a79a
AS
1738
1739 vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
2658a79a 1740 io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL);
2658a79a 1741 io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL);
2658a79a
AS
1742 portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
1743 GFP_KERNEL);
2658a79a
AS
1744 portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
1745 GFP_KERNEL);
22e132ff 1746 if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
286f9a22 1747 !portdev->out_vqs) {
2658a79a 1748 err = -ENOMEM;
22e132ff 1749 goto free;
2658a79a
AS
1750 }
1751
17634ba2
AS
1752 /*
1753 * For backward compat (newer host but older guest), the host
1754 * spawns a console port first and also inits the vqs for port
1755 * 0 before others.
1756 */
1757 j = 0;
1758 io_callbacks[j] = in_intr;
2770c5ea 1759 io_callbacks[j + 1] = out_intr;
17634ba2
AS
1760 io_names[j] = "input";
1761 io_names[j + 1] = "output";
1762 j += 2;
1763
1764 if (use_multiport(portdev)) {
1765 io_callbacks[j] = control_intr;
1766 io_callbacks[j + 1] = NULL;
1767 io_names[j] = "control-i";
1768 io_names[j + 1] = "control-o";
1769
1770 for (i = 1; i < nr_ports; i++) {
1771 j += 2;
1772 io_callbacks[j] = in_intr;
2770c5ea 1773 io_callbacks[j + 1] = out_intr;
17634ba2
AS
1774 io_names[j] = "input";
1775 io_names[j + 1] = "output";
1776 }
1777 }
2658a79a
AS
1778 /* Find the queues. */
1779 err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs,
1780 io_callbacks,
1781 (const char **)io_names);
1782 if (err)
22e132ff 1783 goto free;
2658a79a 1784
17634ba2 1785 j = 0;
2658a79a
AS
1786 portdev->in_vqs[0] = vqs[0];
1787 portdev->out_vqs[0] = vqs[1];
17634ba2
AS
1788 j += 2;
1789 if (use_multiport(portdev)) {
1790 portdev->c_ivq = vqs[j];
1791 portdev->c_ovq = vqs[j + 1];
1792
1793 for (i = 1; i < nr_ports; i++) {
1794 j += 2;
1795 portdev->in_vqs[i] = vqs[j];
1796 portdev->out_vqs[i] = vqs[j + 1];
1797 }
1798 }
2658a79a 1799 kfree(io_names);
22e132ff 1800 kfree(io_callbacks);
2658a79a
AS
1801 kfree(vqs);
1802
1803 return 0;
1804
22e132ff 1805free:
2658a79a 1806 kfree(portdev->out_vqs);
2658a79a 1807 kfree(portdev->in_vqs);
22e132ff
JS
1808 kfree(io_names);
1809 kfree(io_callbacks);
2658a79a 1810 kfree(vqs);
22e132ff 1811
2658a79a
AS
1812 return err;
1813}
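/*
 * Worked example of the resulting queue layout with multiport and
 * max_nr_ports == 3 (purely illustrative):
 *
 *	vqs[0]=in0   vqs[1]=out0	port 0 (first, for backward compat)
 *	vqs[2]=c_ivq vqs[3]=c_ovq	control channel
 *	vqs[4]=in1   vqs[5]=out1	port 1
 *	vqs[6]=in2   vqs[7]=out2	port 2
 *
 * Without VIRTIO_CONSOLE_F_MULTIPORT only vqs[0] and vqs[1] exist.
 */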
1814
fb08bd27
AS
1815static const struct file_operations portdev_fops = {
1816 .owner = THIS_MODULE,
1817};
1818
a0e2dbfc
AS
1819static void remove_vqs(struct ports_device *portdev)
1820{
1821 portdev->vdev->config->del_vqs(portdev->vdev);
1822 kfree(portdev->in_vqs);
1823 kfree(portdev->out_vqs);
1824}
1825
1826static void remove_controlq_data(struct ports_device *portdev)
1827{
1828 struct port_buffer *buf;
1829 unsigned int len;
1830
1831 if (!use_multiport(portdev))
1832 return;
1833
1834 while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
1835 free_buf(buf);
1836
1837 while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
1838 free_buf(buf);
1839}
1840
1c85bf35
AS
1841/*
1842 * Once we're further in boot, we get probed like any other virtio
1843 * device.
17634ba2
AS
1844 *
1845 * If the host also supports multiple console ports, we check the
1846 * config space to see how many ports the host has spawned. We
1847 * initialize each port found.
1c85bf35 1848 */
2223cbec 1849static int virtcons_probe(struct virtio_device *vdev)
1c85bf35 1850{
1c85bf35
AS
1851 struct ports_device *portdev;
1852 int err;
17634ba2 1853 bool multiport;
5e38483b
CB
1854 bool early = early_put_chars != NULL;
1855
1856 /* Ensure to read early_put_chars now */
1857 barrier();
1c85bf35
AS
1858
1859 portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
1860 if (!portdev) {
1861 err = -ENOMEM;
1862 goto fail;
1863 }
1864
1865 /* Attach this portdev to this virtio_device, and vice-versa. */
1866 portdev->vdev = vdev;
1867 vdev->priv = portdev;
1868
fb08bd27
AS
1869 spin_lock_irq(&pdrvdata_lock);
1870 portdev->drv_index = pdrvdata.index++;
1871 spin_unlock_irq(&pdrvdata_lock);
1872
1873 portdev->chr_major = register_chrdev(0, "virtio-portsdev",
1874 &portdev_fops);
1875 if (portdev->chr_major < 0) {
1876 dev_err(&vdev->dev,
1877 "Error %d registering chrdev for device %u\n",
1878 portdev->chr_major, portdev->drv_index);
1879 err = portdev->chr_major;
1880 goto free;
1881 }
1882
17634ba2 1883 multiport = false;
17634ba2 1884 portdev->config.max_nr_ports = 1;
51c6d61a
SL
1885 if (virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
1886 offsetof(struct virtio_console_config,
1887 max_nr_ports),
1888 &portdev->config.max_nr_ports) == 0)
17634ba2 1889 multiport = true;
17634ba2 1890
2658a79a
AS
1891 err = init_vqs(portdev);
1892 if (err < 0) {
1893 dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
fb08bd27 1894 goto free_chrdev;
2658a79a 1895 }
1c85bf35 1896
17634ba2
AS
1897 spin_lock_init(&portdev->ports_lock);
1898 INIT_LIST_HEAD(&portdev->ports);
1899
1900 if (multiport) {
335a64a5
AS
1901 unsigned int nr_added_bufs;
1902
17634ba2
AS
1903 spin_lock_init(&portdev->cvq_lock);
1904 INIT_WORK(&portdev->control_work, &control_work_handler);
1905
335a64a5
AS
1906 nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
1907 if (!nr_added_bufs) {
22a29eac
AS
1908 dev_err(&vdev->dev,
1909 "Error allocating buffers for control queue\n");
1910 err = -ENOMEM;
1911 goto free_vqs;
1912 }
1d05160b
AS
1913 } else {
1914 /*
 1915		 * For backward compatibility: create a console port
 1916		 * if we're running on an older host.
1917 */
1918 add_port(portdev, 0);
17634ba2
AS
1919 }
1920
6bdf2afd
AS
1921 spin_lock_irq(&pdrvdata_lock);
1922 list_add_tail(&portdev->list, &pdrvdata.portdevs);
1923 spin_unlock_irq(&pdrvdata_lock);
1924
f909f850
AS
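	/*
	 * Tell the host the driver is ready; a multiport host reacts by
	 * announcing its ports over the control queue.
	 */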
1925 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
1926 VIRTIO_CONSOLE_DEVICE_READY, 1);
5e38483b
CB
1927
1928 /*
1929 * If there was an early virtio console, assume that there are no
1930 * other consoles. We need to wait until the hvc_alloc matches the
1931 * hvc_instantiate, otherwise tty_open will complain, resulting in
1932 * a "Warning: unable to open an initial console" boot failure.
1933 * Without multiport this is done in add_port above. With multiport
1934 * this might take some host<->guest communication - thus we have to
1935 * wait.
1936 */
1937 if (multiport && early)
1938 wait_for_completion(&early_console_added);
1939
31610434
RR
1940 return 0;
1941
22a29eac 1942free_vqs:
0643e4c6
JL
1943 /* The host might want to notify mgmt sw about device add failure */
1944 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
1945 VIRTIO_CONSOLE_DEVICE_READY, 0);
a0e2dbfc 1946 remove_vqs(portdev);
fb08bd27
AS
1947free_chrdev:
1948 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
31610434 1949free:
1c85bf35 1950 kfree(portdev);
31610434
RR
1951fail:
1952 return err;
1953}
1954
7177876f
AS
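/*
 * Hot-unplug or driver unbind: quiesce the device, flush pending
 * control work, unplug every port, then free the virtqueue
 * bookkeeping and the portdev itself.
 */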
1955static void virtcons_remove(struct virtio_device *vdev)
1956{
1957 struct ports_device *portdev;
1958 struct port *port, *port2;
7177876f
AS
1959
1960 portdev = vdev->priv;
1961
6bdf2afd
AS
1962 spin_lock_irq(&pdrvdata_lock);
1963 list_del(&portdev->list);
1964 spin_unlock_irq(&pdrvdata_lock);
1965
02238959
AS
1966 /* Disable interrupts for vqs */
1967 vdev->config->reset(vdev);
1968 /* Finish up work that's lined up */
7177876f 1969 cancel_work_sync(&portdev->control_work);
7177876f
AS
1970
1971 list_for_each_entry_safe(port, port2, &portdev->ports, list)
b353a6b8 1972 unplug_port(port);
7177876f
AS
1973
1974 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
1975
e062013c
AS
1976 /*
1977 * When yanking out a device, we immediately lose the
1978 * (device-side) queues. So there's no point in keeping the
 1979	 * guest side around until we drop our final reference. This
 1980	 * also means that any ports that are open will simply have
 1981	 * to stop using the port, as the vqs are going
 1982	 * away.
1983 */
a0e2dbfc
AS
1984 remove_controlq_data(portdev);
1985 remove_vqs(portdev);
7177876f
AS
1986 kfree(portdev);
1987}
1988
31610434
RR
1989static struct virtio_device_id id_table[] = {
1990 { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
1991 { 0 },
1992};
1993
c2983458
CB
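/* Feature bits we negotiate with the host: console sizing and multiport support */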
1994static unsigned int features[] = {
1995 VIRTIO_CONSOLE_F_SIZE,
b99fa815 1996 VIRTIO_CONSOLE_F_MULTIPORT,
c2983458
CB
1997};
1998
2b8f41d8
AS
1999#ifdef CONFIG_PM
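/*
 * Suspend: reset the device, cancel outstanding control work and drop
 * all queued buffers; virtcons_restore() rebuilds everything on resume.
 */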
2000static int virtcons_freeze(struct virtio_device *vdev)
2001{
2002 struct ports_device *portdev;
2003 struct port *port;
2004
2005 portdev = vdev->priv;
2006
2007 vdev->config->reset(vdev);
2008
c743d09d 2009 virtqueue_disable_cb(portdev->c_ivq);
2b8f41d8 2010 cancel_work_sync(&portdev->control_work);
c743d09d
AS
2011 /*
2012 * Once more: if control_work_handler() was running, it would
2013 * enable the cb as the last step.
2014 */
2015 virtqueue_disable_cb(portdev->c_ivq);
2b8f41d8
AS
2016 remove_controlq_data(portdev);
2017
2018 list_for_each_entry(port, &portdev->ports, list) {
c743d09d
AS
2019 virtqueue_disable_cb(port->in_vq);
2020 virtqueue_disable_cb(port->out_vq);
2b8f41d8
AS
2021 /*
2022 * We'll ask the host later if the new invocation has
2023 * the port opened or closed.
2024 */
2025 port->host_connected = false;
2026 remove_port_data(port);
2027 }
2028 remove_vqs(portdev);
2029
2030 return 0;
2031}
2032
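/*
 * Resume counterpart to virtcons_freeze(): recreate the virtqueues,
 * refill the receive buffers and re-sync each port's open state with
 * the host.
 */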
2033static int virtcons_restore(struct virtio_device *vdev)
2034{
2035 struct ports_device *portdev;
2036 struct port *port;
2037 int ret;
2038
2039 portdev = vdev->priv;
2040
2041 ret = init_vqs(portdev);
2042 if (ret)
2043 return ret;
2044
2045 if (use_multiport(portdev))
2046 fill_queue(portdev->c_ivq, &portdev->cvq_lock);
2047
2048 list_for_each_entry(port, &portdev->ports, list) {
2049 port->in_vq = portdev->in_vqs[port->id];
2050 port->out_vq = portdev->out_vqs[port->id];
2051
2052 fill_queue(port->in_vq, &port->inbuf_lock);
2053
2054 /* Get port open/close status on the host */
2055 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
fa8b66cc
AS
2056
2057 /*
2058 * If a port was open at the time of suspending, we
2059 * have to let the host know that it's still open.
2060 */
2061 if (port->guest_connected)
2062 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
2b8f41d8
AS
2063 }
2064 return 0;
2065}
2066#endif
2067
31610434 2068static struct virtio_driver virtio_console = {
c2983458
CB
2069 .feature_table = features,
2070 .feature_table_size = ARRAY_SIZE(features),
31610434
RR
2071 .driver.name = KBUILD_MODNAME,
2072 .driver.owner = THIS_MODULE,
2073 .id_table = id_table,
2074 .probe = virtcons_probe,
7177876f 2075 .remove = virtcons_remove,
7f5d810d 2076 .config_changed = config_intr,
2b8f41d8
AS
2077#ifdef CONFIG_PM
2078 .freeze = virtcons_freeze,
2079 .restore = virtcons_restore,
2080#endif
31610434
RR
2081};
2082
2083static int __init init(void)
2084{
fb08bd27
AS
2085 int err;
2086
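	/* The class appears as /sys/class/virtio-ports; per-port devices hang off it */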
2087 pdrvdata.class = class_create(THIS_MODULE, "virtio-ports");
2088 if (IS_ERR(pdrvdata.class)) {
2089 err = PTR_ERR(pdrvdata.class);
2090 pr_err("Error %d creating virtio-ports class\n", err);
2091 return err;
2092 }
d99393ef
AS
2093
2094 pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
 2095	if (!pdrvdata.debugfs_dir) {
 2096		/* PTR_ERR() on a NULL pointer would just print 0; keep the message plain */
 2097		pr_warning("Error creating debugfs dir for virtio-ports\n");
 2098	}
38edf58d 2099 INIT_LIST_HEAD(&pdrvdata.consoles);
6bdf2afd 2100 INIT_LIST_HEAD(&pdrvdata.portdevs);
38edf58d 2101
33e1afc3
AK
2102 err = register_virtio_driver(&virtio_console);
2103 if (err < 0) {
2104 pr_err("Error %d registering virtio driver\n", err);
2105 goto free;
2106 }
2107 return 0;
2108free:
2109 if (pdrvdata.debugfs_dir)
2110 debugfs_remove_recursive(pdrvdata.debugfs_dir);
2111 class_destroy(pdrvdata.class);
2112 return err;
31610434 2113}
7177876f
AS
2114
2115static void __exit fini(void)
2116{
2117 unregister_virtio_driver(&virtio_console);
2118
2119 class_destroy(pdrvdata.class);
2120 if (pdrvdata.debugfs_dir)
2121 debugfs_remove_recursive(pdrvdata.debugfs_dir);
2122}
31610434 2123module_init(init);
7177876f 2124module_exit(fini);
31610434
RR
2125
2126MODULE_DEVICE_TABLE(virtio, id_table);
2127MODULE_DESCRIPTION("Virtio console driver");
2128MODULE_LICENSE("GPL");