/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"
29 * mei_me_cl_by_uuid - locate index of me client
32 * returns me client index or -ENOENT if not found
34 int mei_me_cl_by_uuid(const struct mei_device
*dev
, const uuid_le
*uuid
)
38 for (i
= 0; i
< dev
->me_clients_num
; ++i
)
39 if (uuid_le_cmp(*uuid
,
40 dev
->me_clients
[i
].props
.protocol_name
) == 0) {
50 * mei_me_cl_by_id return index to me_clients for client_id
52 * @dev: the device structure
53 * @client_id: me client id
55 * Locking: called under "dev->device_lock" lock
57 * returns index on success, -ENOENT on failure.
60 int mei_me_cl_by_id(struct mei_device
*dev
, u8 client_id
)
63 for (i
= 0; i
< dev
->me_clients_num
; i
++)
64 if (dev
->me_clients
[i
].client_id
== client_id
)
66 if (WARN_ON(dev
->me_clients
[i
].client_id
!= client_id
))
69 if (i
== dev
->me_clients_num
)
77 * mei_io_list_flush - removes list entry belonging to cl.
79 * @list: An instance of our list structure
82 void mei_io_list_flush(struct mei_cl_cb
*list
, struct mei_cl
*cl
)
85 struct mei_cl_cb
*next
;
87 list_for_each_entry_safe(cb
, next
, &list
->list
, list
) {
88 if (cb
->cl
&& mei_cl_cmp_id(cl
, cb
->cl
))
94 * mei_io_cb_free - free mei_cb_private related memory
96 * @cb: mei callback struct
98 void mei_io_cb_free(struct mei_cl_cb
*cb
)
103 kfree(cb
->request_buffer
.data
);
104 kfree(cb
->response_buffer
.data
);
109 * mei_io_cb_init - allocate and initialize io callback
112 * @fp: pointer to file structure
114 * returns mei_cl_cb pointer or NULL;
116 struct mei_cl_cb
*mei_io_cb_init(struct mei_cl
*cl
, struct file
*fp
)
118 struct mei_cl_cb
*cb
;
120 cb
= kzalloc(sizeof(struct mei_cl_cb
), GFP_KERNEL
);
124 mei_io_list_init(cb
);
126 cb
->file_object
= fp
;
133 * mei_io_cb_alloc_req_buf - allocate request buffer
135 * @cb: io callback structure
136 * @length: size of the buffer
138 * returns 0 on success
139 * -EINVAL if cb is NULL
140 * -ENOMEM if allocation failed
142 int mei_io_cb_alloc_req_buf(struct mei_cl_cb
*cb
, size_t length
)
150 cb
->request_buffer
.data
= kmalloc(length
, GFP_KERNEL
);
151 if (!cb
->request_buffer
.data
)
153 cb
->request_buffer
.size
= length
;
157 * mei_io_cb_alloc_resp_buf - allocate respose buffer
159 * @cb: io callback structure
160 * @length: size of the buffer
162 * returns 0 on success
163 * -EINVAL if cb is NULL
164 * -ENOMEM if allocation failed
166 int mei_io_cb_alloc_resp_buf(struct mei_cl_cb
*cb
, size_t length
)
174 cb
->response_buffer
.data
= kmalloc(length
, GFP_KERNEL
);
175 if (!cb
->response_buffer
.data
)
177 cb
->response_buffer
.size
= length
;
184 * mei_cl_flush_queues - flushes queue lists belonging to cl.
188 int mei_cl_flush_queues(struct mei_cl
*cl
)
190 if (WARN_ON(!cl
|| !cl
->dev
))
193 dev_dbg(&cl
->dev
->pdev
->dev
, "remove list entry belonging to cl\n");
194 mei_io_list_flush(&cl
->dev
->read_list
, cl
);
195 mei_io_list_flush(&cl
->dev
->write_list
, cl
);
196 mei_io_list_flush(&cl
->dev
->write_waiting_list
, cl
);
197 mei_io_list_flush(&cl
->dev
->ctrl_wr_list
, cl
);
198 mei_io_list_flush(&cl
->dev
->ctrl_rd_list
, cl
);
199 mei_io_list_flush(&cl
->dev
->amthif_cmd_list
, cl
);
200 mei_io_list_flush(&cl
->dev
->amthif_rd_complete_list
, cl
);
206 * mei_cl_init - initializes intialize cl.
208 * @cl: host client to be initialized
211 void mei_cl_init(struct mei_cl
*cl
, struct mei_device
*dev
)
213 memset(cl
, 0, sizeof(struct mei_cl
));
214 init_waitqueue_head(&cl
->wait
);
215 init_waitqueue_head(&cl
->rx_wait
);
216 init_waitqueue_head(&cl
->tx_wait
);
217 INIT_LIST_HEAD(&cl
->link
);
218 INIT_LIST_HEAD(&cl
->device_link
);
219 cl
->reading_state
= MEI_IDLE
;
220 cl
->writing_state
= MEI_IDLE
;
225 * mei_cl_allocate - allocates cl structure and sets it up.
228 * returns The allocated file or NULL on failure
230 struct mei_cl
*mei_cl_allocate(struct mei_device
*dev
)
234 cl
= kmalloc(sizeof(struct mei_cl
), GFP_KERNEL
);
238 mei_cl_init(cl
, dev
);
244 * mei_cl_find_read_cb - find this cl's callback in the read list
248 * returns cb on success, NULL on error
250 struct mei_cl_cb
*mei_cl_find_read_cb(struct mei_cl
*cl
)
252 struct mei_device
*dev
= cl
->dev
;
253 struct mei_cl_cb
*cb
= NULL
;
254 struct mei_cl_cb
*next
= NULL
;
256 list_for_each_entry_safe(cb
, next
, &dev
->read_list
.list
, list
)
257 if (mei_cl_cmp_id(cl
, cb
->cl
))
262 /** mei_cl_link: allocte host id in the host map
265 * @id - fixed host id or -1 for genereting one
267 * returns 0 on success
268 * -EINVAL on incorrect values
269 * -ENONET if client not found
271 int mei_cl_link(struct mei_cl
*cl
, int id
)
273 struct mei_device
*dev
;
275 if (WARN_ON(!cl
|| !cl
->dev
))
280 /* If Id is not asigned get one*/
281 if (id
== MEI_HOST_CLIENT_ID_ANY
)
282 id
= find_first_zero_bit(dev
->host_clients_map
,
285 if (id
>= MEI_CLIENTS_MAX
) {
286 dev_err(&dev
->pdev
->dev
, "id exceded %d", MEI_CLIENTS_MAX
) ;
290 dev
->open_handle_count
++;
292 cl
->host_client_id
= id
;
293 list_add_tail(&cl
->link
, &dev
->file_list
);
295 set_bit(id
, dev
->host_clients_map
);
297 cl
->state
= MEI_FILE_INITIALIZING
;
299 dev_dbg(&dev
->pdev
->dev
, "link cl host id = %d\n", cl
->host_client_id
);
304 * mei_cl_unlink - remove me_cl from the list
308 int mei_cl_unlink(struct mei_cl
*cl
)
310 struct mei_device
*dev
;
311 struct mei_cl
*pos
, *next
;
313 /* don't shout on error exit path */
317 /* wd and amthif might not be initialized */
323 list_for_each_entry_safe(pos
, next
, &dev
->file_list
, link
) {
324 if (cl
->host_client_id
== pos
->host_client_id
) {
325 dev_dbg(&dev
->pdev
->dev
, "remove host client = %d, ME client = %d\n",
326 pos
->host_client_id
, pos
->me_client_id
);
327 list_del_init(&pos
->link
);
335 void mei_host_client_init(struct work_struct
*work
)
337 struct mei_device
*dev
= container_of(work
,
338 struct mei_device
, init_work
);
339 struct mei_client_properties
*client_props
;
342 mutex_lock(&dev
->device_lock
);
344 bitmap_zero(dev
->host_clients_map
, MEI_CLIENTS_MAX
);
345 dev
->open_handle_count
= 0;
348 * Reserving the first three client IDs
349 * 0: Reserved for MEI Bus Message communications
350 * 1: Reserved for Watchdog
351 * 2: Reserved for AMTHI
353 bitmap_set(dev
->host_clients_map
, 0, 3);
355 for (i
= 0; i
< dev
->me_clients_num
; i
++) {
356 client_props
= &dev
->me_clients
[i
].props
;
358 if (!uuid_le_cmp(client_props
->protocol_name
, mei_amthif_guid
))
359 mei_amthif_host_init(dev
);
360 else if (!uuid_le_cmp(client_props
->protocol_name
, mei_wd_guid
))
361 mei_wd_host_init(dev
);
362 else if (!uuid_le_cmp(client_props
->protocol_name
, mei_nfc_guid
))
363 mei_nfc_host_init(dev
);
367 dev
->dev_state
= MEI_DEV_ENABLED
;
369 mutex_unlock(&dev
->device_lock
);
374 * mei_cl_disconnect - disconnect host clinet form the me one
378 * Locking: called under "dev->device_lock" lock
380 * returns 0 on success, <0 on failure.
382 int mei_cl_disconnect(struct mei_cl
*cl
)
384 struct mei_device
*dev
;
385 struct mei_cl_cb
*cb
;
388 if (WARN_ON(!cl
|| !cl
->dev
))
393 if (cl
->state
!= MEI_FILE_DISCONNECTING
)
396 cb
= mei_io_cb_init(cl
, NULL
);
400 cb
->fop_type
= MEI_FOP_CLOSE
;
401 if (dev
->hbuf_is_ready
) {
402 dev
->hbuf_is_ready
= false;
403 if (mei_hbm_cl_disconnect_req(dev
, cl
)) {
405 dev_err(&dev
->pdev
->dev
, "failed to disconnect.\n");
408 mdelay(10); /* Wait for hardware disconnection ready */
409 list_add_tail(&cb
->list
, &dev
->ctrl_rd_list
.list
);
411 dev_dbg(&dev
->pdev
->dev
, "add disconnect cb to control write list\n");
412 list_add_tail(&cb
->list
, &dev
->ctrl_wr_list
.list
);
415 mutex_unlock(&dev
->device_lock
);
417 err
= wait_event_timeout(dev
->wait_recvd_msg
,
418 MEI_FILE_DISCONNECTED
== cl
->state
,
419 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT
));
421 mutex_lock(&dev
->device_lock
);
422 if (MEI_FILE_DISCONNECTED
== cl
->state
) {
424 dev_dbg(&dev
->pdev
->dev
, "successfully disconnected from FW client.\n");
427 if (MEI_FILE_DISCONNECTED
!= cl
->state
)
428 dev_dbg(&dev
->pdev
->dev
, "wrong status client disconnect.\n");
431 dev_dbg(&dev
->pdev
->dev
,
432 "wait failed disconnect err=%08x\n",
435 dev_dbg(&dev
->pdev
->dev
, "failed to disconnect from FW client.\n");
438 mei_io_list_flush(&dev
->ctrl_rd_list
, cl
);
439 mei_io_list_flush(&dev
->ctrl_wr_list
, cl
);
447 * mei_cl_is_other_connecting - checks if other
448 * client with the same me client id is connecting
450 * @cl: private data of the file object
452 * returns ture if other client is connected, 0 - otherwise.
454 bool mei_cl_is_other_connecting(struct mei_cl
*cl
)
456 struct mei_device
*dev
;
460 if (WARN_ON(!cl
|| !cl
->dev
))
465 list_for_each_entry_safe(pos
, next
, &dev
->file_list
, link
) {
466 if ((pos
->state
== MEI_FILE_CONNECTING
) &&
467 (pos
!= cl
) && cl
->me_client_id
== pos
->me_client_id
)
476 * mei_cl_connect - connect host clinet to the me one
480 * Locking: called under "dev->device_lock" lock
482 * returns 0 on success, <0 on failure.
484 int mei_cl_connect(struct mei_cl
*cl
, struct file
*file
)
486 struct mei_device
*dev
;
487 struct mei_cl_cb
*cb
;
488 long timeout
= mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT
);
491 if (WARN_ON(!cl
|| !cl
->dev
))
496 cb
= mei_io_cb_init(cl
, file
);
502 cb
->fop_type
= MEI_FOP_IOCTL
;
504 if (dev
->hbuf_is_ready
&& !mei_cl_is_other_connecting(cl
)) {
505 dev
->hbuf_is_ready
= false;
507 if (mei_hbm_cl_connect_req(dev
, cl
)) {
511 cl
->timer_count
= MEI_CONNECT_TIMEOUT
;
512 list_add_tail(&cb
->list
, &dev
->ctrl_rd_list
.list
);
514 list_add_tail(&cb
->list
, &dev
->ctrl_wr_list
.list
);
517 mutex_unlock(&dev
->device_lock
);
518 rets
= wait_event_timeout(dev
->wait_recvd_msg
,
519 (cl
->state
== MEI_FILE_CONNECTED
||
520 cl
->state
== MEI_FILE_DISCONNECTED
),
522 mutex_lock(&dev
->device_lock
);
524 if (cl
->state
!= MEI_FILE_CONNECTED
) {
527 mei_io_list_flush(&dev
->ctrl_rd_list
, cl
);
528 mei_io_list_flush(&dev
->ctrl_wr_list
, cl
);
540 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
542 * @cl: private data of the file object
544 * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
545 * -ENOENT if mei_cl is not present
546 * -EINVAL if single_recv_buf == 0
548 int mei_cl_flow_ctrl_creds(struct mei_cl
*cl
)
550 struct mei_device
*dev
;
553 if (WARN_ON(!cl
|| !cl
->dev
))
558 if (!dev
->me_clients_num
)
561 if (cl
->mei_flow_ctrl_creds
> 0)
564 for (i
= 0; i
< dev
->me_clients_num
; i
++) {
565 struct mei_me_client
*me_cl
= &dev
->me_clients
[i
];
566 if (me_cl
->client_id
== cl
->me_client_id
) {
567 if (me_cl
->mei_flow_ctrl_creds
) {
568 if (WARN_ON(me_cl
->props
.single_recv_buf
== 0))
580 * mei_cl_flow_ctrl_reduce - reduces flow_control.
582 * @cl: private data of the file object
586 * -ENOENT when me client is not found
587 * -EINVAL when ctrl credits are <= 0
589 int mei_cl_flow_ctrl_reduce(struct mei_cl
*cl
)
591 struct mei_device
*dev
;
594 if (WARN_ON(!cl
|| !cl
->dev
))
599 if (!dev
->me_clients_num
)
602 for (i
= 0; i
< dev
->me_clients_num
; i
++) {
603 struct mei_me_client
*me_cl
= &dev
->me_clients
[i
];
604 if (me_cl
->client_id
== cl
->me_client_id
) {
605 if (me_cl
->props
.single_recv_buf
!= 0) {
606 if (WARN_ON(me_cl
->mei_flow_ctrl_creds
<= 0))
608 dev
->me_clients
[i
].mei_flow_ctrl_creds
--;
610 if (WARN_ON(cl
->mei_flow_ctrl_creds
<= 0))
612 cl
->mei_flow_ctrl_creds
--;
621 * mei_cl_read_start - the start read client message function.
625 * returns 0 on success, <0 on failure.
627 int mei_cl_read_start(struct mei_cl
*cl
, size_t length
)
629 struct mei_device
*dev
;
630 struct mei_cl_cb
*cb
;
634 if (WARN_ON(!cl
|| !cl
->dev
))
639 if (cl
->state
!= MEI_FILE_CONNECTED
)
642 if (dev
->dev_state
!= MEI_DEV_ENABLED
)
646 dev_dbg(&dev
->pdev
->dev
, "read is pending.\n");
649 i
= mei_me_cl_by_id(dev
, cl
->me_client_id
);
651 dev_err(&dev
->pdev
->dev
, "no such me client %d\n",
656 cb
= mei_io_cb_init(cl
, NULL
);
660 /* always allocate at least client max message */
661 length
= max_t(size_t, length
, dev
->me_clients
[i
].props
.max_msg_length
);
662 rets
= mei_io_cb_alloc_resp_buf(cb
, length
);
666 cb
->fop_type
= MEI_FOP_READ
;
668 if (dev
->hbuf_is_ready
) {
669 dev
->hbuf_is_ready
= false;
670 if (mei_hbm_cl_flow_control_req(dev
, cl
)) {
674 list_add_tail(&cb
->list
, &dev
->read_list
.list
);
676 list_add_tail(&cb
->list
, &dev
->ctrl_wr_list
.list
);
685 * mei_cl_write - submit a write cb to mei device
686 assumes device_lock is locked
689 * @cl: write callback with filled data
691 * returns numbe of bytes sent on success, <0 on failure.
693 int mei_cl_write(struct mei_cl
*cl
, struct mei_cl_cb
*cb
, bool blocking
)
695 struct mei_device
*dev
;
696 struct mei_msg_data
*buf
;
697 struct mei_msg_hdr mei_hdr
;
701 if (WARN_ON(!cl
|| !cl
->dev
))
710 buf
= &cb
->request_buffer
;
712 dev_dbg(&dev
->pdev
->dev
, "mei_cl_write %d\n", buf
->size
);
715 cb
->fop_type
= MEI_FOP_WRITE
;
717 rets
= mei_cl_flow_ctrl_creds(cl
);
721 /* Host buffer is not ready, we queue the request */
722 if (rets
== 0 || !dev
->hbuf_is_ready
) {
724 /* unseting complete will enqueue the cb for write */
725 mei_hdr
.msg_complete
= 0;
726 cl
->writing_state
= MEI_WRITING
;
731 dev
->hbuf_is_ready
= false;
733 /* Check for a maximum length */
734 if (buf
->size
> mei_hbuf_max_len(dev
)) {
735 mei_hdr
.length
= mei_hbuf_max_len(dev
);
736 mei_hdr
.msg_complete
= 0;
738 mei_hdr
.length
= buf
->size
;
739 mei_hdr
.msg_complete
= 1;
742 mei_hdr
.host_addr
= cl
->host_client_id
;
743 mei_hdr
.me_addr
= cl
->me_client_id
;
744 mei_hdr
.reserved
= 0;
746 dev_dbg(&dev
->pdev
->dev
, "write " MEI_HDR_FMT
"\n",
747 MEI_HDR_PRM(&mei_hdr
));
750 if (mei_write_message(dev
, &mei_hdr
, buf
->data
)) {
755 cl
->writing_state
= MEI_WRITING
;
756 cb
->buf_idx
= mei_hdr
.length
;
760 if (mei_hdr
.msg_complete
) {
761 if (mei_cl_flow_ctrl_reduce(cl
)) {
765 list_add_tail(&cb
->list
, &dev
->write_waiting_list
.list
);
767 list_add_tail(&cb
->list
, &dev
->write_list
.list
);
771 if (blocking
&& cl
->writing_state
!= MEI_WRITE_COMPLETE
) {
773 mutex_unlock(&dev
->device_lock
);
774 if (wait_event_interruptible(cl
->tx_wait
,
775 cl
->writing_state
== MEI_WRITE_COMPLETE
)) {
776 if (signal_pending(current
))
781 mutex_lock(&dev
->device_lock
);
790 * mei_cl_all_disconnect - disconnect forcefully all connected clients
795 void mei_cl_all_disconnect(struct mei_device
*dev
)
797 struct mei_cl
*cl
, *next
;
799 list_for_each_entry_safe(cl
, next
, &dev
->file_list
, link
) {
800 cl
->state
= MEI_FILE_DISCONNECTED
;
801 cl
->mei_flow_ctrl_creds
= 0;
809 * mei_cl_all_read_wakeup - wake up all readings so they can be interrupted
813 void mei_cl_all_read_wakeup(struct mei_device
*dev
)
815 struct mei_cl
*cl
, *next
;
816 list_for_each_entry_safe(cl
, next
, &dev
->file_list
, link
) {
817 if (waitqueue_active(&cl
->rx_wait
)) {
818 dev_dbg(&dev
->pdev
->dev
, "Waking up client!\n");
819 wake_up_interruptible(&cl
->rx_wait
);
825 * mei_cl_all_write_clear - clear all pending writes
829 void mei_cl_all_write_clear(struct mei_device
*dev
)
831 struct mei_cl_cb
*cb
, *next
;
833 list_for_each_entry_safe(cb
, next
, &dev
->write_list
.list
, list
) {