drivers/staging/greybus/control.c
/*
 * Greybus CPort control protocol.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "greybus.h"

/* Highest control-protocol version supported */
#define GB_CONTROL_VERSION_MAJOR 0
#define GB_CONTROL_VERSION_MINOR 1

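/*
 * Negotiate the control-protocol version with the interface: advertise the
 * highest version this driver supports and fail with -ENOTSUPP if the
 * interface reports a higher major version than was requested.
 */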
static int gb_control_get_version(struct gb_control *control)
{
	struct gb_interface *intf = control->connection->intf;
	struct gb_control_version_request request;
	struct gb_control_version_response response;
	int ret;

	request.major = GB_CONTROL_VERSION_MAJOR;
	request.minor = GB_CONTROL_VERSION_MINOR;

	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_VERSION,
				&request, sizeof(request), &response,
				sizeof(response));
	if (ret) {
		dev_err(&intf->dev,
			"failed to get control-protocol version: %d\n",
			ret);
		return ret;
	}

	if (response.major > request.major) {
		dev_err(&intf->dev,
			"unsupported major control-protocol version (%u > %u)\n",
			response.major, request.major);
		return -ENOTSUPP;
	}

	control->protocol_major = response.major;
	control->protocol_minor = response.minor;

	dev_dbg(&intf->dev, "%s - %u.%u\n", __func__, response.major,
		response.minor);

	return 0;
}

static int gb_control_get_bundle_version(struct gb_control *control,
					 struct gb_bundle *bundle)
{
	struct gb_interface *intf = control->connection->intf;
	struct gb_control_bundle_version_request request;
	struct gb_control_bundle_version_response response;
	int ret;

	request.bundle_id = bundle->id;

	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_BUNDLE_VERSION,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&intf->dev,
			"failed to get bundle %u class version: %d\n",
			bundle->id, ret);
		return ret;
	}

	bundle->class_major = response.major;
	bundle->class_minor = response.minor;

	dev_dbg(&intf->dev, "%s - %u: %u.%u\n", __func__, bundle->id,
		response.major, response.minor);

	return 0;
}

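/*
 * Query the class version of every bundle on the interface. Skipped when the
 * interface does not support the bundle-version operation.
 */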
int gb_control_get_bundle_versions(struct gb_control *control)
{
	struct gb_interface *intf = control->connection->intf;
	struct gb_bundle *bundle;
	int ret;

	if (!control->has_bundle_version)
		return 0;

	list_for_each_entry(bundle, &intf->bundles, links) {
		ret = gb_control_get_bundle_version(control, bundle);
		if (ret)
			return ret;
	}

	return 0;
}

/* Get the manifest size from the interface */
int gb_control_get_manifest_size_operation(struct gb_interface *intf)
{
	struct gb_control_get_manifest_size_response response;
	struct gb_connection *connection = intf->control->connection;
	int ret;

	ret = gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST_SIZE,
				NULL, 0, &response, sizeof(response));
	if (ret) {
		dev_err(&connection->intf->dev,
			"failed to get manifest size: %d\n", ret);
		return ret;
	}

	return le16_to_cpu(response.size);
}

/* Read the manifest from the interface */
int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
				      size_t size)
{
	struct gb_connection *connection = intf->control->connection;

	return gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST,
				 NULL, 0, manifest, size);
}

int gb_control_connected_operation(struct gb_control *control, u16 cport_id)
{
	struct gb_control_connected_request request;

	request.cport_id = cpu_to_le16(cport_id);
	return gb_operation_sync(control->connection, GB_CONTROL_TYPE_CONNECTED,
				 &request, sizeof(request), NULL, 0);
}

int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id)
{
	struct gb_control_disconnected_request request;

	request.cport_id = cpu_to_le16(cport_id);
	return gb_operation_sync(control->connection,
				 GB_CONTROL_TYPE_DISCONNECTED, &request,
				 sizeof(request), NULL, 0);
}

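/*
 * Inform the interface that the connection on the given CPort is about to be
 * closed (control "disconnecting" request). Failures are logged and the
 * error is returned after the operation reference is dropped.
 */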
int gb_control_disconnecting_operation(struct gb_control *control,
				       u16 cport_id)
{
	struct gb_control_disconnecting_request *request;
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(control->connection,
					     GB_CONTROL_TYPE_DISCONNECTING,
					     sizeof(*request), 0, 0,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->cport_id = cpu_to_le16(cport_id);

	ret = gb_operation_request_send_sync(operation);
	if (ret) {
		dev_err(&control->dev, "failed to send disconnecting: %d\n",
			ret);
	}

	gb_operation_put(operation);

	return ret;
}

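/*
 * Send a unidirectional mode-switch request. No response is expected from
 * the interface, so only the send status is returned.
 */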
int gb_control_mode_switch_operation(struct gb_control *control)
{
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(control->connection,
					     GB_CONTROL_TYPE_MODE_SWITCH,
					     0, 0, GB_OPERATION_FLAG_UNIDIRECTIONAL,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	ret = gb_operation_request_send_sync(operation);
	if (ret)
		dev_err(&control->dev, "failed to send mode switch: %d\n", ret);

	gb_operation_put(operation);

	return ret;
}

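/*
 * Map a bundle power-management status code reported by the interface to a
 * standard negative errno.
 */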
static int gb_control_bundle_pm_status_map(u8 status)
{
	switch (status) {
	case GB_CONTROL_BUNDLE_PM_INVAL:
		return -EINVAL;
	case GB_CONTROL_BUNDLE_PM_BUSY:
		return -EBUSY;
	case GB_CONTROL_BUNDLE_PM_NA:
		return -ENOMSG;
	case GB_CONTROL_BUNDLE_PM_FAIL:
	default:
		return -EREMOTEIO;
	}
}

int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id)
{
	struct gb_control_bundle_pm_request request;
	struct gb_control_bundle_pm_response response;
	int ret;

	request.bundle_id = bundle_id;
	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_BUNDLE_SUSPEND, &request,
				sizeof(request), &response, sizeof(response));
	if (ret) {
		dev_err(&control->dev, "failed to send bundle %u suspend: %d\n",
			bundle_id, ret);
		return ret;
	}

	if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
		dev_err(&control->dev, "failed to suspend bundle %u: %d\n",
			bundle_id, response.status);
		return gb_control_bundle_pm_status_map(response.status);
	}

	return 0;
}

int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id)
{
	struct gb_control_bundle_pm_request request;
	struct gb_control_bundle_pm_response response;
	int ret;

	request.bundle_id = bundle_id;
	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_BUNDLE_RESUME, &request,
				sizeof(request), &response, sizeof(response));
	if (ret) {
		dev_err(&control->dev, "failed to send bundle %u resume: %d\n",
			bundle_id, ret);
		return ret;
	}

	if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
		dev_err(&control->dev, "failed to resume bundle %u: %d\n",
			bundle_id, response.status);
		return gb_control_bundle_pm_status_map(response.status);
	}

	return 0;
}

int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id)
{
	struct gb_control_bundle_pm_request request;
	struct gb_control_bundle_pm_response response;
	int ret;

	request.bundle_id = bundle_id;
	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_BUNDLE_DEACTIVATE, &request,
				sizeof(request), &response, sizeof(response));
	if (ret) {
		dev_err(&control->dev,
			"failed to send bundle %u deactivate: %d\n", bundle_id,
			ret);
		return ret;
	}

	if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
		dev_err(&control->dev, "failed to deactivate bundle %u: %d\n",
			bundle_id, response.status);
		return gb_control_bundle_pm_status_map(response.status);
	}

	return 0;
}

int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id)
{
	struct gb_control_bundle_pm_request request;
	struct gb_control_bundle_pm_response response;
	int ret;

	if (!control->has_bundle_activate)
		return 0;

	request.bundle_id = bundle_id;
	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_BUNDLE_ACTIVATE, &request,
				sizeof(request), &response, sizeof(response));
	if (ret) {
		dev_err(&control->dev,
			"failed to send bundle %u activate: %d\n", bundle_id,
			ret);
		return ret;
	}

	if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
		dev_err(&control->dev, "failed to activate bundle %u: %d\n",
			bundle_id, response.status);
		return gb_control_bundle_pm_status_map(response.status);
	}

	return 0;
}

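/*
 * Map an interface power-management status code reported by the interface to
 * a standard negative errno.
 */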
static int gb_control_interface_pm_status_map(u8 status)
{
	switch (status) {
	case GB_CONTROL_INTF_PM_BUSY:
		return -EBUSY;
	case GB_CONTROL_INTF_PM_NA:
		return -ENOMSG;
	default:
		return -EREMOTEIO;
	}
}

int gb_control_interface_suspend_prepare(struct gb_control *control)
{
	struct gb_control_intf_pm_response response;
	int ret;

	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(&control->dev,
			"failed to send interface suspend prepare: %d\n", ret);
		return ret;
	}

	if (response.status != GB_CONTROL_INTF_PM_OK) {
		dev_err(&control->dev, "interface error while preparing suspend: %d\n",
			response.status);
		return gb_control_interface_pm_status_map(response.status);
	}

	return 0;
}

int gb_control_interface_deactivate_prepare(struct gb_control *control)
{
	struct gb_control_intf_pm_response response;
	int ret;

	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE, NULL,
				0, &response, sizeof(response));
	if (ret) {
		dev_err(&control->dev, "failed to send interface deactivate prepare: %d\n",
			ret);
		return ret;
	}

	if (response.status != GB_CONTROL_INTF_PM_OK) {
		dev_err(&control->dev, "interface error while preparing deactivate: %d\n",
			response.status);
		return gb_control_interface_pm_status_map(response.status);
	}

	return 0;
}

int gb_control_interface_hibernate_abort(struct gb_control *control)
{
	struct gb_control_intf_pm_response response;
	int ret;

	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(&control->dev,
			"failed to send interface aborting hibernate: %d\n",
			ret);
		return ret;
	}

	if (response.status != GB_CONTROL_INTF_PM_OK) {
		dev_err(&control->dev, "interface error while aborting hibernate: %d\n",
			response.status);
		return gb_control_interface_pm_status_map(response.status);
	}

	return 0;
}

static ssize_t vendor_string_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct gb_control *control = to_gb_control(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string);
}
static DEVICE_ATTR_RO(vendor_string);

static ssize_t product_string_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gb_control *control = to_gb_control(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string);
}
static DEVICE_ATTR_RO(product_string);

static struct attribute *control_attrs[] = {
	&dev_attr_vendor_string.attr,
	&dev_attr_product_string.attr,
	NULL,
};
ATTRIBUTE_GROUPS(control);

static void gb_control_release(struct device *dev)
{
	struct gb_control *control = to_gb_control(dev);

	gb_connection_destroy(control->connection);

	kfree(control->vendor_string);
	kfree(control->product_string);

	kfree(control);
}

struct device_type greybus_control_type = {
	.name = "greybus_control",
	.release = gb_control_release,
};

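/*
 * Allocate the control device for an interface and create its control
 * connection. The device is initialized but not registered here; it is
 * registered with gb_control_add() and its last reference is dropped with
 * gb_control_put(), which triggers gb_control_release().
 */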
struct gb_control *gb_control_create(struct gb_interface *intf)
{
	struct gb_connection *connection;
	struct gb_control *control;

	control = kzalloc(sizeof(*control), GFP_KERNEL);
	if (!control)
		return ERR_PTR(-ENOMEM);

	control->intf = intf;

	connection = gb_connection_create_control(intf);
	if (IS_ERR(connection)) {
		dev_err(&intf->dev,
			"failed to create control connection: %ld\n",
			PTR_ERR(connection));
		kfree(control);
		return ERR_CAST(connection);
	}

	control->connection = connection;

	control->dev.parent = &intf->dev;
	control->dev.bus = &greybus_bus_type;
	control->dev.type = &greybus_control_type;
	control->dev.groups = control_groups;
	control->dev.dma_mask = intf->dev.dma_mask;
	device_initialize(&control->dev);
	dev_set_name(&control->dev, "%s.ctrl", dev_name(&intf->dev));

	gb_connection_set_data(control->connection, control);

	return control;
}

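/*
 * Enable transmission on the control connection and query the
 * control-protocol version. The bundle-version and bundle-activate support
 * flags are derived from the reported version and interface quirks.
 */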
int gb_control_enable(struct gb_control *control)
{
	int ret;

	dev_dbg(&control->connection->intf->dev, "%s\n", __func__);

	ret = gb_connection_enable_tx(control->connection);
	if (ret) {
		dev_err(&control->connection->intf->dev,
			"failed to enable control connection: %d\n",
			ret);
		return ret;
	}

	ret = gb_control_get_version(control);
	if (ret)
		goto err_disable_connection;

	if (control->protocol_major > 0 || control->protocol_minor > 1)
		control->has_bundle_version = true;

	/* FIXME: use protocol version instead */
	if (!(control->intf->quirks & GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE))
		control->has_bundle_activate = true;

	return 0;

err_disable_connection:
	gb_connection_disable(control->connection);

	return ret;
}

void gb_control_disable(struct gb_control *control)
{
	dev_dbg(&control->connection->intf->dev, "%s\n", __func__);

	if (control->intf->disconnected)
		gb_connection_disable_forced(control->connection);
	else
		gb_connection_disable(control->connection);
}

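/*
 * Power-management helpers: suspend disables the control connection and
 * resume re-enables it for transmission only.
 */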
int gb_control_suspend(struct gb_control *control)
{
	gb_connection_disable(control->connection);

	return 0;
}

int gb_control_resume(struct gb_control *control)
{
	int ret;

	ret = gb_connection_enable_tx(control->connection);
	if (ret) {
		dev_err(&control->connection->intf->dev,
			"failed to enable control connection: %d\n", ret);
		return ret;
	}

	return 0;
}

int gb_control_add(struct gb_control *control)
{
	int ret;

	ret = device_add(&control->dev);
	if (ret) {
		dev_err(&control->dev,
			"failed to register control device: %d\n",
			ret);
		return ret;
	}

	return 0;
}

void gb_control_del(struct gb_control *control)
{
	if (device_is_registered(&control->dev))
		device_del(&control->dev);
}

struct gb_control *gb_control_get(struct gb_control *control)
{
	get_device(&control->dev);

	return control;
}

void gb_control_put(struct gb_control *control)
{
	put_device(&control->dev);
}

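/*
 * Thin wrappers notifying the control connection before and after an
 * interface mode switch.
 */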
void gb_control_mode_switch_prepare(struct gb_control *control)
{
	gb_connection_mode_switch_prepare(control->connection);
}

void gb_control_mode_switch_complete(struct gb_control *control)
{
	gb_connection_mode_switch_complete(control->connection);
}