[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / staging / tidspbridge / rmgr / rmm.c
1/*
2 * rmm.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Copyright (C) 2005-2006 Texas Instruments, Inc.
7 *
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17/*
18 * This memory manager provides general heap management and arbitrary
19 * alignment for any number of memory segments.
20 *
21 * Notes:
22 *
23 * Memory blocks are allocated from the end of the first free memory
24 * block large enough to satisfy the request. Alignment requirements
25 * are satisfied by "sliding" the block forward until its base satisfies
26 * the alignment specification; if this is not possible then the next
27 * free block large enough to hold the request is tried.
28 *
29 * Since alignment can cause the creation of a new free block - the
30 * unused memory formed between the start of the original free block
31 * and the start of the allocated block - the memory manager must free
32 * this memory to prevent a memory leak.
33 *
34 * Overlay memory is managed by reserving through rmm_alloc, and freeing
35 * it through rmm_free. The memory manager prevents DSP code/data that is
36 * overlayed from being overwritten as long as the memory it runs at has
37 * been allocated, and not yet freed.
38 */
39
2094f12d 40#include <linux/types.h>
0005391f
IN
41#include <linux/list.h>
42
43/* ----------------------------------- Host OS */
44#include <dspbridge/host_os.h>
2094f12d 45
7d55524d 46/* ----------------------------------- DSP/BIOS Bridge */
7d55524d
ORL
47#include <dspbridge/dbdefs.h>
48
7d55524d
ORL
49/* ----------------------------------- This */
50#include <dspbridge/rmm.h>
51
/*
 * ======== rmm_header ========
 * This header is used to maintain a list of free memory blocks.
 * Each segment's free list is a singly linked chain of these headers,
 * kept sorted by ascending DSP address (see free_block()).
 */
struct rmm_header {
	struct rmm_header *next;	/* form a free memory link list */
	u32 size;		/* size of the free memory */
	u32 addr;		/* DSP address of memory block */
};
61
/*
 * ======== rmm_ovly_sect ========
 * Keeps track of memory occupied by overlay section.
 * Linked into rmm_target_obj.ovly_list, sorted by ascending addr
 * (see rmm_alloc() with reserve == true).
 */
struct rmm_ovly_sect {
	struct list_head list_elem;
	u32 addr;		/* Start of memory section */
	u32 size;		/* Length (target MAUs) of section */
	s32 page;		/* Memory page */
};
72
/*
 * ======== rmm_target_obj ========
 * Per-target allocator state: the segment descriptor table, one
 * free-block list per segment, and the list of reserved overlay
 * sections currently in use.
 */
struct rmm_target_obj {
	struct rmm_segment *seg_tab;	/* array of num_segs descriptors */
	struct rmm_header **free_list;	/* one free list head per segment */
	u32 num_segs;
	struct list_head ovly_list;	/* List of overlay memory in use */
};
82
/* Forward declarations of the per-segment free-list helpers (defined below) */
static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
			u32 align, u32 *dsp_address);
static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
		       u32 size);
87
88/*
89 * ======== rmm_alloc ========
90 */
91int rmm_alloc(struct rmm_target_obj *target, u32 segid, u32 size,
b301c858 92 u32 align, u32 *dsp_address, bool reserve)
7d55524d 93{
0005391f 94 struct rmm_ovly_sect *sect, *prev_sect = NULL;
7d55524d
ORL
95 struct rmm_ovly_sect *new_sect;
96 u32 addr;
97 int status = 0;
98
7d55524d 99 if (!reserve) {
b301c858 100 if (!alloc_block(target, segid, size, align, dsp_address)) {
7d55524d
ORL
101 status = -ENOMEM;
102 } else {
103 /* Increment the number of allocated blocks in this
104 * segment */
105 target->seg_tab[segid].number++;
106 }
107 goto func_end;
108 }
109 /* An overlay section - See if block is already in use. If not,
110 * insert into the list in ascending address size. */
b301c858 111 addr = *dsp_address;
7d55524d
ORL
112 /* Find place to insert new list element. List is sorted from
113 * smallest to largest address. */
0005391f 114 list_for_each_entry(sect, &target->ovly_list, list_elem) {
7d55524d
ORL
115 if (addr <= sect->addr) {
116 /* Check for overlap with sect */
117 if ((addr + size > sect->addr) || (prev_sect &&
118 (prev_sect->addr +
119 prev_sect->size >
120 addr))) {
121 status = -ENXIO;
122 }
123 break;
124 }
125 prev_sect = sect;
7d55524d 126 }
a741ea6e 127 if (!status) {
7d55524d
ORL
128 /* No overlap - allocate list element for new section. */
129 new_sect = kzalloc(sizeof(struct rmm_ovly_sect), GFP_KERNEL);
130 if (new_sect == NULL) {
131 status = -ENOMEM;
132 } else {
7d55524d
ORL
133 new_sect->addr = addr;
134 new_sect->size = size;
135 new_sect->page = segid;
a994b051 136 if (list_is_last(&sect->list_elem, &target->ovly_list))
7d55524d 137 /* Put new section at the end of the list */
0005391f
IN
138 list_add_tail(&new_sect->list_elem,
139 &target->ovly_list);
140 else
7d55524d 141 /* Put new section just before sect */
0005391f
IN
142 list_add_tail(&new_sect->list_elem,
143 &sect->list_elem);
7d55524d
ORL
144 }
145 }
146func_end:
147 return status;
148}
149
150/*
151 * ======== rmm_create ========
152 */
153int rmm_create(struct rmm_target_obj **target_obj,
154 struct rmm_segment seg_tab[], u32 num_segs)
155{
156 struct rmm_header *hptr;
157 struct rmm_segment *sptr, *tmp;
158 struct rmm_target_obj *target;
159 s32 i;
160 int status = 0;
161
7d55524d
ORL
162 /* Allocate DBL target object */
163 target = kzalloc(sizeof(struct rmm_target_obj), GFP_KERNEL);
164
165 if (target == NULL)
166 status = -ENOMEM;
167
b66e0986 168 if (status)
7d55524d
ORL
169 goto func_cont;
170
171 target->num_segs = num_segs;
172 if (!(num_segs > 0))
173 goto func_cont;
174
175 /* Allocate the memory for freelist from host's memory */
176 target->free_list = kzalloc(num_segs * sizeof(struct rmm_header *),
177 GFP_KERNEL);
178 if (target->free_list == NULL) {
179 status = -ENOMEM;
180 } else {
181 /* Allocate headers for each element on the free list */
182 for (i = 0; i < (s32) num_segs; i++) {
183 target->free_list[i] =
184 kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
185 if (target->free_list[i] == NULL) {
186 status = -ENOMEM;
187 break;
188 }
189 }
190 /* Allocate memory for initial segment table */
191 target->seg_tab = kzalloc(num_segs * sizeof(struct rmm_segment),
192 GFP_KERNEL);
193 if (target->seg_tab == NULL) {
194 status = -ENOMEM;
195 } else {
196 /* Initialize segment table and free list */
197 sptr = target->seg_tab;
198 for (i = 0, tmp = seg_tab; num_segs > 0;
199 num_segs--, i++) {
200 *sptr = *tmp;
201 hptr = target->free_list[i];
202 hptr->addr = tmp->base;
203 hptr->size = tmp->length;
204 hptr->next = NULL;
205 tmp++;
206 sptr++;
207 }
208 }
209 }
210func_cont:
211 /* Initialize overlay memory list */
0005391f
IN
212 if (!status)
213 INIT_LIST_HEAD(&target->ovly_list);
7d55524d 214
a741ea6e 215 if (!status) {
7d55524d
ORL
216 *target_obj = target;
217 } else {
218 *target_obj = NULL;
219 if (target)
220 rmm_delete(target);
221
222 }
223
7d55524d
ORL
224 return status;
225}
226
227/*
228 * ======== rmm_delete ========
229 */
230void rmm_delete(struct rmm_target_obj *target)
231{
0005391f 232 struct rmm_ovly_sect *sect, *tmp;
7d55524d
ORL
233 struct rmm_header *hptr;
234 struct rmm_header *next;
235 u32 i;
236
7d55524d
ORL
237 kfree(target->seg_tab);
238
0005391f
IN
239 list_for_each_entry_safe(sect, tmp, &target->ovly_list, list_elem) {
240 list_del(&sect->list_elem);
241 kfree(sect);
7d55524d
ORL
242 }
243
244 if (target->free_list != NULL) {
245 /* Free elements on freelist */
246 for (i = 0; i < target->num_segs; i++) {
247 hptr = next = target->free_list[i];
248 while (next) {
249 hptr = next;
250 next = hptr->next;
251 kfree(hptr);
252 }
253 }
254 kfree(target->free_list);
255 }
256
257 kfree(target);
258}
259
7d55524d
ORL
260/*
261 * ======== rmm_free ========
262 */
c8c1ad8c 263bool rmm_free(struct rmm_target_obj *target, u32 segid, u32 dsp_addr, u32 size,
7d55524d
ORL
264 bool reserved)
265{
0005391f
IN
266 struct rmm_ovly_sect *sect, *tmp;
267 bool ret = false;
7d55524d 268
7d55524d
ORL
269 /*
270 * Free or unreserve memory.
271 */
272 if (!reserved) {
c8c1ad8c 273 ret = free_block(target, segid, dsp_addr, size);
7d55524d
ORL
274 if (ret)
275 target->seg_tab[segid].number--;
276
277 } else {
278 /* Unreserve memory */
0005391f
IN
279 list_for_each_entry_safe(sect, tmp, &target->ovly_list,
280 list_elem) {
c8c1ad8c 281 if (dsp_addr == sect->addr) {
7d55524d 282 /* Remove from list */
0005391f 283 list_del(&sect->list_elem);
7d55524d 284 kfree(sect);
0005391f 285 return true;
7d55524d 286 }
7d55524d 287 }
7d55524d
ORL
288 }
289 return ret;
290}
291
7d55524d
ORL
292/*
293 * ======== rmm_stat ========
294 */
295bool rmm_stat(struct rmm_target_obj *target, enum dsp_memtype segid,
fb6aabb7 296 struct dsp_memstat *mem_stat_buf)
7d55524d
ORL
297{
298 struct rmm_header *head;
299 bool ret = false;
300 u32 max_free_size = 0;
301 u32 total_free_size = 0;
302 u32 free_blocks = 0;
303
7d55524d
ORL
304 if ((u32) segid < target->num_segs) {
305 head = target->free_list[segid];
306
307 /* Collect data from free_list */
308 while (head != NULL) {
309 max_free_size = max(max_free_size, head->size);
310 total_free_size += head->size;
311 free_blocks++;
312 head = head->next;
313 }
314
315 /* ul_size */
085467b8 316 mem_stat_buf->size = target->seg_tab[segid].length;
7d55524d 317
6c66e948
RS
318 /* num_free_blocks */
319 mem_stat_buf->num_free_blocks = free_blocks;
7d55524d 320
085467b8
RS
321 /* total_free_size */
322 mem_stat_buf->total_free_size = total_free_size;
7d55524d 323
6c66e948
RS
324 /* len_max_free_block */
325 mem_stat_buf->len_max_free_block = max_free_size;
7d55524d 326
6c66e948
RS
327 /* num_alloc_blocks */
328 mem_stat_buf->num_alloc_blocks =
7d55524d
ORL
329 target->seg_tab[segid].number;
330
331 ret = true;
332 }
333
334 return ret;
335}
336
337/*
338 * ======== balloc ========
339 * This allocation function allocates memory from the lowest addresses
340 * first.
341 */
342static bool alloc_block(struct rmm_target_obj *target, u32 segid, u32 size,
b301c858 343 u32 align, u32 *dsp_address)
7d55524d
ORL
344{
345 struct rmm_header *head;
346 struct rmm_header *prevhead = NULL;
347 struct rmm_header *next;
348 u32 tmpalign;
349 u32 alignbytes;
350 u32 hsize;
351 u32 allocsize;
352 u32 addr;
353
354 alignbytes = (align == 0) ? 1 : align;
355 prevhead = NULL;
356 head = target->free_list[segid];
357
358 do {
359 hsize = head->size;
360 next = head->next;
361
362 addr = head->addr; /* alloc from the bottom */
363
364 /* align allocation */
365 (tmpalign = (u32) addr % alignbytes);
366 if (tmpalign != 0)
367 tmpalign = alignbytes - tmpalign;
368
369 allocsize = size + tmpalign;
370
371 if (hsize >= allocsize) { /* big enough */
372 if (hsize == allocsize && prevhead != NULL) {
373 prevhead->next = next;
374 kfree(head);
375 } else {
376 head->size = hsize - allocsize;
377 head->addr += allocsize;
378 }
379
380 /* free up any hole created by alignment */
381 if (tmpalign)
382 free_block(target, segid, addr, tmpalign);
383
b301c858 384 *dsp_address = addr + tmpalign;
7d55524d
ORL
385 return true;
386 }
387
388 prevhead = head;
389 head = next;
390
391 } while (head != NULL);
392
393 return false;
394}
395
/*
 * ======== free_block ========
 * Return the block [addr, addr + size) to segment segid's address-sorted
 * free list, coalescing with adjacent free blocks where possible.
 * Returns false only if the header allocation fails.
 *
 * TO DO: free_block() allocates memory, which could result in failure.
 * Could allocate an rmm_header in rmm_alloc(), to be kept in a pool.
 * free_block() could use an rmm_header from the pool, freeing as blocks
 * are coalesced.
 */
static bool free_block(struct rmm_target_obj *target, u32 segid, u32 addr,
		       u32 size)
{
	struct rmm_header *head;
	struct rmm_header *thead;
	struct rmm_header *rhead;
	bool ret = true;

	/* Create a memory header to hold the newly free'd block. */
	rhead = kzalloc(sizeof(struct rmm_header), GFP_KERNEL);
	if (rhead == NULL) {
		ret = false;
	} else {
		/* search down the free list to find the right place for addr */
		head = target->free_list[segid];

		if (addr >= head->addr) {
			/* Advance head to the last entry whose address is
			 * <= addr, then link rhead (the freed block) after
			 * it.  thead is the entry following the insertion
			 * point (NULL at end of list). */
			while (head->next != NULL && addr > head->next->addr)
				head = head->next;

			thead = head->next;

			head->next = rhead;
			rhead->next = thead;
			rhead->addr = addr;
			rhead->size = size;
		} else {
			/* Freed block precedes the first entry: keep the
			 * first header in place to represent the new block
			 * and move its old contents into rhead, which
			 * becomes the second entry. */
			*rhead = *head;
			head->next = rhead;
			head->addr = addr;
			head->size = size;
			thead = rhead->next;
		}

		/* join with upper block, if possible */
		/* On the first path rhead IS the freed block, so this
		 * merges it into thead (note size/addr were just copied
		 * into rhead, hence `size`/`addr` here are equivalent).
		 * On the second path rhead holds the old first block;
		 * by the list's coalescing invariant it should not be
		 * adjacent to thead — NOTE(review): branch appears
		 * unreachable on that path, confirm. */
		if (thead != NULL && (rhead->addr + rhead->size) ==
		    thead->addr) {
			head->next = rhead->next;
			thead->size = size + thead->size;
			thead->addr = addr;
			kfree(rhead);
			rhead = thead;
		}

		/* join with the lower block, if possible */
		/* After an upper merge rhead aliases thead, so this can
		 * collapse head, the freed block, and thead into one. */
		if ((head->addr + head->size) == rhead->addr) {
			head->next = rhead->next;
			head->size = head->size + rhead->size;
			kfree(rhead);
		}
	}

	return ret;
}