for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
atomic_set(&rdev->uvd.handles[i], 0);
rdev->uvd.filp[i] = NULL;
+ rdev->uvd.img_size[i] = 0;
}
return 0;
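Note: the new rdev->uvd.img_size[i] accesses assume a matching per-handle array in struct radeon_uvd (radeon.h). A minimal sketch of that struct change follows; the neighbouring fields are recalled from memory and therefore only indicative, only the img_size line belongs to this change:

struct radeon_uvd {
	struct radeon_bo	*vcpu_bo;
	void			*cpu_addr;
	uint64_t		gpu_addr;
	atomic_t		handles[RADEON_MAX_UVD_HANDLES];
	struct drm_file		*filp[RADEON_MAX_UVD_HANDLES];
	unsigned		img_size[RADEON_MAX_UVD_HANDLES];	/* per-stream width * height */
	struct delayed_work	idle_work;
};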
unsigned offset, unsigned buf_sizes[])
{
int32_t *msg, msg_type, handle;
+ unsigned img_size = 0;
void *ptr;
int i, r;
if (msg_type == 1) {
/* it's a decode msg, calc buffer sizes */
r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+ /* calc image size (width * height) */
+ img_size = msg[6] * msg[7];
radeon_bo_kunmap(bo);
if (r)
return r;
radeon_bo_kunmap(bo);
return 0;
} else {
+ /* it's a create msg, calc image size (width * height) */
+ img_size = msg[7] * msg[8];
radeon_bo_kunmap(bo);
if (msg_type != 0) {
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
p->rdev->uvd.filp[i] = p->filp;
+ p->rdev->uvd.img_size[i] = img_size;
return 0;
}
}
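Note: the img_size stored here is what radeon_uvd_count_handles() below compares against 720*576 (414720 pixels, PAL SD). For example, a 1280x720 decode stream records 921600 pixels and is counted as HD, while a 720x480 stream records 345600 and is counted as SD.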
return radeon_uvd_send_msg(rdev, ring, bo, fence);
}
+/**
+ * radeon_uvd_count_handles - count number of open streams
+ *
+ * @rdev: radeon_device pointer
+ * @sd: number of SD streams
+ * @hd: number of HD streams
+ *
+ * Count the number of open SD/HD streams as a hint for power management
+ */
+static void radeon_uvd_count_handles(struct radeon_device *rdev,
+ unsigned *sd, unsigned *hd)
+{
+ unsigned i;
+
+ *sd = 0;
+ *hd = 0;
+
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ if (!atomic_read(&rdev->uvd.handles[i]))
+ continue;
+
+ if (rdev->uvd.img_size[i] >= 720*576)
+ ++(*hd);
+ else
+ ++(*sd);
+ }
+}
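Note: radeon_uvd_count_handles() is static, so a caller in this file is expected. A hypothetical sketch of how the SD/HD counts could drive UVD clock selection; radeon_uvd_adjust_clocks() and the clock values are illustrative and not part of this patch, only radeon_set_uvd_clocks() and radeon_uvd_count_handles() are assumed to exist as used here:

static void radeon_uvd_adjust_clocks(struct radeon_device *rdev)
{
	unsigned sd = 0, hd = 0;

	/* classify the open streams: >= 720*576 pixels counts as HD */
	radeon_uvd_count_handles(rdev, &sd, &hd);

	if (hd)
		radeon_set_uvd_clocks(rdev, 80000, 80000);	/* illustrative clocks */
	else if (sd)
		radeon_set_uvd_clocks(rdev, 53300, 40000);
	else
		radeon_set_uvd_clocks(rdev, 0, 0);		/* no open streams, power down */
}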
+
static void radeon_uvd_idle_work_handler(struct work_struct *work)
{
struct radeon_device *rdev =