This patch makes the scheduler honor affinity information for each
context being scheduled. If the context has no affinity information,
behaviour is unchanged. If affinity information is present, the context
is scheduled to run on the exact SPU recommended by the affinity
placement algorithm.

Signed-off-by: Andre Detsch <adetsch@br.ibm.com>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
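
For reference, the allocation path in spu_get_idle() roughly becomes the
following with this patch applied (a simplified sketch, not the literal
resulting code: affinity_check() is the placement helper introduced
earlier in this series, and the per-node fallback loop is abbreviated
from the existing code):

	static struct spu *spu_get_idle(struct spu_context *ctx)
	{
		struct spu *spu;
		int node, n;

		/* Affinity placement: if a specific SPU is recommended,
		 * try to take exactly that one. */
		spu = affinity_check(ctx);
		if (spu)
			return spu_alloc_spu(spu);

		/* No affinity information: unchanged round-robin search
		 * over the allowed nodes. */
		node = cpu_to_node(raw_smp_processor_id());
		for (n = 0; n < MAX_NUMNODES; n++, node++) {
			node = (node < MAX_NUMNODES) ? node : 0;
			if (!node_allowed(ctx, node))
				continue;
			spu = spu_alloc_node(node);
			if (spu)
				break;
		}
		return spu;
	}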
}
}
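+/*
+ * spu_alloc_spu - allocate one specific SPU.
+ *
+ * Takes @req_spu off its node's free list if it is still free and
+ * initializes its channels; returns NULL if the SPU is already in use.
+ */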
+struct spu *spu_alloc_spu(struct spu *req_spu)
+{
+ struct spu *spu, *ret = NULL;
+
+ mutex_lock(&spu_mutex);
+ list_for_each_entry(spu, &cbe_spu_info[req_spu->node].free_spus, list) {
+ if (spu == req_spu) {
+ list_del_init(&spu->list);
+ pr_debug("Got SPU %d %d\n", spu->number, spu->node);
+ spu_init_channels(spu);
+ ret = spu;
+ break;
+ }
+ }
+ mutex_unlock(&spu_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spu_alloc_spu);
+
static struct spu *spu_get_idle(struct spu_context *ctx)
{
struct spu *spu = NULL;
int node = cpu_to_node(raw_smp_processor_id());
int n;
+ spu = affinity_check(ctx);
+ if (spu)
+ return spu_alloc_spu(spu);
+
for (n = 0; n < MAX_NUMNODES; n++, node++) {
node = (node < MAX_NUMNODES) ? node : 0;
if (!node_allowed(ctx, node))