--- linux-2.6.5-7.97ZP2RC3/kernel/fork.c	2004-12-03 10:37:15.000000000 +1100
+++ linux-2.6.5-7.97ZP2RC4/kernel/fork.c	2004-12-08 16:59:26.480431272 +1100
@@ -1051,12 +1051,6 @@
 	sched_fork(p);
 
 	/*
-	 * call pagg modules to properly attach new process to the same
-	 * process aggregate containers as the parent process.
-	 */
-	pagg_attach(p, current);
-
-	/*
 	 * Ok, make it visible to the rest of the system.
 	 * We dont wake it up yet.
 	 */
@@ -1131,13 +1125,20 @@
 	write_unlock_irq(&tasklist_lock);
 	retval = 0;
 
+	/*
+	 * call pagg modules to properly attach new process to the
+	 * same process aggregate containers as the parent process.
+	 * We do this after its thread group has been assigned to
+	 * permit clients to distinguish thread groups clearly.
+	 */
+	pagg_attach(p, current);
+
 fork_out:
 	if (retval)
 		return ERR_PTR(retval);
 	return p;
 
 bad_fork_cleanup_namespace:
-	pagg_detach(p);
 	exit_namespace(p);
 bad_fork_cleanup_mm:
 	exit_mm(p);
--- linux-2.6.5-7.97ZP2RC3/kernel/pagg.c	2004-12-03 10:37:15.000000000 +1100
+++ linux-2.6.5-7.97ZP2RC4/kernel/pagg.c	2004-12-08 17:04:08.512555872 +1100
@@ -261,7 +261,7 @@
 	struct task_struct *g = NULL, *p = NULL;
 	int init_result = 0;
 
-	/* Because of internal race conditions we can't gaurantee
+	/* Because of internal race conditions we can't guarantee
 	 * getting every task in just one pass so we just keep going
 	 * until we don't find any unitialized tasks. The inefficiency
 	 * of this should be tempered by the fact that this happens
@@ -389,32 +389,33 @@
 {
 	struct pagg *from_pagg;
 
-	/* lock the parents pagg_list we are copying from */
-	down_read(&from_task->pagg_sem);	/* read lock the pagg list */
+	/* Lock the parents pagg_list we are copying from. We will be
+	 * on the task list already and may have competition with
+	 * registering clients, so lock our own list.
+	 */
+	down_read(&from_task->pagg_sem);
+	down_write(&to_task->pagg_sem);
 
 	list_for_each_entry(from_pagg, &from_task->pagg_list, entry) {
 		struct pagg *to_pagg = NULL;
 
-		to_pagg = pagg_alloc(to_task, from_pagg->hook);
-		if (!to_pagg) {
-			goto error_return;
-		}
-		if (to_pagg->hook->attach(to_task, to_pagg, from_pagg->data) != 0 )
-			goto error_return;
-	}
-
-	up_read(&from_task->pagg_sem);	/* unlock the pagg list */
+		/* Raced with allocation from registration. */
+		if (pagg_get(to_task, from_pagg->hook->name) != NULL)
+			continue;
+
+		/* Break on memory allocation failure. */
+		if (!(to_pagg = pagg_alloc(to_task, from_pagg->hook)))
+			break;
 
-	return;	/* success */
+		/* Do not detach all attachments on error. This
+		 * behaviour is inconsistent with registration.
+		 */
+		if (to_pagg->hook->attach(to_task, to_pagg, from_pagg->data))
+			pagg_free(to_pagg);
+	}
 
- error_return:
-	/*
-	 * Clean up all the pagg attachments made on behalf of the new
-	 * task. Set new task pagg ptr to NULL for return.
-	 */
+	up_write(&to_task->pagg_sem);
 	up_read(&from_task->pagg_sem);	/* unlock the pagg list */
-	__pagg_detach(to_task);
-	return;	/* failure */
 }
 
 /**
@@ -437,9 +438,9 @@
 		pagg->hook->detach(task, pagg);
 		pagg_free(pagg);
 	}
-	
+
 	up_write(&task->pagg_sem);	/* write unlock the pagg list */
-	
+
 	return;	/* 0 = success, else return last code for failure */
 }
 
@@ -459,7 +460,6 @@
 {
 	struct pagg *pagg;
 
-	/* lock the parents pagg_list we are copying from */
 	down_read(&task->pagg_sem);	/* lock the pagg list */
 
 	list_for_each_entry(pagg, &task->pagg_list, entry) {
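
Not part of the patch, purely for illustration: a minimal user-space C sketch of the copy loop the reworked pagg_attach() implements, i.e. skip hooks the child already holds, stop on allocation failure, and drop (rather than unwind) a single attachment the client refuses. The struct layouts, attach_from(), and the demo hooks here are simplified stand-ins, not the kernel's pagg types or signatures.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct hook {
	const char *name;
	int (*attach)(const char *task, void *data);	/* client callback */
};

struct pagg {
	struct hook *hook;
	void *data;
	struct pagg *next;
};

struct task {
	const char *name;
	struct pagg *list;	/* stand-in for task->pagg_list */
};

/* Find an existing attachment by hook name (stand-in for pagg_get()). */
static struct pagg *pagg_get(struct task *t, const char *name)
{
	for (struct pagg *p = t->list; p; p = p->next)
		if (strcmp(p->hook->name, name) == 0)
			return p;
	return NULL;
}

/* Allocate an attachment and link it onto the task (stand-in for pagg_alloc()). */
static struct pagg *pagg_alloc(struct task *t, struct hook *h)
{
	struct pagg *p = calloc(1, sizeof(*p));
	if (!p)
		return NULL;
	p->hook = h;
	p->next = t->list;
	t->list = p;
	return p;
}

/* Unlink and free one attachment (stand-in for pagg_free()). */
static void pagg_free(struct task *t, struct pagg *victim)
{
	for (struct pagg **pp = &t->list; *pp; pp = &(*pp)->next)
		if (*pp == victim) {
			*pp = victim->next;
			free(victim);
			return;
		}
}

/* Mirrors the loop body of the patched pagg_attach(): per-hook skip,
 * break on ENOMEM, free only the failed attachment on client error. */
static void attach_from(struct task *child, struct task *parent)
{
	for (struct pagg *from = parent->list; from; from = from->next) {
		struct pagg *to;

		if (pagg_get(child, from->hook->name))		/* already attached */
			continue;
		if (!(to = pagg_alloc(child, from->hook)))	/* out of memory */
			break;
		if (from->hook->attach(child->name, from->data))
			pagg_free(child, to);			/* client refused */
	}
}

static int say_yes(const char *task, void *data)
{
	(void)data;
	printf("attached %s\n", task);
	return 0;
}

static int say_no(const char *task, void *data)
{
	(void)task; (void)data;
	return -1;	/* refuse the child */
}

int main(void)
{
	struct hook job = { "job", say_yes }, cpuset = { "cpuset", say_no };
	struct task parent = { "parent", NULL }, child = { "child", NULL };

	pagg_alloc(&parent, &job);
	pagg_alloc(&parent, &cpuset);
	attach_from(&child, &parent);

	/* "job" sticks, "cpuset" was dropped, and nothing else was unwound. */
	printf("child has job: %s\n", pagg_get(&child, "job") ? "yes" : "no");
	printf("child has cpuset: %s\n", pagg_get(&child, "cpuset") ? "yes" : "no");
	return 0;
}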