bitkeeper revision 1.1159.230.2 (41f65234Pi4Crimteaw690fX-H8jyg)
author kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Tue, 25 Jan 2005 14:05:40 +0000 (14:05 +0000)
committer kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Tue, 25 Jan 2005 14:05:40 +0000 (14:05 +0000)
Use list_for_each_entry() in preference to list_for_each().
signed-off-by: keir.fraser@cl.cam.ac.uk

12 files changed:
xen/arch/x86/domain.c
xen/arch/x86/mpparse.c
xen/arch/x86/pci-pc.c
xen/arch/x86/pdb-stub.c
xen/common/physdev.c
xen/common/sched_atropos.c
xen/common/sched_bvt.c
xen/common/sched_rrobin.c
xen/common/slab.c
xen/drivers/pci/pci.c
xen/include/xen/list.h
xen/include/xen/pci.h

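The change is mechanical throughout: each open-coded list_for_each()/list_entry() pair becomes a single list_for_each_entry() that yields the containing structure directly, so the temporary struct list_head cursor can be dropped. A minimal before/after sketch of the pattern (the struct, list and field names here are illustrative only, not taken from the tree):

    struct foo {
        int              id;
        struct list_head list;      /* linkage into foo_list */
    };
    static LIST_HEAD(foo_list);

    struct list_head *ent;
    struct foo       *f;

    /* Before: walk the raw nodes, then recover the container by hand. */
    list_for_each ( ent, &foo_list )
    {
        f = list_entry(ent, struct foo, list);
        printk("foo %d\n", f->id);
    }

    /* After: the iterator hands back the container itself. */
    list_for_each_entry ( f, &foo_list, list )
        printk("foo %d\n", f->id);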
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 2ba535a3ac5b1c4205cc630cbef8ab9e1d37250c..4ca96f5d3cbf85d399e31b0eb8f5e2c605e39cbe 100644
@@ -199,13 +199,11 @@ void machine_halt(void)
 void dump_pageframe_info(struct domain *d)
 {
     struct pfn_info *page;
-    struct list_head *ent;
 
     if ( d->tot_pages < 10 )
     {
-        list_for_each ( ent, &d->page_list )
+        list_for_each_entry ( page, &d->page_list, list )
         {
-            page = list_entry(ent, struct pfn_info, list);
             printk("Page %08x: caf=%08x, taf=%08x\n",
                    page_to_phys(page), page->count_info,
                    page->u.inuse.type_info);
diff --git a/xen/arch/x86/mpparse.c b/xen/arch/x86/mpparse.c
index 5cf760d62ebac5faec215cd64a10a69bcb9abf5b..7db6f8a6da7e5fd6b4be3233942d71f8499427c5 100644
@@ -1232,7 +1232,6 @@ void __init mp_config_acpi_legacy_irqs (void)
 
 void __init mp_parse_prt (void)
 {
-       struct list_head        *node = NULL;
        struct acpi_prt_entry   *entry = NULL;
        int                     ioapic = -1;
        int                     ioapic_pin = 0;
@@ -1245,9 +1244,7 @@ void __init mp_parse_prt (void)
         * Parsing through the PCI Interrupt Routing Table (PRT) and program
         * routing for all entries.
         */
-       list_for_each(node, &acpi_prt.entries) {
-               entry = list_entry(node, struct acpi_prt_entry, node);
-
+       list_for_each_entry(entry, &acpi_prt.entries, node) {
                /* Need to get irq for dynamic entry */
                if (entry->link.handle) {
                        irq = acpi_pci_link_get_irq(entry->link.handle, entry->link.index, &edge_level, &active_high_low);
diff --git a/xen/arch/x86/pci-pc.c b/xen/arch/x86/pci-pc.c
index 363b635141e28bd3a6d087472df2ec6c8e289f33..cb5889628cd7c24b6cf219d1fcadd5d924e9493c 100644
@@ -1372,11 +1372,9 @@ void __devinit  pcibios_fixup_bus(struct pci_bus *b)
 
 struct pci_bus * __devinit pcibios_scan_root(int busnum)
 {
-       struct list_head *list;
        struct pci_bus *bus;
 
-       list_for_each(list, &pci_root_buses) {
-               bus = pci_bus_b(list);
+       pci_for_each_bus(bus) {
                if (bus->number == busnum) {
                        /* Already scanned */
                        return bus;
diff --git a/xen/arch/x86/pdb-stub.c b/xen/arch/x86/pdb-stub.c
index 827035e07f320d0897639eab80a49f55ea6d7a22..568bcea1134417b330903cdd66d23cec2313249f 100644
@@ -778,12 +778,10 @@ void pdb_bkpt_add (unsigned long cr3, unsigned long address)
 struct pdb_breakpoint* pdb_bkpt_search (unsigned long cr3, 
                                        unsigned long address)
 {
-    struct list_head *list_entry;
     struct pdb_breakpoint *bkpt;
 
-    list_for_each(list_entry, &breakpoints.list)
+    list_for_each_entry ( bkpt, &breakpoints.list, list )
     {
-        bkpt = list_entry(list_entry, struct pdb_breakpoint, list);
        if ( bkpt->cr3 == cr3 && bkpt->address == address )
             return bkpt;
     }
@@ -797,12 +795,10 @@ struct pdb_breakpoint* pdb_bkpt_search (unsigned long cr3,
  */
 int pdb_bkpt_remove (unsigned long cr3, unsigned long address)
 {
-    struct list_head *list_entry;
     struct pdb_breakpoint *bkpt;
 
-    list_for_each(list_entry, &breakpoints.list)
+    list_for_each_entry ( bkpt, &breakpoints.list, list )
     {
-        bkpt = list_entry(list_entry, struct pdb_breakpoint, list);
        if ( bkpt->cr3 == cr3 && bkpt->address == address )
        {
             list_del(&bkpt->list);
diff --git a/xen/common/physdev.c b/xen/common/physdev.c
index 4b6f398e9aacb5fa6bb12de6b164c15e31d13c1d..cd4d2811e31e4221c7fe8a54aed57624da83ea73 100644
@@ -73,11 +73,9 @@ typedef struct _phys_dev_st {
 static phys_dev_t *find_pdev(struct domain *p, struct pci_dev *dev)
 {
     phys_dev_t *t, *res = NULL;
-    struct list_head *tmp;
 
-    list_for_each(tmp, &p->pcidev_list)
+    list_for_each_entry ( t, &p->pcidev_list, node )
     {
-        t = list_entry(tmp,  phys_dev_t, node);
         if ( dev == t->dev )
         {
             res = t;
@@ -149,9 +147,9 @@ int physdev_pci_access_modify(
 
     /* Make the domain privileged. */
     set_bit(DF_PHYSDEV, &p->flags);
-       /* FIXME: MAW for now make the domain REALLY privileged so that it
-        * can run a backend driver (hw access should work OK otherwise) */
-       set_bit(DF_PRIVILEGED, &p->flags);
+    /* FIXME: MAW for now make the domain REALLY privileged so that it
+     * can run a backend driver (hw access should work OK otherwise) */
+    set_bit(DF_PRIVILEGED, &p->flags);
 
     /* Grant write access to the specified device. */
     if ( (pdev = pci_find_slot(bus, PCI_DEVFN(dev, func))) == NULL )
@@ -214,17 +212,16 @@ int physdev_pci_access_modify(
 int domain_iomem_in_pfn(struct domain *p, unsigned long pfn)
 {
     int ret = 0;
-    struct list_head *l;
+    phys_dev_t *phys_dev;
 
     VERBOSE_INFO("Checking if physdev-capable domain %u needs access to "
                  "pfn %08lx\n", p->id, pfn);
     
     spin_lock(&p->pcidev_lock);
 
-    list_for_each(l, &p->pcidev_list)
+    list_for_each_entry ( phys_dev, &p->pcidev_list, node )
     {
         int i;
-        phys_dev_t *phys_dev = list_entry(l, phys_dev_t, node);
         struct pci_dev *pci_dev = phys_dev->dev;
 
         for ( i = 0; (i < DEVICE_COUNT_RESOURCE) && (ret == 0); i++ )
@@ -619,15 +616,11 @@ static long pci_cfgreg_write(int bus, int dev, int func, int reg,
 static long pci_probe_root_buses(u32 *busmask)
 {
     phys_dev_t *pdev;
-    struct list_head *tmp;
 
     memset(busmask, 0, 256/8);
 
-    list_for_each ( tmp, &current->pcidev_list )
-    {
-        pdev = list_entry(tmp, phys_dev_t, node);
+    list_for_each_entry ( pdev, &current->pcidev_list, node )
         set_bit(pdev->dev->bus->number, busmask);
-    }
 
     return 0;
 }
diff --git a/xen/common/sched_atropos.c b/xen/common/sched_atropos.c
index 1565b86ac9db6548a6de3f8753a3cacf21eeda7c..b5901f2397a19d3bbd904fde1ae492343e71a7e0 100644
@@ -1,6 +1,6 @@
 /*
- *     atropos.c
- *     ---------
+ * atropos.c
+ * ---------
  *
  * Copyright (c) 1994 University of Cambridge Computer Laboratory.
  * This is part of Nemesis; consult your contract for terms and conditions.
@@ -98,8 +98,9 @@ static inline int __task_on_runqueue(struct domain *d)
 static int q_len(struct list_head *q) 
 {
     int i = 0;
-    struct list_head *tmp;
-    list_for_each(tmp, q) i++;
+    struct at_dom_info *tmp;
+    list_for_each_entry ( tmp, q, waitq )
+        i++;
     return i;
 }
 
@@ -129,60 +130,39 @@ static inline struct domain *waitq_el(struct list_head *l)
  */
 static void requeue(struct domain *sdom)
 {
-    struct at_dom_info *inf = DOM_INFO(sdom);
-    struct list_head *prev;
-    struct list_head *next;
-
+    struct at_dom_info *i, *inf = DOM_INFO(sdom);
 
-    if(!domain_runnable(sdom)) return;
+    if ( !domain_runnable(sdom) )
+        return;
     
-    if(inf->state == ATROPOS_TASK_WAIT ||
-        inf->state == ATROPOS_TASK_UNBLOCKED)
+    if ( (inf->state == ATROPOS_TASK_WAIT) ||
+         (inf->state == ATROPOS_TASK_UNBLOCKED) )
     {
-        prev = WAITQ(sdom->processor);
-
-        list_for_each(next, WAITQ(sdom->processor))
+        list_for_each_entry ( i, WAITQ(sdom->processor), waitq )
         {
-            struct at_dom_info *i = 
-                list_entry(next, struct at_dom_info, waitq);
             if ( i->deadline > inf->deadline )
             {
-                __list_add(&inf->waitq, prev, next);
+                __list_add(&inf->waitq, i->waitq.prev, &i->waitq);
                 break;
             }
-
-            prev = next;
         }
 
-        /* put the domain on the end of the list if it hasn't been put
-         * elsewhere */
-        if ( next == WAITQ(sdom->processor) )
+        if ( &i->waitq == WAITQ(sdom->processor) )
             list_add_tail(&inf->waitq, WAITQ(sdom->processor));
     }
     else if ( domain_runnable(sdom) )
     {
-        /* insert into ordered run queue */
-        
-        prev = RUNQ(sdom->processor);
-
-        list_for_each(next, RUNQ(sdom->processor))
+        list_for_each_entry ( i, RUNQ(sdom->processor), run_list )
         {
-            struct at_dom_info *p = list_entry(next, struct at_dom_info,
-                                               run_list);
-
-            if( p->deadline > inf->deadline || is_idle_task(p->owner) )
+            if ( (i->deadline > inf->deadline) || is_idle_task(i->owner) )
             {
-                __list_add(&inf->run_list, prev, next);
+                __list_add(&inf->run_list, i->run_list.prev, &i->run_list);
                 break;
             }
-
-            prev = next;
         }
 
-        if ( next == RUNQ(sdom->processor) )
+        if ( &i->run_list == RUNQ(sdom->processor) )
             list_add_tail(&inf->run_list, RUNQ(sdom->processor));
-        
-    
     }
     /* silently ignore tasks in other states like BLOCKED, DYING, STOPPED, etc
      * - they shouldn't be on any queue */
@@ -194,7 +174,7 @@ static int at_alloc_task(struct domain *p)
     ASSERT(p != NULL);
     
     p->sched_priv = xmem_cache_alloc(dom_info_cache);
-    if( p->sched_priv == NULL )
+    if ( p->sched_priv == NULL )
         return -1;
     
     return 0;
@@ -294,26 +274,26 @@ static void unblock(struct domain *sdom)
     {
         /* Long blocking case */
 
-           /* The sdom has passed its deadline since it was blocked. 
-              Give it its new deadline based on the latency value. */
-           inf->prevddln = time;
+        /* The sdom has passed its deadline since it was blocked. 
+           Give it its new deadline based on the latency value. */
+        inf->prevddln = time;
 
         /* Scale the scheduling parameters as requested by the latency hint. */
-           inf->deadline = time + inf->latency;
+        inf->deadline = time + inf->latency;
         inf->slice = inf->nat_slice / ( inf->nat_period / inf->latency );
         inf->period = inf->latency;
-           inf->remain = inf->slice;
+        inf->remain = inf->slice;
     }
     else 
     {
         /* Short blocking case */
 
-           /* We leave REMAIN intact, but put this domain on the WAIT
-               queue marked as recently unblocked.  It will be given
-               priority over other domains on the wait queue until while
-               REMAIN>0 in a generous attempt to help it make up for its
-               own foolishness. */
-           if(inf->remain > 0)
+        /* We leave REMAIN intact, but put this domain on the WAIT
+           queue marked as recently unblocked.  It will be given
+           priority over other domains on the wait queue until while
+           REMAIN>0 in a generous attempt to help it make up for its
+           own foolishness. */
+        if(inf->remain > 0)
             inf->state = ATROPOS_TASK_UNBLOCKED;
         else
             inf->state = ATROPOS_TASK_WAIT;
@@ -349,10 +329,10 @@ static void block(struct domain* sdom)
  */
 task_slice_t ksched_scheduler(s_time_t time)
 {
-    struct domain      *cur_sdom = current;  /* Current sdom           */
-    s_time_t     newtime;
-    s_time_t      ranfor;              /* How long the domain ran      */
-    struct domain      *sdom;          /* tmp. scheduling domain       */
+    struct domain *cur_sdom = current;  /* Current sdom           */
+    s_time_t       newtime;
+    s_time_t       ranfor;              /* How long the domain ran      */
+    struct domain *sdom;                /* tmp. scheduling domain       */
     int cpu = cur_sdom->processor;      /* current CPU                  */
     struct at_dom_info *cur_info;
     static unsigned long waitq_rrobin = 0;
@@ -367,7 +347,7 @@ task_slice_t ksched_scheduler(s_time_t time)
     /* If we were spinning in the idle loop, there is no current
      * domain to deschedule. */
     if (is_idle_task(cur_sdom))
-       goto deschedule_done;
+        goto deschedule_done;
 
     /*****************************
      * 
@@ -375,7 +355,7 @@ task_slice_t ksched_scheduler(s_time_t time)
      *
      ****************************/
 
-   /* Record the time the domain was preempted and for how long it
+    /* Record the time the domain was preempted and for how long it
        ran.  Work out if the domain is going to be blocked to save
        some pointless queue shuffling */
     cur_sdom->lastdeschd = time;
@@ -388,26 +368,26 @@ task_slice_t ksched_scheduler(s_time_t time)
          (cur_info->state == ATROPOS_TASK_UNBLOCKED) )
     {
 
-           /* In this block, we are doing accounting for an sdom which has 
-               been running in contracted time.  Note that this could now happen
-               even if the domain is on the wait queue (i.e. if it blocked) */
+        /* In this block, we are doing accounting for an sdom which has 
+           been running in contracted time.  Note that this could now happen
+           even if the domain is on the wait queue (i.e. if it blocked) */
 
-           /* Deduct guaranteed time from the domain */
-           cur_info->remain  -= ranfor;
+        /* Deduct guaranteed time from the domain */
+        cur_info->remain  -= ranfor;
 
-           /* If guaranteed time has run out... */
-           if ( cur_info->remain <= 0 )
+        /* If guaranteed time has run out... */
+        if ( cur_info->remain <= 0 )
         {
-               /* Move domain to correct position in WAIT queue */
+            /* Move domain to correct position in WAIT queue */
             /* XXX sdom_unblocked doesn't need this since it is 
                already in the correct place. */
-               cur_info->state = ATROPOS_TASK_WAIT;
-           }
+            cur_info->state = ATROPOS_TASK_WAIT;
+        }
     }
 
     requeue(cur_sdom);
 
-deschedule_done:
+ deschedule_done:
     /*****************************
      * 
      * We have now successfully descheduled the current sdom.
@@ -424,10 +404,10 @@ deschedule_done:
      ****************************/
     
     while(!list_empty(WAITQ(cpu)) && 
-           DOM_INFO(sdom = waitq_el(WAITQ(cpu)->next))->deadline <= time ) 
+          DOM_INFO(sdom = waitq_el(WAITQ(cpu)->next))->deadline <= time ) 
     {
 
-           struct at_dom_info *inf = DOM_INFO(sdom);
+        struct at_dom_info *inf = DOM_INFO(sdom);
         dequeue(sdom);
         
         if ( inf->period != inf->nat_period )
@@ -444,22 +424,22 @@ deschedule_done:
             }
         }
 
-           /* Domain begins a new period and receives a slice of CPU 
-            * If this domain has been blocking then throw away the
-            * rest of it's remain - it can't be trusted */
-           if (inf->remain > 0) 
-               inf->remain = inf->slice;
+        /* Domain begins a new period and receives a slice of CPU 
+         * If this domain has been blocking then throw away the
+         * rest of it's remain - it can't be trusted */
+        if (inf->remain > 0) 
+            inf->remain = inf->slice;
         else 
-               inf->remain += inf->slice;
+            inf->remain += inf->slice;
 
-           inf->prevddln = inf->deadline;
-           inf->deadline += inf->period;
+        inf->prevddln = inf->deadline;
+        inf->deadline += inf->period;
 
         if ( inf->remain <= 0 )
             inf->state = ATROPOS_TASK_WAIT;
 
-           /* Place on the appropriate queue */
-           requeue(sdom);
+        /* Place on the appropriate queue */
+        requeue(sdom);
     }
 
     /*****************************
@@ -484,30 +464,27 @@ deschedule_done:
      * queue */
     if (cur_sdom->id == IDLE_DOMAIN_ID && !list_empty(WAITQ(cpu)))
     {
-        struct list_head *item;
-
-           /* Try running a domain on the WAIT queue - this part of the
-               scheduler isn't particularly efficient but then again, we
-               don't have any guaranteed domains to worry about. */
-       
-           /* See if there are any unblocked domains on the WAIT
-               queue who we can give preferential treatment to. */
+        struct at_dom_info *inf;
+
+        /* Try running a domain on the WAIT queue - this part of the
+           scheduler isn't particularly efficient but then again, we
+           don't have any guaranteed domains to worry about. */
+
+        /* See if there are any unblocked domains on the WAIT
+           queue who we can give preferential treatment to. */
         
-        list_for_each(item, WAITQ(cpu))
+        list_for_each_entry ( inf, WAITQ(cpu), waitq )
         {
-            struct at_dom_info *inf =
-                list_entry(item, struct at_dom_info, waitq);
-
             sdom = inf->owner;
             
-               if (inf->state == ATROPOS_TASK_UNBLOCKED) 
+            if (inf->state == ATROPOS_TASK_UNBLOCKED) 
             { 
-                       cur_sdom = sdom;
-                   cur_info  = inf;
-                   newtime  = time + inf->remain;
-                       goto found;
-               }
-           }
+                cur_sdom = sdom;
+                cur_info  = inf;
+                newtime  = time + inf->remain;
+                goto found;
+            }
+        }
 
         /* init values needed to approximate round-robin for slack time */
         i = 0;
@@ -515,14 +492,11 @@ deschedule_done:
             waitq_rrobin = 0;
         
         
-           /* Last chance: pick a domain on the wait queue with the XTRA
-               flag set.  The NEXT_OPTM field is used to cheaply achieve
-               an approximation of round-robin order */
-        list_for_each(item, WAITQ(cpu))
+        /* Last chance: pick a domain on the wait queue with the XTRA
+           flag set.  The NEXT_OPTM field is used to cheaply achieve
+           an approximation of round-robin order */
+        list_for_each_entry ( inf, WAITQ(cpu), waitq )
         {
-            struct at_dom_info *inf =
-                list_entry(item, struct at_dom_info, waitq);
-            
             sdom = inf->owner;
             
             if (inf->xtratime && i >= waitq_rrobin) 
@@ -538,7 +512,7 @@ deschedule_done:
         }
     }
 
-  found:
+ found:
     /**********************
      * 
      * We now have to work out the time when we next need to
@@ -554,7 +528,7 @@ deschedule_done:
     /* exhausted its time, cut short the time allocation */
     if (!list_empty(WAITQ(cpu)))
     {
-           newtime = MIN(newtime,
+        newtime = MIN(newtime,
                       DOM_INFO(waitq_el(WAITQ(cpu)->next))->deadline);
     }
 
@@ -603,44 +577,44 @@ static void at_dump_runq_el(struct domain *p)
 /* dump relevant per-cpu state for a run queue dump */
 static void at_dump_cpu_state(int cpu)
 {
-    struct list_head *list, *queue;
+    struct list_head *queue;
     int loop = 0;
     struct at_dom_info *d_inf;
     struct domain *d;
 
     queue = RUNQ(cpu);
     printk("\nRUNQUEUE rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
-    (unsigned long) queue->next, (unsigned long) queue->prev);
+           (unsigned long) queue->next, (unsigned long) queue->prev);
 
-    list_for_each ( list, queue )
+    list_for_each_entry ( d_inf, queue, run_list )
     {
-        d_inf = list_entry(list, struct at_dom_info, run_list);
         d = d_inf->owner;
         printk("%3d: %d has=%c ", loop++, d->id, 
-                                    test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
+               test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
         at_dump_runq_el(d);
         printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
         printk("         l: %lx n: %lx  p: %lx\n",
-                        (unsigned long)list, (unsigned long)list->next,
-                        (unsigned long)list->prev);
+               (unsigned long)&d_inf->run_list,
+               (unsigned long)d_inf->run_list.next,
+               (unsigned long)d_inf->run_list.prev);
     }
 
 
     queue = WAITQ(cpu);
     printk("\nWAITQUEUE rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
-    (unsigned long) queue->next, (unsigned long) queue->prev);
+           (unsigned long) queue->next, (unsigned long) queue->prev);
 
-    list_for_each ( list, queue )
+    list_for_each_entry ( d_inf, queue, waitq )
     {
-        d_inf = list_entry(list, struct at_dom_info, waitq);
         d = d_inf->owner;
         printk("%3d: %d has=%c ", loop++, d->id, 
-                                    test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
+               test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
         at_dump_runq_el(d);
         printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
         printk("         l: %lx n: %lx  p: %lx\n",
-                        (unsigned long)list, (unsigned long)list->next,
-                        (unsigned long)list->prev);
+               (unsigned long)&d_inf->waitq,
+               (unsigned long)d_inf->waitq.next,
+               (unsigned long)d_inf->waitq.prev);
     }
        
 }
diff --git a/xen/common/sched_bvt.c b/xen/common/sched_bvt.c
index 1d447b718b1398a2b3dcb17b5dac37c133a498f2..6f0a79fad1ab59e67c9276a47dc4685739d638a1 100644
@@ -348,7 +348,6 @@ int bvt_adjdom(
 static task_slice_t bvt_do_schedule(s_time_t now)
 {
     struct domain      *prev = current, *next = NULL, *next_prime, *p; 
-    struct list_head   *tmp;
     int                 cpu = prev->processor;
     s32                 r_time;     /* time for new dom to run */
     u32                 next_evt, next_prime_evt, min_avt;
@@ -392,10 +391,8 @@ static task_slice_t bvt_do_schedule(s_time_t now)
     next_prime_evt = ~0U;
     min_avt        = ~0U;
 
-    list_for_each ( tmp, RUNQUEUE(cpu) )
+    list_for_each_entry ( p_inf, RUNQUEUE(cpu), run_list )
     {
-        p_inf = list_entry(tmp, struct bvt_dom_info, run_list);
-
         if ( p_inf->evt < next_evt )
         {
             next_prime_inf  = next_inf;
@@ -505,7 +502,7 @@ static void bvt_dump_settings(void)
 
 static void bvt_dump_cpu_state(int i)
 {
-    struct list_head *list, *queue;
+    struct list_head *queue;
     int loop = 0;
     struct bvt_dom_info *d_inf;
     struct domain *d;
@@ -516,17 +513,15 @@ static void bvt_dump_cpu_state(int i)
     printk("QUEUE rq %lx   n: %lx, p: %lx\n",  (unsigned long)queue,
            (unsigned long) queue->next, (unsigned long) queue->prev);
 
-    list_for_each ( list, queue )
+    list_for_each_entry ( d_inf, queue, run_list )
     {
-        d_inf = list_entry(list, struct bvt_dom_info, run_list);
         d = d_inf->domain;
         printk("%3d: %u has=%c ", loop++, d->id,
                test_bit(DF_RUNNING, &d->flags) ? 'T':'F');
         bvt_dump_runq_el(d);
         printk("c=0x%X%08X\n", (u32)(d->cpu_time>>32), (u32)d->cpu_time);
-        printk("         l: %lx n: %lx  p: %lx\n",
-               (unsigned long)list, (unsigned long)list->next,
-               (unsigned long)list->prev);
+        printk("         l: %p n: %p  p: %p\n",
+               &d_inf->run_list, d_inf->run_list.next, d_inf->run_list.prev);
     }
 }
 
diff --git a/xen/common/sched_rrobin.c b/xen/common/sched_rrobin.c
index fbf17ab84cf4219228f80b7ad6ad1df6d1b984c1..2164ce22b411fb8d6fcfb767458bdbbd71641b8f 100644
@@ -187,7 +187,7 @@ static void rr_dump_domain(struct domain *d)
 
 static void rr_dump_cpu_state(int i)
 {
-    struct list_head *list, *queue;
+    struct list_head *queue;
     int loop = 0;
     struct rrobin_dom_info *d_inf;
 
@@ -199,10 +199,9 @@ static void rr_dump_cpu_state(int i)
     d_inf = list_entry(queue, struct rrobin_dom_info, run_list);
     rr_dump_domain(d_inf->domain);
  
-    list_for_each ( list, queue )
+    list_for_each_entry ( d_inf, queue, run_list )
     {
         printk("%3d: ",loop++);
-        d_inf = list_entry(list, struct rrobin_dom_info, run_list);
         rr_dump_domain(d_inf->domain);
     }
 }
diff --git a/xen/common/slab.c b/xen/common/slab.c
index 3e91a6a0582d68bba999af426932e7759c405976..cfbf4022615ace56ffc4539a68f47923f5a9fc0b 100644
@@ -774,11 +774,9 @@ xmem_cache_create (const char *name, size_t size, size_t offset,
     /* Need the semaphore to access the chain. */
     down(&cache_chain_sem);
     {
-        struct list_head *p;
-
-        list_for_each(p, &cache_chain) {
-            xmem_cache_t *pc = list_entry(p, xmem_cache_t, next);
+        xmem_cache_t *pc;
 
+        list_for_each_entry(pc, &cache_chain, next) {
             /* The name field is constant - no lock needed. */
             if (!strcmp(pc->name, name))
                 BUG();
@@ -802,14 +800,14 @@ xmem_cache_create (const char *name, size_t size, size_t offset,
  */
 static int is_chained_xmem_cache(xmem_cache_t * cachep)
 {
-    struct list_head *p;
+    xmem_cache_t *pc;
     int ret = 0;
     unsigned long spin_flags;
 
     /* Find the cache in the chain of caches. */
     down(&cache_chain_sem);
-    list_for_each(p, &cache_chain) {
-        if (p == &cachep->next) {
+    list_for_each_entry(pc, &cache_chain, next) {
+        if (pc == cachep) {
             ret = 1;
             break;
         }
@@ -1765,7 +1763,6 @@ void dump_slabinfo()
     p = &cache_cache.next;
     do {
         xmem_cache_t   *cachep;
-        struct list_head *q;
         slab_t         *slabp;
         unsigned long  active_objs;
         unsigned long  num_objs;
@@ -1776,22 +1773,19 @@ void dump_slabinfo()
         spin_lock_irq(&cachep->spinlock);
         active_objs = 0;
         num_slabs = 0;
-        list_for_each(q,&cachep->slabs_full) {
-            slabp = list_entry(q, slab_t, list);
+        list_for_each_entry(slabp, &cachep->slabs_full, list) {
             if (slabp->inuse != cachep->num)
                 BUG();
             active_objs += cachep->num;
             active_slabs++;
         }
-        list_for_each(q,&cachep->slabs_partial) {
-            slabp = list_entry(q, slab_t, list);
+        list_for_each_entry(slabp, &cachep->slabs_partial, list) {
             if (slabp->inuse == cachep->num || !slabp->inuse)
                 BUG();
             active_objs += slabp->inuse;
             active_slabs++;
         }
-        list_for_each(q,&cachep->slabs_free) {
-            slabp = list_entry(q, slab_t, list);
+        list_for_each_entry(slabp, &cachep->slabs_free, list) {
             if (slabp->inuse)
                 BUG();
             num_slabs++;
diff --git a/xen/drivers/pci/pci.c b/xen/drivers/pci/pci.c
index 4ae22d07e09e0dcb73abebb2a365f0577a5e4003..50a4ebb5e07e91951d46fe15b73ef4970cfacf1c 100644
@@ -1565,15 +1565,15 @@ static int pci_pm_resume_device(struct pci_dev *dev)
 
 static int pci_pm_save_state_bus(struct pci_bus *bus, u32 state)
 {
-       struct list_head *list;
+       struct pci_bus *i;
        int error = 0;
 
-       list_for_each(list, &bus->children) {
-               error = pci_pm_save_state_bus(pci_bus_b(list),state);
+       list_for_each_entry(i, &bus->children, node) {
+               error = pci_pm_save_state_bus(i, state);
                if (error) return error;
        }
-       list_for_each(list, &bus->devices) {
-               error = pci_pm_save_state_device(pci_dev_b(list),state);
+       list_for_each_entry(i, &bus->devices, node) {
+               error = pci_pm_save_state_device(i, state);
                if (error) return error;
        }
        return 0;
@@ -1581,40 +1581,38 @@ static int pci_pm_save_state_bus(struct pci_bus *bus, u32 state)
 
 static int pci_pm_suspend_bus(struct pci_bus *bus, u32 state)
 {
-       struct list_head *list;
+       struct pci_bus *i;
 
        /* Walk the bus children list */
-       list_for_each(list, &bus->children)
-               pci_pm_suspend_bus(pci_bus_b(list),state);
+       list_for_each_entry(i, &bus->children, node)
+               pci_pm_suspend_bus(i, state);
 
        /* Walk the device children list */
-       list_for_each(list, &bus->devices)
-               pci_pm_suspend_device(pci_dev_b(list),state);
+       list_for_each_entry(i, &bus->devices, node)
+               pci_pm_suspend_device(i, state);
        return 0;
 }
 
 static int pci_pm_resume_bus(struct pci_bus *bus)
 {
-       struct list_head *list;
+       struct pci_bus *i;
 
        /* Walk the device children list */
-       list_for_each(list, &bus->devices)
-               pci_pm_resume_device(pci_dev_b(list));
+       list_for_each_entry(i, &bus->devices, node)
+               pci_pm_resume_device(i);
 
        /* And then walk the bus children */
-       list_for_each(list, &bus->children)
-               pci_pm_resume_bus(pci_bus_b(list));
+       list_for_each_entry(i, &bus->children, node)
+               pci_pm_resume_bus(i);
        return 0;
 }
 
 static int pci_pm_save_state(u32 state)
 {
-       struct list_head *list;
        struct pci_bus *bus;
        int error = 0;
 
-       list_for_each(list, &pci_root_buses) {
-               bus = pci_bus_b(list);
+       list_for_each_entry(bus, &pci_root_buses, node) {
                error = pci_pm_save_state_bus(bus,state);
                if (!error)
                        error = pci_pm_save_state_device(bus->self,state);
@@ -1624,11 +1622,9 @@ static int pci_pm_save_state(u32 state)
 
 static int pci_pm_suspend(u32 state)
 {
-       struct list_head *list;
        struct pci_bus *bus;
 
-       list_for_each(list, &pci_root_buses) {
-               bus = pci_bus_b(list);
+       list_for_each_entry(bus, &pci_root_buses, node) {
                pci_pm_suspend_bus(bus,state);
                pci_pm_suspend_device(bus->self,state);
        }
@@ -1637,11 +1633,9 @@ static int pci_pm_suspend(u32 state)
 
 int pci_pm_resume(void)
 {
-       struct list_head *list;
        struct pci_bus *bus;
 
-       list_for_each(list, &pci_root_buses) {
-               bus = pci_bus_b(list);
+       list_for_each_entry(bus, &pci_root_buses, node) {
                pci_pm_resume_device(bus->self);
                pci_pm_resume_bus(bus);
        }
diff --git a/xen/include/xen/list.h b/xen/include/xen/list.h
index cc38a310cdc640f6b8386575645f91b266fd1930..7b19bb4650bbca57853c8420a5af4a31bf984b68 100644
@@ -161,8 +161,6 @@ static __inline__ void list_splice(struct list_head *list, struct list_head *hea
        for (pos = (head)->next, n = pos->next; pos != (head); \
                pos = n, n = pos->next)
 
-#endif
-
 /**
  * list_for_each_entry -       iterate over list of given type
  * @pos:       the type * to use as a loop counter.
@@ -175,3 +173,6 @@ static __inline__ void list_splice(struct list_head *list, struct list_head *hea
             &pos->member != (head);                                    \
             pos = list_entry(pos->member.next, typeof(*pos), member),  \
                     prefetch(pos->member.next))
+
+#endif /* _LINUX_LIST_H */
+
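The sched_atropos.c hunks above also rely on a property of the list_for_each_entry() macro whose tail is visible in this hunk: when the loop runs to completion without break-ing, the cursor is left as list_entry(head, type, member), so &pos->member == head afterwards. That is what requeue()'s post-loop checks use to decide whether to fall back to list_add_tail(). A hedged sketch of that ordered-insert idiom, reusing the illustrative struct foo from the note above (this helper is not part of the changeset):

    /* Insert nf into a list kept sorted by ascending ->id, appending at
     * the tail if no larger element exists. */
    static void foo_insert_sorted(struct list_head *head, struct foo *nf)
    {
        struct foo *f;

        list_for_each_entry ( f, head, list )
        {
            if ( f->id > nf->id )
            {
                __list_add(&nf->list, f->list.prev, &f->list);
                break;
            }
        }

        /* Loop fell off the end: the cursor's member aliases the head. */
        if ( &f->list == head )
            list_add_tail(&nf->list, head);
    }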
diff --git a/xen/include/xen/pci.h b/xen/include/xen/pci.h
index c81cd0d57dc35be4d44e505999febe40c9b56f89..5f8740a31cd90b43a5e01b86970c6ef9a5b786e5 100644
@@ -358,7 +358,7 @@ enum pci_mmap_state {
        for(dev = pci_dev_g(pci_devices.prev); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.prev))
 
 #define pci_for_each_bus(bus) \
-for(bus = pci_bus_b(pci_root_buses.next); bus != pci_bus_b(&pci_root_buses); bus = pci_bus_b(bus->node.next))
+       list_for_each_entry(bus, &pci_root_buses, node)
 
 /*
  * The pci_dev structure is used to describe both PCI and ISAPnP devices.