ordered.c

Go to the documentation of this file.
00001 /* Copyright (C) 2005, 2009 Free Software Foundation, Inc.
00002    Contributed by Richard Henderson <rth@redhat.com>.
00003 
00004    This file is part of the GNU OpenMP Library (libgomp).
00005 
00006    Libgomp is free software; you can redistribute it and/or modify it
00007    under the terms of the GNU General Public License as published by
00008    the Free Software Foundation; either version 3, or (at your option)
00009    any later version.
00010 
00011    Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
00012    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
00013    FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
00014    more details.
00015 
00016    Under Section 7 of GPL version 3, you are granted additional
00017    permissions described in the GCC Runtime Library Exception, version
00018    3.1, as published by the Free Software Foundation.
00019 
00020    You should have received a copy of the GNU General Public License and
00021    a copy of the GCC Runtime Library Exception along with this program;
00022    see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
00023    <http://www.gnu.org/licenses/>.  */
00024 
00025 /* This file handles the ORDERED construct.  */
00026 
00027 #include "libgomp.h"
00028 
00029 
00030 /* This function is called when first allocating an iteration block.  That
00031    is, the thread is not currently on the queue.  The work-share lock must
00032    be held on entry.  */
00033 
00034 void
00035 gomp_ordered_first (void)
00036 {
00037   struct gomp_thread *thr = gomp_thread ();
00038   struct gomp_team *team = thr->ts.team;
00039   struct gomp_work_share *ws = thr->ts.work_share;
00040   unsigned index;
00041 
00042   /* Work share constructs can be orphaned.  */
00043   if (team == NULL || team->nthreads == 1)
00044     return;
00045 
00046   index = ws->ordered_cur + ws->ordered_num_used;
00047   if (index >= team->nthreads)
00048     index -= team->nthreads;
00049   ws->ordered_team_ids[index] = thr->ts.team_id;
00050 
00051   /* If this is the first and only thread in the queue, then there is
00052      no one to release us when we get to our ordered section.  Post to
00053      our own release queue now so that we won't block later.  */
00054   if (ws->ordered_num_used++ == 0)
00055     gomp_sem_post (team->ordered_release[thr->ts.team_id]);
00056 }
00057 
00058 /* This function is called when completing the last iteration block.  That
00059    is, there are no more iterations to perform and so the thread should be
00060    removed from the queue entirely.  Because of the way ORDERED blocks are
00061    managed, it follows that we currently own access to the ORDERED block,
00062    and should now pass it on to the next thread.  The work-share lock must
00063    be held on entry.  */
00064 
00065 void
00066 gomp_ordered_last (void)
00067 {
00068   struct gomp_thread *thr = gomp_thread ();
00069   struct gomp_team *team = thr->ts.team;
00070   struct gomp_work_share *ws = thr->ts.work_share;
00071   unsigned next_id;
00072 
00073   /* Work share constructs can be orphaned.  */
00074   if (team == NULL || team->nthreads == 1)
00075     return;
00076 
00077   /* We're no longer the owner.  */
00078   ws->ordered_owner = -1;
00079 
00080   /* If we're not the last thread in the queue, then wake the next.  */
00081   if (--ws->ordered_num_used > 0)
00082     {
00083       unsigned next = ws->ordered_cur + 1;
00084       if (next == team->nthreads)
00085     next = 0;
00086       ws->ordered_cur = next;
00087 
00088       next_id = ws->ordered_team_ids[next];
00089       gomp_sem_post (team->ordered_release[next_id]);
00090     }
00091 }
00092 
00093 
00094 /* This function is called when allocating a subsequent allocation block.
00095    That is, we're done with the current iteration block and we're allocating
00096    another.  This is the logical combination of a call to gomp_ordered_last
00097    followed by a call to gomp_ordered_first.  The work-share lock must be
00098    held on entry. */
00099 
00100 void
00101 gomp_ordered_next (void)
00102 {
00103   struct gomp_thread *thr = gomp_thread ();
00104   struct gomp_team *team = thr->ts.team;
00105   struct gomp_work_share *ws = thr->ts.work_share;
00106   unsigned index, next_id;
00107 
00108   /* Work share constructs can be orphaned.  */
00109   if (team == NULL || team->nthreads == 1)
00110     return;
00111 
00112   /* We're no longer the owner.  */
00113   ws->ordered_owner = -1;
00114 
00115   /* If there's only one thread in the queue, that must be us.  */
00116   if (ws->ordered_num_used == 1)
00117     {
00118       /* We have a similar situation as in gomp_ordered_first
00119      where we need to post to our own release semaphore.  */
00120       gomp_sem_post (team->ordered_release[thr->ts.team_id]);
00121       return;
00122     }
00123 
00124   /* If the queue is entirely full, then we move ourself to the end of 
00125      the queue merely by incrementing ordered_cur.  Only if it's not 
00126      full do we have to write our id.  */
00127   if (ws->ordered_num_used < team->nthreads)
00128     {
00129       index = ws->ordered_cur + ws->ordered_num_used;
00130       if (index >= team->nthreads)
00131     index -= team->nthreads;
00132       ws->ordered_team_ids[index] = thr->ts.team_id;
00133     }
00134 
00135   index = ws->ordered_cur + 1;
00136   if (index == team->nthreads)
00137     index = 0;
00138   ws->ordered_cur = index;
00139 
00140   next_id = ws->ordered_team_ids[index];
00141   gomp_sem_post (team->ordered_release[next_id]);
00142 }
00143 
00144 
00145 /* This function is called when a statically scheduled loop is first
00146    being created.  */
00147 
00148 void
00149 gomp_ordered_static_init (void)
00150 {
00151   struct gomp_thread *thr = gomp_thread ();
00152   struct gomp_team *team = thr->ts.team;
00153 
00154   if (team == NULL || team->nthreads == 1)
00155     return;
00156 
00157   gomp_sem_post (team->ordered_release[0]);
00158 }
00159 
00160 /* This function is called when a statically scheduled loop is moving to
00161    the next allocation block.  Static schedules are not first come first
00162    served like the others, so we're to move to the numerically next thread,
00163    not the next thread on a list.  The work-share lock should *not* be held
00164    on entry.  */
00165 
00166 void
00167 gomp_ordered_static_next (void)
00168 {
00169   struct gomp_thread *thr = gomp_thread ();
00170   struct gomp_team *team = thr->ts.team;
00171   struct gomp_work_share *ws = thr->ts.work_share;
00172   unsigned id = thr->ts.team_id;
00173 
00174   if (team == NULL || team->nthreads == 1)
00175     return;
00176 
00177   ws->ordered_owner = -1;
00178 
00179   /* This thread currently owns the lock.  Increment the owner.  */
00180   if (++id == team->nthreads)
00181     id = 0;
00182   ws->ordered_team_ids[0] = id;
00183   gomp_sem_post (team->ordered_release[id]);
00184 }
00185 
/* This function is called when we need to assert that the thread owns the
   ordered section.  Due to the problem of posted-but-not-waited semaphores,
   this needs to happen before completing a loop iteration.  */

void
gomp_ordered_sync (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;

  /* Work share constructs can be orphaned.  But this clearly means that
     we are the only thread, and so we automatically own the section.  */
  if (team == NULL || team->nthreads == 1)
    return;

  /* ??? I believe it to be safe to access this data without taking the
     ws->lock.  The only presumed race condition is with the previous
     thread on the queue incrementing ordered_cur such that it points
     to us, concurrently with our check below.  But our team_id is
     already present in the queue, and the other thread will always
     post to our release semaphore.  So the two cases are that we will
     either win the race and momentarily block on the semaphore, or lose
     the race and find the semaphore already unlocked and so not block.
     Either way we get correct results.  */

  /* Wait-then-mark order matters: only after the semaphore wait succeeds
     do we record ourselves as the owner, so a repeated call from the
     same iteration (owner already set) skips the wait.  */
  if (ws->ordered_owner != thr->ts.team_id)
    {
      gomp_sem_wait (team->ordered_release[thr->ts.team_id]);
      ws->ordered_owner = thr->ts.team_id;
    }
}
00218 
/* This function is called by user code when encountering the start of an
   ORDERED block.  We must check to see if the current thread is at the
   head of the queue, and if not, block.  */

#ifdef HAVE_ATTRIBUTE_ALIAS
/* Where supported, GOMP_ordered_start is emitted as a direct symbol
   alias for gomp_ordered_sync, avoiding a wrapper call.  */
extern void GOMP_ordered_start (void)
	__attribute__((alias ("gomp_ordered_sync")));
#else
void
GOMP_ordered_start (void)
{
  gomp_ordered_sync ();
}
#endif
00233 
/* This function is called by user code when encountering the end of an
   ORDERED block.  With the current ORDERED implementation there's nothing
   for us to do.

   However, the current implementation has a flaw in that it does not allow
   the next thread into the ORDERED section immediately after the current
   thread exits the ORDERED section in its last iteration.  The existence
   of this function allows the implementation to change.  */

void
GOMP_ordered_end (void)
{
}

Generated on Fri Apr 5 05:38:10 2013 for Libgomp by  doxygen 1.4.7