[SCM] Gerris Flow Solver branch, upstream, updated. e8f73a07832050124d2b8bf6c6f35b33180e65a8

Stephane Popinet <popinet at users.sf.net>
Tue Nov 24 12:24:26 UTC 2009


The following commit has been merged in the upstream branch:
commit acf37534b6d504f2903e7c19428a1faa58f95745
Author: Stephane Popinet <popinet at users.sf.net>
Date:   Sun Jun 28 21:05:55 2009 +1000

    GfsBoundaryMpi is included by default (even in the serial version)
    
    This simplifies the code and also means that the serial version can be used
    to join and load simulation files generated by parallel runs (e.g. a serial
    version of GfsView run on a different system will now be able to visualise
    the results of a parallel run).
    
    darcs-hash:20090628110555-d4795-1e098eae8dfacefe53efa79a50b87859c3e9e2bd.gz
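
    [Editor's note: the diff below moves MPI support from build-time selection
    (an Automake HAVE_MPI conditional) to source-level guards, so mpi_boundary.c
    is always compiled and the GfsBoundaryMpi class is always registered, while
    only the MPI-specific members and communication hooks stay behind
    #ifdef HAVE_MPI. The following is a minimal sketch of that pattern; the
    struct and function names are illustrative assumptions, not Gerris code.]

    /* Sketch: the type is always available so serial builds can read/write
     * it in simulation files; actual communication compiles only with MPI. */
    #include "config.h"   /* defines HAVE_MPI when MPI support was detected */
    #ifdef HAVE_MPI
    #  include <mpi.h>
    #endif

    typedef struct {
      int process, id;    /* always present: stored in simulation files */
    #ifdef HAVE_MPI
      MPI_Comm comm;      /* exists only when compiled with MPI */
    #endif
    } BoundaryMpiSketch;

    static void boundary_send (BoundaryMpiSketch * b)
    {
    #ifdef HAVE_MPI
      /* a real implementation would post its MPI traffic here */
      MPI_Send (&b->id, 1, MPI_INT, b->process, 0, b->comm);
    #else
      (void) b;           /* serial build: loading works, sending is a no-op */
    #endif
    }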

diff --git a/configure.in b/configure.in
index ac16f19..036e5d9 100644
--- a/configure.in
+++ b/configure.in
@@ -90,7 +90,6 @@ if test "x$with_mpicc" != "xno" ; then
      AC_MSG_WARN([MPI not found. No MPI support will be compiled in.])
   fi
 fi
-AM_CONDITIONAL(HAVE_MPI, test "x$use_mpicc" = "xyes")
 
 AC_PROG_CC
 
diff --git a/doc/examples/classes.c b/doc/examples/classes.c
index d8ba6fb..4e42203 100644
--- a/doc/examples/classes.c
+++ b/doc/examples/classes.c
@@ -14,6 +14,7 @@ int main (int argc, char * argv[])
 {
   GtsObjectClass ** klass;
 
+  gfs_init (&argc, &argv);
   klass = gfs_classes ();
 
   printf ("# Language file for source-highlight\n"
diff --git a/src/Makefile.am b/src/Makefile.am
index ddcfb76..d9ed841 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -26,14 +26,6 @@ BUILT_SOURCES= \
 	gerris2D3.pc \
 	gerris3D.pc
 
-if HAVE_MPI
-MPI_SRC = mpi_boundary.c
-MPI_HDS = mpi_boundary.h
-else
-MPI_SRC = 
-MPI_HDS = 
-endif
-
 GFS_HDS = \
 	ftt.h \
 	fluid.h \
@@ -43,6 +35,7 @@ GFS_HDS = \
 	poisson.h \
 	advection.h \
 	boundary.h \
+	mpi_boundary.h \
 	timestep.h \
 	domain.h \
 	init.h \
@@ -65,8 +58,7 @@ GFS_HDS = \
 	map.h \
 	river.h \
 	moving.h \
-	version.h \
-	$(MPI_HDS)
+	version.h
 
 pkginclude_HEADERS = \
 	$(GFS_HDS) \
@@ -86,9 +78,9 @@ SRC = \
 	poisson.c \
 	advection.c \
 	boundary.c \
+	mpi_boundary.c \
 	timestep.c \
 	domain.c \
-	$(MPI_SRC) \
 	init.c \
 	refine.c \
 	event.c \
@@ -134,8 +126,6 @@ libgfs2D3_la_CFLAGS = $(AM_CFLAGS) -DFTT_2D3=1
 CLEANFILES = $(BUILT_SOURCES)
 
 EXTRA_DIST = \
-	mpi_boundary.c \
-	mpi_boundary.h \
 	ftt_internal.c \
 	moving2.c \
 	m4.awk
diff --git a/src/domain.c b/src/domain.c
index cdf1e2b..0ce0496 100644
--- a/src/domain.c
+++ b/src/domain.c
@@ -29,13 +29,10 @@
 #include "source.h"
 #include "solid.h"
 #include "adaptive.h"
+#include "mpi_boundary.h"
 #include "version.h"
 
 #include "config.h"
-#ifdef HAVE_MPI
-#  include "mpi_boundary.h"
-#  include "init.h"
-#endif /* HAVE_MPI */
 
 /* GfsDomain: Object */
 
@@ -213,7 +210,6 @@ static void set_ref_pos (GfsBox * box, FttVector * pos)
     box_set_pos (box, pos, FTT_RIGHT);
 }
 
-#ifdef HAVE_MPI
 static void removed_list (GfsBox * box, gpointer * data)
 {
   GfsDomain * domain = data[0];
@@ -257,7 +253,6 @@ static void mpi_links (GfsBox * box, GfsDomain * domain)
 			    FTT_OPPOSITE_DIRECTION (d), 
 			    pid, id);
 }
-#endif /* HAVE_MPI */
 
 static void add_id (GfsBox * box, GPtrArray * ids)
 {
@@ -276,7 +271,6 @@ static GPtrArray * box_ids (GfsDomain * domain)
 
 static void convert_boundary_mpi_into_edges (GfsBox * box, GPtrArray * ids)
 {
-#ifdef HAVE_MPI
   gint pid = gfs_box_domain (box)->pid;
   FttDirection d;
 
@@ -305,9 +299,6 @@ static void convert_boundary_mpi_into_edges (GfsBox * box, GPtrArray * ids)
     }
   if (pid >= 0)
     box->pid = pid;
-#else /* not HAVE_MPI */
-  g_assert_not_reached ();
-#endif /* not HAVE_MPI */
 }
 
 static void domain_post_read (GfsDomain * domain, GtsFile * fp)
@@ -315,28 +306,26 @@ static void domain_post_read (GfsDomain * domain, GtsFile * fp)
   gts_graph_foreach_edge (GTS_GRAPH (domain), (GtsFunc) gfs_gedge_link_boxes, NULL);
 
   if (domain->pid >= 0) { /* Multiple PEs */
-#ifdef HAVE_MPI
     GSList * removed = NULL;
     guint np = 0;
     gpointer data[3];
-    int comm_size;
     
     gts_container_foreach (GTS_CONTAINER (domain), (GtsFunc) set_ref_pos, &domain->refpos);
     data[0] = domain;
     data[1] = &removed;
     data[2] = &np;
     gts_container_foreach (GTS_CONTAINER (domain), (GtsFunc) removed_list, data);
+#ifdef HAVE_MPI
+    int comm_size;
     MPI_Comm_size (MPI_COMM_WORLD, &comm_size);
     if (np + 1 != comm_size) {
       g_slist_free (removed);
       gts_file_error (fp, "it would be valid if one or %d PE were used", np + 1);
       return;
     }
+#endif /* HAVE_MPI */
     g_slist_foreach (removed, (GFunc) mpi_links, domain);
     g_slist_free (removed);
-#else /* not HAVE_MPI */
-    g_assert_not_reached ();
-#endif /* not HAVE_MPI */
   }
   else { /* Single PE */
     /* Create array for fast linking of ids to GfsBox pointers */
@@ -1406,6 +1395,10 @@ static void domain_range_reduce (GfsDomain * domain, GtsRange * s)
     s->n = out[4];
   }
 }
+#else /* not HAVE_MPI */
+static void domain_range_reduce (GfsDomain * domain, GtsRange * s)
+{
+}
 #endif /* HAVE_MPI */
 
 /**
@@ -1436,9 +1429,7 @@ GtsRange gfs_domain_stats_variable (GfsDomain * domain,
   data[1] = v;
   gfs_domain_cell_traverse (domain, FTT_PRE_ORDER, flags, max_depth, 
 			   (FttCellTraverseFunc) add_stats, data);
-#ifdef HAVE_MPI
   domain_range_reduce (domain, &s);
-#endif /* HAVE_MPI */
   gts_range_update (&s);
 
   return s;
@@ -1467,9 +1458,7 @@ GtsRange gfs_domain_stats_solid (GfsDomain * domain)
   gts_range_init (&s);
   gfs_domain_traverse_mixed (domain, FTT_PRE_ORDER, FTT_TRAVERSE_LEAFS,
 			    (FttCellTraverseFunc) add_stats_solid, &s);
-#ifdef HAVE_MPI
   domain_range_reduce (domain, &s);
-#endif /* HAVE_MPI */
   gts_range_update (&s);
 
   return s;
@@ -1524,10 +1513,8 @@ void gfs_domain_stats_merged (GfsDomain * domain,
   data[1] = number;
   gfs_domain_traverse_merged (domain,
 			     (GfsMergedTraverseFunc) add_stats_merged, data);
-#ifdef HAVE_MPI
   domain_range_reduce (domain, solid);
   domain_range_reduce (domain, number);
-#endif /* HAVE_MPI */
   gts_range_update (solid);
   gts_range_update (number);
 }
@@ -1555,12 +1542,8 @@ static void boundary_size (GfsBox * box, GArray * a)
   guint count = 0;
 
   for (d = 0; d < FTT_NEIGHBORS; d++)
-    if (
-#ifdef HAVE_MPI
-	GFS_IS_BOUNDARY_MPI (box->neighbor[d]) ||
-#endif
-	(GFS_IS_BOX (box->neighbor[d]) && GFS_BOX (box->neighbor[d])->pid != box->pid)
-       )
+    if (GFS_IS_BOUNDARY_MPI (box->neighbor[d]) ||
+	(GFS_IS_BOX (box->neighbor[d]) && GFS_BOX (box->neighbor[d])->pid != box->pid))
       ftt_cell_traverse_boundary (box->root, d, FTT_PRE_ORDER, FTT_TRAVERSE_LEAFS, -1,
 				  (FttCellTraverseFunc) cell_count, &count);
   g_array_index (a, guint, BPID (box)) += count;
@@ -1611,11 +1594,9 @@ void gfs_domain_stats_balance (GfsDomain * domain,
     if (v > 0)
       gts_range_add_value (boundary, v);
   }
-#ifdef HAVE_MPI
   domain_range_reduce (domain, size);
   domain_range_reduce (domain, boundary);
   domain_range_reduce (domain, mpiwait);
-#endif /* HAVE_MPI */
   g_array_free (a, TRUE);
   gts_range_update (size);
   gts_range_update (boundary);
@@ -1672,7 +1653,11 @@ static void domain_norm_reduce (GfsDomain * domain, GfsNorm * n)
     n->w = out[4];
   }
 }
-#endif /* HAVE_MPI */
+#else /* not HAVE_MPI */
+static void domain_norm_reduce (GfsDomain * domain, GfsNorm * n)
+{
+}
+#endif /* not HAVE_MPI */
 
 /**
  * gfs_domain_norm_variable:
@@ -1712,9 +1697,7 @@ GfsNorm gfs_domain_norm_variable (GfsDomain * domain,
   else
     gfs_domain_cell_traverse (domain, FTT_PRE_ORDER, flags, max_depth, 
 			      (FttCellTraverseFunc) add_norm, data);
-#ifdef HAVE_MPI
   domain_norm_reduce (domain, &n);
-#endif /* HAVE_MPI */
   gfs_norm_update (&n);
 
   return n;
@@ -1762,9 +1745,7 @@ GfsNorm gfs_domain_norm_residual (GfsDomain * domain,
   data[1] = &n;
   gfs_domain_cell_traverse (domain, FTT_PRE_ORDER, flags, max_depth, 
 			   (FttCellTraverseFunc) add_norm_residual, data);
-#ifdef HAVE_MPI
   domain_norm_reduce (domain, &n);
-#endif /* HAVE_MPI */
   gfs_norm_update (&n);
 
   dt *= dt;
@@ -1829,9 +1810,7 @@ GfsNorm gfs_domain_norm_velocity (GfsDomain * domain,
   data[1] = &n;
   gfs_domain_cell_traverse (domain, FTT_PRE_ORDER, flags, max_depth, 
 			   (FttCellTraverseFunc) add_norm_velocity, data);
-#ifdef HAVE_MPI
   domain_norm_reduce (domain, &n);
-#endif /* HAVE_MPI */
   gfs_norm_update (&n);
 
   return n;
@@ -3958,7 +3937,6 @@ GSList * gfs_receive_objects (GfsDomain * domain, int src)
 
 static void unlink_box (GfsBox * box, gint * dest)
 {
-#ifdef HAVE_MPI
   FttDirection d;
   for (d = 0; d < FTT_NEIGHBORS; d++)
     if (GFS_IS_BOX (box->neighbor[d])) {
@@ -3969,9 +3947,6 @@ static void unlink_box (GfsBox * box, gint * dest)
       box->neighbor[d] = NULL;
       gfs_boundary_mpi_new (gfs_boundary_mpi_class (), box, d, nbox->pid, nbox->id);
     }
-#else /* doesn't HAVE_MPI */
-  g_assert_not_reached ();
-#endif
 }
 
 static void setup_binary_IO (GfsDomain * domain)
diff --git a/src/init.c b/src/init.c
index f07cbc9..b0064a8 100644
--- a/src/init.c
+++ b/src/init.c
@@ -28,6 +28,7 @@
 #include <locale.h>
 
 #include "boundary.h"
+#include "mpi_boundary.h"
 #include "init.h"
 #include "refine.h"
 #include "output.h"
@@ -46,20 +47,20 @@
 
 #ifdef HAVE_MPI
 # include <mpi.h>
-# include "mpi_boundary.h"
 #endif /* HAVE_MPI */
 
 static void gfs_log (const gchar * log_domain,
 		     GLogLevelFlags log_level,
 		     const gchar * message)
 {
-  int rank = -1, type = 0;
+  int type = 0;
   gchar * pe;
   const gchar stype[][10] = {
     "ERROR", "CRITICAL", "WARNING", "MESSAGE", "INFO", "DEBUG"
   };
 
 #ifdef HAVE_MPI
+  int rank = -1;
   MPI_Comm_size (MPI_COMM_WORLD, &rank);
   if (rank > 1)
     MPI_Comm_rank (MPI_COMM_WORLD, &rank);
@@ -128,6 +129,7 @@ GtsObjectClass ** gfs_classes (void)
     gfs_boundary_outflow_class (),
     gfs_boundary_gradient_class (),
     gfs_boundary_periodic_class (),
+      gfs_boundary_mpi_class (),
 
   gfs_refine_class (),
     gfs_refine_solid_class (),
@@ -292,9 +294,6 @@ void gfs_init (int * argc, char *** argv)
 
   /* Instantiates classes before reading any domain or simulation file */
   gfs_classes ();
-#ifdef HAVE_MPI
-  gfs_boundary_mpi_class ();
-#endif /* HAVE_MPI */
 
   /* If modules are not supported, calls modules init functions */
 #include "modules.c"
diff --git a/src/mpi_boundary.c b/src/mpi_boundary.c
index 0bf07c2..e0063f5 100644
--- a/src/mpi_boundary.c
+++ b/src/mpi_boundary.c
@@ -22,8 +22,6 @@
 #include "mpi_boundary.h"
 #include "adaptive.h"
 
-/* #define DEBUG mpi_debug */
-
 static void boundary_mpi_write (GtsObject * o, FILE * fp)
 {
   (* GTS_OBJECT_CLASS (gfs_boundary_mpi_class ())->parent_class->write) (o, fp);
@@ -46,9 +44,9 @@ static void boundary_mpi_read (GtsObject ** o, GtsFile * fp)
     GFS_BOUNDARY_MPI (*o)->process = GFS_BOUNDARY_MPI (*o)->id = -1;
 }
 
-#ifdef DEBUG
-FILE * mpi_debug = NULL;
-#endif
+#ifdef HAVE_MPI
+
+/* #define DEBUG mpi_debug */
 
 static guint tag_shift = 32767/FTT_NEIGHBORS;
 
@@ -56,6 +54,10 @@ static guint tag_shift = 32767/FTT_NEIGHBORS;
 #define MATCHING_TAG(boundary)  (tag_shift*FTT_OPPOSITE_DIRECTION ((boundary)->d) +\
                                  GFS_BOUNDARY_MPI (boundary)->id)
 
+#ifdef DEBUG
+FILE * mpi_debug = NULL;
+#endif
+
 static void send (GfsBoundary * bb)
 {
   GfsBoundaryPeriodic * boundary = GFS_BOUNDARY_PERIODIC (bb);
@@ -209,22 +211,26 @@ static void synchronize (GfsBoundary * bb)
   (* gfs_boundary_periodic_class ()->synchronize) (bb);
 }
 
+#endif /* HAVE_MPI */
+
 static void gfs_boundary_mpi_class_init (GfsBoundaryClass * klass)
 {
   GTS_OBJECT_CLASS (klass)->read = boundary_mpi_read;
   GTS_OBJECT_CLASS (klass)->write = boundary_mpi_write;
+#ifdef HAVE_MPI
   klass->send        = send;
   klass->receive     = receive;
   klass->synchronize = synchronize;
+#endif /* HAVE_MPI */
 }
 
 static void gfs_boundary_mpi_init (GfsBoundaryMpi * boundary)
 {
-  boundary->comm = MPI_COMM_WORLD;
   boundary->process = -1; 
   boundary->id = -1;
-
+#ifdef HAVE_MPI
   boundary->nrequest = 0;
+  boundary->comm = MPI_COMM_WORLD;
 #ifdef DEBUG
   if (mpi_debug == NULL) {
     int rank;
@@ -234,6 +240,7 @@ static void gfs_boundary_mpi_init (GfsBoundaryMpi * boundary)
     g_free (fname);
   }
 #endif
+#endif /* HAVE_MPI */
 }
 
 GfsBoundaryClass * gfs_boundary_mpi_class (void)
@@ -250,16 +257,17 @@ GfsBoundaryClass * gfs_boundary_mpi_class (void)
       (GtsArgSetFunc) NULL,
       (GtsArgGetFunc) NULL
     };
-    int * tagub, flag, maxtag;
-
     klass = gts_object_class_new (GTS_OBJECT_CLASS (gfs_boundary_periodic_class ()),
 				  &gfs_boundary_mpi_info);
+#ifdef HAVE_MPI
+    int * tagub, flag, maxtag;
     MPI_Attr_get (MPI_COMM_WORLD, MPI_TAG_UB, &tagub, &flag);
     if (flag)
       maxtag = *tagub;
     else
       maxtag = 32767; /* minimum value from MPI standard specification */
     tag_shift = maxtag/FTT_NEIGHBORS;
+#endif /* HAVE_MPI */
   }
 
   return klass;
@@ -272,19 +280,18 @@ GfsBoundaryMpi * gfs_boundary_mpi_new (GfsBoundaryClass * klass,
 				       gint id)
 {
   GfsBoundaryMpi * boundary;
+#ifdef HAVE_MPI
   int comm_size;
-
   MPI_Comm_size (MPI_COMM_WORLD, &comm_size);
-
   g_return_val_if_fail (process >= 0 && process < comm_size, NULL);
 
-  boundary = GFS_BOUNDARY_MPI (gfs_boundary_periodic_new (klass, box, d, NULL));
-  boundary->process = process;
-  boundary->id = id;
-
   if (id >= tag_shift)
     g_warning ("GfsBoundaryMpi id (%d) is larger than the maximum MPI tag value\n"
 	       "allowed on this system (%d)", id, tag_shift);
+#endif /* HAVE_MPI */
+  boundary = GFS_BOUNDARY_MPI (gfs_boundary_periodic_new (klass, box, d, NULL));
+  boundary->process = process;
+  boundary->id = id;
 
   return boundary;
 }
diff --git a/src/mpi_boundary.h b/src/mpi_boundary.h
index 737aef5..837f89d 100644
--- a/src/mpi_boundary.h
+++ b/src/mpi_boundary.h
@@ -20,7 +20,6 @@
 #ifndef __MPI_BOUNDARY_H__
 #define __MPI_BOUNDARY_H__
 
-#include <mpi.h>
 #include "boundary.h"
 
 #ifdef __cplusplus
@@ -29,17 +28,26 @@ extern "C" {
 
 typedef struct _GfsBoundaryMpi         GfsBoundaryMpi;
 
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#  ifdef HAVE_MPI
+#    include <mpi.h>
+#  endif
+
 struct _GfsBoundaryMpi {
   /*< private >*/
   GfsBoundaryPeriodic parent;
-
-  MPI_Comm comm;
   gint process, id;
 
+#ifdef HAVE_MPI
+  MPI_Comm comm;
   MPI_Request request[2];
   guint nrequest;
+#endif /* HAVE_MPI */
 };
 
+#endif /* HAVE_CONFIG_H */
+
 #define GFS_BOUNDARY_MPI(obj)            GTS_OBJECT_CAST (obj,\
 					           GfsBoundaryMpi,\
 					           gfs_boundary_mpi_class ())
diff --git a/src/poisson.c b/src/poisson.c
index d03b68c..fe749c5 100644
--- a/src/poisson.c
+++ b/src/poisson.c
@@ -23,11 +23,6 @@
 #include "source.h"
 #include "tension.h"
 
-#include "config.h"
-#ifdef HAVE_MPI
-#  include "mpi_boundary.h"
-#endif
-
 /**
  * gfs_multilevel_params_write:
  * @par: the multilevel parameters.

-- 
Gerris Flow Solver


