
root / synthbench / euroben-dm / mod2cr / mpimod.f @ 0:839f52ef7657


      Module mpi_module
! ---------------------------------------------------------------------
! --- This module contains information for communication and data
!     distribution.
! ---------------------------------------------------------------------
      Include               'mpif.h'

      Integer            :: me, nodes
      Integer, Parameter :: maxnod = 2048
      Integer            :: offset(0:maxnod-1), sizes(0:maxnod-1)
      Integer            :: glb, gub, la1, ua1, la2, ua2, la3, ua3,
     &                                lb1, ub1, lb2, ub2, lb3, ub3,
     &                                lb
      Integer            :: comm, ierr, ityp, rtyp
! ---------------------------------------------------------------------
      Contains

      Subroutine bsaddr
! ----------------------------------------------------------------------
! --- Routine 'bsaddr' calculates the offsets that occur when local
!     data must be copied into a global array that combines all local
!     data. As the sizes of the local arrays need not be the same,
!     offsets cannot be generated with local knowledge only.
!
! --- Input:  - Integer nodes              (via Module mpi_module)
!             - Integer sizes(0:maxnod-1)  (via Module mpi_module)
! --- Output: - Integer offset(0:maxnod-1) (via Module mpi_module)
!             - Integer glb, lb, gub       (via Module mpi_module)
! ----------------------------------------------------------------------
      Implicit   None

      Integer :: i
! ----------------------------------------------------------------------
      offset(0) = 0
      Do i = 1, nodes - 1
         offset(i) = offset(i-1) + sizes(i-1)
      End Do
      glb = offset(me)
      lb  = glb + 1
      If ( me < nodes - 1 ) Then
         gub = offset(me+1)
      Else
         gub = offset(me) + sizes(me)
      End If
! ----------------------------------------------------------------------
      End Subroutine bsaddr
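
! ----------------------------------------------------------------------
! --- Worked example (illustration added here, not in the original
!     source): with nodes = 3 and sizes = (/ 3, 4, 3 /), the loop in
!     'bsaddr' yields offset = (/ 0, 3, 7 /). Node me = 1 then gets
!     glb = 3, lb = 4 and gub = offset(2) = 7, i.e. it owns the
!     1-based global index range 4..7 (sizes(1) = 4 elements).
! ----------------------------------------------------------------------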

      Subroutine mpibye
! ----------------------------------------------------------------------
! --- 'mpibye' terminates the MPI environment.
! ----------------------------------------------------------------------
      Call MPI_Finalize( ierr )
! ----------------------------------------------------------------------
      End Subroutine mpibye

      Subroutine csetup
! ----------------------------------------------------------------------
! --- 'csetup' initializes the communication for MPI programs.
!
! --- Output parameters (in mpi_module):
!     Integer: me    --- Local node number (0 <= me <= nodes-1).
!     Integer: nodes --- Number of nodes as found in the MPI system.
! ----------------------------------------------------------------------
      Implicit   None

      Integer :: ierr
! ----------------------------------------------------------------------
! --- Initialize MPI; determine this processor's id and the number of
!     processors for this job. (In case of problems, report and stop.)
! ----------------------------------------------------------------------

      Call Mpi_Init( ierr )
      If ( ierr /= Mpi_Success ) Then
         Print *, 'Could not initialize MPI, ierr = ', ierr
         Stop 'Csetup stage 1'
      End If
      Call Mpi_Comm_Rank( Mpi_Comm_World, me, ierr )
      If ( ierr /= Mpi_Success ) Then
         Print *, 'Could not find my process id, ierr = ', ierr
         Stop 'Csetup stage 2'
      End If
      Call Mpi_Comm_Size( Mpi_Comm_World, nodes, ierr )
      If ( ierr /= Mpi_Success ) Then
         Print *, 'Could not determine no. of nodes, ierr = ', ierr
         Stop 'Csetup stage 3'
      End If
! ----------------------------------------------------------------------
! --- Be sure that all processors start together.

      Call Mpi_Barrier( Mpi_Comm_World, ierr )
! ----------------------------------------------------------------------
      End Subroutine csetup
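
! ----------------------------------------------------------------------
! --- Optional variant (a sketch, not in the original source): the raw
!     'ierr' codes printed in 'csetup' could be turned into readable
!     text with the standard MPI routine MPI_Error_string, e.g.
!
!        Character(Len=MPI_MAX_ERROR_STRING) :: msg
!        Integer                             :: mlen
!        Call MPI_Error_string( ierr, msg, mlen, ierr )
!        Print *, msg(1:mlen)
! ----------------------------------------------------------------------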

      Subroutine evdist( n )
! ---------------------------------------------------------------------
! --- Routine 'evdist' distributes 'n' elements as evenly as possible
!     over 'nodes' processors. The actual number of elements per
!     processor is returned in array 'sizes'.
! ---------------------------------------------------------------------
! --- Input:  - Integer n
!             - Integer nodes             (via Module mpi_module).
! --- Output: - Integer sizes(0:maxnod-1) (via Module mpi_module).
! ---------------------------------------------------------------------
      Implicit   None

      Integer :: n

      Integer :: i, non, nrest, rest
! ---------------------------------------------------------------------
      non   = n/nodes
      rest  = Mod( n, nodes )
      nrest = nodes - rest - 1
      Do i = 0, nodes - 1
         sizes(i) = non
         If ( i > nrest ) sizes(i) = non + 1
      End Do
! ---------------------------------------------------------------------
      End Subroutine evdist
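
! ---------------------------------------------------------------------
! --- Worked example (illustration added here, not in the original
!     source): for n = 10 and nodes = 3, 'evdist' gives non = 3,
!     rest = 1 and nrest = 1, so sizes = (/ 3, 3, 4 /); the 'rest'
!     surplus elements always go to the highest-numbered nodes.
! ---------------------------------------------------------------------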

      Subroutine mpistart
! ---------------------------------------------------------------------
! --- 'mpistart' performs the complete start-up: it initializes MPI
!     via 'csetup' and sets the communication names via 'setnames'.
! ---------------------------------------------------------------------
      Call csetup
      Call setnames
! ---------------------------------------------------------------------
      End Subroutine mpistart

      Subroutine setnames
! ---------------------------------------------------------------------
! --- 'setnames' assigns the communicator and the MPI data types used
!     throughout to the shorthand names 'comm', 'ityp' and 'rtyp'.
! ---------------------------------------------------------------------
      comm = MPI_Comm_World
      ityp = MPI_Integer8
      rtyp = MPI_Real8
! ---------------------------------------------------------------------
      End Subroutine setnames

      End Module mpi_module
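
! ---------------------------------------------------------------------
! --- Usage sketch (added illustration, not part of the original
!     source): a minimal driver showing the intended call order. The
!     program name 'demo' and the problem size 'n' are assumptions.
! ---------------------------------------------------------------------
      Program demo
      Use        mpi_module
      Implicit   None

      Integer, Parameter :: n = 1000

      Call mpistart          ! MPI_Init, rank/size, shorthand names
      Call evdist( n )       ! fill sizes(0:nodes-1)
      Call bsaddr            ! derive offset(), glb, lb and gub
      Print *, 'node', me, 'holds global elements', lb, 'to', gub
      Call mpibye            ! MPI_Finalize
      End Program demo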