1 /* CalculiX - A 3-dimensional finite element program */
2 /* Copyright (C) 1998-2021 Guido Dhondt */
3
4 /* This program is free software; you can redistribute it and/or */
5 /* modify it under the terms of the GNU General Public License as */
/* published by the Free Software Foundation (version 2); */
7 /* */
8
9 /* This program is distributed in the hope that it will be useful, */
10 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
11 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
12 /* GNU General Public License for more details. */
13
14 /* You should have received a copy of the GNU General Public License */
15 /* along with this program; if not, write to the Free Software */
16 /* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
17
18 #include <unistd.h>
19 #include <stdio.h>
20 #include <math.h>
21 #include <stdlib.h>
22 #include <pthread.h>
23 #include "CalculiX.h"
24
/* file-static copies of the assembly arguments: written exactly once by
   mafilltcompmain before any worker thread is created, and afterwards
   only read by the mafilltcompmt workers, so no locking is required */

static char *lakonf1;

static ITG num_cpus,*nef1,*ipnei1,*neifa1,*neiel1,*jq1,*irow1,*nzs1,
    *ielfa1,*ifabou1,*nbody1,*neq1,*nactdohinv1,*iau61,*iturbulent1;

static double *au1,*ad1,*b1,*vfa1,*xxn1,*area1,*vel1,
    *umel1,*alet1,*ale1,*gradtfa1,*xxi1,*body1,*volume1,*dtimef1,*velo1,
    *veloo1,*cvfa1,*hcfa1,*cvel1,*gradvel1,*xload1,*gamma1,*xrlfa1,
    *xxj1,*a11,*a21,*a31,*flux1,*xxni1,*xxnj1,*of21,*sc1;
34
/* worker routine, defined below; declared here so the pthread_create
   call sees a proper prototype */

void *mafilltcompmt(ITG *i);

/* parallel driver for the assembly of the energy (temperature)
   equation of the compressible CFD solver: determines the number of
   threads, copies all arguments into the file-static variables read
   by the workers, and runs one mafilltcompmt worker per thread.

   All pointer arguments are passed through unchanged to the Fortran
   routine mafilltcomp; au/ad/b are the matrix/rhs being assembled. */

void mafilltcompmain(ITG *nef,ITG *ipnei,ITG *neifa,
                     ITG *neiel,double *vfa,double *xxn,double *area,
                     double *au,double *ad,ITG *jq,ITG *irow,ITG *nzs,
                     double *b,double *vel,double *umel,double *alet,
                     double *ale,double *gradtfa,double *xxi,double *body,
                     double *volume,ITG *ielfa,char *lakonf,
                     ITG *ifabou,ITG *nbody,ITG *neq,double *dtimef,double *velo,
                     double *veloo,double *cvfa,double *hcfa,double *cvel,
                     double *gradvel,double *xload,double *gamma,double *xrlfa,
                     double *xxj,ITG *nactdohinv,double *a1,double *a2,double *a3,
                     double *flux,ITG *iau6,double *xxni,double *xxnj,
                     ITG *iturbulent,double *of2,double *sc){

  ITG i;

  /* variables for multithreading procedure */

  ITG sys_cpus,*ithread=NULL;
  char *env,*envloc,*envsys;

  num_cpus=0;
  sys_cpus=0;

  /* explicit user declaration prevails */

  envsys=getenv("NUMBER_OF_CPUS");
  if(envsys){
    sys_cpus=atoi(envsys);
    if(sys_cpus<0) sys_cpus=0;
  }

  /* automatic detection of available number of processors */

  if(sys_cpus==0){
    sys_cpus=getSystemCPUs();
    if(sys_cpus<1) sys_cpus=1;
  }

  /* local declaration prevails, if strictly positive */

  envloc=getenv("CCX_NPROC_CFD");
  if(envloc){
    num_cpus=atoi(envloc);
    if(num_cpus<0){
      num_cpus=0;
    }else if(num_cpus>sys_cpus){
      num_cpus=sys_cpus;
    }
  }

  /* else global declaration, if any, applies */

  env=getenv("OMP_NUM_THREADS");
  if(num_cpus==0){
    if(env) num_cpus=atoi(env);
    if(num_cpus<1){
      num_cpus=1;
    }else if(num_cpus>sys_cpus){
      num_cpus=sys_cpus;
    }
  }

  /* never use more threads than elements; this guard appears in a
     similar way in all other parallel parts */

  if(*nef<num_cpus) num_cpus=*nef;

  pthread_t tid[num_cpus];

  /* publish the arguments to the file-static variables before any
     thread is created (workers only read them) */

  nef1=nef;ipnei1=ipnei;neifa1=neifa;neiel1=neiel;vfa1=vfa;xxn1=xxn;
  area1=area;
  jq1=jq;irow1=irow;nzs1=nzs;vel1=vel;umel1=umel;alet1=alet;ale1=ale;
  gradtfa1=gradtfa;xxi1=xxi;body1=body;volume1=volume;
  ielfa1=ielfa;lakonf1=lakonf;ifabou1=ifabou;
  nbody1=nbody;neq1=neq;dtimef1=dtimef;velo1=velo;veloo1=veloo;
  cvfa1=cvfa;hcfa1=hcfa;cvel1=cvel;gradvel1=gradvel;xload1=xload;
  gamma1=gamma;xrlfa1=xrlfa;xxj1=xxj;nactdohinv1=nactdohinv;a11=a1;
  a21=a2;a31=a3;flux1=flux;iau61=iau6;ad1=ad;au1=au;b1=b;xxni1=xxni;
  xxnj1=xxnj;iturbulent1=iturbulent;of21=of2;sc1=sc;

  /* create threads and wait; cast to the exact start-routine type
     pthread_create expects (a function pointer must not be cast to
     void *) */

  NNEW(ithread,ITG,num_cpus);
  for(i=0;i<num_cpus;i++){
    ithread[i]=i;
    pthread_create(&tid[i],NULL,(void *(*)(void *))mafilltcompmt,
                   (void *)&ithread[i]);
  }
  for(i=0;i<num_cpus;i++) pthread_join(tid[i],NULL);

  SFREE(ithread);

  return;

}
133
134 /* subroutine for multithreading of mafilltcomp */
135
mafilltcompmt(ITG * i)136 void *mafilltcompmt(ITG *i){
137
138 ITG nefa,nefb,nefdelta;
139
140 // ceil -> floor
141
142 nefdelta=(ITG)floor(*nef1/(double)num_cpus);
143 nefa=*i*nefdelta+1;
144 nefb=(*i+1)*nefdelta;
145 // next line! -> all parallel sections
146 if((*i==num_cpus-1)&&(nefb<*nef1)) nefb=*nef1;
147
148 FORTRAN(mafilltcomp,(nef1,ipnei1,neifa1,neiel1,vfa1,xxn1,area1,
149 au1,ad1,jq1,irow1,nzs1,
150 b1,vel1,umel1,alet1,ale1,gradtfa1,xxi1,
151 body1,volume1,ielfa1,lakonf1,ifabou1,
152 nbody1,neq1,dtimef1,velo1,veloo1,cvfa1,hcfa1,cvel1,
153 gradvel1,xload1,gamma1,xrlfa1,xxj1,nactdohinv1,
154 a11,a21,a31,flux1,&nefa,&nefb,iau61,xxni1,xxnj1,
155 iturbulent1,of21,sc1));
156
157 return NULL;
158 }
159