MODULE MOD_AI

USE MOD_AI_PAR
USE MOD_IDF_PAR, ONLY : IDFOBJ
USE MOD_IDF, ONLY : IDFALLOCATEX,IDFNULLIFY,IDFWRITE,IDFGETLOC
USE IMODVAR, ONLY : DP_KIND
USE MOD_LUDCMP, ONLY : IPEST_NORMAL_MS_SAMPLE
USE MOD_UTL, ONLY : VTOS,UTL_GETUNIT,UTL_IDFSNAPTONICEGRID

CONTAINS

!##================================================================
SUBROUTINE MOD_NN_MAIN(FNAME)
!##================================================================
IMPLICIT NONE
CHARACTER(LEN=*),INTENT(IN) :: FNAME

FNAMEIN=FNAME

CALL MOD_NN_INITIALISE()
CALL MOD_NN_PREPDATA()
!CALL MOD_NN_RANDOM_SEQUENCE()
CALL MOD_NN_TRAIN()

CLOSE(IU)

END SUBROUTINE MOD_NN_MAIN

!##================================================================
SUBROUTINE MOD_NN_TRAIN()
!##================================================================
IMPLICIT NONE
INTEGER :: I,J,ITER,IS,IX,IY,LDIM1,LDIM2

PERFORMANCE=0.0
DO ITER=1,NUM_TRAINING

 CALL MOD_NN_INIT_ITER(ITER)

 !## process each sample
 DO IS=1,NUM_SAMPLES

! !## available convolutional network
! IF(NUM_CNNLAYERS.GT.0)THEN
!  !## forward prop the convolution network
!  CALL MOD_CNN_FWDPROP(INPUTS_2D(IS,:,:,:))
!  I=0; DO J=1,NUM_CHANNELS; DO IX=1,CNNLAYER(NUM_CNNLAYERS)%WIDTH; DO IY=1,CNNLAYER(NUM_CNNLAYERS)%HEIGHT
!   I=I+1; INPUTS(IS,I)=CNNLAYER(NUM_CNNLAYERS)%OUTPUT(J,IY,IX)
!  ENDDO; ENDDO; ENDDO
! ENDIF

  !## forward propagation in the fully-connected neural network - input is the activation from the convolution network
  CALL MOD_NN_FWDPROP(INPUTS(IS,:))

  !## get total error (loss-function)
  ERROR=ERROR+MOD_NN_LOSS(LAYER(NUM_LAYERS)%A,OUTPUTS(IS,:))

  !## backward propagation in the fully-connected neural network
  CALL MOD_NN_BACKPROP(INPUTS(IS,:),OUTPUTS(IS,:))

! !## available convolutional network
! IF(NUM_CNNLAYERS.GT.0)THEN
!  I=0; DO J=1,NUM_CHANNELS; DO IX=1,CNNLAYER(NUM_CNNLAYERS)%WIDTH; DO IY=1,CNNLAYER(NUM_CNNLAYERS)%HEIGHT
!   I=I+1; CNNLAYER(NUM_CNNLAYERS)%GRADIENT(J,IY,IX)=LAYER(1)%DB(I)
!  ENDDO; ENDDO; ENDDO
!  !## backward prop the convolution network
!  CALL MOD_CNN_BACKPROP(INPUTS_2D(IS,:,:,:))
! ENDIF

  CALL MOD_NN_ACCUMULATE(IS)

 ENDDO

 WRITE(*,*) ITER,SUM(ERROR); PERFORMANCE(ITER)=SUM(ERROR)

 !## update after each sequence generated a fit
 CALL MOD_NN_UPDATE(ITER)

ENDDO

WRITE(IU,'(/A/)') 'ITERATION SEQUENCE'
WRITE(IU,'(10(F15.7,1X))') (PERFORMANCE(ITER),ITER=1,NUM_TRAINING)

CALL MOD_NN_WRITE_WEIGHTS_H_B('FINAL:')
CALL MOD_NN_VALIDATE()

END SUBROUTINE MOD_NN_TRAIN

!##================================================================
SUBROUTINE MOD_NN_VALIDATE()
!##================================================================
IMPLICIT NONE
INTEGER :: IS,I
CHARACTER(LEN=52) :: FRM
REAL(KIND=DP_KIND),ALLOCATABLE,DIMENSION(:) :: X,Y

ALLOCATE(X(SIZE(INPUTS,2)),Y(SIZE(OUTPUTS,2)))

WRITE(IU,'(/A)') 'TRAINING'
WRITE(FRM,'(A1,I5.5,A8)') '(',10*(NUM_INPUTS+NUM_OUTPUTS+1),'X,A20)'
WRITE(IU,FRM) '<--- PREDICTION --->'
WRITE(IU,'(99A10)') 'N',(L_INPUTS(I),I=1,NUM_INPUTS),(L_OUTPUTS(I),I=1,NUM_OUTPUTS),(L_OUTPUTS(I),I=1,NUM_OUTPUTS),('ERROR'//TRIM(VTOS(I)),I=1,NUM_OUTPUTS)
DO IS=1,NUM_SAMPLES
 CALL MOD_NN_FWDPROP(INPUTS(IS,:))
 INPUTS(IS,:)= (INPUTS(IS,:)-FINPUT)/SINPUT
 OUTPUTS(IS,:)=(OUTPUTS(IS,:)-FOUTPUT)/SOUTPUT
 LAYER(NUM_LAYERS)%A=(LAYER(NUM_LAYERS)%A-FOUTPUT)/SOUTPUT
 ERROR=MOD_NN_ERROR(LAYER(NUM_LAYERS)%A,OUTPUTS(IS,:))
 DO I=1,SIZE(X); X(I)=INPUTS(IS,I); ENDDO
 DO I=1,SIZE(Y); Y(I)=OUTPUTS(IS,I); ENDDO
 WRITE(IU,'(A10,99F10.3)') 'ERROR'//TRIM(VTOS(IS)),X,Y,LAYER(NUM_LAYERS)%A,ERROR
ENDDO
DEALLOCATE(X,Y)

IF(NUM_VALIDATION.GT.0)THEN
 ALLOCATE(X(SIZE(V_INPUTS,2)),Y(SIZE(V_OUTPUTS,2)))
 WRITE(IU,'(/A)') 'VALIDATION'
 WRITE(IU,FRM) '<--- PREDICTION --->'
 WRITE(IU,'(99A10)') 'N',(L_INPUTS(I),I=1,NUM_INPUTS),(L_OUTPUTS(I),I=1,NUM_OUTPUTS),(L_OUTPUTS(I),I=1,NUM_OUTPUTS),('ERROR'//TRIM(VTOS(I)),I=1,NUM_OUTPUTS)
 DO IS=1,NUM_VALIDATION
  CALL MOD_NN_FWDPROP(V_INPUTS(IS,:))
  V_INPUTS(IS,:)= (V_INPUTS(IS,:)-FINPUT)/SINPUT
  V_OUTPUTS(IS,:)=(V_OUTPUTS(IS,:)-FOUTPUT)/SOUTPUT
  LAYER(NUM_LAYERS)%A=(LAYER(NUM_LAYERS)%A-FOUTPUT)/SOUTPUT
  ERROR=MOD_NN_ERROR(LAYER(NUM_LAYERS)%A,V_OUTPUTS(IS,:))
  DO I=1,SIZE(X); X(I)=V_INPUTS(IS,I); ENDDO
  DO I=1,SIZE(Y); Y(I)=V_OUTPUTS(IS,I); ENDDO
  WRITE(IU,'(A10,99F10.3)') 'ERROR'//TRIM(VTOS(IS)),X,Y,LAYER(NUM_LAYERS)%A(:),ERROR
 ENDDO
 DEALLOCATE(X,Y)
ENDIF

END SUBROUTINE MOD_NN_VALIDATE

!##================================================================
SUBROUTINE MOD_NN_OPTIMIZE(FCT,FCT0,ITER)
!##================================================================
IMPLICIT NONE
REAL(KIND=DP_KIND) :: MU=0.0D0
INTEGER,INTENT(IN) :: ITER
REAL(KIND=DP_KIND),INTENT(IN),DIMENSION(:) :: FCT0
REAL(KIND=DP_KIND),INTENT(INOUT),DIMENSION(:) :: FCT
INTEGER :: I,J,K,N,IS,IMINOBJF,NBOX,IU
REAL(KIND=DP_KIND),ALLOCATABLE,DIMENSION(:) :: SFCT,PFCT
REAL(KIND=DP_KIND),ALLOCATABLE,DIMENSION(:,:) :: X
TYPE(IDFOBJ) :: X2D
REAL(KIND=DP_KIND) :: OBJF,OBFR,DFCT,MINOBJF

N=SIZE(FCT); ALLOCATE(SFCT(N),PFCT(N),X(0:N,2))

NBOX=100
IF(NBOX.GT.0)THEN
 DFCT=MAXVAL(INPUTS)-MINVAL(INPUTS)
 !## compute 2d matrix
 CALL IDFNULLIFY(X2D); X2D%NCOL=NBOX; X2D%NROW=NBOX
 X2D%XMIN=MINVAL(INPUTS(:,1))-DFCT/10.0D0
 X2D%XMAX=MAXVAL(INPUTS(:,1))+DFCT/10.0D0
 X2D%YMIN=MINVAL(INPUTS(:,2))-DFCT/10.0D0
 X2D%YMAX=MAXVAL(INPUTS(:,2))+DFCT/10.0D0
 X2D%DX=DFCT/100.0D0; X2D%DY=DFCT/100.D0
 CALL UTL_IDFSNAPTONICEGRID(X2D%XMIN,X2D%XMAX,X2D%YMIN,X2D%YMAX,X2D%DX,X2D%NCOL,X2D%NROW)
 IF(.NOT.IDFALLOCATEX(X2D))THEN; ENDIF
 DO I=1,X2D%NROW
  DO J=1,X2D%NCOL
   CALL IDFGETLOC(X2D,I,J,SFCT(1),SFCT(2))
   !## scale inputs
   SFCT=SFCT*SINPUT+FINPUT
   CALL MOD_NN_FWDPROP(SFCT)
   LAYER(NUM_LAYERS)%A=(LAYER(NUM_LAYERS)%A-FOUTPUT)/SOUTPUT
   OBJF=SUM(LAYER(NUM_LAYERS)%A)
   X2D%X(J,I)=OBJF
  ENDDO
 ENDDO
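 !## note: the loop above samples the trained network on an nbox x nbox grid spanning the first
 !## two (rescaled) inputs; the idf/ipf files written below therefore hold a response surface of
 !## the summed network output - a diagnostic that implicitly assumes a two-dimensional input space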
 X2D%FNAME='D:\TEXT_X2D_ITER'//TRIM(VTOS(ITER))//'.IDF'
 IF(.NOT.IDFWRITE(X2D,X2D%FNAME,1))THEN; PAUSE; STOP; ENDIF
 IU=UTL_GETUNIT(); OPEN(IU,FILE='D:\TEXT_X2D_ITER'//TRIM(VTOS(ITER))//'.IPF',STATUS='REPLACE',ACTION='WRITE')
 WRITE(IU,*) NUM_SAMPLES+1
 WRITE(IU,*) '2'
 WRITE(IU,*) 'X'
 WRITE(IU,*) 'Y'
 WRITE(IU,*) '0,TXT'
 DO I=1,NUM_SAMPLES
  WRITE(IU,*) INPUTS(I,1),INPUTS(I,2)
 ENDDO
ENDIF

DFCT=(MAXVAL(INPUTS)-MINVAL(INPUTS))/2.0D0

!## set sfct to initial location
SFCT=FCT
DO
 !## scale inputs
 SFCT=SFCT*SINPUT+FINPUT
 !## compute neural network
 CALL MOD_NN_FWDPROP(SFCT); LAYER(NUM_LAYERS)%A=(LAYER(NUM_LAYERS)%A-FOUTPUT)/SOUTPUT
 !## rescale inputs
 SFCT=(SFCT-FINPUT)/SINPUT
 !## compute objective function value
 OBJF=SUM(LAYER(NUM_LAYERS)%A)
 !## add regularisation
 OBFR=0.0D0; DO K=1,N; OBFR=OBFR+(SFCT(K)-FCT0(K))**2.0D0; ENDDO
 !## save total objective function
 X(0,:)=OBJF+MU*OBFR; MINOBJF=X(0,1); IMINOBJF=0
 !## compute square around current centre point
 DO I=1,N
  DO J=1,2
   PFCT=SFCT
   IF(J.EQ.1)PFCT(I)=SFCT(I)-DFCT
   IF(J.EQ.2)PFCT(I)=SFCT(I)+DFCT
   !## make sure legs are within current neural-network area
   DO K=1,N
    PFCT(K)=MIN(MAX(MINVAL(INPUTS(:,K)),PFCT(K)),MAXVAL(INPUTS(:,K)))
   ENDDO
   !## scale inputs
   PFCT=PFCT*SINPUT+FINPUT
   !## compute neural network
   CALL MOD_NN_FWDPROP(PFCT); LAYER(NUM_LAYERS)%A=(LAYER(NUM_LAYERS)%A-FOUTPUT)/SOUTPUT
   !## rescale inputs
   PFCT=(PFCT-FINPUT)/SINPUT
   !## compute objective function value
   OBJF=SUM(LAYER(NUM_LAYERS)%A)
   !## add regularisation
   OBFR=0.0D0; DO K=1,N; OBFR=OBFR+(PFCT(K)-FCT0(K))**2.0D0; ENDDO
   !## save total objective function
   X(I,J)=OBJF+MU*OBFR
   !## get lowest objective function value
   IF(X(I,J).LT.MINOBJF)THEN
    MINOBJF =X(I,J)
    IMINOBJF=-I; IF(J.EQ.2)IMINOBJF=I
   ENDIF
  ENDDO
 ENDDO
 !## centre is lowest - reduce dfct
 IF(IMINOBJF.EQ.0)THEN
  DFCT=DFCT/2.0D0
  !## found minimal value
  IF(DFCT.LE.0.1D-3)EXIT
 ELSE
  IF(IMINOBJF.LT.0)THEN
   SFCT(ABS(IMINOBJF))=SFCT(ABS(IMINOBJF))-DFCT
  ELSE
   SFCT( IMINOBJF )=SFCT( IMINOBJF )+DFCT
  ENDIF
 ENDIF
 !## make sure new position is within current neural-network area
 DO K=1,N
  SFCT(K)=MIN(MAX(MINVAL(INPUTS(:,K)),SFCT(K)),MAXVAL(INPUTS(:,K)))
 ENDDO
 WRITE(*,*) SFCT,MINOBJF
ENDDO
FCT=SFCT

IF(NBOX.GT.0)THEN
 WRITE(IU,*) FCT(1),FCT(2)
 CLOSE(IU)
ENDIF

END SUBROUTINE MOD_NN_OPTIMIZE

!##================================================================
SUBROUTINE MOD_NN_INIT_ITER(ITER)
!##================================================================
IMPLICIT NONE
INTEGER,INTENT(IN) :: ITER
INTEGER :: N

!## total error
ERROR=0.0
DO N=1,NUM_LAYERS
 IF(N.LT.NUM_LAYERS)THEN
  LAYER(N)%TW=0.0; IF(ITER.EQ.1)THEN; LAYER(N)%PW=0.0; LAYER(N)%SW=0.0; ENDIF
 ENDIF
 IF(N.GT.1)THEN
  LAYER(N)%TB=0.0; IF(ITER.EQ.1)THEN; LAYER(N)%PB=0.0; LAYER(N)%SB=0.0; ENDIF
 ENDIF
ENDDO
! DO N=1,NUM_CNNLAYERS
!  CNNLAYER(N)%TKERNEL=0.0
!  CNNLAYER(N)%TBIASES=0.0
! ENDDO

END SUBROUTINE MOD_NN_INIT_ITER

!##================================================================
SUBROUTINE MOD_NN_ACCUMULATE(IS)
!##================================================================
IMPLICIT NONE
INTEGER,INTENT(IN) :: IS
INTEGER :: N

! DO N=1,NUM_CNNLAYERS
!  CNNLAYER(N)%TKERNEL=CNNLAYER(N)%TKERNEL+CNNLAYER(N)%DW
!  CNNLAYER(N)%TBIASES=CNNLAYER(N)%TBIASES+CNNLAYER(N)%DB
! ENDDO

DO N=1,NUM_LAYERS
 IF(N.LT.NUM_LAYERS)THEN
  LAYER(N)%TW=LAYER(N)%TW+LAYER(N)%DW
 ENDIF
 IF(N.GT.1 )THEN
  LAYER(N)%TB=LAYER(N)%TB+LAYER(N)%DB
 ENDIF
ENDDO

END SUBROUTINE MOD_NN_ACCUMULATE

!##================================================================
SUBROUTINE MOD_NN_UPDATE(ITER)
!##================================================================
IMPLICIT NONE
INTEGER,INTENT(IN) :: ITER
INTEGER :: N,I,J
REAL(KIND=DP_KIND) :: F,FL,FI

FL=1.0D0/REAL(NUM_SAMPLES)
FI=1.0D0/REAL(ITER)

!## scale the accumulated gradients by an adagrad-type factor learning_rate/sqrt(mean(sw)+epsilon),
!## add momentum times the previous step, and subtract the result (divided by the number of samples)
DO N=1,NUM_LAYERS
 IF(N.LT.NUM_LAYERS)THEN
  !## apply nesterov accelerated gradient descent
  IF(ITER.GT.1)THEN
   !## parameter update adaptive subgradient (adagrad)
   DO I=1,SIZE(LAYER(N)%W,1); DO J=1,SIZE(LAYER(N)%W,2)
    F=LEARNING_RATE/SQRT(FI*LAYER(N)%SW(I,J)+EPSILON)
    LAYER(N)%TW(I,J)=F*LAYER(N)%TW(I,J)
   ENDDO; ENDDO
   LAYER(N)%TW=MOMENTUM*LAYER(N)%PW+LAYER(N)%TW
  ENDIF
  LAYER(N)%W =LAYER(N)%W-FL*LAYER(N)%TW
  LAYER(N)%PW=FL*LAYER(N)%TW
  LAYER(N)%SW=LAYER(N)%SW+LAYER(N)%TW**2.0
 ENDIF
 IF(N.GT.1)THEN
  !## apply nesterov accelerated gradient descent
  IF(ITER.GT.1)THEN
   !## parameter update adaptive subgradient (adagrad)
   DO I=1,SIZE(LAYER(N)%B,1)
    F=LEARNING_RATE/SQRT(FI*LAYER(N)%SB(I)+EPSILON)
    LAYER(N)%TB(I)=F*LAYER(N)%TB(I)
   ENDDO
   LAYER(N)%TB=MOMENTUM*LAYER(N)%PB+LAYER(N)%TB
  ENDIF
  LAYER(N)%B =LAYER(N)%B-FL*LAYER(N)%TB
  LAYER(N)%PB=FL*LAYER(N)%TB
  LAYER(N)%SB=LAYER(N)%SB+LAYER(N)%TB**2.0
 ENDIF
ENDDO

! DO N=1,NUM_CNNLAYERS
!  CNNLAYER(N)%KERNEL=CNNLAYER(N)%KERNEL-FL*CNNLAYER(N)%TKERNEL
!  CNNLAYER(N)%BIASES=CNNLAYER(N)%BIASES-FL*CNNLAYER(N)%TBIASES
! ENDDO

END SUBROUTINE MOD_NN_UPDATE

!##================================================================
SUBROUTINE MOD_NN_FWDPROP(X)
!##================================================================
IMPLICIT NONE
REAL(KIND=DP_KIND),INTENT(IN),DIMENSION(:) :: X
INTEGER :: I

!## set input value in first layer
LAYER(1)%A=X
!## per layer: z = transpose(w) * a_previous + b, followed by the activation function
DO I=2,NUM_LAYERS
 LAYER(I)%Z=MATMUL(TRANSPOSE(LAYER(I-1)%W),LAYER(I-1)%A)+LAYER(I)%B
 LAYER(I)%A=MOD_NN_ACTIVATION(LAYER(I)%Z,LAYER(I)%ACTIVATION)
ENDDO

END SUBROUTINE MOD_NN_FWDPROP

! !##================================================================
! SUBROUTINE MOD_CNN_FWDPROP(INPUT)
! !##================================================================
! IMPLICIT NONE
! REAL(KIND=DP_KIND),INTENT(IN),DIMENSION(:,:,:) :: INPUT
! INTEGER :: I
!
! DO I=1,NUM_CNNLAYERS
!  IF(I.EQ.1)THEN
!   CALL MOD_CNN_FWDPROP_COMPUTE(I,INPUT)
!  ELSE
!   CALL MOD_CNN_FWDPROP_COMPUTE(I,CNNLAYER(I-1)%OUTPUT)
!  ENDIF
! ENDDO
!
! END SUBROUTINE MOD_CNN_FWDPROP
!
! !##================================================================
! SUBROUTINE MOD_CNN_FWDPROP_COMPUTE(ICNN,INPUT)
! !##================================================================
! IMPLICIT NONE
! INTEGER,INTENT(IN) :: ICNN
! REAL(KIND=DP_KIND),INTENT(IN),DIMENSION(:,:,:) :: INPUT
! INTEGER :: INPUT_WIDTH,INPUT_HEIGHT,INPUT_CHANNELS
! INTEGER :: ISTART,IEND,JSTART,JEND,I,J,N,IWS,IWE,JWS,JWE,HALF_WINDOW
!
! !## input dimensions are channels x width x height
! INPUT_CHANNELS=SIZE(INPUT,DIM=1)
! INPUT_WIDTH  =SIZE(INPUT,DIM=2)
! INPUT_HEIGHT =SIZE(INPUT,DIM=3)
!
! !## half-window is 1 for window size 3; 2 for window size 5; etc.
! HALF_WINDOW=CNNLAYER(ICNN)%KERNEL_SIZE/2
!
! !## add a stride if needed ##!
!
! !## determine the start and end indices for the width and height dimensions
! !## of the input that correspond to the center of each window.
! ISTART=HALF_WINDOW+ 1
! JSTART=HALF_WINDOW+ 1
! IEND  =INPUT_WIDTH- ISTART+1
! JEND  =INPUT_HEIGHT-JSTART+1
!
! !CONVOLUTION: DO CONCURRENT(I = ISTART:IEND, J = JSTART:JEND)
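! !## sketch of what the loops below evaluate for every window centre (i,j) and filter n:
! !##   z(n,iws,jws) = sum_c sum_u sum_v kernel(n,c,u,v)*input(c,iws+u-1,jws+v-1) + bias(n)
! !## i.e. a stride-1 cross-correlation without padding, followed by the activation function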
! DO I=ISTART,IEND; DO J=JSTART,JEND
!
!  !## start and end indices of the input data on the filter window
!  !## iws and jws are also coincidentally the indices of the output matrix
!  IWS=I-HALF_WINDOW; IWE=I+HALF_WINDOW
!  JWS=J-HALF_WINDOW; JWE=J+HALF_WINDOW
!
!  !## compute the inner tensor product, sum(w_ij * x_ij), for each filter.
!!  DO CONCURRENT(N = 1:CNNLAYER(ICNN)%FILTERS)
!  DO N=1,CNNLAYER(ICNN)%FILTERS
!   CNNLAYER(ICNN)%Z(N,IWS,JWS)=SUM(CNNLAYER(ICNN)%KERNEL(N,:,:,:)*INPUT(:,IWS:IWE,JWS:JWE))
!  END DO
!
!  !## add bias to the inner product.
!  CNNLAYER(ICNN)%Z(:,IWS,JWS)=CNNLAYER(ICNN)%Z(:,IWS,JWS)+CNNLAYER(ICNN)%BIASES
!
!  !## activate
!  CNNLAYER(ICNN)%OUTPUT(:,IWS,JWS)=MOD_NN_ACTIVATION(CNNLAYER(ICNN)%Z(:,IWS,JWS),CNNLAYER(ICNN)%ACTIVATION)
!
! ENDDO; ENDDO
! END DO CONVOLUTION
!
!! !## activate
!! CNNLAYER(ICNN)%OUTPUT=MOD_NN_ACTIVATION(CNNLAYER(ICNN)%Z,CNNLAYER(ICNN)%ACTIVATION)
!
! END SUBROUTINE MOD_CNN_FWDPROP_COMPUTE

!##================================================================
SUBROUTINE MOD_NN_BACKPROP(X,Y)
!##================================================================
IMPLICIT NONE
REAL(KIND=DP_KIND),INTENT(IN),DIMENSION(:) :: X,Y
INTEGER :: I,N

!## output layer: delta = dl/da * sigma'(a); hidden layers: delta_n = w_n*delta_(n+1) * sigma'(a_n);
!## weight gradient: dw_(n-1) = a_(n-1) * transpose(delta_n)
N=NUM_LAYERS
LAYER(N)%DB  =MOD_NN_LOSSDERIVATIVE(LAYER(N)%A,Y)*MOD_NN_ACTIVATIONDERIVATIVE(LAYER(N)%A,LAYER(N)%ACTIVATION)
LAYER(N-1)%DW=MATMUL(RESHAPE(LAYER(N-1)%A,[LAYER(N-1)%NUM_NEURONS,1]),RESHAPE(LAYER(N)%DB,[1,LAYER(N)%NUM_NEURONS]))
DO N=NUM_LAYERS-1,2,-1
 LAYER(N)%DB  =MATMUL(LAYER(N)%W,LAYER(N+1)%DB)*MOD_NN_ACTIVATIONDERIVATIVE(LAYER(N)%A,LAYER(N)%ACTIVATION)
 LAYER(N-1)%DW=MATMUL(RESHAPE(LAYER(N-1)%A,[LAYER(N-1)%NUM_NEURONS,1]),RESHAPE(LAYER(N)%DB,[1,LAYER(N)%NUM_NEURONS]))
ENDDO
! IF(NUM_CNNLAYERS.GT.0)THEN
!  LAYER(1)%DB=MATMUL(LAYER(1)%W,LAYER(2)%DB)*MOD_NN_ACTIVATIONDERIVATIVE(X,CNNLAYER(NUM_CNNLAYERS)%ACTIVATION)
! ENDIF

END SUBROUTINE MOD_NN_BACKPROP

! !##================================================================
! SUBROUTINE MOD_CNN_BACKPROP(INPUT)
! !##================================================================
! IMPLICIT NONE
! REAL(KIND=DP_KIND),INTENT(IN),DIMENSION(:,:,:) :: INPUT
! INTEGER :: I
!
! DO I=NUM_CNNLAYERS,1,-1
!  CALL MOD_CNN_BACKPROP_COMPUTE(I,INPUT,CNNLAYER(I)%GRADIENT)
! ENDDO
!
! END SUBROUTINE MOD_CNN_BACKPROP
!
! !##================================================================
! SUBROUTINE MOD_CNN_BACKPROP_COMPUTE(ICNN,INPUT,GRADIENT)
! !##================================================================
! IMPLICIT NONE
! INTEGER,INTENT(IN) :: ICNN
! REAL(KIND=DP_KIND),INTENT(IN),DIMENSION(:,:,:) :: INPUT,GRADIENT
! INTEGER :: INPUT_WIDTH,INPUT_HEIGHT,INPUT_CHANNELS,INPUT_FILTERS,INPUT_KERNEL
! INTEGER :: ISTART,IEND,JSTART,JEND,I,J,K,N,IWS,IWE,JWS,JWE,HALF_WINDOW,II,JJ,IW,IH
! REAL(KIND=DP_KIND),DIMENSION(:,:,:,:),ALLOCATABLE :: DW
! REAL(KIND=DP_KIND),DIMENSION(:,:,:),ALLOCATABLE :: GDZ
! REAL(KIND=DP_KIND),DIMENSION(:),ALLOCATABLE :: DB
!
! !## input dimensions are channels x width x height
! INPUT_CHANNELS=SIZE(INPUT,DIM=1)
! INPUT_WIDTH  =SIZE(INPUT,DIM=2)
! INPUT_HEIGHT =SIZE(INPUT,DIM=3)
! INPUT_FILTERS =CNNLAYER(ICNN)%FILTERS
! INPUT_KERNEL  =CNNLAYER(ICNN)%KERNEL_SIZE
!
! !## half-window is 1 for window size 3; 2 for window size 5; etc.
! HALF_WINDOW=CNNLAYER(ICNN)%KERNEL_SIZE/2
!
! !## add a stride if needed ##!
!
! !## determine the start and end indices for the width and height dimensions
! !## of the input that correspond to the center of each window.
! ISTART=HALF_WINDOW+ 1
! JSTART=HALF_WINDOW+ 1
! IEND  =INPUT_WIDTH- ISTART+1
! JEND  =INPUT_HEIGHT-JSTART+1
!
! ALLOCATE(DB(INPUT_FILTERS)) ; DB=0.0
! ALLOCATE(DW(INPUT_FILTERS,NUM_CHANNELS,INPUT_KERNEL,INPUT_KERNEL)); DW=0.0
! ALLOCATE(GDZ(INPUT_FILTERS,INPUT_WIDTH,INPUT_HEIGHT)) ; GDZ=0.0
!
! DO IWS=ISTART,IEND
!  DO JWS=JSTART,JEND
!   GDZ(:,IWS,JWS)=GRADIENT(:,IWS,JWS)*MOD_NN_ACTIVATIONDERIVATIVE(CNNLAYER(ICNN)%Z(:,IWS,JWS),CNNLAYER(ICNN)%ACTIVATION)
!  ENDDO
! ENDDO
!
! !## dl/db = sum(dl/dy * sigma'(z))
! DO N=1,CNNLAYER(ICNN)%FILTERS
!  DB(N)=SUM(GDZ(N,:,:))
! END DO
!
! DW=0.0; CNNLAYER(ICNN)%GRADIENT=0.0
!
! DO N=1,CNNLAYER(ICNN)%FILTERS
!  DO K=1,CNNLAYER(ICNN)%CHANNELS
!   DO I=ISTART,IEND
!    DO J=JSTART,JEND
!
!     !## start and end indices of the input data on the filter window
!     IWS=I-HALF_WINDOW; IWE=I+HALF_WINDOW
!     JWS=J-HALF_WINDOW; JWE=J+HALF_WINDOW
!
!     !## dl/dw = sum(dl/dy * sigma'(z) * x)
!!     II=0; DO IW=IWS,IWE; II=II+1; JJ=0; DO IH=JWS,JWE
!!      JJ=JJ+1
!!      DW(N,K,II,JJ)=DW(N,K,II,JJ)+INPUT(K,IW,IH)*GDZ(N,IW,IH)
!!     ENDDO; ENDDO
!     DW(N,K,:,:)=DW(N,K,:,:)+INPUT(K,IWS:IWE,JWS:JWE)*GDZ(N,IWS:IWE,JWS:JWE)
!
!     !## dl/dx = dl/dy * sigma'(z) .inner. w
!     CNNLAYER(ICNN)%GRADIENT(K,I,J)=CNNLAYER(ICNN)%GRADIENT(K,I,J) &
!       + SUM(GDZ(N,IWS:IWE,JWS:JWE)*CNNLAYER(ICNN)%KERNEL(N,K,:,:))
!
!    ENDDO
!   ENDDO
!  ENDDO
! ENDDO
!
! CNNLAYER(ICNN)%DW=CNNLAYER(ICNN)%DW+DW
! CNNLAYER(ICNN)%DB=CNNLAYER(ICNN)%DB+DB
!
! DEALLOCATE(DW,DB,GDZ)
!
! END SUBROUTINE MOD_CNN_BACKPROP_COMPUTE

!##================================================================
FUNCTION MOD_NN_ACTIVATION(X,ACTIVATION) RESULT(RES)
!##================================================================
IMPLICIT NONE
REAL(KIND=DP_KIND),INTENT(IN),DIMENSION(:) :: X
CHARACTER(LEN=*),INTENT(IN) :: ACTIVATION
REAL(KIND=DP_KIND),DIMENSION(SIZE(X)) :: RES
INTEGER :: I

SELECT CASE (TRIM(ACTIVATION))
 CASE ('SIGMOID'); RES= 1.0D0/(1.0D0+EXP(-X))
 CASE ('TANH');    RES=TANH(X)
 CASE ('RELU');    RES=MAX(0.0D0,X)
 !## leaky relu: x for x>0, alpha*x otherwise
 CASE ('LRELU');   DO I=1,SIZE(X); IF(X(I).GT.0.0D0)THEN; RES(I)=X(I); ELSE; RES(I)=X(I)*ALPHA; ENDIF; ENDDO
 CASE DEFAULT; WRITE(*,'(/A/)') '>>> CANNOT RECOGNIZE ACTIVATION ['//TRIM(ACTIVATION)//'] <<<'; PAUSE; STOP
END SELECT

END FUNCTION MOD_NN_ACTIVATION

!##================================================================
FUNCTION MOD_NN_ACTIVATIONDERIVATIVE(X,ACTIVATION) RESULT(RES)
!##================================================================
IMPLICIT NONE
REAL(KIND=DP_KIND),INTENT(IN),DIMENSION(:) :: X
CHARACTER(LEN=*),INTENT(IN) :: ACTIVATION
REAL(KIND=DP_KIND),DIMENSION(SIZE(X)) :: RES
INTEGER :: I

!## x holds the activation value a, not the pre-activation z (cf. the sigmoid case a*(1-a))
SELECT CASE (TRIM(ACTIVATION))
 CASE ('SIGMOID'); RES=X*(1.0D0-X)
 CASE ('TANH');    RES=1.0D0-X**2.0D0
 CASE ('RELU');    RES=0.0D0; DO I=1,SIZE(X); IF(X(I).GT.0.0D0)RES(I)=1.0D0; ENDDO
 CASE ('LRELU');   DO I=1,SIZE(X); IF(X(I).GT.0.0D0)THEN; RES(I)=1.0D0; ELSE; RES(I)=ALPHA; ENDIF; ENDDO
 CASE DEFAULT; WRITE(*,'(/A/)') '>>> CANNOT RECOGNIZE ACTIVATION ['//TRIM(ACTIVATION)//'] <<<'; PAUSE; STOP
END SELECT

END FUNCTION MOD_NN_ACTIVATIONDERIVATIVE

!##================================================================
FUNCTION MOD_NN_ERROR(X,Y) RESULT(RES)
!##================================================================
IMPLICIT NONE
REAL(KIND=DP_KIND),INTENT(IN),DIMENSION(:) :: X,Y
REAL(KIND=DP_KIND),DIMENSION(SIZE(X)) :: RES

RES=X-Y

END FUNCTION MOD_NN_ERROR

!##================================================================
FUNCTION MOD_NN_LOSS(X,Y) RESULT(RES)
!##================================================================
IMPLICIT NONE
REAL(KIND=DP_KIND),INTENT(IN),DIMENSION(:) :: X,Y
REAL(KIND=DP_KIND),DIMENSION(SIZE(X)) :: RES
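!## elementwise half squared-error loss, l=(a-y)**2/2; MOD_NN_TRAIN accumulates this over all
!## samples and reports SUM(ERROR) as the performance value of each training iteration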
RES=(X-Y)**2.0D0/2.0D0

END FUNCTION MOD_NN_LOSS

!##================================================================
FUNCTION MOD_NN_LOSSDERIVATIVE(X,Y) RESULT(RES)
!##================================================================
IMPLICIT NONE
REAL(KIND=DP_KIND),INTENT(IN),DIMENSION(:) :: X,Y
REAL(KIND=DP_KIND),DIMENSION(SIZE(X)) :: RES

!## note: this is twice the analytic derivative of the half squared-error loss above;
!## the extra factor is effectively absorbed by the learning rate
RES=-2.0D0*(Y-X)

END FUNCTION MOD_NN_LOSSDERIVATIVE

!##================================================================
SUBROUTINE MOD_NN_PREPDATA()
!##================================================================
IMPLICIT NONE
INTEGER :: IS,I,IX,IY,J
REAL(KIND=DP_KIND) :: DMIN,DMAX

WRITE(IU,'(/50A1)') ('=',I=1,50)
WRITE(IU,'( A )') 'DATA PREPARATION'
WRITE(IU,'( 50A1)') ('=',I=1,50)
WRITE(IU,'(3A10)') 'VARIABLE','SCALING','SHIFTING'

! !## shift per channel
! IF(NUM_CNNLAYERS.GT.0)THEN
!  FINPUT=0.0; DO I=1,NUM_CHANNELS
!   DO J=1,NUM_SAMPLES
!    DO IY=1,NY_INPUTS
!     DO IX=1,NX_INPUTS
!      FINPUT(I)=FINPUT(I)+INPUTS_2D(J,I,IY,IX)
!     ENDDO
!    ENDDO
!   ENDDO
!   FINPUT(I)=FINPUT(I)/(NY_INPUTS*NX_INPUTS*NUM_SAMPLES)
!   DO J=1,NUM_SAMPLES
!    DO IY=1,NY_INPUTS
!     DO IX=1,NX_INPUTS
!      INPUTS_2D(J,I,IY,IX)=INPUTS_2D(J,I,IY,IX)-FINPUT(I)
!     ENDDO
!    ENDDO
!   ENDDO
!  ENDDO
! ELSE

!## make maximum length of 1 per input, so in between 0 and 1
DO I=1,NUM_INPUTS
 DMIN=10.0E10; DMAX=-10.0E10
 DO IS=1,NUM_SAMPLES;    DMIN=MIN(DMIN, INPUTS(IS,I));  DMAX=MAX(DMAX, INPUTS(IS,I));  ENDDO
 DO IS=1,NUM_VALIDATION; DMIN=MIN(DMIN,V_INPUTS(IS,I)); DMAX=MAX(DMAX,V_INPUTS(IS,I)); ENDDO
 SINPUT(I)=1.0D0; IF(DMAX-DMIN.NE.0.0)SINPUT(I)= 1.0D0/(DMAX-DMIN)
 FINPUT(I)=-1.0D0*DMIN*SINPUT(I)
 DO IS=1,NUM_SAMPLES;    INPUTS(IS,I)=  INPUTS(IS,I)*SINPUT(I)+FINPUT(I);  ENDDO
 DO IS=1,NUM_VALIDATION; V_INPUTS(IS,I)=V_INPUTS(IS,I)*SINPUT(I)+FINPUT(I); ENDDO
 WRITE(IU,'(A10,2F10.3)') 'INPUT'//TRIM(VTOS(I)),SINPUT(I),FINPUT(I)
ENDDO
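!## note: the loop above (and the output loop below) applies a min-max mapping onto [0,1]:
!##   x' = s*x + f   with   s = 1/(max-min)   and   f = -min*s
!## elsewhere the inverse x = (x'-f)/s is used to report values in their original units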
! ENDIF

!## make maximum length of 1 per output, so in between 0 and 1
DO I=1,NUM_OUTPUTS
 DMIN=10.0E10; DMAX=-10.0E10
 DO IS=1,NUM_SAMPLES;    DMIN=MIN(DMIN, OUTPUTS(IS,I));  DMAX=MAX(DMAX, OUTPUTS(IS,I));  ENDDO
 DO IS=1,NUM_VALIDATION; DMIN=MIN(DMIN,V_OUTPUTS(IS,I)); DMAX=MAX(DMAX,V_OUTPUTS(IS,I)); ENDDO
 SOUTPUT(I)=1.0D0; IF(DMAX-DMIN.NE.0.0)SOUTPUT(I)= 1.0D0/(DMAX-DMIN)
 FOUTPUT(I)=-1.0D0*DMIN*SOUTPUT(I)
 DO IS=1,NUM_SAMPLES;    OUTPUTS(IS,I)=  OUTPUTS(IS,I)*SOUTPUT(I)+FOUTPUT(I);  ENDDO
 DO IS=1,NUM_VALIDATION; V_OUTPUTS(IS,I)=V_OUTPUTS(IS,I)*SOUTPUT(I)+FOUTPUT(I); ENDDO
 WRITE(IU,'(A10,2F10.3)') 'OUTPUT'//TRIM(VTOS(I)),SOUTPUT(I),FOUTPUT(I)
ENDDO

!## decorrelate input (PCA)

CALL MOD_NN_WRITE_INPUTS()

END SUBROUTINE MOD_NN_PREPDATA

!##================================================================
SUBROUTINE MOD_NN_DEALLOCATE()
!##================================================================
IMPLICIT NONE
INTEGER :: I

IF(ALLOCATED(FINPUT))     DEALLOCATE(FINPUT)
IF(ALLOCATED(SINPUT))     DEALLOCATE(SINPUT)
IF(ALLOCATED(FOUTPUT))    DEALLOCATE(FOUTPUT)
IF(ALLOCATED(SOUTPUT))    DEALLOCATE(SOUTPUT)
IF(ASSOCIATED(SEQ))       DEALLOCATE(SEQ)
IF(ALLOCATED(INPUTS))     DEALLOCATE(INPUTS)
IF(ALLOCATED(V_INPUTS))   DEALLOCATE(V_INPUTS)
IF(ALLOCATED(OUTPUTS))    DEALLOCATE(OUTPUTS)
IF(ALLOCATED(V_OUTPUTS))  DEALLOCATE(V_OUTPUTS)
IF(ALLOCATED(ERROR))      DEALLOCATE(ERROR)
IF(ALLOCATED(PERFORMANCE))DEALLOCATE(PERFORMANCE)
IF(ALLOCATED(INPUTS_2D))  DEALLOCATE(INPUTS_2D)
IF(ALLOCATED(L_INPUTS))   DEALLOCATE(L_INPUTS)
IF(ALLOCATED(L_OUTPUTS))  DEALLOCATE(L_OUTPUTS)
DO I=1,SIZE(LAYER)
 IF(ASSOCIATED(LAYER(I)%A)) DEALLOCATE(LAYER(I)%A)
 IF(ASSOCIATED(LAYER(I)%B)) DEALLOCATE(LAYER(I)%B)
 IF(ASSOCIATED(LAYER(I)%Z)) DEALLOCATE(LAYER(I)%Z)
 IF(ASSOCIATED(LAYER(I)%W)) DEALLOCATE(LAYER(I)%W)
 IF(ASSOCIATED(LAYER(I)%DW))DEALLOCATE(LAYER(I)%DW)
 IF(ASSOCIATED(LAYER(I)%TW))DEALLOCATE(LAYER(I)%TW)
 IF(ASSOCIATED(LAYER(I)%PW))DEALLOCATE(LAYER(I)%PW)
 IF(ASSOCIATED(LAYER(I)%SW))DEALLOCATE(LAYER(I)%SW)
 IF(ASSOCIATED(LAYER(I)%DB))DEALLOCATE(LAYER(I)%DB)
 IF(ASSOCIATED(LAYER(I)%TB))DEALLOCATE(LAYER(I)%TB)
 IF(ASSOCIATED(LAYER(I)%PB))DEALLOCATE(LAYER(I)%PB)
 IF(ASSOCIATED(LAYER(I)%SB))DEALLOCATE(LAYER(I)%SB)
ENDDO
DEALLOCATE(LAYER)

END SUBROUTINE MOD_NN_DEALLOCATE

!##================================================================
SUBROUTINE MOD_NN_INITIALISE()
!##================================================================
IMPLICIT NONE
INTEGER :: I,J,K,NN,NEURONS,LDIM1,LDIM2,IOS,IY,IX,N,SEED
CHARACTER(LEN=256) :: LINE
REAL(KIND=DP_KIND) :: VARIANCE,STDEV
INTEGER,ALLOCATABLE,DIMENSION(:) :: SEED_VALUES

IU=UTL_GETUNIT(); OPEN(IU,FILE=FNAMEIN,STATUS='OLD',ACTION='READ')
!## read output name
READ(IU,*) FNAMEOUT
!## debug flag
READ(IU,*) IDEBUG
!## learning rate
READ(IU,*) LEARNING_RATE
!## momentum
READ(IU,*) MOMENTUM
!## config convolution layers
READ(IU,*) NUM_CNNLAYERS
! IF(NUM_CNNLAYERS.GT.0)THEN
!  ALLOCATE(CNNLAYER(NUM_CNNLAYERS))
!  READ(IU,*) (CNNLAYER(I)%KERNEL_SIZE,I=1,NUM_CNNLAYERS)
!  READ(IU,*) (CNNLAYER(I)%FILTERS,    I=1,NUM_CNNLAYERS)
!  READ(IU,*) (CNNLAYER(I)%ACTIVATION, I=1,NUM_CNNLAYERS)
! ENDIF
!## number of hidden-layers
READ(IU,*) NUM_LAYERS
!## the input and the output are treated as layers internally as well
NUM_LAYERS=NUM_LAYERS+2
!## allocate layers; layer 1 is the input layer, layer num_layers the output layer
ALLOCATE(LAYER(NUM_LAYERS))
!## number of neurons per hidden layer
READ(IU,*) (LAYER(I)%NUM_NEURONS,I=2,NUM_LAYERS-1)
!## activation function per layer
READ(IU,*) (LAYER(I)%ACTIVATION,I=2,NUM_LAYERS)
!## number of training iterations
READ(IU,*) NUM_TRAINING
!## number of batches
READ(IU,*) NUM_BATCHES
READ(IU,'(A256)') LINE
!## number of inputs to generate a single output
READ(LINE,*,IOSTAT=IOS) NUM_INPUTS
!## continue reading from external csv-file
IF(IOS.NE.0)THEN
 CLOSE(IU); OPEN(IU,FILE=LINE,STATUS='OLD',ACTION='READ')
! IF(NUM_CNNLAYERS.EQ.0)THEN
  READ(IU,*) NUM_INPUTS,NUM_OUTPUTS
  READ(IU,*) NUM_SAMPLES,NUM_VALIDATION
  ALLOCATE(L_INPUTS(NUM_INPUTS),L_OUTPUTS(NUM_OUTPUTS))
! ELSE
!  READ(IU,*) NX_INPUTS,NY_INPUTS,NUM_CHANNELS,NUM_OUTPUTS
!  READ(IU,*) NUM_SAMPLES,NUM_VALIDATION
!  !## input of the tightly-connected neural network equal to the last step in the cnn-layers
!  NUM_INPUTS=NX_INPUTS*NY_INPUTS*NUM_CHANNELS
!  ALLOCATE(L_INPUTS(NUM_SAMPLES),L_OUTPUTS(NUM_OUTPUTS))
!  ALLOCATE(INPUTS_2D(NUM_SAMPLES,NUM_CHANNELS,NY_INPUTS,NX_INPUTS)) !## ROW.COL
! ENDIF
 ALLOCATE(INPUTS(NUM_SAMPLES,NUM_INPUTS),FINPUT(NUM_INPUTS),SINPUT(NUM_INPUTS)) !## ROW.COL
 ALLOCATE(OUTPUTS(NUM_SAMPLES,NUM_OUTPUTS),FOUTPUT(NUM_OUTPUTS),SOUTPUT(NUM_OUTPUTS))
 ALLOCATE(V_INPUTS (NUM_VALIDATION,NUM_INPUTS))
 ALLOCATE(V_OUTPUTS (NUM_VALIDATION,NUM_OUTPUTS))
! IF(NUM_CNNLAYERS.EQ.0)THEN
  READ(IU,*) (L_INPUTS(I),I=1,NUM_INPUTS),(L_OUTPUTS(I),I=1,NUM_OUTPUTS)
  !## read inputs
  DO J=1,NUM_SAMPLES
   READ(IU,*) (INPUTS(J,I),I=1,NUM_INPUTS),(OUTPUTS(J,I),I=1,NUM_OUTPUTS)
  ENDDO
  DO J=1,NUM_VALIDATION
   READ(IU,*) (V_INPUTS(J,I),I=1,NUM_INPUTS),(V_OUTPUTS(J,I),I=1,NUM_OUTPUTS)
  ENDDO
! ELSE
!  DO J=1,NUM_SAMPLES
!   READ(IU,*) L_INPUTS(J)
!   DO I=1,NUM_CHANNELS
!    DO IY=1,NY_INPUTS
!     READ(IU,*) (INPUTS_2D(J,I,IY,IX),IX=1,NX_INPUTS)
!    ENDDO
!   ENDDO
!   READ(IU,*) (OUTPUTS(J,I),I=1,NUM_OUTPUTS)
!  ENDDO
! ENDIF
!## continue reading from this file
ELSE
 !## number of samples (training data)
 READ(IU,*) NUM_SAMPLES
 ALLOCATE(INPUTS(NUM_SAMPLES,NUM_INPUTS),FINPUT(NUM_INPUTS),SINPUT(NUM_INPUTS)) !## ROW.COL
 !## read inputs
 DO J=1,NUM_SAMPLES; READ(IU,*) (INPUTS(J,I),I=1,NUM_INPUTS); ENDDO
 !## number of outputs generated by input
 READ(IU,*) NUM_OUTPUTS
 ALLOCATE(OUTPUTS(NUM_SAMPLES,NUM_OUTPUTS),FOUTPUT(NUM_OUTPUTS),SOUTPUT(NUM_OUTPUTS))
 !## read outputs
 DO J=1,NUM_SAMPLES; READ(IU,*) (OUTPUTS(J,I),I=1,NUM_OUTPUTS); ENDDO
 !## number of validation samples
 READ(IU,*) NUM_VALIDATION
 !## allocate memory
 ALLOCATE(V_INPUTS (NUM_VALIDATION,NUM_INPUTS))
 ALLOCATE(V_OUTPUTS (NUM_VALIDATION,NUM_OUTPUTS))
 !## read inputs validation
 DO J=1,NUM_VALIDATION; READ(IU,*) (V_INPUTS(J,I),I=1,NUM_INPUTS); ENDDO
 !## read outputs validation
 DO J=1,NUM_VALIDATION; READ(IU,*) (V_OUTPUTS(J,I),I=1,NUM_OUTPUTS); ENDDO
 ALLOCATE(L_INPUTS(NUM_INPUTS),L_OUTPUTS(NUM_OUTPUTS))
 DO I=1,NUM_INPUTS;  L_INPUTS(I)='IN'//TRIM(VTOS(I));   ENDDO
 DO I=1,NUM_OUTPUTS; L_OUTPUTS(I)='OUT'//TRIM(VTOS(I)); ENDDO
ENDIF
L_INPUTS=ADJUSTR(L_INPUTS); L_OUTPUTS=ADJUSTR(L_OUTPUTS)

ALLOCATE(ERROR(NUM_OUTPUTS),PERFORMANCE(NUM_TRAINING))
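!## note: a fixed seed (12345) is pushed into the random-number generator below, so the random
!## weight initialisation - and therefore a training run - is reproducible between executions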
! !## allocate cnn-layers
! CALL MOD_NN_ALLOCATE_CNN()

CALL RANDOM_SEED(SIZE=N)
ALLOCATE(SEED_VALUES(N)); SEED=12345
SEED_VALUES=0; SEED_VALUES(1)=SEED
CALL RANDOM_SEED(PUT=SEED_VALUES)

LAYER(1)%NUM_NEURONS         =NUM_INPUTS
LAYER(NUM_LAYERS)%NUM_NEURONS=NUM_OUTPUTS

!## initialise input weights
DO K=1,NUM_LAYERS

 IF(K.EQ.NUM_LAYERS)THEN
  !## output layer, no outgoing weights
  LAYER(K)%NUM_WEIGHTS=0
 ELSE
  !## number of weights equal to the number of neurons in the next layer
  LAYER(K)%NUM_WEIGHTS=LAYER(K+1)%NUM_NEURONS
 ENDIF

 LDIM1=LAYER(K)%NUM_NEURONS

 !## allocate a/b
 ALLOCATE(LAYER(K)%A(LDIM1)); LAYER(K)%A=0.0
 !## allocate b,z and weights w
 IF(K.GT.1.OR.NUM_CNNLAYERS.GT.0)THEN
  ALLOCATE(LAYER(K)%B(LDIM1),LAYER(K)%Z(LDIM1)); LAYER(K)%B=0.0; LAYER(K)%Z=0.0
 ENDIF
 IF(K.LT.NUM_LAYERS)THEN
  LDIM2=LAYER(K)%NUM_WEIGHTS
  !## weights
  ALLOCATE(LAYER(K)%W(LDIM1,LDIM2)) !## ROW=NEURONS/COL=WEIGHTS
  !## initialise weight values
  CALL RANDOM_NUMBER(LAYER(K)%W)

  !This is a theoretical justification for Xavier initialization. Xavier initialization works with tanh activations.
  !Myriad other initialization methods exist. If you are using ReLU, for example, a common initialization is He initialization
  !(He et al., Delving Deep into Rectifiers), in which the weights are initialized by multiplying by 2 the variance of the Xavier initialization.
  !While the justification for this initialization is slightly more complicated, it follows the same thought process as the one for tanh.

  !## initialise
  IF(K.EQ.1)THEN
   VARIANCE=1.0D0/REAL(NUM_INPUTS)
  ELSE
   VARIANCE=1.0D0/LAYER(K)%NUM_NEURONS
  ENDIF
  STDEV=SQRT(VARIANCE)

  !## does not work
!  SELECT CASE (TRIM(LAYER(K)%ACTIVATION))
!   CASE ('RELU','LRELU'); STDEV=STDEV*2.0
!  END SELECT

  !## Xavier initialisation
  DO I=1,LDIM1; DO J=1,LDIM2
   CALL IPEST_NORMAL_MS_SAMPLE(0.0D0,STDEV,SEED_VALUES(1),LAYER(K)%W(I,J))
  ENDDO; ENDDO
!  LAYER(K)%W=2.0*LAYER(K)%W-1.0
 ENDIF

 !## backprop for b
 IF(K.GT.1.OR.NUM_CNNLAYERS.GT.0)ALLOCATE(LAYER(K)%DB(LDIM1),LAYER(K)%TB(LDIM1),LAYER(K)%PB(LDIM1),LAYER(K)%SB(LDIM1))
 !## backprop for w
 IF(K.LT.NUM_LAYERS)ALLOCATE(LAYER(K)%DW(LDIM1,LDIM2),LAYER(K)%TW(LDIM1,LDIM2),LAYER(K)%PW(LDIM1,LDIM2),LAYER(K)%SW(LDIM1,LDIM2))

ENDDO

! IF(NUM_CNNLAYERS.EQ.0)THEN
!  LAYER(1)%W=0.200
!  LAYER(2)%W=0.921
!  LAYER(3)%W=0.298
! ELSE
!  LAYER(1)%W=0.921
!  LAYER(2)%W=0.298
! ENDIF

CLOSE(IU); OPEN(IU,FILE=FNAMEOUT,STATUS='UNKNOWN',ACTION='WRITE')

!## debug setting
WRITE(IU,'(A20,I10)') 'IDEBUG:',IDEBUG
!## learning rate
WRITE(IU,'(A20,F10.2)') 'LEARNING_RATE:',LEARNING_RATE
!## momentum
WRITE(IU,'(A20,F10.2)') 'MOMENTUM:',MOMENTUM
!## number of convolution layers
WRITE(IU,'(A20,I10)') 'NUM_CNN_LAYERS:',NUM_CNNLAYERS
! IF(NUM_CNNLAYERS.GT.0)THEN
!  WRITE(IU,'(A20,99I10)') 'KERNELSIZES(LAYER)',(CNNLAYER(I)%KERNEL_SIZE,I=1,NUM_CNNLAYERS)
!  WRITE(IU,'(A20,99I10)') 'FILTERS(LAYER)',    (CNNLAYER(I)%FILTERS,I=1,NUM_CNNLAYERS)
!  WRITE(IU,'(A20,99A10)') 'ACTIVATION(LAYER)', (ADJUSTR(CNNLAYER(I)%ACTIVATION),I=1,NUM_CNNLAYERS)
! ENDIF
!## number of hidden layers
WRITE(IU,'(A20,I10)') 'NUM_HIDDEN_LAYERS:',NUM_LAYERS-2
!## number of neurons per hidden layer
IF(NUM_LAYERS-2.EQ.0)THEN
 WRITE(IU,'(A20,A10)') 'NUM_NEURONS(LAYER):','-'
ELSE
 WRITE(IU,'(A20,99I10)') 'NUM_NEURONS(LAYER):',(LAYER(I)%NUM_NEURONS,I=2,NUM_LAYERS-1)
ENDIF
!## activation
WRITE(IU,'(A20,99A10)') 'ACTIVATION:',(ADJUSTR(LAYER(I)%ACTIVATION),I=2,NUM_LAYERS)
!## number of trainings
WRITE(IU,'(A20,I10)') 'NUM_TRAINING:' ,NUM_TRAINING
!## number of batches
WRITE(IU,'(A20,I10)') 'NUM_BATCHES:' ,NUM_BATCHES
!## number of inputs to generate a single output
WRITE(IU,'(A20,I10)') 'NUM_INPUT:' ,NUM_INPUTS
!## number of samples (training data)
WRITE(IU,'(A20,I10)') 'NUM_SAMPLES:' ,NUM_SAMPLES
!## number of outputs generated by input
WRITE(IU,'(A20,I10)') 'NUM_OUTPUTS:' ,NUM_OUTPUTS
!## number of validation
WRITE(IU,'(A20,I10)') 'NUM_VALIDATION:',NUM_VALIDATION

CALL MOD_NN_WRITE_INPUTS()
CALL MOD_NN_WRITE_WEIGHTS_H_B('INITIAL')

DEALLOCATE(SEED_VALUES)

END SUBROUTINE MOD_NN_INITIALISE

!!##================================================================
!SUBROUTINE MOD_NN_ALLOCATE_CNN()
!!##================================================================
!IMPLICIT NONE
!INTEGER :: NX,NY,NZ,I
!
!IF(NUM_CNNLAYERS.LE.0)RETURN
!
!DO I=1,NUM_CNNLAYERS
! IF(I.EQ.1)THEN
!  NX=NX_INPUTS
!  NY=NY_INPUTS
! ELSE
!  NX=CNNLAYER(I-1)%WIDTH
!  NY=CNNLAYER(I-1)%HEIGHT
! ENDIF
! NZ=NUM_CHANNELS
! !## one channel only
! CNNLAYER(I)%CHANNELS=NZ
! CNNLAYER(I)%WIDTH =NX-CNNLAYER(I)%KERNEL_SIZE+1
! CNNLAYER(I)%HEIGHT=NY-CNNLAYER(I)%KERNEL_SIZE+1
! ALLOCATE(CNNLAYER(I)%OUTPUT(CNNLAYER(I)%FILTERS,CNNLAYER(I)%WIDTH,CNNLAYER(I)%HEIGHT)); CNNLAYER(I)%OUTPUT=0.0
! ALLOCATE(CNNLAYER(I)%KERNEL(CNNLAYER(I)%FILTERS,CNNLAYER(I)%CHANNELS,CNNLAYER(I)%KERNEL_SIZE,CNNLAYER(I)%KERNEL_SIZE))
! CALL RANDOM_NUMBER(CNNLAYER(I)%KERNEL); CNNLAYER(I)%KERNEL=CNNLAYER(I)%KERNEL/CNNLAYER(I)%KERNEL_SIZE**2
! ALLOCATE(CNNLAYER(I)%BIASES(CNNLAYER(I)%FILTERS)); CNNLAYER(I)%BIASES=0.0
! ALLOCATE(CNNLAYER(I)%Z,MOLD=CNNLAYER(I)%OUTPUT); CNNLAYER(I)%Z=0.0
! ALLOCATE(CNNLAYER(I)%GRADIENT(NZ,NX,NY)); CNNLAYER(I)%GRADIENT=0.0
! ALLOCATE(CNNLAYER(I)%DW,MOLD=CNNLAYER(I)%KERNEL); CNNLAYER(I)%DW=0.0
! ALLOCATE(CNNLAYER(I)%DB,MOLD=CNNLAYER(I)%BIASES); CNNLAYER(I)%DB=0.0
! ALLOCATE(CNNLAYER(I)%TKERNEL,MOLD=CNNLAYER(I)%KERNEL); CNNLAYER(I)%TKERNEL=0.0
! ALLOCATE(CNNLAYER(I)%TBIASES,MOLD=CNNLAYER(I)%BIASES); CNNLAYER(I)%TBIASES=0.0
!ENDDO
!
!CNNLAYER(1)%KERNEL=0.2
!
!DEALLOCATE(INPUTS); NUM_INPUTS=NUM_CHANNELS*CNNLAYER(NUM_CNNLAYERS)%WIDTH*CNNLAYER(NUM_CNNLAYERS)%HEIGHT
!ALLOCATE(INPUTS(NUM_SAMPLES,NUM_INPUTS))
!
!END SUBROUTINE MOD_NN_ALLOCATE_CNN

!##================================================================
SUBROUTINE MOD_NN_WRITE_INPUTS()
!##================================================================
IMPLICIT NONE
INTEGER :: I,J,IY,IX

WRITE(IU,'(/A)') 'TRAINING'

! IF(NUM_CNNLAYERS.EQ.0)THEN
 !## write inputs
 WRITE(IU,'(/A20,99A10)') 'INPUT',(L_INPUTS(I),I=1,NUM_INPUTS)
 DO J=1,NUM_SAMPLES; WRITE(IU,'(I20,99F10.3)') J,(INPUTS(J,I),I=1,NUM_INPUTS); ENDDO
! ELSE
!  DO J=1,NUM_SAMPLES
!   WRITE(IU,*) L_INPUTS(J)
!   DO I=1,NUM_CHANNELS
!    DO IY=1,NY_INPUTS
!     WRITE(IU,'(99F10.2)') (INPUTS_2D(J,I,IY,IX),IX=1,NX_INPUTS)
!    ENDDO
!   ENDDO
!  ENDDO
! ENDIF

!## write outputs
WRITE(IU,'(/A20,99A10)') 'OUTPUT',(L_OUTPUTS(I),I=1,NUM_OUTPUTS)
DO J=1,NUM_SAMPLES; WRITE(IU,'(I20,99F10.3)') J,(OUTPUTS(J,I),I=1,NUM_OUTPUTS); ENDDO

IF(NUM_VALIDATION.GT.0)THEN
 WRITE(IU,'(/A)') 'VALIDATION'
 !## write validation input
! IF(NUM_CNNLAYERS.EQ.0)THEN
  WRITE(IU,'(/A20,99A10)') 'INPUT',(L_INPUTS(I),I=1,NUM_INPUTS)
  DO J=1,NUM_VALIDATION; WRITE(IU,'(I20,99F10.3)') J,(V_INPUTS(J,I),I=1,NUM_INPUTS); ENDDO
  !## write outputs
  WRITE(IU,'(/A20,99A10)') 'OUTPUT',(L_OUTPUTS(I),I=1,NUM_OUTPUTS)
  DO J=1,NUM_VALIDATION; WRITE(IU,'(I20,99F10.3)') J,(V_OUTPUTS(J,I),I=1,NUM_OUTPUTS); ENDDO
! ELSE
! ENDIF
ENDIF

END SUBROUTINE MOD_NN_WRITE_INPUTS

!##================================================================
SUBROUTINE MOD_NN_WRITE_WEIGHTS_H_B(TXT)
!##================================================================
IMPLICIT NONE
CHARACTER(LEN=*),INTENT(IN) :: TXT
INTEGER :: I,II,J,JJ,K,N,LDIM1,LDIM2,LDIM3

!## write initial inputs weights
IF(TRIM(TXT).NE.'')THEN
 WRITE(IU,'(/50A1)') ('=',I=1,50)
 WRITE(IU,'( A )') TRIM(TXT)
 WRITE(IU,'( 50A1)') ('=',I=1,50)
ENDIF

! DO K=1,NUM_CNNLAYERS
!  LDIM1=CNNLAYER(K)%CHANNELS
!  LDIM2=CNNLAYER(K)%FILTERS
!  LDIM3=CNNLAYER(K)%KERNEL_SIZE
!  WRITE(IU,'(/A20,I10)') 'CONVOLUTION_LAYER:',K
!  WRITE(IU,'(A20,I10)') 'NUMBER OF CHANNELS:',LDIM1
!  WRITE(IU,'(A20,I10)') 'NUMBER OF FILTERS:' ,LDIM2
!  WRITE(IU,'(A20,I10)') 'KERNEL SIZE:' ,LDIM3
!  DO II=1,LDIM1; DO JJ=1,LDIM2
!   WRITE(IU,'(/2(A,I5))') 'CHANNEL:',II,' FILTER:',JJ
!   DO I=1,LDIM3
!    WRITE(IU,'(99F10.3)') (CNNLAYER(K)%KERNEL(II,JJ,I,J),J=1,LDIM3)
!   ENDDO
!  ENDDO; ENDDO
! ENDDO

DO K=1,NUM_LAYERS

 LDIM1=LAYER(K)%NUM_NEURONS
 LDIM2=LAYER(K)%NUM_WEIGHTS

 IF(K.EQ.1)THEN
  WRITE(IU,'(/A20,A10)') 'INPUT_LAYER:','[]'
 ELSEIF(K.EQ.NUM_LAYERS)THEN
  WRITE(IU,'(/A20,A10)') 'OUTPUT_LAYER:','[]'
 ELSE
  WRITE(IU,'(/A20,I10)') 'HIDDEN_LAYER:',K-1
 ENDIF
 WRITE(IU,'(A20,I10)') 'NUMBER OF NEURONS:',LDIM1
 WRITE(IU,'(A20,I10)') 'NUMBER OF WEIGHTS:',LDIM2
 WRITE(IU,'(A20,99A10)') 'VARIABLE:',('NEURON_'//TRIM(VTOS(I)),I=1,LDIM1)
 WRITE(IU,'(A20,99F10.3)') 'A',(LAYER(K)%A(I),I=1,LDIM1)
 IF(K.GT.1.OR.NUM_CNNLAYERS.GT.0)THEN
  WRITE(IU,'(A20,99F10.3)') 'B',(LAYER(K)%B(I),I=1,LDIM1)
  WRITE(IU,'(A20,99F10.3)') 'Z',(LAYER(K)%Z(I),I=1,LDIM1)
 ENDIF
 IF(K.LT.NUM_LAYERS)THEN
  !## number of weights
  DO J=1,LDIM2
   WRITE(IU,'(A20,99F10.3)') 'W'//TRIM(VTOS(J)),(LAYER(K)%W(I,J),I=1,LDIM1)
  ENDDO
 ENDIF

ENDDO

END SUBROUTINE MOD_NN_WRITE_WEIGHTS_H_B

! !###======================================================================
! CHARACTER(LEN=10) FUNCTION VTOS(I)
! !###======================================================================
! IMPLICIT NONE
! INTEGER,INTENT(IN) :: I
! CHARACTER(LEN=10) :: TXT
! WRITE(TXT,'(I10)') I
! VTOS=ADJUSTL(TXT)
! END FUNCTION VTOS

!###======================================================================
SUBROUTINE MOD_NN_RANDOM_SEQUENCE()
!###======================================================================
IMPLICIT NONE
INTEGER :: I,N,TEMP
REAL(KIND=DP_KIND) :: X

!## allocate memory for the sequence
N=NUM_SAMPLES; ALLOCATE(SEQ(N))

!## generate consecutive integers
DO I=1,N; SEQ(I)=I; END DO

!## shuffle the sequence using Fisher-Yates algorithm
DO I=N,2,-1
 CALL RANDOM_NUMBER(X)
 TEMP=INT(X*I)+1
 IF(I.NE.TEMP)THEN
  SEQ(I)   =SEQ(I)+SEQ(TEMP)
  SEQ(TEMP)=SEQ(I)-SEQ(TEMP)
  SEQ(I)   =SEQ(I)-SEQ(TEMP)
 END IF
END DO

END SUBROUTINE MOD_NN_RANDOM_SEQUENCE

! !###======================================================================
! LOGICAL FUNCTION MOD_NN_GENERATE_EX1(OUTPUTCSV,LABELS)
! !###======================================================================
! IMPLICIT NONE
! CHARACTER(LEN=*),INTENT(IN) :: OUTPUTCSV
! CHARACTER(LEN=*),INTENT(IN),DIMENSION(:) :: LABELS
! INTEGER :: N,I,J,M
! REAL(KIND=DP_KIND) :: X,Y
!
! MOD_NN_GENERATE_EX1=.FALSE.
!
! N=SIZE(LABELS); M=5 !21
!
! OPEN(IU,FILE=OUTPUTCSV,STATUS='REPLACE',ACTION='WRITE')
! WRITE(IU,'(2I10,A)') N,1,' !## INPUT,OUTPUT'
! WRITE(IU,'(2I10,A)') M**2,0,' !## SAMPLE,VALIDATION'
! WRITE(IU,'(99(A,1X))') (TRIM(LABELS(I))//',',I=1,N),'OBJF'
!
! Y=-10.0
! DO I=1,M
!  CALL RANDOM_NUMBER(Y); Y=(Y*20.0)-10.0
!!  Y=Y+1.0; X=-10.0
!  DO J=1,M
!   CALL RANDOM_NUMBER(X); X=(X*20.0)-10.0
!!   X=X+1.0
!   WRITE(IU,'(99(F15.7,1X))') X,Y,X**2.0+Y**2.0
!  ENDDO
! ENDDO
!
! CLOSE(IU)
!
! OPEN(IU,FILE=OUTPUTCSV(:INDEX(OUTPUTCSV,'.',.TRUE.)-1)//'.INI',STATUS='REPLACE',ACTION='WRITE')
!
! WRITE(IU,'(A)') TRIM(OUTPUTCSV(INDEX(OUTPUTCSV,'\',.TRUE.)+1:INDEX(OUTPUTCSV,'.',.TRUE.)-1))//'.OUT'
! WRITE(IU,'(A)') '1 !## IDEBUG'
! WRITE(IU,'(A)') '0.90 !## LEARNING RATE'
! WRITE(IU,'(A)') '0.0 !## MOMENTUM'
! WRITE(IU,'(A)') '0 !## CONVOLUTION LAYERS'
! WRITE(IU,'(A)') '2 !## HIDDENLAYERS'
! WRITE(IU,'(A)') '32 32 !## NEURONS'
! WRITE(IU,'(A)') 'RELU RELU SIGMOID !## ACTIVATION FUNCTION'
! WRITE(IU,'(A)') '10000 !## NUMBER OF TRAINING'
! WRITE(IU,'(A)') '1 !## NUMBER OF BATCHES'
! WRITE(IU,'(A)') TRIM(OUTPUTCSV(INDEX(OUTPUTCSV,'\',.TRUE.)+1:))
! CLOSE(IU)
!
! MOD_NN_GENERATE_EX1=.TRUE.
!
! END FUNCTION MOD_NN_GENERATE_EX1

END MODULE MOD_AI
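!##================================================================
!## minimal usage sketch (kept commented out); it assumes an initialisation file, here called
!## 'NN_EXAMPLE.INI', laid out exactly as read by MOD_NN_INITIALISE
!##================================================================
!PROGRAM NN_EXAMPLE
!USE MOD_AI, ONLY : MOD_NN_MAIN,MOD_NN_DEALLOCATE
!IMPLICIT NONE
!CALL MOD_NN_MAIN('NN_EXAMPLE.INI')
!CALL MOD_NN_DEALLOCATE()
!END PROGRAM NN_EXAMPLE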