diff --git a/cicecore/cicedynB/analysis/ice_diagnostics.F90 b/cicecore/cicedynB/analysis/ice_diagnostics.F90 index ec5ad05fa..83eb840d6 100644 --- a/cicecore/cicedynB/analysis/ice_diagnostics.F90 +++ b/cicecore/cicedynB/analysis/ice_diagnostics.F90 @@ -11,6 +11,7 @@ module ice_diagnostics use ice_kinds_mod + use ice_blocks, only: nx_block, ny_block use ice_communicate, only: my_task, master_task use ice_constants, only: c0, c1 use ice_calendar, only: istep1 @@ -52,7 +53,7 @@ module ice_diagnostics real (kind=dbl_kind), parameter :: & umax_stab = 1.0_dbl_kind , & ! ice speed threshold for instability (m/s) aice_extmin = 0.15_dbl_kind ! min aice value for ice extent calc - + real (kind=dbl_kind), dimension(npnt), public :: & latpnt , & ! latitude of diagnostic points lonpnt ! longitude of diagnostic points @@ -112,7 +113,6 @@ module ice_diagnostics subroutine runtime_diags (dt) use ice_arrays_column, only: floe_rad_c - use ice_blocks, only: nx_block, ny_block use ice_broadcast, only: broadcast_scalar use ice_constants, only: c1, c1000, c2, p001, p5, & field_loc_center, m2_to_km2 @@ -163,7 +163,7 @@ subroutine runtime_diags (dt) etotn, mtotn, micen, msnwn, pmaxn, ketotn, & etots, mtots, mices, msnws, pmaxs, ketots, & urmsn, albtotn, arean_alb, mpndn, ptotn, spondn, & - urmss, albtots, areas_alb, mpnds, ptots, sponds + urmss, albtots, areas_alb, mpnds, ptots, sponds ! hemispheric flux quantities real (kind=dbl_kind) :: & @@ -191,7 +191,7 @@ subroutine runtime_diags (dt) ! fields at diagnostic points real (kind=dbl_kind), dimension(npnt) :: & - paice, pTair, pQa, pfsnow, pfrain, pfsw, pflw, & + paice, pTair, pQa, pfsnow, pfrain, pfsw, pflw, & pTsfc, pevap, pfswabs, pflwout, pflat, pfsens, & pfsurf, pfcondtop, psst, psss, pTf, hiavg, hsavg, hbravg, & pfhocn, psalt, fsdavg, & @@ -202,7 +202,7 @@ subroutine runtime_diags (dt) work1, work2 real (kind=dbl_kind), parameter :: & - maxval_spval = -0.9_dbl_kind*HUGE(0.0_dbl_kind) ! spval to detect + maxval_spval = -0.9_dbl_kind*HUGE(0.0_dbl_kind) ! spval to detect ! undefined values returned from global_maxval. if global_maxval ! is applied to a region that does not exist (for instance ! southern hemisphere in box cases), global_maxval @@ -290,7 +290,7 @@ subroutine runtime_diags (dt) do n = 1, ncat work1(i,j,iblk) = work1(i,j,iblk) & + aicen(i,j,n,iblk) & - * trcrn(i,j,nt_apnd,n,iblk) & + * trcrn(i,j,nt_apnd,n,iblk) & * trcrn(i,j,nt_hpnd,n,iblk) enddo enddo @@ -369,8 +369,8 @@ subroutine runtime_diags (dt) enddo enddo !$OMP END PARALLEL DO - - arean_alb = global_sum(aice, distrb_info, field_loc_center, work2) + + arean_alb = global_sum(aice, distrb_info, field_loc_center, work2) albtotn = global_sum_prod(aice, work1, distrb_info, & field_loc_center, work2) @@ -395,7 +395,7 @@ subroutine runtime_diags (dt) enddo !$OMP END PARALLEL DO - areas_alb = global_sum(aice, distrb_info, field_loc_center, work2) + areas_alb = global_sum(aice, distrb_info, field_loc_center, work2) albtots = global_sum_prod(aice, work1, distrb_info, & field_loc_center, work2) @@ -506,7 +506,7 @@ subroutine runtime_diags (dt) if (pmaxs < maxval_spval) pmaxs = c0 pmaxn = pmaxn / c1000 ! convert to kN/m - pmaxs = pmaxs / c1000 + pmaxs = pmaxs / c1000 if (print_global) then @@ -617,14 +617,14 @@ subroutine runtime_diags (dt) enddo !$OMP END PARALLEL DO - else ! fsurf is computed by atmosphere model + else ! 
fsurf is computed by atmosphere model !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block work1(i,j,iblk) = & - (fsurf(i,j,iblk) - flat(i,j,iblk)) & + (fsurf(i,j,iblk) - flat(i,j,iblk)) & * aice(i,j,iblk) enddo enddo @@ -639,7 +639,7 @@ subroutine runtime_diags (dt) field_loc_center, tarean) fhatms = global_sum(work1, distrb_info, & field_loc_center, tareas) - + !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block @@ -741,7 +741,7 @@ subroutine runtime_diags (dt) ! total ice, snow and pond mass mtotn = micen + msnwn + mpndn mtots = mices + msnws + mpnds - + ! mass change since beginning of time step delmin = mtotn - totmn delmis = mtots - totms @@ -760,14 +760,14 @@ subroutine runtime_diags (dt) fluxs = c0 if( arean > c0) then ! water associated with frazil ice included in fresh - fluxn = rnn + snn + evpn - sfreshn + fluxn = rnn + snn + evpn - sfreshn if (.not. update_ocn_f) then fluxn = fluxn + frzn endif endif if( areas > c0) then ! water associated with frazil ice included in fresh - fluxs = rns + sns + evps - sfreshs + fluxs = rns + sns + evps - sfreshs if (.not. update_ocn_f) then fluxs = fluxs + frzs endif @@ -933,7 +933,7 @@ subroutine runtime_diags (dt) pfsw(n) = fsw(i,j,iblk) ! shortwave radiation pflw(n) = flw(i,j,iblk) ! longwave radiation paice(n) = aice(i,j,iblk) ! ice area - + fsdavg(n) = c0 ! avg floe effective radius hiavg(n) = c0 ! avg snow/ice thickness hsavg(n) = c0 @@ -998,7 +998,7 @@ subroutine runtime_diags (dt) pcongel(n) = congel(i,j,iblk) ! congelation ice pdhi(n) = vice(i,j,iblk) - pdhi(n) ! ice thickness change pdhs(n) = vsno(i,j,iblk) - pdhs(n) ! snow thickness change - pde(n) =-(work1(i,j,iblk)- pde(n))/dt ! ice/snow energy change + pde(n) =-(work1(i,j,iblk)- pde(n))/dt ! ice/snow energy change psst(n) = sst(i,j,iblk) ! sea surface temperature psss(n) = sss(i,j,iblk) ! sea surface salinity pTf(n) = Tf(i,j,iblk) ! freezing temperature @@ -1045,7 +1045,7 @@ subroutine runtime_diags (dt) call broadcast_scalar(psss (n), pmloc(n)) call broadcast_scalar(pTf (n), pmloc(n)) call broadcast_scalar(pfhocn (n), pmloc(n)) - + enddo ! npnt endif ! print_points @@ -1093,7 +1093,7 @@ subroutine runtime_diags (dt) write(nu_diag,901) 'arwt snw mass (kg) = ',msnwn,msnws if (tr_pond_topo) & write(nu_diag,901) 'arwt pnd mass (kg) = ',mpndn,mpnds - + write(nu_diag,901) 'arwt tot mass (kg) = ',mtotn,mtots write(nu_diag,901) 'arwt tot mass chng(kg) = ',delmin,delmis write(nu_diag,901) 'arwt water flux = ',fluxn,fluxs @@ -1249,9 +1249,6 @@ subroutine runtime_diags (dt) endif ! print_points endif ! 
my_task = master_task - 799 format (27x,a24) - 800 format (a25,2x,f24.17) - 801 format (a25,2x,1pe24.17) 899 format (27x,a24,2x,a24) 900 format (a25,2x,f24.17,2x,f24.17) 901 format (a25,2x,1pe24.17,2x,1pe24.17) @@ -1268,7 +1265,6 @@ end subroutine runtime_diags subroutine init_mass_diags - use ice_blocks, only: nx_block, ny_block use ice_constants, only: field_loc_center use ice_domain, only: distrb_info, nblocks use ice_domain_size, only: n_iso, n_aero, ncat, max_blocks @@ -1387,7 +1383,7 @@ subroutine init_mass_diags do n = 1, ncat work1(i,j,iblk) = work1(i,j,iblk) & + aicen(i,j,n,iblk) & - * trcrn(i,j,nt_apnd,n,iblk) & + * trcrn(i,j,nt_apnd,n,iblk) & * trcrn(i,j,nt_hpnd,n,iblk) enddo enddo @@ -1412,7 +1408,6 @@ end subroutine init_mass_diags subroutine total_energy (work) - use ice_blocks, only: nx_block, ny_block use ice_domain, only: nblocks use ice_domain_size, only: ncat, nilyr, nslyr, max_blocks use ice_grid, only: tmask @@ -1499,7 +1494,6 @@ end subroutine total_energy subroutine total_salt (work) - use ice_blocks, only: nx_block, ny_block use ice_domain, only: nblocks use ice_domain_size, only: ncat, nilyr, max_blocks use ice_grid, only: tmask @@ -1623,7 +1617,7 @@ subroutine init_diags plat(:) = -999._dbl_kind plon(:) = -999._dbl_kind - ! find minimum distance to diagnostic points on this processor + ! find minimum distance to diagnostic points on this processor do n = 1, npnt if (lonpnt(n) > c180) lonpnt(n) = lonpnt(n) - c360 @@ -1638,7 +1632,7 @@ subroutine init_diags ! This is computing closest point, Could add a CRITICAL but it's just initialization !!$XXXOMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,latdis,londis,totdis) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -1665,7 +1659,7 @@ subroutine init_diags endif - ! find global minimum distance to diagnostic points + ! find global minimum distance to diagnostic points mindis_g = global_minval(mindis, distrb_info) ! save indices of minimum-distance grid cell @@ -1708,16 +1702,10 @@ end subroutine init_diags subroutine debug_ice(iblk, plabeld) - use ice_kinds_mod - use ice_calendar, only: istep1 - use ice_communicate, only: my_task - use ice_blocks, only: nx_block, ny_block - character (char_len), intent(in) :: plabeld integer (kind=int_kind), intent(in) :: iblk - ! local - integer (kind=int_kind) :: i, j, m + ! local character(len=*), parameter :: subname='(debug_ice)' if (istep1 >= debug_model_step) then @@ -1757,7 +1745,8 @@ subroutine print_state(plabel,i,j,iblk) use ice_blocks, only: block, get_block use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, nfsd - use ice_state, only: aice0, aicen, vicen, vsnon, uvel, vvel, & + use ice_grid, only: TLAT, TLON + use ice_state, only: aice, aice0, aicen, vicen, vsnon, uvel, vvel, & uvelE, vvelE, uvelN, vvelN, trcrn use ice_flux, only: uatm, vatm, potT, Tair, Qa, flw, frain, fsnow, & fsens, flat, evap, flwout, swvdr, swvdf, swidr, swidf, rhoa, & @@ -1765,7 +1754,7 @@ subroutine print_state(plabel,i,j,iblk) character (len=20), intent(in) :: plabel - integer (kind=int_kind), intent(in) :: & + integer (kind=int_kind), intent(in) :: & i, j , & ! horizontal indices iblk ! 
block index @@ -1799,15 +1788,20 @@ subroutine print_state(plabel,i,j,iblk) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) - write(nu_diag,*) subname,plabel - write(nu_diag,*) 'istep1, my_task, i, j, iblk:', & + write(nu_diag,*) subname,' ',trim(plabel) + write(nu_diag,*) subname,' istep1, my_task, i, j, iblk:', & istep1, my_task, i, j, iblk - write(nu_diag,*) 'Global i and j:', & + write(nu_diag,*) subname,' Global block:', this_block%block_id + write(nu_diag,*) subname,' Global i and j:', & this_block%i_glob(i), & - this_block%j_glob(j) + this_block%j_glob(j) + write (nu_diag,*) subname,' Lat, Lon (degrees):', & + TLAT(i,j,iblk)*rad_to_deg, & + TLON(i,j,iblk)*rad_to_deg write(nu_diag,*) ' ' + write(nu_diag,*) 'aice ', aice(i,j,iblk) write(nu_diag,*) 'aice0', aice0(i,j,iblk) do n = 1, ncat write(nu_diag,*) ' ' @@ -1977,7 +1971,7 @@ subroutine print_points_state(plabel,ilabel) i = piloc(m) j = pjloc(m) iblk = pbloc(m) - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) if (present(ilabel)) then write(llabel,'(i6,a1,i3,a1)') ilabel,':',m,':' @@ -1995,7 +1989,7 @@ subroutine print_points_state(plabel,ilabel) istep1, my_task, i, j, iblk write(nu_diag,*) trim(llabel),'Global i and j=', & this_block%i_glob(i), & - this_block%j_glob(j) + this_block%j_glob(j) write(nu_diag,*) trim(llabel),'aice0=', aice0(i,j,iblk) do n = 1, ncat @@ -2089,20 +2083,18 @@ end subroutine print_points_state ! prints error information prior to aborting - subroutine diagnostic_abort(istop, jstop, iblk, istep1, stop_label) + subroutine diagnostic_abort(istop, jstop, iblk, stop_label) use ice_blocks, only: block, get_block - use ice_communicate, only: my_task use ice_domain, only: blocks_ice use ice_grid, only: TLAT, TLON use ice_state, only: aice integer (kind=int_kind), intent(in) :: & istop, jstop, & ! indices of grid cell where model aborts - iblk , & ! block index - istep1 ! time step number + iblk ! block index - character (char_len), intent(in) :: stop_label + character (len=*), intent(in) :: stop_label ! local variables @@ -2118,20 +2110,17 @@ subroutine diagnostic_abort(istop, jstop, iblk, istep1, stop_label) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - this_block = get_block(blocks_ice(iblk),iblk) - - write (nu_diag,*) 'istep1, my_task, iblk =', & - istep1, my_task, iblk - write (nu_diag,*) 'Global block:', this_block%block_id - if (istop > 0 .and. jstop > 0) & - write (nu_diag,*) 'Global i and j:', & - this_block%i_glob(istop), & - this_block%j_glob(jstop) - write (nu_diag,*) 'Lat, Lon:', & - TLAT(istop,jstop,iblk)*rad_to_deg, & - TLON(istop,jstop,iblk)*rad_to_deg - write (nu_diag,*) 'aice:', & - aice(istop,jstop,iblk) + this_block = get_block(blocks_ice(iblk),iblk) + + call flush_fileunit(nu_diag) + if (istop > 0 .and. 
jstop > 0) then + call print_state(trim(stop_label),istop,jstop,iblk) + else + write (nu_diag,*) subname,' istep1, my_task, iblk =', & + istep1, my_task, iblk + write (nu_diag,*) subname,' Global block:', this_block%block_id + endif + call flush_fileunit(nu_diag) call abort_ice (subname//'ERROR: '//trim(stop_label)) end subroutine diagnostic_abort diff --git a/cicecore/cicedynB/analysis/ice_diagnostics_bgc.F90 b/cicecore/cicedynB/analysis/ice_diagnostics_bgc.F90 index 74485a5e2..f4528dd5d 100644 --- a/cicecore/cicedynB/analysis/ice_diagnostics_bgc.F90 +++ b/cicecore/cicedynB/analysis/ice_diagnostics_bgc.F90 @@ -41,7 +41,7 @@ module ice_diagnostics_bgc ! Nicole Jeffery, LANL subroutine hbrine_diags - + use ice_arrays_column, only: darcy_V use ice_broadcast, only: broadcast_scalar, broadcast_array use ice_diagnostics, only: npnt, print_points, pmloc, piloc, pjloc, pbloc @@ -84,27 +84,27 @@ subroutine hbrine_diags if (my_task == pmloc(n)) then i = piloc(n) j = pjloc(n) - iblk = pbloc(n) - phinS1(n) = c0 - phinS(n) = c0 - pfbri(n) = trcrn(i,j,nt_fbri,1,iblk) + iblk = pbloc(n) + phinS1(n) = c0 + phinS(n) = c0 + pfbri(n) = trcrn(i,j,nt_fbri,1,iblk) pdarcy_V(n) = darcy_V(i,j,1,iblk) if (aice(i,j,iblk) > c0) & phinS(n) = trcr(i,j,nt_fbri,iblk)*vice(i,j,iblk)/aice(i,j,iblk) if (aicen(i,j,1,iblk)> c0) & phinS1(n) = trcrn(i,j,nt_fbri,1,iblk)*vicen(i,j,1,iblk)/& - aicen(i,j,1,iblk) + aicen(i,j,1,iblk) do k = 1,nilyr pSin1(n,k) = trcrn(i,j,nt_sice+k-1,1,iblk) pSin(n,k) = trcr(i,j,nt_sice+k-1,iblk) enddo endif ! my_task = pmloc - - call broadcast_array (pSin (n,:), pmloc(n)) - call broadcast_array (pSin1 (n,:), pmloc(n)) - call broadcast_scalar(pfbri (n), pmloc(n)) - call broadcast_scalar(phinS1 (n), pmloc(n)) - call broadcast_scalar(phinS (n), pmloc(n)) + + call broadcast_array (pSin (n,:), pmloc(n)) + call broadcast_array (pSin1 (n,:), pmloc(n)) + call broadcast_scalar(pfbri (n), pmloc(n)) + call broadcast_scalar(phinS1 (n), pmloc(n)) + call broadcast_scalar(phinS (n), pmloc(n)) call broadcast_scalar(pdarcy_V(n), pmloc(n)) enddo ! npnt endif ! print_points @@ -125,22 +125,22 @@ subroutine hbrine_diags write(nu_diag,*) '------ hbrine ------' write(nu_diag,900) 'hbrine, (m) = ',phinS(1),phinS(2) write(nu_diag,900) 'fbri, cat1 (m) = ',pfbri(1),pfbri(2) - write(nu_diag,900) 'hbrine cat1, (m) = ',phinS1(1),phinS1(2) - write(nu_diag,900) 'darcy_V cat1, (m/s)= ',pdarcy_V(1),pdarcy_V(2) - if (ktherm == 2) then + write(nu_diag,900) 'hbrine cat1, (m) = ',phinS1(1),phinS1(2) + write(nu_diag,900) 'darcy_V cat1, (m/s)= ',pdarcy_V(1),pdarcy_V(2) + if (ktherm == 2) then write(nu_diag,*) ' ' write(nu_diag,*) '------ Thermosaline Salinity ------' write(nu_diag,803) 'Sice1(1) cat1 S (ppt)','Sice1(2) cat1 S' write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,802) ((pSin1(n,k),n=1,2), k = 1,nilyr) + write(nu_diag,802) ((pSin1(n,k),n=1,2), k = 1,nilyr) write(nu_diag,*) ' ' write(nu_diag,803) 'Sice(1) bulk S (ppt) ','Sice(2) bulk S' write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,802) ((pSin(n,k),n=1,2), k = 1,nilyr) + write(nu_diag,802) ((pSin(n,k),n=1,2), k = 1,nilyr) write(nu_diag,*) ' ' endif endif ! print_points - endif ! my_task = master_task + endif ! 
my_task = master_task 802 format (f24.17,2x,f24.17) 803 format (a25,2x,a25) @@ -198,7 +198,7 @@ subroutine bgc_diags integer (kind=int_kind), dimension(icepack_max_doc) :: & nt_bgc_doc, nlt_bgc_DOC integer (kind=int_kind), dimension(icepack_max_don) :: & - nt_bgc_don, nlt_bgc_DON + nt_bgc_don, nlt_bgc_DON integer (kind=int_kind), dimension(icepack_max_aero) :: & nt_zaero, nlt_zaero, nlt_zaero_sw integer (kind=int_kind), dimension(icepack_max_fe) :: & @@ -211,7 +211,7 @@ subroutine bgc_diags real (kind=dbl_kind), dimension(npnt,icepack_max_don) :: & pDON_ac, pDON_sk real (kind=dbl_kind), dimension(npnt,icepack_max_fe ) :: & - pFed_ac, pFed_sk, pFep_ac, pFep_sk + pFed_ac, pFed_sk, pFep_ac, pFep_sk real (kind=dbl_kind), dimension(npnt,icepack_max_aero) :: & pflux_zaero, pflux_snow_zaero, pflux_atm_zaero, & pflux_atm_zaero_s @@ -226,7 +226,7 @@ subroutine bgc_diags real (kind=dbl_kind), dimension(npnt,2,icepack_max_don) :: & pDONs real (kind=dbl_kind), dimension(npnt,2,icepack_max_fe ) :: & - pFeds, pFeps + pFeds, pFeps real (kind=dbl_kind), dimension(npnt,2,icepack_max_aero) :: & pzaeros real (kind=dbl_kind), dimension(npnt,nblyr+1) :: & @@ -240,10 +240,10 @@ subroutine bgc_diags real (kind=dbl_kind), dimension(npnt,nblyr+1,icepack_max_don) :: & pDON real (kind=dbl_kind), dimension(npnt,nblyr+1,icepack_max_fe ) :: & - pFed, pFep - real (kind=dbl_kind), dimension (nblyr+1) :: & + pFed, pFep + real (kind=dbl_kind), dimension (nblyr+1) :: & zspace - real (kind=dbl_kind), dimension (npnt,nslyr+nilyr+2) :: & + real (kind=dbl_kind), dimension (npnt,nslyr+nilyr+2) :: & pchlsw real (kind=dbl_kind), dimension(npnt,nslyr+nilyr+2,icepack_max_aero) :: & pzaerosw @@ -275,7 +275,7 @@ subroutine bgc_diags zspace(:) = c1/real(nblyr,kind=dbl_kind) zspace(1) = zspace(1)*p5 - zspace(nblyr+1) = zspace(nblyr+1)*p5 + zspace(nblyr+1) = zspace(nblyr+1)*p5 klev = 1+nilyr+nslyr !----------------------------------------------------------------- @@ -307,26 +307,26 @@ subroutine bgc_diags pNit_ac(n) = c0 if (tr_bgc_N) then do k = 1,n_algae - pN_ac(n,k) = ocean_bio(i,j,nlt_bgc_N(k),iblk) + pN_ac(n,k) = ocean_bio(i,j,nlt_bgc_N(k),iblk) enddo !n_algae endif !tr_bgc_N if (tr_bgc_C) then do k = 1,n_doc - pDOC_ac(n,k) = ocean_bio(i,j,nlt_bgc_DOC(k),iblk) + pDOC_ac(n,k) = ocean_bio(i,j,nlt_bgc_DOC(k),iblk) enddo !n_algae endif !tr_bgc_N if (tr_bgc_DON) then do k = 1,n_don - pDON_ac(n,k) = ocean_bio(i,j,nlt_bgc_DON(k),iblk) - enddo + pDON_ac(n,k) = ocean_bio(i,j,nlt_bgc_DON(k),iblk) + enddo endif if (tr_bgc_Fe ) then - do k = 1,n_fed - pFed_ac (n,k) = ocean_bio(i,j,nlt_bgc_Fed (k),iblk) - enddo - do k = 1,n_fep - pFep_ac (n,k) = ocean_bio(i,j,nlt_bgc_Fep (k),iblk) - enddo + do k = 1,n_fed + pFed_ac (n,k) = ocean_bio(i,j,nlt_bgc_Fed (k),iblk) + enddo + do k = 1,n_fep + pFep_ac (n,k) = ocean_bio(i,j,nlt_bgc_Fep (k),iblk) + enddo endif if (tr_bgc_Nit) & pNit_ac(n) = ocean_bio(i,j,nlt_bgc_Nit,iblk) ! 
nit(i,j,iblk) @@ -359,10 +359,10 @@ subroutine bgc_diags pDON_sk(n,:) = c0 pFed_sk(n,:) = c0 pFep_sk(n,:) = c0 - - do k = 1,n_algae + + do k = 1,n_algae pN_sk(n,k) = trcr (i,j,nt_bgc_N(k), iblk) - pflux_N(n,k) = flux_bio(i,j,nlt_bgc_N(k), iblk)*mps_to_cmpdy/c100 + pflux_N(n,k) = flux_bio(i,j,nlt_bgc_N(k), iblk)*mps_to_cmpdy/c100 enddo if (tr_bgc_C) then do k = 1,n_doc @@ -375,27 +375,27 @@ subroutine bgc_diags enddo endif if (tr_bgc_Fe ) then - do k = 1,n_fed + do k = 1,n_fed pFed_sk (n,k)= trcr (i,j,nt_bgc_Fed(k), iblk) enddo - do k = 1,n_fep + do k = 1,n_fep pFep_sk (n,k)= trcr (i,j,nt_bgc_Fep(k), iblk) enddo endif if (tr_bgc_Nit) then - pNit_sk(n) = trcr (i,j, nt_bgc_Nit, iblk) - pflux_NO(n) = flux_bio(i,j,nlt_bgc_Nit, iblk)*mps_to_cmpdy/c100 + pNit_sk(n) = trcr (i,j, nt_bgc_Nit, iblk) + pflux_NO(n) = flux_bio(i,j,nlt_bgc_Nit, iblk)*mps_to_cmpdy/c100 endif if (tr_bgc_Am) then pAm_sk(n) = trcr (i,j, nt_bgc_Am, iblk) - pflux_Am(n) = flux_bio(i,j,nlt_bgc_Am, iblk)*mps_to_cmpdy/c100 + pflux_Am(n) = flux_bio(i,j,nlt_bgc_Am, iblk)*mps_to_cmpdy/c100 endif if (tr_bgc_Sil) then - pSil_sk(n) = trcr (i,j, nt_bgc_Sil, iblk) + pSil_sk(n) = trcr (i,j, nt_bgc_Sil, iblk) endif if (tr_bgc_hum) then - phum_sk(n) = trcr (i,j, nt_bgc_hum, iblk) - pflux_hum(n)= flux_bio(i,j,nlt_bgc_hum, iblk)*mps_to_cmpdy/c100 + phum_sk(n) = trcr (i,j, nt_bgc_hum, iblk) + pflux_hum(n)= flux_bio(i,j,nlt_bgc_hum, iblk)*mps_to_cmpdy/c100 endif if (tr_bgc_DMS) then pDMSPp_sk(n) = trcr (i,j,nt_bgc_DMSPp,iblk) @@ -419,26 +419,26 @@ subroutine bgc_diags pflux_atm_zaero(n,:) = c0 pflux_snow_zaero(n,:) = c0 if (tr_bgc_Nit) then - pflux_NO(n) = flux_bio(i,j,nlt_bgc_Nit,iblk)*mps_to_cmpdy/c100 - pflux_atm_NO(n) = fbio_atmice(i,j,nlt_bgc_Nit,iblk)*mps_to_cmpdy/c100 + pflux_NO(n) = flux_bio(i,j,nlt_bgc_Nit,iblk)*mps_to_cmpdy/c100 + pflux_atm_NO(n) = fbio_atmice(i,j,nlt_bgc_Nit,iblk)*mps_to_cmpdy/c100 pflux_snow_NO(n) = fbio_snoice(i,j,nlt_bgc_Nit,iblk)*mps_to_cmpdy/c100 endif if (tr_bgc_Am) then - pflux_Am(n) = flux_bio(i,j,nlt_bgc_Am,iblk)*mps_to_cmpdy/c100 - pflux_atm_Am(n) = fbio_atmice(i,j,nlt_bgc_Am,iblk)*mps_to_cmpdy/c100 + pflux_Am(n) = flux_bio(i,j,nlt_bgc_Am,iblk)*mps_to_cmpdy/c100 + pflux_atm_Am(n) = fbio_atmice(i,j,nlt_bgc_Am,iblk)*mps_to_cmpdy/c100 pflux_snow_Am(n) = fbio_snoice(i,j,nlt_bgc_Am,iblk)*mps_to_cmpdy/c100 - endif + endif if (tr_bgc_hum) then - pflux_hum(n) = flux_bio(i,j,nlt_bgc_hum,iblk)*mps_to_cmpdy/c100 + pflux_hum(n) = flux_bio(i,j,nlt_bgc_hum,iblk)*mps_to_cmpdy/c100 endif if (tr_bgc_N) then do k = 1,n_algae - pflux_N(n,k) = flux_bio(i,j,nlt_bgc_N(k),iblk)*mps_to_cmpdy/c100 + pflux_N(n,k) = flux_bio(i,j,nlt_bgc_N(k),iblk)*mps_to_cmpdy/c100 enddo endif if (tr_zaero) then do k = 1,n_zaero - pflux_zaero(n,k) = flux_bio(i,j,nlt_zaero(k),iblk)*mps_to_cmpdy/c100 + pflux_zaero(n,k) = flux_bio(i,j,nlt_zaero(k),iblk)*mps_to_cmpdy/c100 pflux_atm_zaero_s(n,k)= flux_bio_atm(i,j,nlt_zaero(k),iblk)*mps_to_cmpdy/c100 !*aice pflux_atm_zaero(n,k) = fbio_atmice(i,j,nlt_zaero(k),iblk)*mps_to_cmpdy/c100 pflux_snow_zaero(n,k) = fbio_snoice(i,j,nlt_zaero(k),iblk)*mps_to_cmpdy/c100 @@ -465,35 +465,35 @@ subroutine bgc_diags pPON(n,k) = c0 phum(n,k) = c0 pNO(n,k) = c0 - if (tr_bgc_Nit) pNO(n,k) = trcr(i,j,nt_bgc_Nit+k-1,iblk) - if (tr_bgc_Am) pAm(n,k) = trcr(i,j,nt_bgc_Am+k-1,iblk) + if (tr_bgc_Nit) pNO(n,k) = trcr(i,j,nt_bgc_Nit+k-1,iblk) + if (tr_bgc_Am) pAm(n,k) = trcr(i,j,nt_bgc_Am+k-1,iblk) if (tr_bgc_N) then do nn = 1, n_algae pN(n,k,nn) = trcr(i,j,nt_bgc_N(nn)+k-1,iblk) - enddo - endif + enddo + endif if (tr_bgc_C) then do nn 
= 1, n_doc pDOC(n,k,nn) = trcr(i,j,nt_bgc_DOC(nn)+k-1,iblk) - enddo - endif + enddo + endif if (tr_bgc_DON) then do nn = 1, n_don pDON(n,k,nn) = trcr(i,j,nt_bgc_DON(nn)+k-1,iblk) - enddo - endif + enddo + endif if (tr_bgc_Fe) then do nn = 1, n_fed pFed(n,k,nn) = trcr(i,j,nt_bgc_Fed(nn)+k-1,iblk) - enddo + enddo do nn = 1, n_fep pFep(n,k,nn) = trcr(i,j,nt_bgc_Fep(nn)+k-1,iblk) - enddo - endif + enddo + endif if (tr_zaero) then do nn = 1, n_zaero pzaero(n,k,nn) = trcr(i,j,nt_zaero(nn)+k-1,iblk) - enddo + enddo endif if (tr_bgc_PON) pPON(n,k) = trcr(i,j,nt_bgc_PON+k-1,iblk) if (tr_bgc_hum) phum(n,k) = trcr(i,j,nt_bgc_hum+k-1,iblk) @@ -515,7 +515,7 @@ subroutine bgc_diags pPONs(n,k) = c0 phums(n,k) = c0 pNOs(n,k) = c0 - if (tr_bgc_Nit) pNOs(n,k) = trcr(i,j,nt_bgc_Nit+nblyr+k,iblk) + if (tr_bgc_Nit) pNOs(n,k) = trcr(i,j,nt_bgc_Nit+nblyr+k,iblk) if (tr_bgc_Am) pAms(n,k) = trcr(i,j,nt_bgc_Am+nblyr+k,iblk) if (tr_bgc_N) then do nn = 1, n_algae @@ -533,10 +533,10 @@ subroutine bgc_diags enddo endif if (tr_bgc_Fe ) then - do nn = 1, n_fed + do nn = 1, n_fed pFeds(n,k,nn) = trcr(i,j,nt_bgc_Fed(nn)+nblyr+k,iblk) enddo - do nn = 1, n_fep + do nn = 1, n_fep pFeps(n,k,nn) = trcr(i,j,nt_bgc_Fep(nn)+nblyr+k,iblk) enddo endif @@ -547,7 +547,7 @@ subroutine bgc_diags endif if (tr_bgc_PON)pPONs(n,k) =trcr(i,j,nt_bgc_PON+nblyr+k,iblk) if (tr_bgc_hum)phums(n,k) =trcr(i,j,nt_bgc_hum+nblyr+k,iblk) - enddo !k + enddo !k endif pchlsw(n,:) = c0 pzaerosw(n,:,:) = c0 @@ -560,17 +560,17 @@ subroutine bgc_diags enddo endif enddo - endif ! dEdd_algae + endif ! dEdd_algae endif ! my_task = pmloc - - call broadcast_scalar (pNit_ac (n), pmloc(n)) - call broadcast_scalar (pAm_ac (n), pmloc(n)) - call broadcast_scalar (pSil_ac (n), pmloc(n)) - call broadcast_scalar (phum_ac (n), pmloc(n)) - call broadcast_scalar (pDMSP_ac (n), pmloc(n)) - call broadcast_scalar (pDMS_ac (n), pmloc(n)) - call broadcast_scalar (pflux_NO (n), pmloc(n)) - call broadcast_scalar (pflux_Am (n), pmloc(n)) + + call broadcast_scalar (pNit_ac (n), pmloc(n)) + call broadcast_scalar (pAm_ac (n), pmloc(n)) + call broadcast_scalar (pSil_ac (n), pmloc(n)) + call broadcast_scalar (phum_ac (n), pmloc(n)) + call broadcast_scalar (pDMSP_ac (n), pmloc(n)) + call broadcast_scalar (pDMS_ac (n), pmloc(n)) + call broadcast_scalar (pflux_NO (n), pmloc(n)) + call broadcast_scalar (pflux_Am (n), pmloc(n)) call broadcast_scalar (pflux_hum (n), pmloc(n)) call broadcast_array (pN_ac (n,:), pmloc(n)) call broadcast_array (pflux_N (n,:), pmloc(n)) @@ -578,8 +578,8 @@ subroutine bgc_diags call broadcast_array (pDON_ac (n,:), pmloc(n)) call broadcast_array (pFed_ac (n,:), pmloc(n)) call broadcast_array (pFep_ac (n,:), pmloc(n)) - call broadcast_array (pchlsw (n,:), pmloc(n)) - call broadcast_array (pzaerosw (n,:,:), pmloc(n)) + call broadcast_array (pchlsw (n,:), pmloc(n)) + call broadcast_array (pzaerosw (n,:,:), pmloc(n)) if (skl_bgc) then ! 
skl_bgc call broadcast_array (pN_sk (n,:), pmloc(n)) call broadcast_array (pDOC_sk (n,:), pmloc(n)) @@ -587,24 +587,24 @@ subroutine bgc_diags call broadcast_array (pFed_sk (n,:), pmloc(n)) call broadcast_array (pFep_sk (n,:), pmloc(n)) - call broadcast_scalar(pNit_sk (n), pmloc(n)) - call broadcast_scalar(pAm_sk (n), pmloc(n)) - call broadcast_scalar(pSil_sk (n), pmloc(n)) - call broadcast_scalar(phum_sk (n), pmloc(n)) - call broadcast_scalar(pDMSPp_sk (n), pmloc(n)) - call broadcast_scalar(pDMSPd_sk (n), pmloc(n)) - call broadcast_scalar(pDMS_sk (n), pmloc(n)) + call broadcast_scalar(pNit_sk (n), pmloc(n)) + call broadcast_scalar(pAm_sk (n), pmloc(n)) + call broadcast_scalar(pSil_sk (n), pmloc(n)) + call broadcast_scalar(phum_sk (n), pmloc(n)) + call broadcast_scalar(pDMSPp_sk (n), pmloc(n)) + call broadcast_scalar(pDMSPd_sk (n), pmloc(n)) + call broadcast_scalar(pDMS_sk (n), pmloc(n)) endif !tr_bgc_sk if (z_tracers) then ! z_bgc - call broadcast_array (pN_tot (n,:), pmloc(n)) - call broadcast_array (pflux_zaero (n,:), pmloc(n)) - call broadcast_array (pflux_atm_zaero_s(n,:), pmloc(n)) - call broadcast_array (pflux_atm_zaero (n,:), pmloc(n)) + call broadcast_array (pN_tot (n,:), pmloc(n)) + call broadcast_array (pflux_zaero (n,:), pmloc(n)) + call broadcast_array (pflux_atm_zaero_s(n,:), pmloc(n)) + call broadcast_array (pflux_atm_zaero (n,:), pmloc(n)) call broadcast_array (pflux_snow_zaero (n,:), pmloc(n)) - call broadcast_scalar(pflux_atm_NO (n), pmloc(n)) - call broadcast_scalar(pflux_atm_Am (n), pmloc(n)) - call broadcast_scalar(pflux_snow_NO (n), pmloc(n)) + call broadcast_scalar(pflux_atm_NO (n), pmloc(n)) + call broadcast_scalar(pflux_atm_Am (n), pmloc(n)) + call broadcast_scalar(pflux_snow_NO (n), pmloc(n)) call broadcast_scalar(pflux_snow_Am (n), pmloc(n)) call broadcast_scalar(pgrow_net (n), pmloc(n)) call broadcast_array (pzfswin (n,:), pmloc(n)) @@ -623,12 +623,12 @@ subroutine bgc_diags call broadcast_array (pAms (n,:), pmloc(n)) call broadcast_array (pPONs (n,:), pmloc(n)) call broadcast_array (phums (n,:), pmloc(n)) - call broadcast_array (pNs (n,:,:), pmloc(n)) - call broadcast_array (pDOCs (n,:,:), pmloc(n)) - call broadcast_array (pDONs (n,:,:), pmloc(n)) - call broadcast_array (pFeds (n,:,:), pmloc(n)) - call broadcast_array (pFeps (n,:,:), pmloc(n)) - call broadcast_array (pzaeros (n,:,:), pmloc(n)) + call broadcast_array (pNs (n,:,:), pmloc(n)) + call broadcast_array (pDOCs (n,:,:), pmloc(n)) + call broadcast_array (pDONs (n,:,:), pmloc(n)) + call broadcast_array (pFeds (n,:,:), pmloc(n)) + call broadcast_array (pFeps (n,:,:), pmloc(n)) + call broadcast_array (pzaeros (n,:,:), pmloc(n)) endif ! z_tracers enddo ! npnt endif ! print_points @@ -649,14 +649,14 @@ subroutine bgc_diags if (z_tracers) then write(nu_diag,803) 'zfswin(1) PAR ','zfswin(2) PAR ' write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,802) ((pzfswin(n,k),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' + write(nu_diag,802) ((pzfswin(n,k),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' write(nu_diag,803) 'Losses: Zoo(1)(mmol/m^3) ','Zoo(2)' write(nu_diag,803) ' Brine Conc. ',' Brine Conc' write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,802) ((pZoo(n,k),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' - endif + write(nu_diag,802) ((pZoo(n,k),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' + endif if (tr_bgc_Nit) then write(nu_diag,*) '---------------------------------------------------' write(nu_diag,*) ' nitrate conc. 
(mmol/m^3) or flux (mmol/m^2/d)' @@ -669,17 +669,17 @@ subroutine bgc_diags write(nu_diag,900) 'snow-ice flux = ',pflux_snow_NO(1),pflux_snow_NO(2) write(nu_diag,*) ' snow + ice conc' write(nu_diag,803) ' nitrate(1)',' nitrate(2)' - write(nu_diag,802) ((pNOs(n,k),n=1,2), k = 1,2) - write(nu_diag,802) ((pNO(n,k),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' + write(nu_diag,802) ((pNOs(n,k),n=1,2), k = 1,2) + write(nu_diag,802) ((pNO(n,k),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' endif endif if (tr_bgc_PON .and. z_tracers) then write(nu_diag,*) '---------------------------------------------------' write(nu_diag,*) ' PON snow + ice conc. (mmol/m^3)' write(nu_diag,803) ' PON(1)',' PON(2)' - write(nu_diag,802) ((pPONs(n,k),n=1,2), k = 1,2) - write(nu_diag,802) ((pPON(n,k),n=1,2), k = 1,nblyr+1) + write(nu_diag,802) ((pPONs(n,k),n=1,2), k = 1,2) + write(nu_diag,802) ((pPON(n,k),n=1,2), k = 1,nblyr+1) write(nu_diag,*) ' ' endif if (tr_bgc_hum) then @@ -691,8 +691,8 @@ subroutine bgc_diags write(nu_diag,900) 'Bulk ice conc. = ',phum_sk(1),phum_sk(2) elseif (z_tracers) then write(nu_diag,803) ' hum(1)',' hum(2)' - write(nu_diag,802) ((phums(n,k),n=1,2), k = 1,2) - write(nu_diag,802) ((phum(n,k),n=1,2), k = 1,nblyr+1) + write(nu_diag,802) ((phums(n,k),n=1,2), k = 1,2) + write(nu_diag,802) ((phum(n,k),n=1,2), k = 1,nblyr+1) write(nu_diag,*) ' ' endif endif @@ -708,9 +708,9 @@ subroutine bgc_diags write(nu_diag,900) 'snow-ice flux = ',pflux_snow_Am(1),pflux_snow_Am(2) write(nu_diag,*) ' snow + ice conc.' write(nu_diag,803) ' ammonium(1)',' ammonium (2)' - write(nu_diag,802) ((pAms(n,k),n=1,2), k = 1,2) - write(nu_diag,802) ((pAm(n,k),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' + write(nu_diag,802) ((pAms(n,k),n=1,2), k = 1,2) + write(nu_diag,802) ((pAm(n,k),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' endif endif if (tr_bgc_N) then @@ -727,9 +727,9 @@ subroutine bgc_diags write(nu_diag,900) 'Tot ice (mmolN/m^2) = ',pN_tot(1,kk),pN_tot(2,kk) write(nu_diag,*) ' snow + ice conc.' write(nu_diag,803) ' algal N(1)',' algal N(2) ' - write(nu_diag,802) ((pNs(n,k,kk),n=1,2), k = 1,2) - write(nu_diag,802) ((pN(n,k,kk),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' + write(nu_diag,802) ((pNs(n,k,kk),n=1,2), k = 1,2) + write(nu_diag,802) ((pN(n,k,kk),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' endif enddo endif @@ -744,9 +744,9 @@ subroutine bgc_diags elseif (z_tracers) then write(nu_diag,*) ' snow + ice conc.' write(nu_diag,803) ' DOC(1)',' DOC(2) ' - write(nu_diag,802) ((pDOCs(n,k,kk),n=1,2), k = 1,2) - write(nu_diag,802) ((pDOC(n,k,kk),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' + write(nu_diag,802) ((pDOCs(n,k,kk),n=1,2), k = 1,2) + write(nu_diag,802) ((pDOC(n,k,kk),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' endif enddo endif @@ -761,9 +761,9 @@ subroutine bgc_diags elseif (z_tracers) then write(nu_diag,*) ' snow + ice conc.' write(nu_diag,803) ' DON(1)',' DON(2) ' - write(nu_diag,802) ((pDONs(n,k,kk),n=1,2), k = 1,2) - write(nu_diag,802) ((pDON(n,k,kk),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' + write(nu_diag,802) ((pDONs(n,k,kk),n=1,2), k = 1,2) + write(nu_diag,802) ((pDON(n,k,kk),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' endif enddo endif @@ -778,9 +778,9 @@ subroutine bgc_diags elseif (z_tracers) then write(nu_diag,*) ' snow + ice conc.' 
write(nu_diag,803) ' Fed (1)',' Fed (2) ' - write(nu_diag,802) ((pFeds (n,k,kk),n=1,2), k = 1,2) - write(nu_diag,802) ((pFed (n,k,kk),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' + write(nu_diag,802) ((pFeds (n,k,kk),n=1,2), k = 1,2) + write(nu_diag,802) ((pFed (n,k,kk),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' endif enddo do kk = 1,n_fep @@ -793,9 +793,9 @@ subroutine bgc_diags elseif (z_tracers) then write(nu_diag,*) ' snow + ice conc.' write(nu_diag,803) ' Fep (1)',' Fep (2) ' - write(nu_diag,802) ((pFeps (n,k,kk),n=1,2), k = 1,2) - write(nu_diag,802) ((pFep (n,k,kk),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' + write(nu_diag,802) ((pFeps (n,k,kk),n=1,2), k = 1,2) + write(nu_diag,802) ((pFep (n,k,kk),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' endif enddo endif @@ -807,7 +807,7 @@ subroutine bgc_diags if (skl_bgc) then write(nu_diag,900) 'Ice DMSPp = ',pDMSPp_sk(1),pDMSPp_sk(2) write(nu_diag,900) 'Ice DMSPd = ',pDMSPd_sk(1),pDMSPd_sk(2) - write(nu_diag,900) 'Ice DMS = ',pDMS_sk(1),pDMS_sk(2) + write(nu_diag,900) 'Ice DMS = ',pDMS_sk(1),pDMS_sk(2) endif endif if (tr_zaero .and. z_tracers) then @@ -821,8 +821,8 @@ subroutine bgc_diags write(nu_diag,900) 'snow-ice flux*aice = ',pflux_snow_zaero(1,kk),pflux_snow_zaero(2,kk) write(nu_diag,*) ' snow + ice conc.' write(nu_diag,803) ' aerosol(1)',' aerosol(2) ' - write(nu_diag,802) ((pzaeros(n,k,kk),n=1,2), k = 1,2) - write(nu_diag,802) ((pzaero(n,k,kk),n=1,2), k = 1,nblyr+1) + write(nu_diag,802) ((pzaeros(n,k,kk),n=1,2), k = 1,2) + write(nu_diag,802) ((pzaero(n,k,kk),n=1,2), k = 1,nblyr+1) write(nu_diag,*) ' ' enddo endif @@ -830,23 +830,22 @@ subroutine bgc_diags if (tr_zaero) then do kk = 1,n_zaero write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,*) ' Cat 1 aerosol conc. (kg/m^3) on delta-Eddington grid ' - write(nu_diag,802) ((pzaerosw(n,k,kk),n=1,2), k = 1,klev +1) + write(nu_diag,*) ' Cat 1 aerosol conc. (kg/m^3) on delta-Eddington grid ' + write(nu_diag,802) ((pzaerosw(n,k,kk),n=1,2), k = 1,klev +1) enddo endif if (tr_bgc_N) then write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,*) ' Cat 1 chl (mg/m^3) on delta-Eddington grid ' - write(nu_diag,802) ((pchlsw(n,k),n=1,2), k = 1,klev +1) + write(nu_diag,*) ' Cat 1 chl (mg/m^3) on delta-Eddington grid ' + write(nu_diag,802) ((pchlsw(n,k),n=1,2), k = 1,klev +1) endif endif endif ! print_points - endif ! my_task = master_task + endif ! my_task = master_task 802 format (f24.17,2x,f24.17) 803 format (a25,2x,a25) 900 format (a25,2x,f24.17,2x,f24.17) - 902 format (a25,10x,f6.1,1x,f6.1,9x,f6.1,1x,f6.1) 1020 format (a30,2x,i6) ! integer end subroutine bgc_diags @@ -878,8 +877,8 @@ subroutine zsal_diags ! fields at diagnostic points real (kind=dbl_kind), dimension(npnt) :: & phinS, phinS1,& - phbrn,pdh_top1,pdh_bot1, psice_rho, pfzsal, & - pfzsal_g, pdarcy_V1 + phbrn,pdh_top1,pdh_bot1, psice_rho, pfzsal, & + pfzsal_g, pdarcy_V1 ! 
vertical fields of category 1 at diagnostic points for bgc layer model real (kind=dbl_kind), dimension(npnt,nblyr+2) :: & @@ -923,10 +922,10 @@ subroutine zsal_diags j = pjloc(n) iblk = pbloc(n) - pfzsal(n) = fzsal(i,j,iblk) - pfzsal_g(n) = fzsal_g(i,j,iblk) - phinS(n) = c0 - phinS1(n) = c0 + pfzsal(n) = fzsal(i,j,iblk) + pfzsal_g(n) = fzsal_g(i,j,iblk) + phinS(n) = c0 + phinS1(n) = c0 phbrn(n) = c0 psice_rho(n) = c0 pdh_top1(n) = c0 @@ -948,7 +947,7 @@ subroutine zsal_diags pdh_top1(n) = dhbr_top(i,j,1,iblk) pdh_bot1(n) = dhbr_bot(i,j,1,iblk) pdarcy_V1(n) = darcy_V(i,j,1,iblk) - endif + endif do k = 1, nblyr+1 pbTiz(n,k) = c0 piDin(n,k) = c0 @@ -958,7 +957,7 @@ subroutine zsal_diags enddo if (vice(i,j,iblk) > c0) then pbTiz(n,k) = pbTiz(n,k)/vice(i,j,iblk) - piDin(n,k) = piDin(n,k)/vice(i,j,iblk) + piDin(n,k) = piDin(n,k)/vice(i,j,iblk) endif enddo ! k do k = 1, nblyr+2 @@ -974,24 +973,24 @@ subroutine zsal_diags enddo do k = 1,nblyr pSin(n,k) = c0 - pSin1(n,k) = c0 - pSin(n,k)= trcr(i,j,nt_bgc_S+k-1,iblk) + pSin1(n,k) = c0 + pSin(n,k)= trcr(i,j,nt_bgc_S+k-1,iblk) if (aicen(i,j,1,iblk) > c0) pSin1(n,k) = trcrn(i,j,nt_bgc_S+k-1,1,iblk) - enddo + enddo do k = 1,nilyr pSice(n,k) = trcr(i,j,nt_sice+k-1,iblk) enddo endif ! my_task = pmloc - call broadcast_scalar(phinS (n), pmloc(n)) - call broadcast_scalar(phinS1 (n), pmloc(n)) - call broadcast_scalar(phbrn (n), pmloc(n)) - call broadcast_scalar(pdh_top1 (n), pmloc(n)) - call broadcast_scalar(pdh_bot1 (n), pmloc(n)) - call broadcast_scalar(psice_rho(n), pmloc(n)) - call broadcast_scalar(pfzsal_g (n), pmloc(n)) - call broadcast_scalar(pdarcy_V1(n), pmloc(n)) - call broadcast_scalar(pfzsal (n), pmloc(n)) + call broadcast_scalar(phinS (n), pmloc(n)) + call broadcast_scalar(phinS1 (n), pmloc(n)) + call broadcast_scalar(phbrn (n), pmloc(n)) + call broadcast_scalar(pdh_top1 (n), pmloc(n)) + call broadcast_scalar(pdh_bot1 (n), pmloc(n)) + call broadcast_scalar(psice_rho(n), pmloc(n)) + call broadcast_scalar(pfzsal_g (n), pmloc(n)) + call broadcast_scalar(pdarcy_V1(n), pmloc(n)) + call broadcast_scalar(pfzsal (n), pmloc(n)) call broadcast_array (pbTiz (n,:), pmloc(n)) call broadcast_array (piDin (n,:), pmloc(n)) call broadcast_array (pphin (n,:), pmloc(n)) @@ -1050,15 +1049,15 @@ subroutine zsal_diags write(nu_diag,*) ' ' write(nu_diag,803) 'zsal(1) cat 1 ','zsal(2) cat 1 ' write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,802) ((pSin1(n,k),n=1,2), k = 1,nblyr) + write(nu_diag,802) ((pSin1(n,k),n=1,2), k = 1,nblyr) write(nu_diag,*) ' ' write(nu_diag,803) 'zsal(1) Avg S ','zsal(2) Avg S ' write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,802) ((pSin(n,k),n=1,2), k = 1,nblyr) + write(nu_diag,802) ((pSin(n,k),n=1,2), k = 1,nblyr) write(nu_diag,*) ' ' write(nu_diag,803) 'Sice(1) Ice S ','Sice(2) Ice S ' write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,802) ((pSice(n,k),n=1,2), k = 1,nilyr) + write(nu_diag,802) ((pSice(n,k),n=1,2), k = 1,nilyr) write(nu_diag,*) ' ' endif ! 
print_points

@@ -1068,7 +1067,6 @@ subroutine zsal_diags
   803 format (a25,2x,a25)
   900 format (a25,2x,f24.17,2x,f24.17)
   902 format (a25,10x,f6.1,1x,f6.1,9x,f6.1,1x,f6.1)
-  903 format (a25,5x,i4,1x,i4,1x,i4,1x,i4,7x,i4,1x,i4,1x,i4,1x,i4)
 
 end subroutine zsal_diags

diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90
index f3ca9b33e..caaa56295 100644
--- a/cicecore/cicedynB/analysis/ice_history.F90
+++ b/cicecore/cicedynB/analysis/ice_history.F90
@@ -1,24 +1,24 @@
 !=======================================================================
 ! Driver for core history output
 !
-! The following variables are currently hard-wired as snapshots 
+! The following variables are currently hard-wired as snapshots
 ! (instantaneous rather than time-averages):
 !   divu, shear, sig1, sig2, sigP, trsig, mlt_onset, frz_onset, hisnap, aisnap
 !
 ! Options for histfreq: '1','h','d','m','y','x', where x means that
-! output stream will not be used (recommended for efficiency). 
-! histfreq_n can be any nonnegative integer, where 0 means that the 
+! output stream will not be used (recommended for efficiency).
+! histfreq_n can be any nonnegative integer, where 0 means that the
 ! corresponding histfreq frequency will not be used.
 ! The flags (f_<field>) can be set to '1','h','d','m','y' or 'x', where
 ! x means the field will not be written.  To output the same field at
-! more than one frequency, for instance monthy and daily, set 
+! more than one frequency, for instance monthly and daily, set
 ! f_<field> = 'md'.
 !
 ! authors Tony Craig and Bruce Briegleb, NCAR
 !         Elizabeth C. Hunke and William H. Lipscomb, LANL
 !         C. M. Bitz, UW
 !
-! 2004 WHL: Block structure added 
+! 2004 WHL: Block structure added
 ! 2006 ECH: Accepted some CESM code into mainstream CICE
 !           Added ice_present, aicen, vicen; removed aice1...10, vice1...10.
 !           Added histfreq_n and histfreq='h' options, removed histfreq='w'
@@ -44,7 +44,7 @@ module ice_history
       implicit none
       private
       public :: init_hist, accum_hist
- 
+
 !=======================================================================

       contains
@@ -294,7 +294,7 @@ subroutine init_hist (dt)
          f_yieldstress22 = 'x'
       endif

-      ! these must be output at the same frequency because of 
+      ! these must be output at the same frequency because of
       ! cos(zenith angle) averaging
       if (f_albice(1:1) /= 'x' .and. f_albsni(1:1) /= 'x') f_albice = f_albsni
       if (f_albsno(1:1) /= 'x') f_albsno = f_albice
@@ -686,7 +686,7 @@ subroutine init_hist (dt)
       call broadcast_scalar (f_e12, master_task)
       call broadcast_scalar (f_e22, master_task)
       call broadcast_scalar (f_s11, master_task)
-      call broadcast_scalar (f_s12, master_task) 
+      call broadcast_scalar (f_s12, master_task)
       call broadcast_scalar (f_s22, master_task)
       call broadcast_scalar (f_yieldstress11, master_task)
       call broadcast_scalar (f_yieldstress12, master_task)
@@ -697,13 +697,13 @@ subroutine init_hist (dt)

       if (histfreq(ns1) /= 'x') then

 !!!!! begin example
-!      call define_hist_field(n_example,"example","m",tstr2D, tcstr, & 
+!      call define_hist_field(n_example,"example","m",tstr2D, tcstr, &
 !          "example: mean ice thickness", &
 !          "ice volume per unit grid cell area", c1, c0, &
 !          ns1, f_example)
 !!!!! end example
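
The commented-out n_example block above shows the pattern every history field in this file follows. To make the header's histfreq/f_<field> description concrete, a minimal illustrative sketch of the corresponding ice_in namelist input follows; the group names setup_nml and icefields_nml are CICE's namelist groups, but the values here are examples only, not defaults:

   &setup_nml
     histfreq   = 'm','d','x','x','x'   ! stream 1 monthly, stream 2 daily, streams 3-5 unused
     histfreq_n = 1, 1, 0, 0, 0         ! write every 1 month and every 1 day
   /
   &icefields_nml
     f_hi     = 'md'  ! mean ice thickness on both the monthly and daily streams
     f_icespd = 'm'   ! ice speed on the monthly stream only
     f_sig1   = 'x'   ! never written (sig1 is a snapshot field)
   /

With these settings init_hist defines "hi" on both active streams; any field or stream flagged 'x' is skipped entirely, which is the efficiency recommendation in the module header.
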
 
-      call define_hist_field(n_hi,"hi","m",tstr2D, tcstr, & 
+      call define_hist_field(n_hi,"hi","m",tstr2D, tcstr, &
           "grid cell mean ice thickness", &
           "ice volume per unit grid cell area", c1, c0, &
           ns1, f_hi)

@@ -742,12 +742,12 @@ subroutine init_hist (dt)
           "sea ice speed", &
           "vector magnitude on E grid", c1, c0, &
           ns1, f_icespdE)
- 
+
       call define_hist_field(n_icedirE,"icedirE","deg",estr2D, ecstr, &
           "sea ice direction", &
           "vector direction - coming from on E grid", c1, c0, &
           ns1, f_icedirE)
- 
+
       call define_hist_field(n_uvelN,"uvelN","m/s",nstr2D, ncstr, &
           "ice velocity (x)", &
           "positive is x direction on N grid", c1, c0, &
           ns1, f_uvelN)

@@ -762,12 +762,12 @@ subroutine init_hist (dt)
           "sea ice speed", &
           "vector magnitude on N grid", c1, c0, &
           ns1, f_icespdN)
- 
+
       call define_hist_field(n_icedirN,"icedirN","deg",nstr2D, ncstr, &
           "sea ice direction", &
           "vector direction - coming from on N grid", c1, c0, &
           ns1, f_icedirN)
- 
+
       call define_hist_field(n_uvel,"uvel","m/s",ustr2D, ucstr, &
           "ice velocity (x)", &
           "positive is x direction on U grid", c1, c0, &
           ns1, f_uvel)

@@ -777,22 +777,22 @@ subroutine init_hist (dt)
           "ice velocity (y)", &
           "positive is y direction on U grid", c1, c0, &
           ns1, f_vvel)
- 
+
       call define_hist_field(n_icespd,"icespd","m/s",ustr2D, ucstr, &
           "sea ice speed", &
           "vector magnitude", c1, c0, &
           ns1, f_icespd)
- 
+
       call define_hist_field(n_icedir,"icedir","deg",ustr2D, ucstr, &
           "sea ice direction", &
           "vector direction - coming from", c1, c0, &
           ns1, f_icedir)
- 
+
       call define_hist_field(n_uatm,"uatm","m/s",str2D_gau, cstr_gau, &
           "atm velocity (x)", &
           "positive is x direction on U grid", c1, c0, &
           ns1, f_uatm)
- 
+
       call define_hist_field(n_vatm,"vatm","m/s",str2D_gav, cstr_gav, &
           "atm velocity (y)", &
           "positive is y direction on U grid", c1, c0, &

@@ -802,67 +802,67 @@ subroutine init_hist (dt)
           "atmosphere wind speed", &
           "vector magnitude", c1, c0, &
           ns1, f_atmspd)
- 
+
       call define_hist_field(n_atmdir,"atmdir","deg",str2D_gau, cstr_gau, &
           "atmosphere wind direction", &
           "vector direction - coming from", c1, c0, &
           ns1, f_atmdir)
- 
+
       call define_hist_field(n_sice,"sice","ppt",tstr2D, tcstr, &
           "bulk ice salinity", &
           "none", c1, c0, &
           ns1, f_sice)
- 
+
       call define_hist_field(n_fswup,"fswup","W/m^2",tstr2D, tcstr, &
           "upward solar flux", &
           "positive upward", c1, c0, &
           ns1, f_fswup)
- 
+
       call define_hist_field(n_fswdn,"fswdn","W/m^2",tstr2D, tcstr, &
           "down solar flux", &
           "positive downward", c1, c0, &
           ns1, f_fswdn)
- 
+
       call define_hist_field(n_flwdn,"flwdn","W/m^2",tstr2D, tcstr, &
           "down longwave flux", &
           "positive downward", c1, c0, &
           ns1, f_flwdn)
- 
+
       call define_hist_field(n_snow,"snow","cm/day",tstr2D, tcstr, &
           "snowfall rate (cpl)", &
           "none", mps_to_cmpdy/rhofresh, c0, &
           ns1, f_snow)
- 
+
       call define_hist_field(n_snow_ai,"snow_ai","cm/day",tstr2D, tcstr, &
           "snowfall rate", &
           "weighted by ice area", mps_to_cmpdy/rhofresh, c0, &
           ns1, f_snow_ai)
- 
+
       call define_hist_field(n_rain,"rain","cm/day",tstr2D, tcstr, &
           "rainfall rate (cpl)", &
           "none", mps_to_cmpdy/rhofresh, c0, &
           ns1, f_rain)
- 
+
       call define_hist_field(n_rain_ai,"rain_ai","cm/day",tstr2D, tcstr, &
           "rainfall rate", &
           "weighted by ice area", mps_to_cmpdy/rhofresh, c0, &
           ns1, f_rain_ai)
- 
+
       call define_hist_field(n_sst,"sst","C",tstr2D, tcstr, &
           "sea surface temperature", &
           "none", c1, c0, &
           ns1, f_sst)
- 
+
       call define_hist_field(n_sss,"sss","ppt",tstr2D, tcstr, &
           "sea surface salinity", &
           "none", c1, c0, &
           ns1, f_sss)
- 
+
       call define_hist_field(n_uocn,"uocn","m/s",str2D_gou, cstr_gou, &
           "ocean current (x)", &
           "positive is x direction on U grid", c1, c0, &
           ns1, 
f_uocn) - + call define_hist_field(n_vocn,"vocn","m/s",str2D_gov, cstr_gov, & "ocean current (y)", & "positive is y direction on U grid", c1, c0, & @@ -872,17 +872,17 @@ subroutine init_hist (dt) "ocean current speed", & "vector magnitude", c1, c0, & ns1, f_ocnspd) - + call define_hist_field(n_ocndir,"ocndir","deg",str2D_gou, cstr_gou, & "ocean current direction", & "vector direction - going to", c1, c0, & ns1, f_ocndir) - + call define_hist_field(n_frzmlt,"frzmlt","W/m^2",tstr2D, tcstr, & "freeze/melt potential", & "if >0, new ice forms; if <0, ice melts", c1, c0, & ns1, f_frzmlt) - + call define_hist_field(n_fswfac,"scale_factor","1",tstr2D, tcstr, & "shortwave scaling factor", & "ratio of netsw new:old", c1, c0, & @@ -897,22 +897,22 @@ subroutine init_hist (dt) "snow/ice/ocn absorbed solar flux (cpl)", & "positive downward", c1, c0, & ns1, f_fswabs) - + call define_hist_field(n_fswabs_ai,"fswabs_ai","W/m^2",tstr2D, tcstr, & "snow/ice/ocn absorbed solar flux", & "weighted by ice area", c1, c0, & ns1, f_fswabs_ai) - + call define_hist_field(n_albsni,"albsni","%",tstr2D, tcstr, & "snow/ice broad band albedo", & "averaged for coszen>0, weighted by aice", c100, c0, & ns1, f_albsni) - + call define_hist_field(n_alvdr,"alvdr","%",tstr2D, tcstr, & "visible direct albedo", & "scaled (divided) by aice", c100, c0, & ns1, f_alvdr) - + call define_hist_field(n_alidr,"alidr","%",tstr2D, tcstr, & "near IR direct albedo", & "scaled (divided) by aice", c100, c0, & @@ -922,7 +922,7 @@ subroutine init_hist (dt) "visible diffuse albedo", & "scaled (divided) by aice", c100, c0, & ns1, f_alvdf) - + call define_hist_field(n_alidf,"alidf","%",tstr2D, tcstr, & "near IR diffuse albedo", & "scaled (divided) by aice", c100, c0, & @@ -932,7 +932,7 @@ subroutine init_hist (dt) "visible direct albedo", & " ", c100, c0, & ns1, f_alvdr_ai) - + call define_hist_field(n_alidr_ai,"alidr_ai","%",tstr2D, tcstr, & "near IR direct albedo", & " ", c100, c0, & @@ -942,7 +942,7 @@ subroutine init_hist (dt) "visible diffuse albedo", & " ", c100, c0, & ns1, f_alvdf_ai) - + call define_hist_field(n_alidf_ai,"alidf_ai","%",tstr2D, tcstr, & "near IR diffuse albedo", & " ", c100, c0, & @@ -952,17 +952,17 @@ subroutine init_hist (dt) "bare ice albedo", & "averaged for coszen>0, weighted by aice", c100, c0, & ns1, f_albice) - + call define_hist_field(n_albsno,"albsno","%",tstr2D, tcstr, & "snow albedo", & "averaged for coszen>0, weighted by aice", c100, c0, & ns1, f_albsno) - + call define_hist_field(n_albpnd,"albpnd","%",tstr2D, tcstr, & "melt pond albedo", & "averaged for coszen>0, weighted by aice", c100, c0, & ns1, f_albpnd) - + call define_hist_field(n_coszen,"coszen","radian",tstr2D, tcstr, & "cosine of the zenith angle", & "negative below horizon", c1, c0, & @@ -972,188 +972,188 @@ subroutine init_hist (dt) "latent heat flux (cpl)", & "positive downward", c1, c0, & ns1, f_flat) - + call define_hist_field(n_flat_ai,"flat_ai","W/m^2",tstr2D, tcstr, & "latent heat flux", & "weighted by ice area", c1, c0, & ns1, f_flat_ai) - + call define_hist_field(n_fsens,"fsens","W/m^2",tstr2D, tcstr, & "sensible heat flux (cpl)", & "positive downward", c1, c0, & ns1, f_fsens) - + call define_hist_field(n_fsens_ai,"fsens_ai","W/m^2",tstr2D, tcstr, & "sensible heat flux", & "weighted by ice area", c1, c0, & ns1, f_fsens_ai) - + call define_hist_field(n_flwup,"flwup","W/m^2",tstr2D, tcstr, & "upward longwave flux (cpl)", & "positive downward", c1, c0, & ns1, f_flwup) - + call define_hist_field(n_flwup_ai,"flwup_ai","W/m^2",tstr2D, tcstr, & "upward 
longwave flux", & "weighted by ice area", c1, c0, & ns1, f_flwup_ai) - + call define_hist_field(n_evap,"evap","cm/day",tstr2D, tcstr, & "evaporative water flux (cpl)", & "none", mps_to_cmpdy/rhofresh, c0, & ns1, f_evap) - + call define_hist_field(n_evap_ai,"evap_ai","cm/day",tstr2D, tcstr, & "evaporative water flux", & "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & ns1, f_evap_ai) - + call define_hist_field(n_Tair,"Tair","C",tstr2D, tcstr, & "air temperature", & "none", c1, -Tffresh, & ns1, f_Tair) - + call define_hist_field(n_Tref,"Tref","C",tstr2D, tcstr, & "2m reference temperature", & "none", c1, -Tffresh, & ns1, f_Tref) - + call define_hist_field(n_Qref,"Qref","g/kg",tstr2D, tcstr, & "2m reference specific humidity", & "none", kg_to_g, c0, & ns1, f_Qref) - + call define_hist_field(n_congel,"congel","cm/day",tstr2D, tcstr, & "congelation ice growth", & "none", mps_to_cmpdy/dt, c0, & ns1, f_congel) - + call define_hist_field(n_frazil,"frazil","cm/day",tstr2D, tcstr, & "frazil ice growth", & "none", mps_to_cmpdy/dt, c0, & ns1, f_frazil) - + call define_hist_field(n_snoice,"snoice","cm/day",tstr2D, tcstr, & "snow-ice formation", & "none", mps_to_cmpdy/dt, c0, & ns1, f_snoice) - + call define_hist_field(n_dsnow,"dsnow","cm/day",tstr2D, tcstr, & "snow formation", & "none", mps_to_cmpdy/dt, c0, & ns1, f_dsnow) - + call define_hist_field(n_meltt,"meltt","cm/day",tstr2D, tcstr, & "top ice melt", & "none", mps_to_cmpdy/dt, c0, & ns1, f_meltt) - + call define_hist_field(n_melts,"melts","cm/day",tstr2D, tcstr, & "top snow melt", & "none", mps_to_cmpdy/dt, c0, & ns1, f_melts) - + call define_hist_field(n_meltb,"meltb","cm/day",tstr2D, tcstr, & "basal ice melt", & "none", mps_to_cmpdy/dt, c0, & ns1, f_meltb) - + call define_hist_field(n_meltl,"meltl","cm/day",tstr2D, tcstr, & "lateral ice melt", & "none", mps_to_cmpdy/dt, c0, & ns1, f_meltl) - + call define_hist_field(n_fresh,"fresh","cm/day",tstr2D, tcstr, & "freshwtr flx ice to ocn (cpl)", & "if positive, ocean gains fresh water", & mps_to_cmpdy/rhofresh, c0, & ns1, f_fresh) - + call define_hist_field(n_fresh_ai,"fresh_ai","cm/day",tstr2D, tcstr, & "freshwtr flx ice to ocn", & "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & ns1, f_fresh_ai) - + call define_hist_field(n_fsalt,"fsalt","kg/m^2/s",tstr2D, tcstr, & "salt flux ice to ocn (cpl)", & "if positive, ocean gains salt", c1, c0, & ns1, f_fsalt) - + call define_hist_field(n_fsalt_ai,"fsalt_ai","kg/m^2/s",tstr2D, tcstr, & "salt flux ice to ocean", & "weighted by ice area", c1, c0, & ns1, f_fsalt_ai) - + call define_hist_field(n_fbot,"fbot","W/m^2",tstr2D, tcstr, & "heat flux ice to ocean (fbot)", & "if positive, ocean gains heat", c1, c0, & ns1, f_fbot) - + call define_hist_field(n_fhocn,"fhocn","W/m^2",tstr2D, tcstr, & "heat flux ice to ocn (cpl)", & "if positive, ocean gains heat", c1, c0, & ns1, f_fhocn) - + call define_hist_field(n_fhocn_ai,"fhocn_ai","W/m^2",tstr2D, tcstr, & "heat flux ice to ocean (fhocn_ai)", & "weighted by ice area", c1, c0, & ns1, f_fhocn_ai) - + call define_hist_field(n_fswthru,"fswthru","W/m^2",tstr2D, tcstr, & "SW thru ice to ocean (cpl)", & "if positive, ocean gains heat", c1, c0, & ns1, f_fswthru) - + call define_hist_field(n_fswthru_ai,"fswthru_ai","W/m^2",tstr2D, tcstr,& "SW flux thru ice to ocean", & "weighted by ice area", c1, c0, & ns1, f_fswthru_ai) - + call define_hist_field(n_strairx,"strairx","N/m^2",ustr2D, ucstr, & "atm/ice stress (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_strairx) - + call 
define_hist_field(n_strairy,"strairy","N/m^2",ustr2D, ucstr, & "atm/ice stress (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_strairy) - + call define_hist_field(n_strtltx,"strtltx","N/m^2",ustr2D, ucstr, & "sea sfc tilt stress (x)", & "none", c1, c0, & ns1, f_strtltx) - + call define_hist_field(n_strtlty,"strtlty","N/m^2",ustr2D, ucstr, & "sea sfc tilt stress (y)", & "none", c1, c0, & ns1, f_strtlty) - + call define_hist_field(n_strcorx,"strcorx","N/m^2",ustr2D, ucstr, & "coriolis stress (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_strcorx) - + call define_hist_field(n_strcory,"strcory","N/m^2",ustr2D, ucstr, & "coriolis stress (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_strcory) - + call define_hist_field(n_strocnx,"strocnx","N/m^2",ustr2D, ucstr, & "ocean/ice stress (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_strocnx) - + call define_hist_field(n_strocny,"strocny","N/m^2",ustr2D, ucstr, & "ocean/ice stress (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_strocny) - + call define_hist_field(n_strintx,"strintx","N/m^2",ustr2D, ucstr, & "internal ice stress (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_strintx) - + call define_hist_field(n_strinty,"strinty","N/m^2",ustr2D, ucstr, & "internal ice stress (y)", & "positive is y direction on U grid", c1, c0, & @@ -1168,92 +1168,92 @@ subroutine init_hist (dt) "seabed (basal) stress (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_tauby) - + call define_hist_field(n_strairxN,"strairxN","N/m^2",nstr2D, ncstr, & "atm/ice stress (x)", & "positive is x direction on N grid", c1, c0, & ns1, f_strairxN) - + call define_hist_field(n_strairyN,"strairyN","N/m^2",nstr2D, ncstr, & "atm/ice stress (y)", & "positive is y direction on N grid", c1, c0, & ns1, f_strairyN) - + call define_hist_field(n_strairxE,"strairxE","N/m^2",estr2D, ecstr, & "atm/ice stress (x)", & "positive is x direction on E grid", c1, c0, & ns1, f_strairxE) - + call define_hist_field(n_strairyE,"strairyE","N/m^2",estr2D, ecstr, & "atm/ice stress (y)", & "positive is y direction on E grid", c1, c0, & ns1, f_strairyE) - + call define_hist_field(n_strtltxN,"strtltxN","N/m^2",nstr2D, ncstr, & "sea sfc tilt stress (x)", & "positive is x direction on N grid", c1, c0, & ns1, f_strtltxN) - + call define_hist_field(n_strtltyN,"strtltyN","N/m^2",nstr2D, ncstr, & "sea sfc tilt stress (y)", & "positive is y direction on N grid", c1, c0, & ns1, f_strtltyN) - + call define_hist_field(n_strtltxE,"strtltxE","N/m^2",estr2D, ecstr, & "sea sfc tilt stress (x)", & "positive is x direction on E grid", c1, c0, & ns1, f_strtltxE) - + call define_hist_field(n_strtltyE,"strtltyE","N/m^2",estr2D, ecstr, & "sea sfc tilt stress (y)", & "positive is y direction on E grid", c1, c0, & ns1, f_strtltyE) - + call define_hist_field(n_strcorxN,"strcorxN","N/m^2",nstr2D, ncstr, & "coriolis stress (x)", & "positive is x direction on N grid", c1, c0, & ns1, f_strcorxN) - + call define_hist_field(n_strcoryN,"strcoryN","N/m^2",nstr2D, ncstr, & "coriolis stress (y)", & "positive is y direction on N grid", c1, c0, & ns1, f_strcoryN) - + call define_hist_field(n_strcorxE,"strcorxE","N/m^2",estr2D, ecstr, & "coriolis stress (x)", & "positive is x direction on E grid", c1, c0, & ns1, f_strcorxE) - + call define_hist_field(n_strcoryE,"strcoryE","N/m^2",estr2D, ecstr, & "coriolis stress (y)", & "positive is y direction on E grid", c1, c0, & ns1, f_strcoryE) - + call define_hist_field(n_strocnxN,"strocnxN","N/m^2",nstr2D, 
ncstr, & "ocean/ice stress (x)", & "positive is x direction on N grid", c1, c0, & ns1, f_strocnxN) - + call define_hist_field(n_strocnyN,"strocnyN","N/m^2",nstr2D, ncstr, & "ocean/ice stress (y)", & "positive is y direction on N grid", c1, c0, & ns1, f_strocnyN) - + call define_hist_field(n_strocnxE,"strocnxE","N/m^2",estr2D, ecstr, & "ocean/ice stress (x)", & "positive is x direction on E grid", c1, c0, & ns1, f_strocnxE) - + call define_hist_field(n_strocnyE,"strocnyE","N/m^2",estr2D, ecstr, & "ocean/ice stress (y)", & "positive is y direction on E grid", c1, c0, & ns1, f_strocnyE) - + call define_hist_field(n_strintxN,"strintxN","N/m^2",nstr2D, ncstr, & "internal ice stress (x)", & "positive is x direction on N grid", c1, c0, & ns1, f_strintxN) - + call define_hist_field(n_strintyN,"strintyN","N/m^2",nstr2D, ncstr, & "internal ice stress (y)", & "positive is y direction on N grid", c1, c0, & @@ -1263,7 +1263,7 @@ subroutine init_hist (dt) "internal ice stress (x)", & "positive is x direction on E grid", c1, c0, & ns1, f_strintxE) - + call define_hist_field(n_strintyE,"strintyE","N/m^2",estr2D, ecstr, & "internal ice stress (y)", & "positive is y direction on E grid", c1, c0, & @@ -1278,7 +1278,7 @@ subroutine init_hist (dt) "seabed (basal) stress (y)", & "positive is y direction on N grid", c1, c0, & ns1, f_taubyN) - + call define_hist_field(n_taubxE,"taubxE","N/m^2",estr2D, ecstr, & "seabed (basal) stress (x)", & "positive is x direction on E grid", c1, c0, & @@ -1288,22 +1288,22 @@ subroutine init_hist (dt) "seabed (basal) stress (y)", & "positive is y direction on E grid", c1, c0, & ns1, f_taubyE) - + call define_hist_field(n_strength,"strength","N/m",tstr2D, tcstr, & "compressive ice strength", & "none", c1, c0, & ns1, f_strength) - + call define_hist_field(n_divu,"divu","%/day",tstr2D, tcstr, & "strain rate (divergence)", & "none", secday*c100, c0, & ns1, f_divu) - + call define_hist_field(n_shear,"shear","%/day",tstr2D, tcstr, & "strain rate (shear)", & "none", secday*c100, c0, & ns1, f_shear) - + select case (grid_ice) case('B') description = ", on U grid (NE corner values)" @@ -1315,42 +1315,42 @@ subroutine init_hist (dt) "norm. principal stress 1", & "sig1 is instantaneous" // trim(description), c1, c0, & ns1, f_sig1) - + call define_hist_field(n_sig2,"sig2","1",ustr2D, ucstr, & "norm. 
principal stress 2", & "sig2 is instantaneous" // trim(description), c1, c0, & ns1, f_sig2) - + call define_hist_field(n_sigP,"sigP","1",ustr2D, ucstr, & "ice pressure", & "sigP is instantaneous" // trim(description), c1, c0, & ns1, f_sigP) - + call define_hist_field(n_dvidtt,"dvidtt","cm/day",tstr2D, tcstr, & "volume tendency thermo", & "none", mps_to_cmpdy, c0, & ns1, f_dvidtt) - + call define_hist_field(n_dvidtd,"dvidtd","cm/day",tstr2D, tcstr, & "volume tendency dynamics", & "none", mps_to_cmpdy, c0, & ns1, f_dvidtd) - + call define_hist_field(n_daidtt,"daidtt","%/day",tstr2D, tcstr, & "area tendency thermo", & "none", secday*c100, c0, & ns1, f_daidtt) - + call define_hist_field(n_daidtd,"daidtd","%/day",tstr2D, tcstr, & "area tendency dynamics", & "none", secday*c100, c0, & ns1, f_daidtd) - + call define_hist_field(n_dagedtt,"dagedtt","day/day",tstr2D, tcstr, & "age tendency thermo", & "excludes time step increment", c1, c0, & ns1, f_dagedtt) - + call define_hist_field(n_dagedtd,"dagedtd","day/day",tstr2D, tcstr, & "age tendency dynamics", & "excludes time step increment", c1, c0, & @@ -1370,22 +1370,22 @@ subroutine init_hist (dt) "ice volume snapshot", & "none", c1, c0, & ns1, f_hisnap) - + call define_hist_field(n_aisnap,"aisnap","1",tstr2D, tcstr, & "ice area snapshot", & "none", c1, c0, & ns1, f_aisnap) - + call define_hist_field(n_trsig,"trsig","N/m",tstr2D, tcstr, & "internal stress tensor trace", & "ice strength approximation", c1, c0, & ns1, f_trsig) - + call define_hist_field(n_icepresent,"ice_present","1",tstr2D, tcstr, & "fraction of time-avg interval that ice is present", & "ice extent flag", c1, c0, & ns1, f_icepresent) - + call define_hist_field(n_fsurf_ai,"fsurf_ai","W/m^2",tstr2D, tcstr, & "net surface heat flux", & "positive downward, excludes conductive flux, weighted by ice area", & @@ -1477,27 +1477,27 @@ subroutine init_hist (dt) "sea ice thickness", & "volume divided by area", c1, c0, & ns1, f_sithick) - + call define_hist_field(n_siage,"siage","s",tstr2D, tcstr, & "sea ice age", & "none", c1, c0, & ns1, f_siage) - + call define_hist_field(n_sisnthick,"sisnthick","m",tstr2D, tcstr, & "sea ice snow thickness", & "snow volume divided by area", c1, c0, & ns1, f_sisnthick) - + call define_hist_field(n_sitemptop,"sitemptop","K",tstr2D, tcstr, & "sea ice surface temperature", & "none", c1, c0, & ns1, f_sitemptop) - + call define_hist_field(n_sitempsnic,"sitempsnic","K",tstr2D, tcstr, & "snow ice interface temperature", & "surface temperature when no snow present", c1, c0, & ns1, f_sitempsnic) - + call define_hist_field(n_sitempbot,"sitempbot","K",tstr2D, tcstr, & "sea ice bottom temperature", & "none", c1, c0, & @@ -1512,37 +1512,37 @@ subroutine init_hist (dt) "ice y velocity component", & "none", c1, c0, & ns1, f_siv) - + call define_hist_field(n_sidmasstranx,"sidmasstranx","kg/s",ustr2D, ucstr, & "x component of snow and sea ice mass transport", & "none", c1, c0, & ns1, f_sidmasstranx) - + call define_hist_field(n_sidmasstrany,"sidmasstrany","kg/s",ustr2D, ucstr, & "y component of snow and sea ice mass transport", & "none", c1, c0, & ns1, f_sidmasstrany) - + call define_hist_field(n_sistrxdtop,"sistrxdtop","N m-2",ustr2D, ucstr, & "x component of atmospheric stress on sea ice", & "none", c1, c0, & ns1, f_sistrxdtop) - + call define_hist_field(n_sistrydtop,"sistrydtop","N m-2",ustr2D, ucstr, & "y component of atmospheric stress on sea ice", & "none", c1, c0, & ns1, f_sistrydtop) - + call define_hist_field(n_sistrxubot,"sistrxubot","N m-2",ustr2D, ucstr, & "x 
component of ocean stress on sea ice", & "none", c1, c0, & ns1, f_sistrxubot) - + call define_hist_field(n_sistryubot,"sistryubot","N m-2",ustr2D, ucstr, & "y component of ocean stress on sea ice", & "none", c1, c0, & ns1, f_sistryubot) - + call define_hist_field(n_sicompstren,"sicompstren","N m-1",tstr2D, tcstr, & "compressive sea ice strength", & "none", c1, c0, & @@ -1557,37 +1557,37 @@ subroutine init_hist (dt) "ice direction", & "vector direction - going to", c1, c0, & ns1, f_sidir) - + call define_hist_field(n_sialb,"sialb","1",tstr2D, tcstr, & "sea ice albedo", & "none", c1, c0, & ns1, f_sialb) - + call define_hist_field(n_sihc,"sihc","J m-2",tstr2D, tcstr, & "sea ice heat content", & "none", c1, c0, & ns1, f_sihc) - + call define_hist_field(n_sisnhc,"sisnhc","J m-2",tstr2D, tcstr, & "snow heat content", & "none", c1, c0, & ns1, f_sisnhc) - + call define_hist_field(n_sidconcth,"sidconcth","1/s",tstr2D, tcstr, & "sea ice area change from thermodynamics", & "none", c1, c0, & ns1, f_sidconcth) - + call define_hist_field(n_sidconcdyn,"sidconcdyn","1/s",tstr2D, tcstr, & "sea ice area change from dynamics", & "none", c1, c0, & ns1, f_sidconcdyn) - + call define_hist_field(n_sidmassth,"sidmassth","kg m-2 s-1",tstr2D, tcstr, & "sea ice mass change from thermodynamics", & "none", c1, c0, & ns1, f_sidmassth) - + call define_hist_field(n_sidmassdyn,"sidmassdyn","kg m-2 s-1",tstr2D, tcstr, & "sea ice mass change from dynamics", & "none", c1, c0, & @@ -1597,37 +1597,37 @@ subroutine init_hist (dt) "sea ice mass change from frazil", & "none", c1, c0, & ns1, f_sidmassgrowthwat) - + call define_hist_field(n_sidmassgrowthbot,"sidmassgrowthbot","kg m-2 s-1",tstr2D, tcstr, & "sea ice mass change from basal growth", & "none", c1, c0, & ns1, f_sidmassgrowthbot) - + call define_hist_field(n_sidmasssi,"sidmasssi","kg m-2 s-1",tstr2D, tcstr, & "sea ice mass change from snow-ice formation", & "none", c1, c0, & ns1, f_sidmasssi) - + call define_hist_field(n_sidmassevapsubl,"sidmassevapsubl","kg m-2 s-1",tstr2D, tcstr, & "sea ice mass change from evaporation and sublimation", & "none", c1, c0, & ns1, f_sidmassevapsubl) - + call define_hist_field(n_sndmasssubl,"sndmasssubl","kg m-2 s-1",tstr2D, tcstr, & "snow mass change from evaporation and sublimation", & "none", c1, c0, & ns1, f_sndmasssubl) - + call define_hist_field(n_sidmassmelttop,"sidmassmelttop","kg m-2 s-1",tstr2D, tcstr, & "sea ice mass change top melt", & "none", c1, c0, & ns1, f_sidmassmelttop) - + call define_hist_field(n_sidmassmeltbot,"sidmassmeltbot","kg m-2 s-1",tstr2D, tcstr, & "sea ice mass change bottom melt", & "none", c1, c0, & ns1, f_sidmassmeltbot) - + call define_hist_field(n_sidmasslat,"sidmasslat","kg m-2 s-1",tstr2D, tcstr, & "sea ice mass change lateral melt", & "none", c1, c0, & @@ -1637,37 +1637,37 @@ subroutine init_hist (dt) "snow mass change from snow fall", & "none", c1, c0, & ns1, f_sndmasssnf) - + call define_hist_field(n_sndmassmelt,"sndmassmelt","kg m-2 s-1",tstr2D, tcstr, & "snow mass change from snow melt", & "none", c1, c0, & ns1, f_sndmassmelt) - + call define_hist_field(n_siflswdtop,"siflswdtop","W/m2",tstr2D, tcstr, & "down shortwave flux over sea ice", & "positive downward", c1, c0, & ns1, f_siflswdtop) - + call define_hist_field(n_siflswutop,"siflswutop","W/m2",tstr2D, tcstr, & "upward shortwave flux over sea ice", & "positive downward", c1, c0, & ns1, f_siflswutop) - + call define_hist_field(n_siflswdbot,"siflswdbot","W/m2",tstr2D, tcstr, & "down shortwave flux at bottom of ice", & "positive downward", c1, c0, & 
ns1, f_siflswdbot) - + call define_hist_field(n_sifllwdtop,"sifllwdtop","W/m2",tstr2D, tcstr, & "down longwave flux over sea ice", & "positive downward", c1, c0, & ns1, f_sifllwdtop) - + call define_hist_field(n_sifllwutop,"sifllwutop","W/m2",tstr2D, tcstr, & "upward longwave flux over sea ice", & "positive downward", c1, c0, & ns1, f_sifllwutop) - + call define_hist_field(n_siflsenstop,"siflsenstop","W/m2",tstr2D, tcstr, & "sensible heat flux over sea ice", & "positive downward", c1, c0, & @@ -1677,37 +1677,37 @@ subroutine init_hist (dt) "sensible heat flux at bottom of sea ice", & "positive downward", c1, c0, & ns1, f_siflsensupbot) - + call define_hist_field(n_sifllatstop,"sifllatstop","W/m2",tstr2D, tcstr, & "latent heat flux over sea ice", & "positive downward", c1, c0, & ns1, f_sifllatstop) - + call define_hist_field(n_siflcondtop,"siflcondtop","W/m2",tstr2D, tcstr, & "conductive heat flux at top of sea ice", & "positive downward", c1, c0, & ns1, f_siflcondtop) - + call define_hist_field(n_siflcondbot,"siflcondbot","W/m2",tstr2D, tcstr, & "conductive heat flux at bottom of sea ice", & "positive downward", c1, c0, & ns1, f_siflcondbot) - + call define_hist_field(n_sipr,"sipr","kg m-2 s-1",tstr2D, tcstr, & "rainfall over sea ice", & "none", c1, c0, & ns1, f_sipr) - + call define_hist_field(n_sifb,"sifb","m",tstr2D, tcstr, & "sea ice freeboard above sea level", & "none", c1, c0, & ns1, f_sifb) - + call define_hist_field(n_siflsaltbot,"siflsaltbot","kg m-2 s-1",tstr2D, tcstr, & "salt flux from sea ice", & "positive downward", c1, c0, & ns1, f_siflsaltbot) - + call define_hist_field(n_siflfwbot,"siflfwbot","kg m-2 s-1",tstr2D, tcstr, & "fresh water flux from sea ice", & "positive downward", c1, c0, & @@ -1717,37 +1717,37 @@ subroutine init_hist (dt) "fresh water drainage through sea ice", & "positive downward", c1, c0, & ns1, f_siflfwdrain) - + call define_hist_field(n_sidragtop,"sidragtop","1",tstr2D, tcstr, & "atmospheric drag over sea ice", & "none", c1, c0, & ns1, f_sidragtop) - + call define_hist_field(n_sirdgthick,"sirdgthick","m",tstr2D, tcstr, & "sea ice ridge thickness", & "vrdg divided by ardg", c1, c0, & ns1, f_sirdgthick) - + call define_hist_field(n_siforcetiltx,"siforcetiltx","N m-2",tstr2D, tcstr, & "sea surface tilt term", & "none", c1, c0, & ns1, f_siforcetiltx) - + call define_hist_field(n_siforcetilty,"siforcetilty","N m-2",tstr2D, tcstr, & "sea surface tilt term", & "none", c1, c0, & ns1, f_siforcetilty) - + call define_hist_field(n_siforcecoriolx,"siforcecoriolx","N m-2",tstr2D, tcstr, & "coriolis term", & "none", c1, c0, & ns1, f_siforcecoriolx) - + call define_hist_field(n_siforcecorioly,"siforcecorioly","N m-2",tstr2D, tcstr, & "coriolis term", & "none", c1, c0, & ns1, f_siforcecorioly) - + call define_hist_field(n_siforceintstrx,"siforceintstrx","N m-2",tstr2D, tcstr, & "internal stress term", & "none", c1, c0, & @@ -1762,7 +1762,7 @@ subroutine init_hist (dt) "average normal stress", & "sistreave is instantaneous", c1, c0, & ns1, f_sistreave) - + call define_hist_field(n_sistremax,"sistremax","N m-1",ustr2D, ucstr, & "maximum shear stress", & "sistremax is instantaneous", c1, c0, & @@ -1797,12 +1797,12 @@ subroutine init_hist (dt) do ns1 = 1, nstreams if (histfreq(ns1) /= 'x') then - call define_hist_field(n_aicen,"aicen","1",tstr3Dc, tcstr, & - "ice area, categories","none", c1, c0, & + call define_hist_field(n_aicen,"aicen","1",tstr3Dc, tcstr, & + "ice area, categories","none", c1, c0, & ns1, f_aicen) - call define_hist_field(n_vicen,"vicen","m",tstr3Dc, 
tcstr, & - "ice volume, categories","none", c1, c0, & + call define_hist_field(n_vicen,"vicen","m",tstr3Dc, tcstr, & + "ice volume, categories","none", c1, c0, & ns1, f_vicen) call define_hist_field(n_vsnon,"vsnon","m",tstr3Dc, tcstr, & @@ -1814,29 +1814,29 @@ subroutine init_hist (dt) "snow fraction per unit grid cell area", c1, c0, & ns1, f_snowfracn) - call define_hist_field(n_fsurfn_ai,"fsurfn_ai","W/m^2",tstr3Dc, tcstr, & + call define_hist_field(n_fsurfn_ai,"fsurfn_ai","W/m^2",tstr3Dc, tcstr, & "net surface heat flux, categories","weighted by ice area", c1, c0, & ns1, f_fsurfn_ai) - + call define_hist_field(n_fcondtopn_ai,"fcondtopn_ai","W/m^2",tstr3Dc, tcstr, & "top sfc conductive heat flux, cat","weighted by ice area", c1, c0, & ns1, f_fcondtopn_ai) - call define_hist_field(n_fmelttn_ai,"fmelttn_ai","W/m^2",tstr3Dc, tcstr, & - "net sfc heat flux causing melt, cat","weighted by ice area", c1, c0, & + call define_hist_field(n_fmelttn_ai,"fmelttn_ai","W/m^2",tstr3Dc, tcstr, & + "net sfc heat flux causing melt, cat","weighted by ice area", c1, c0, & ns1, f_fmelttn_ai) - call define_hist_field(n_flatn_ai,"flatn_ai","W/m^2",tstr3Dc, tcstr, & - "latent heat flux, category","weighted by ice area", c1, c0, & + call define_hist_field(n_flatn_ai,"flatn_ai","W/m^2",tstr3Dc, tcstr, & + "latent heat flux, category","weighted by ice area", c1, c0, & ns1, f_flatn_ai) - call define_hist_field(n_fsensn_ai,"fsensn_ai","W/m^2",tstr3Dc, tcstr, & - "sensible heat flux, category","weighted by ice area", c1, c0, & + call define_hist_field(n_fsensn_ai,"fsensn_ai","W/m^2",tstr3Dc, tcstr, & + "sensible heat flux, category","weighted by ice area", c1, c0, & ns1, f_fsensn_ai) call define_hist_field(n_keffn_top,"keffn_top","W/m^2/K",tstr3Dc, tcstr, & "effective thermal conductivity of the top ice layer, categories", & - "multilayer scheme", c1, c0, & + "multilayer scheme", c1, c0, & ns1, f_keffn_top) ! CMIP 3D @@ -1876,16 +1876,16 @@ subroutine init_hist (dt) ! do ns1 = 1, nstreams ! if (histfreq(ns1) /= 'x') then -! call define_hist_field(n_field3dz,"field3dz","1",tstr3Dz, tcstr, & +! call define_hist_field(n_field3dz,"field3dz","1",tstr3Dz, tcstr, & ! "example 3dz field", & ! "vertical profile", c1, c0, & ! ns1, f_field3dz) ! endif ! if (histfreq(ns1) /= 'x') then -! enddo ! ns1 +! enddo ! ns1 ! biogeochemistry - call init_hist_bgc_3Db + call init_hist_bgc_3Db call init_hist_bgc_3Da !----------------------------------------------------------------- @@ -1902,12 +1902,12 @@ subroutine init_hist (dt) do ns1 = 1, nstreams if (histfreq(ns1) /= 'x') then - call define_hist_field(n_Tinz,"Tinz","C",tstr4Di, tcstr, & + call define_hist_field(n_Tinz,"Tinz","C",tstr4Di, tcstr, & "ice internal temperatures on CICE grid", & "vertical profile", c1, c0, & ns1, f_Tinz) - call define_hist_field(n_Sinz,"Sinz","ppt",tstr4Di, tcstr, & + call define_hist_field(n_Sinz,"Sinz","ppt",tstr4Di, tcstr, & "ice internal bulk salinity", & "vertical profile", c1, c0, & ns1, f_Sinz) @@ -1918,7 +1918,7 @@ subroutine init_hist (dt) do ns1 = 1, nstreams if (histfreq(ns1) /= 'x') then - call define_hist_field(n_Tsnz,"Tsnz","C",tstr4Ds, tcstr, & + call define_hist_field(n_Tsnz,"Tsnz","C",tstr4Ds, tcstr, & "snow internal temperatures", & "vertical profile", c1, c0, & ns1, f_Tsnz) @@ -2071,8 +2071,8 @@ subroutine init_hist (dt) if (restart .and. yday >= c2) then ! 
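!-----------------------------------------------------------------
! A minimal, self-contained sketch of the call pattern used by the
! many define_hist_field calls above. The stub below is
! hypothetical: argument names and roles are inferred from the
! calls themselves (the real interface lives in ice_history_shared
! and differs in detail, e.g. the index argument is per-stream).
!-----------------------------------------------------------------
program demo_define_hist
  implicit none
  integer, parameter :: dbl_kind = kind(1.0d0)
  real (kind=dbl_kind), parameter :: c0 = 0.0_dbl_kind, c1 = 1.0_dbl_kind
  integer :: n_example
  call define_hist_field_stub(n_example, "strairx", "N/m^2", "ustr2D", "ucstr", &
       "atm/ice stress (x)", "positive is x direction on U grid", c1, c0, 1, "m")
contains
  subroutine define_hist_field_stub(id, vname, vunit, vgrid, vcoord, &
       vdesc, vcomment, cona, conb, ns, vhistfreq)
    integer,              intent(out) :: id              ! index into the available-field list
    character (len=*),    intent(in)  :: vname           ! short (file) variable name
    character (len=*),    intent(in)  :: vunit           ! units attribute
    character (len=*),    intent(in)  :: vgrid, vcoord   ! grid and coordinate descriptors
    character (len=*),    intent(in)  :: vdesc, vcomment ! long_name and comment attributes
    real (kind=dbl_kind), intent(in)  :: cona, conb      ! written value = cona*field + conb
    integer,              intent(in)  :: ns              ! history stream number
    character (len=*),    intent(in)  :: vhistfreq       ! namelist frequency flag ('x' = off)
    id = 1
    print *, trim(vname)//' ['//trim(vunit)//'] : '//trim(vdesc)
  end subroutine define_hist_field_stub
end program demo_define_hist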
restarting midyear gives erroneous onset dates - mlt_onset = 999._dbl_kind - frz_onset = 999._dbl_kind + mlt_onset = 999._dbl_kind + frz_onset = 999._dbl_kind else mlt_onset = c0 frz_onset = c0 @@ -2150,7 +2150,7 @@ subroutine accum_hist (dt) ravgct , & ! 1/avgct ravgctz ! 1/avgct - real (kind=dbl_kind) :: & + real (kind=dbl_kind) :: & qn , & ! temporary variable for enthalpy sn ! temporary variable for salinity @@ -2208,7 +2208,7 @@ subroutine accum_hist (dt) if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & a2D(:,:,n,:) = c0 enddo - do n = n2D + 1, n3Dccum + do n = n2D + 1, n3Dccum nn = n - n2D if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & a3Dc(:,:,:,nn,:) = c0 @@ -2267,7 +2267,7 @@ subroutine accum_hist (dt) !$OMP worka,workb,worka3,Tinz4d,Sinz4d,Tsnz4d) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -2526,7 +2526,7 @@ subroutine accum_hist (dt) call accum_hist_field(n_fswthru, iblk, fswthru(:,:,iblk), a2D) if (f_fswthru_ai(1:1)/= 'x') & call accum_hist_field(n_fswthru_ai,iblk, fswthru_ai(:,:,iblk), a2D) - + if (f_strairx(1:1) /= 'x') & call accum_hist_field(n_strairx, iblk, strairxU(:,:,iblk), a2D) if (f_strairy(1:1) /= 'x') & @@ -3219,7 +3219,7 @@ subroutine accum_hist (dt) if ( ktherm == 2) then dfresh = -rhoi*(frazil(i,j,iblk)-frazil_diag(i,j,iblk))/dt else - dfresh = -rhoi*frazil(i,j,iblk)/dt + dfresh = -rhoi*frazil(i,j,iblk)/dt endif endif dfsalt = ice_ref_salinity*p001*dfresh @@ -3241,7 +3241,7 @@ subroutine accum_hist (dt) if ( ktherm == 2) then dfresh = -rhoi*(frazil(i,j,iblk)-frazil_diag(i,j,iblk))/dt else - dfresh = -rhoi*frazil(i,j,iblk)/dt + dfresh = -rhoi*frazil(i,j,iblk)/dt endif endif worka(i,j) = aice(i,j,iblk)*(fresh(i,j,iblk)+dfresh) @@ -3395,7 +3395,7 @@ subroutine accum_hist (dt) if (f_fsensn_ai (1:1) /= 'x') & call accum_hist_field(n_fsensn_ai-n2D, iblk, ncat_hist, & fsensn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) - ! Calculate surface heat flux that causes melt (calculated by the + ! Calculate surface heat flux that causes melt (calculated by the ! atmos in HadGEM3 so needed for checking purposes) if (f_fmelttn_ai (1:1) /= 'x') & call accum_hist_field(n_fmelttn_ai-n2D, iblk, ncat_hist, & @@ -3484,7 +3484,7 @@ subroutine accum_hist (dt) call accum_hist_field(n_Sinz-n3Dfcum, iblk, nzilyr, ncat_hist, & Sinz4d(:,:,1:nzilyr,1:ncat_hist), a4Di) endif - + endif ! if (allocated(a3Dc)) if (allocated(a4Ds)) then @@ -3504,7 +3504,7 @@ subroutine accum_hist (dt) call accum_hist_field(n_Tsnz-n4Dicum, iblk, nzslyr, ncat_hist, & Tsnz4d(:,:,1:nzslyr,1:ncat_hist), a4Ds) endif - + endif ! if (allocated(a4Ds)) if (allocated(a3Dc) .and. allocated(a2D)) then @@ -3528,7 +3528,7 @@ subroutine accum_hist (dt) enddo endif - endif + endif !--------------------------------------------------------------- ! accumulate other history output !--------------------------------------------------------------- @@ -3569,14 +3569,14 @@ subroutine accum_hist (dt) if (write_history(ns) .or. write_ic) then !--------------------------------------------------------------- - ! Mask out land points and convert units + ! 
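!-----------------------------------------------------------------
! A minimal sketch of the frazil freshwater/salt diagnostic that
! appears twice above, with illustrative scalar values standing in
! for the model arrays (the rhoi, ice_ref_salinity and dt values
! used here are assumptions).
!-----------------------------------------------------------------
program demo_frazil_flux
  implicit none
  integer, parameter :: dbl_kind = kind(1.0d0)
  real (kind=dbl_kind), parameter :: rhoi = 917.0_dbl_kind           ! ice density (kg/m^3)
  real (kind=dbl_kind), parameter :: ice_ref_salinity = 4.0_dbl_kind ! reference salinity (ppt)
  real (kind=dbl_kind), parameter :: p001 = 0.001_dbl_kind
  real (kind=dbl_kind) :: frazil, frazil_diag, dt, dfresh, dfsalt
  integer :: ktherm
  frazil      = 0.002_dbl_kind    ! frazil ice growth this step (m)
  frazil_diag = 0.0005_dbl_kind   ! part already handled by mushy thermo
  dt          = 3600.0_dbl_kind   ! time step (s)
  ktherm      = 2
  if (ktherm == 2) then           ! mushy thermodynamics
     dfresh = -rhoi*(frazil - frazil_diag)/dt
  else
     dfresh = -rhoi*frazil/dt
  endif
  dfsalt = ice_ref_salinity*p001*dfresh   ! implied salt flux (kg/m^2/s)
  print *, 'dfresh =', dfresh, '  dfsalt =', dfsalt
end program demo_frazil_flux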
Mask out land points and convert units !--------------------------------------------------------------- ravgct = c1/avgct(ns) !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & !$OMP n,nn,ravgctz,ravgip,ravgipn) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -3611,7 +3611,7 @@ subroutine accum_hist (dt) endif do n = 1, num_avail_hist_fields_2D - if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) then do j = jlo, jhi do i = ilo, ihi @@ -4125,7 +4125,7 @@ subroutine accum_hist (dt) if (avail_hist_fields(n)%vname(1:6) == 'albice') then do j = jlo, jhi do i = ilo, ihi - if (tmask(i,j,iblk)) then + if (tmask(i,j,iblk)) then ravgctz = c0 if (albcnt(i,j,iblk,ns) > puny) & ravgctz = c1/albcnt(i,j,iblk,ns) @@ -4145,7 +4145,7 @@ subroutine accum_hist (dt) if (avail_hist_fields(n)%vname(1:6) == 'albsni') then do j = jlo, jhi do i = ilo, ihi - if (tmask(i,j,iblk)) then + if (tmask(i,j,iblk)) then ravgctz = c0 if (albcnt(i,j,iblk,ns) > puny) & ravgctz = c1/albcnt(i,j,iblk,ns) @@ -4159,7 +4159,7 @@ subroutine accum_hist (dt) if (avail_hist_fields(n)%vname(1:8) == 'alvdr_ai') then do j = jlo, jhi do i = ilo, ihi - if (tmask(i,j,iblk)) then + if (tmask(i,j,iblk)) then ravgctz = c0 if (albcnt(i,j,iblk,ns) > puny) & ravgctz = c1/albcnt(i,j,iblk,ns) @@ -4216,7 +4216,7 @@ subroutine accum_hist (dt) do n = 1, num_avail_hist_fields_3Dc nn = n2D + n - if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then do k = 1, ncat_hist do j = jlo, jhi @@ -4265,7 +4265,7 @@ subroutine accum_hist (dt) do n = 1, num_avail_hist_fields_3Dz nn = n3Dccum + n - if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then do k = 1, nzilyr do j = jlo, jhi @@ -4283,7 +4283,7 @@ subroutine accum_hist (dt) enddo ! 
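!-----------------------------------------------------------------
! A minimal sketch of the normalization applied above when history
! buffers are written: ordinary fields are scaled by ravgct =
! 1/avgct, while albedo-type fields (albice, albsni, alvdr_ai, ...)
! use their own daylight counter albcnt, guarded by puny so that
! polar-night cells never divide by zero (counter values here are
! illustrative).
!-----------------------------------------------------------------
program demo_hist_average
  implicit none
  integer, parameter :: dbl_kind = kind(1.0d0)
  real (kind=dbl_kind), parameter :: c0 = 0.0_dbl_kind, c1 = 1.0_dbl_kind
  real (kind=dbl_kind), parameter :: puny = 1.0e-11_dbl_kind
  real (kind=dbl_kind) :: avgct, albcnt, ravgct, ravgctz
  avgct  = 248.0_dbl_kind   ! accumulation count for this stream
  albcnt = 130.0_dbl_kind   ! steps with nonzero incoming shortwave
  ravgct  = c1/avgct
  ravgctz = c0
  if (albcnt > puny) ravgctz = c1/albcnt
  print *, 'ravgct =', ravgct, '  ravgctz =', ravgctz
end program demo_hist_average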
n do n = 1, num_avail_hist_fields_3Db nn = n3Dzcum + n - if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then do k = 1, nzblyr do j = jlo, jhi do i = ilo, ihi @@ -4301,7 +4301,7 @@ subroutine accum_hist (dt) do n = 1, num_avail_hist_fields_3Da nn = n3Dbcum + n - if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then do k = 1, nzalyr do j = jlo, jhi do i = ilo, ihi @@ -4337,7 +4337,7 @@ subroutine accum_hist (dt) do n = 1, num_avail_hist_fields_4Di nn = n3Dfcum + n - if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then do k = 1, nzilyr do ic = 1, ncat_hist do j = jlo, jhi @@ -4357,7 +4357,7 @@ subroutine accum_hist (dt) do n = 1, num_avail_hist_fields_4Ds nn = n4Dicum + n - if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then do k = 1, nzslyr do ic = 1, ncat_hist do j = jlo, jhi @@ -4460,7 +4460,7 @@ subroutine accum_hist (dt) if (n_sig2 (ns) /= 0) a2D(i,j,n_sig2(ns),iblk) = & sig2 (i,j,iblk)*avail_hist_fields(n_sig2(ns))%cona if (n_sigP (ns) /= 0) a2D(i,j,n_sigP(ns),iblk) = & - sigP (i,j,iblk)*avail_hist_fields(n_sigP(ns))%cona + sigP (i,j,iblk)*avail_hist_fields(n_sigP(ns))%cona if (n_sistreave(ns) /= 0) a2D(i,j,n_sistreave(ns),iblk) = & p5*(sig1(i,j,iblk)+sig2(i,j,iblk))*avail_hist_fields(n_sistreave(ns))%cona if (n_sistremax(ns) /= 0) a2D(i,j,n_sistremax(ns),iblk) = & @@ -4557,8 +4557,8 @@ subroutine accum_hist (dt) do n = 1,n2D if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a2D(:,:,n,:) = c0 enddo - do n = n2D + 1, n3Dccum - nn = n - n2D + do n = n2D + 1, n3Dccum + nn = n - n2D if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a3Dc(:,:,:,nn,:) = c0 enddo do n = n3Dccum + 1, n3Dzcum @@ -4595,7 +4595,7 @@ subroutine accum_hist (dt) !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -4606,13 +4606,13 @@ subroutine accum_hist (dt) do i=ilo,ihi ! reset NH Jan 1 if (lmask_n(i,j,iblk)) mlt_onset(i,j,iblk) = c0 - ! reset SH Jan 1 + ! reset SH Jan 1 if (lmask_s(i,j,iblk)) frz_onset(i,j,iblk) = c0 enddo enddo endif ! new_year - if ( (mmonth .eq. 7) .and. new_month ) then + if ( (mmonth .eq. 7) .and. new_month ) then do j=jlo,jhi do i=ilo,ihi ! reset SH Jul 1 diff --git a/cicecore/cicedynB/analysis/ice_history_bgc.F90 b/cicecore/cicedynB/analysis/ice_history_bgc.F90 index 8802cf431..003e76120 100644 --- a/cicecore/cicedynB/analysis/ice_history_bgc.F90 +++ b/cicecore/cicedynB/analysis/ice_history_bgc.F90 @@ -21,74 +21,74 @@ module ice_history_bgc icepack_query_tracer_indices, icepack_query_parameters, & icepack_query_parameters use ice_domain_size, only: max_nstrm, n_iso, n_aero, & - n_algae, n_dic, n_doc, n_don, n_zaero, n_fed, n_fep + n_algae, n_dic, n_doc, n_don, n_zaero, n_fed, n_fep implicit none private public :: init_hist_bgc_2D, init_hist_bgc_3Dc, & init_hist_bgc_3Db, init_hist_bgc_3Da,& accum_hist_bgc, init_history_bgc - + !--------------------------------------------------------------- ! flags: write to output file if true or histfreq value !--------------------------------------------------------------- ! 
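!-----------------------------------------------------------------
! A minimal sketch of the flag convention used by every f_*
! variable declared below: 'x' disables a field, and any other
! character names the history stream frequency it is written to,
! which is why the definition code tests f_field(1:1) /= 'x' and
! compares against histfreq (values here are illustrative).
!-----------------------------------------------------------------
program demo_hist_flag
  implicit none
  character (len=5) :: f_zsal        ! one character per possible stream
  character (len=1) :: histfreq_ns   ! frequency of the stream being built
  f_zsal = 'm'                       ! monthly; 'x' would disable the field
  histfreq_ns = 'm'
  if (f_zsal(1:1) /= 'x') then
     if (f_zsal(1:1) == histfreq_ns) then
        print *, 'zsal_tot is defined on this stream'
     endif
  endif
end program demo_hist_flag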
specified in input_templates !-------------------------------------------------------------- - character (len=max_nstrm), public :: & + character (len=max_nstrm), public :: & f_fiso_atm = 'x', f_fiso_ocn = 'x', & f_iso = 'x', & f_faero_atm = 'x', f_faero_ocn = 'x', & f_aero = 'x', & - f_fzsal = 'm', f_fzsal_ai = 'm', & + f_fzsal = 'm', f_fzsal_ai = 'm', & f_fzsal_g = 'm', f_fzsal_g_ai = 'm', & f_zsal = 'x', & - f_fbio = 'x', f_fbio_ai = 'x', & - f_zaero = 'x', f_bgc_S = 'x', & + f_fbio = 'x', f_fbio_ai = 'x', & + f_zaero = 'x', f_bgc_S = 'x', & f_bgc_N = 'x', f_bgc_C = 'x', & f_bgc_DOC = 'x', f_bgc_DIC = 'x', & f_bgc_chl = 'x', f_bgc_Nit = 'x', & f_bgc_Am = 'x', f_bgc_Sil = 'x', & f_bgc_DMSPp = 'x', f_bgc_DMSPd = 'x', & - f_bgc_DMS = 'x', f_bgc_DON = 'x', & + f_bgc_DMS = 'x', f_bgc_DON = 'x', & f_bgc_Fe = 'x', f_bgc_hum = 'x', & f_bgc_PON = 'x', f_bgc_ml = 'x', & - f_upNO = 'x', f_upNH = 'x', & - f_bTin = 'x', f_bphi = 'x', & - f_iDi = 'x', f_iki = 'x', & + f_upNO = 'x', f_upNH = 'x', & + f_bTin = 'x', f_bphi = 'x', & + f_iDi = 'x', f_iki = 'x', & f_fbri = 'x', f_hbri = 'x', & - f_zfswin = 'x', f_grownet = 'x', & - f_bionet = 'x', f_biosnow = 'x', & + f_zfswin = 'x', f_grownet = 'x', & + f_bionet = 'x', f_biosnow = 'x', & f_PPnet = 'x', f_algalpeak = 'x', & f_zbgc_frac = 'x', & !------------------------------------------------ ! specified by combinations of above values !------------------------------------------------- f_bgc_Fed = 'x', f_bgc_Fep = 'x', & - f_DONnet = 'x', & - f_DICnet = 'x', f_DOCnet = 'x', & - f_chlnet = 'x', f_Nitnet = 'x', & - f_Amnet = 'x', f_Cnet = 'x', & - f_Nnet = 'x', f_DMSPpnet = 'x', & - f_DMSPdnet = 'x', f_DMSnet = 'x', & - f_Fednet = 'x', f_Fepnet = 'x', & + f_DONnet = 'x', & + f_DICnet = 'x', f_DOCnet = 'x', & + f_chlnet = 'x', f_Nitnet = 'x', & + f_Amnet = 'x', f_Cnet = 'x', & + f_Nnet = 'x', f_DMSPpnet = 'x', & + f_DMSPdnet = 'x', f_DMSnet = 'x', & + f_Fednet = 'x', f_Fepnet = 'x', & f_Silnet = 'x', f_PONnet = 'x', & - f_zaeronet = 'x', f_humnet = 'x', & - f_chlsnow = 'x', f_Nitsnow = 'x', & - f_Amsnow = 'x', f_Csnow = 'x', & - f_Nsnow = 'x', f_DMSPpsnow = 'x', & - f_DMSPdsnow = 'x', f_DMSsnow = 'x', & - f_Fedsnow = 'x', f_Fepsnow = 'x', & - f_Silsnow = 'x', f_PONsnow = 'x', & + f_zaeronet = 'x', f_humnet = 'x', & + f_chlsnow = 'x', f_Nitsnow = 'x', & + f_Amsnow = 'x', f_Csnow = 'x', & + f_Nsnow = 'x', f_DMSPpsnow = 'x', & + f_DMSPdsnow = 'x', f_DMSsnow = 'x', & + f_Fedsnow = 'x', f_Fepsnow = 'x', & + f_Silsnow = 'x', f_PONsnow = 'x', & f_humsnow = 'x', & - f_DICsnow = 'x', f_DOCsnow = 'x', & + f_DICsnow = 'x', f_DOCsnow = 'x', & f_DONsnow = 'x', f_zaerosnow = 'x', & - f_chlfrac = 'x', f_Nitfrac = 'x', & - f_Amfrac = 'x', & - f_Nfrac = 'x', f_DMSPpfrac = 'x', & - f_DMSPdfrac = 'x', f_DMSfrac = 'x', & - f_Silfrac = 'x', f_PONfrac = 'x', & + f_chlfrac = 'x', f_Nitfrac = 'x', & + f_Amfrac = 'x', & + f_Nfrac = 'x', f_DMSPpfrac = 'x', & + f_DMSPdfrac = 'x', f_DMSfrac = 'x', & + f_Silfrac = 'x', f_PONfrac = 'x', & f_humfrac = 'x', & - f_DICfrac = 'x', f_DOCfrac = 'x', & + f_DICfrac = 'x', f_DOCfrac = 'x', & f_DONfrac = 'x', f_zaerofrac = 'x', & f_Fedfrac = 'x', f_Fepfrac = 'x', & f_fNit = 'x', f_fNit_ai = 'x', & @@ -99,13 +99,13 @@ module ice_history_bgc f_fDON = 'x', f_fDON_ai = 'x', & f_fFed = 'x', f_fFed_ai = 'x', & f_fFep = 'x', f_fFep_ai = 'x', & - f_fSil = 'x', f_fSil_ai = 'x', & - f_fPON = 'x', f_fPON_ai = 'x', & - f_fhum = 'x', f_fhum_ai = 'x', & - f_fDMSPp = 'x', f_fDMSPp_ai = 'x', & - f_fDMSPd = 'x', f_fDMSPd_ai = 'x', & - f_fDMS = 'x', f_fDMS_ai = 'x', 
& - f_fzaero = 'x', f_fzaero_ai = 'x', & + f_fSil = 'x', f_fSil_ai = 'x', & + f_fPON = 'x', f_fPON_ai = 'x', & + f_fhum = 'x', f_fhum_ai = 'x', & + f_fDMSPp = 'x', f_fDMSPp_ai = 'x', & + f_fDMSPd = 'x', f_fDMSPd_ai = 'x', & + f_fDMS = 'x', f_fDMS_ai = 'x', & + f_fzaero = 'x', f_fzaero_ai = 'x', & f_bgc_Sil_ml = 'x', & f_bgc_Nit_ml = 'x', f_bgc_Am_ml = 'x', & f_bgc_DMSP_ml = 'x', f_bgc_DMS_ml = 'x', & @@ -140,12 +140,12 @@ module ice_history_bgc f_bgc_DMS , f_bgc_DON , & f_bgc_Fe , f_bgc_hum , & f_bgc_PON , f_bgc_ml , & - f_upNO , f_upNH , & + f_upNO , f_upNH , & f_bTin , f_bphi , & - f_iDi , f_iki , & + f_iDi , f_iki , & f_fbri , f_hbri , & - f_zfswin , f_grownet , & - f_bionet , f_biosnow , & + f_zfswin , f_grownet , & + f_bionet , f_biosnow , & f_PPnet , f_algalpeak , & f_zbgc_frac @@ -154,9 +154,9 @@ module ice_history_bgc !--------------------------------------------------------------- integer (kind=int_kind), dimension(max_nstrm), public :: & - n_fzsal , n_fzsal_ai , & - n_fzsal_g , n_fzsal_g_ai , & - n_zsal + n_fzsal , n_fzsal_ai , & + n_fzsal_g , n_fzsal_g_ai , & + n_zsal integer(kind=int_kind), dimension(icepack_max_iso,max_nstrm) :: & n_fiso_atm , & @@ -216,7 +216,7 @@ module ice_history_bgc n_bgc_Fed_cat1, n_bgc_Fep_cat1 integer(kind=int_kind), dimension(max_nstrm) :: & - n_bgc_S , & + n_bgc_S , & n_fNit , n_fNit_ai , & n_fAm , n_fAm_ai , & n_fSil , n_fSil_ai , & @@ -233,25 +233,25 @@ module ice_history_bgc n_bgc_hum_ml , & n_bgc_Nit_ml , n_bgc_Am_ml , & n_bgc_DMSP_ml , n_bgc_DMS_ml , & - n_upNO , n_upNH , & + n_upNO , n_upNH , & n_bTin , n_bphi , & n_iDi , n_iki , & n_bgc_PON , & n_fbri , n_hbri , & - n_zfswin , n_Nitnet , & - n_Amnet , n_Silnet , & + n_zfswin , n_Nitnet , & + n_Amnet , n_Silnet , & n_humnet , & - n_DMSPpnet , n_DMSPdnet , & - n_DMSnet , n_PONnet , & + n_DMSPpnet , n_DMSPdnet , & + n_DMSnet , n_PONnet , & n_Nitsnow , n_Amsnow , & n_Silsnow , n_humsnow , & - n_DMSPpsnow , n_DMSPdsnow , & - n_DMSsnow , n_PONsnow , & + n_DMSPpsnow , n_DMSPdsnow , & + n_DMSsnow , n_PONsnow , & n_Nitfrac , n_Amfrac , & n_Silfrac , & n_humfrac , & - n_DMSPpfrac , n_DMSPdfrac , & - n_DMSfrac , n_PONfrac , & + n_DMSPpfrac , n_DMSPdfrac , & + n_DMSfrac , n_PONfrac , & n_grownet , n_PPnet , & n_bgc_Nit_cat1, n_bgc_Am_cat1 , & n_bgc_Sil_cat1, n_bgc_DMSPd_cat1,& @@ -295,7 +295,7 @@ subroutine init_hist_bgc_2D tr_bgc_PON_out=tr_bgc_PON, & tr_bgc_N_out =tr_bgc_N, tr_bgc_C_out =tr_bgc_C, & tr_bgc_chl_out=tr_bgc_chl, tr_bgc_DON_out=tr_bgc_DON, & - tr_bgc_Fe_out =tr_bgc_Fe, tr_bgc_hum_out=tr_bgc_hum ) + tr_bgc_Fe_out =tr_bgc_Fe, tr_bgc_hum_out=tr_bgc_hum ) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) @@ -336,14 +336,14 @@ subroutine init_hist_bgc_2D if (.not. tr_aero) then f_faero_atm = 'x' f_faero_ocn = 'x' - f_aero = 'x' + f_aero = 'x' endif - + if (.not. tr_brine) then f_fbri = 'x' f_hbri = 'x' endif - + f_zaeronet = f_bionet f_zaerosnow = f_biosnow f_zaerofrac = f_zbgc_frac @@ -352,7 +352,7 @@ subroutine init_hist_bgc_2D if (.not. tr_zaero) then f_zaero = 'x' - f_fzaero = 'x' + f_fzaero = 'x' f_fzaero_ai = 'x' f_zaeronet = 'x' f_zaerosnow = 'x' @@ -396,7 +396,7 @@ subroutine init_hist_bgc_2D f_DMSPdnet = f_bionet f_DMSnet = f_bionet f_PONnet = f_bionet - + f_Nitsnow = f_biosnow f_Amsnow = f_biosnow f_Nsnow = f_biosnow @@ -466,7 +466,7 @@ subroutine init_hist_bgc_2D f_fDMSPd_ai = f_fbio_ai f_fDMS_ai = f_fbio_ai - if (.not. tr_bgc_N) then + if (.not. 
tr_bgc_N) then f_bgc_N = 'x' f_bgc_N_ml = 'x' f_fN = 'x' @@ -478,8 +478,8 @@ subroutine init_hist_bgc_2D endif f_peakval = f_algalpeak - if (.not. tr_bgc_Nit) then - f_upNO = 'x' + if (.not. tr_bgc_Nit) then + f_upNO = 'x' f_bgc_Nit = 'x' f_bgc_Nit_ml= 'x' f_fNit = 'x' @@ -511,8 +511,8 @@ subroutine init_hist_bgc_2D f_chlsnow = 'x' f_chlfrac = 'x' endif - if (.not. tr_bgc_Am) then - f_upNH = 'x' + if (.not. tr_bgc_Am) then + f_upNH = 'x' f_bgc_Am = 'x' f_bgc_Am_ml = 'x' f_fAm = 'x' @@ -560,8 +560,8 @@ subroutine init_hist_bgc_2D f_DMSfrac = 'x' f_DMSPpfrac = 'x' f_DMSPdfrac = 'x' - endif - if (.not. tr_bgc_DON) then + endif + if (.not. tr_bgc_DON) then f_bgc_DON = 'x' f_bgc_DON_ml = 'x' f_DONsnow = 'x' @@ -569,8 +569,8 @@ subroutine init_hist_bgc_2D f_DONnet = 'x' f_fDON = 'x' f_fDON_ai = 'x' - endif - if (.not. tr_bgc_Fe ) then + endif + if (.not. tr_bgc_Fe ) then f_bgc_Fe = 'x' f_bgc_Fed = 'x' f_bgc_Fed_ml = 'x' @@ -587,7 +587,7 @@ subroutine init_hist_bgc_2D f_fFep = 'x' f_fFep_ai = 'x' endif - if (.not. tr_bgc_PON .or. skl_bgc) then + if (.not. tr_bgc_PON .or. skl_bgc) then f_bgc_PON = 'x' f_PONsnow = 'x' f_PONfrac = 'x' @@ -595,19 +595,19 @@ subroutine init_hist_bgc_2D f_fPON = 'x' f_fPON_ai = 'x' endif - - f_bgc_Nit_cat1 = f_bgc_Nit - f_bgc_Am_cat1 = f_bgc_Am + + f_bgc_Nit_cat1 = f_bgc_Nit + f_bgc_Am_cat1 = f_bgc_Am f_bgc_N_cat1 = f_bgc_N f_bgc_DOC_cat1 = f_bgc_DOC f_bgc_DIC_cat1 = f_bgc_DIC f_bgc_DON_cat1 = f_bgc_DON - f_bgc_Fed_cat1 = f_bgc_Fe - f_bgc_Fep_cat1 = f_bgc_Fe - f_bgc_Sil_cat1 = f_bgc_Sil - f_bgc_hum_cat1 = f_bgc_hum + f_bgc_Fed_cat1 = f_bgc_Fe + f_bgc_Fep_cat1 = f_bgc_Fe + f_bgc_Sil_cat1 = f_bgc_Sil + f_bgc_hum_cat1 = f_bgc_hum f_bgc_DMSPd_cat1 = f_bgc_DMSPd - f_bgc_DMS_cat1 = f_bgc_DMS + f_bgc_DMS_cat1 = f_bgc_DMS f_bgc_PON_cat1 = f_bgc_PON if (solve_zsal) then @@ -711,73 +711,73 @@ subroutine init_hist_bgc_2D call broadcast_scalar (f_bgc_Sil_ml, master_task) call broadcast_scalar (f_bgc_hum_ml, master_task) call broadcast_scalar (f_bgc_DMSP_ml, master_task) - call broadcast_scalar (f_bgc_DMS_ml, master_task) - call broadcast_scalar (f_bgc_DON_ml, master_task) - call broadcast_scalar (f_bgc_Fed_ml, master_task) - call broadcast_scalar (f_bgc_Fep_ml, master_task) - call broadcast_scalar (f_upNO, master_task) - call broadcast_scalar (f_upNH, master_task) + call broadcast_scalar (f_bgc_DMS_ml, master_task) + call broadcast_scalar (f_bgc_DON_ml, master_task) + call broadcast_scalar (f_bgc_Fed_ml, master_task) + call broadcast_scalar (f_bgc_Fep_ml, master_task) + call broadcast_scalar (f_upNO, master_task) + call broadcast_scalar (f_upNH, master_task) call broadcast_scalar (f_bTin, master_task) call broadcast_scalar (f_bphi, master_task) - call broadcast_scalar (f_iDi, master_task) - call broadcast_scalar (f_iki, master_task) - call broadcast_scalar (f_bgc_S, master_task) - call broadcast_scalar (f_zfswin, master_task) - call broadcast_scalar (f_PPnet, master_task) - call broadcast_scalar (f_algalpeak, master_task) - call broadcast_scalar (f_zbgc_frac, master_task) - call broadcast_scalar (f_peakval, master_task) - call broadcast_scalar (f_grownet, master_task) - call broadcast_scalar (f_chlnet, master_task) - call broadcast_scalar (f_Nitnet, master_task) - call broadcast_scalar (f_Nnet, master_task) - call broadcast_scalar (f_Cnet, master_task) - call broadcast_scalar (f_DOCnet, master_task) - call broadcast_scalar (f_DICnet, master_task) - call broadcast_scalar (f_Amnet, master_task) - call broadcast_scalar (f_Silnet, master_task) - call broadcast_scalar (f_humnet, 
master_task) - call broadcast_scalar (f_DMSPpnet, master_task) - call broadcast_scalar (f_DMSPdnet, master_task) - call broadcast_scalar (f_DMSnet, master_task) - call broadcast_scalar (f_PONnet, master_task) - call broadcast_scalar (f_DONnet, master_task) - call broadcast_scalar (f_Fednet, master_task) - call broadcast_scalar (f_Fepnet, master_task) - call broadcast_scalar (f_zaeronet, master_task) - call broadcast_scalar (f_chlsnow, master_task) - call broadcast_scalar (f_Nitsnow, master_task) - call broadcast_scalar (f_Nsnow, master_task) - call broadcast_scalar (f_Csnow, master_task) - call broadcast_scalar (f_DOCsnow, master_task) - call broadcast_scalar (f_DICsnow, master_task) - call broadcast_scalar (f_Amsnow, master_task) - call broadcast_scalar (f_Silsnow, master_task) - call broadcast_scalar (f_humsnow, master_task) - call broadcast_scalar (f_DMSPpsnow, master_task) - call broadcast_scalar (f_DMSPdsnow, master_task) - call broadcast_scalar (f_DMSsnow, master_task) - call broadcast_scalar (f_PONsnow, master_task) - call broadcast_scalar (f_DONsnow, master_task) - call broadcast_scalar (f_Fedsnow, master_task) - call broadcast_scalar (f_Fepsnow, master_task) - call broadcast_scalar (f_zaerosnow, master_task) - call broadcast_scalar (f_chlfrac, master_task) - call broadcast_scalar (f_Nitfrac, master_task) - call broadcast_scalar (f_Nfrac, master_task) - call broadcast_scalar (f_DOCfrac, master_task) - call broadcast_scalar (f_DICfrac, master_task) - call broadcast_scalar (f_Amfrac, master_task) - call broadcast_scalar (f_Silfrac, master_task) - call broadcast_scalar (f_humfrac, master_task) - call broadcast_scalar (f_DMSPpfrac, master_task) - call broadcast_scalar (f_DMSPdfrac, master_task) - call broadcast_scalar (f_DMSfrac, master_task) - call broadcast_scalar (f_PONfrac, master_task) - call broadcast_scalar (f_DONfrac, master_task) - call broadcast_scalar (f_Fedfrac, master_task) - call broadcast_scalar (f_Fepfrac, master_task) - call broadcast_scalar (f_zaerofrac, master_task) + call broadcast_scalar (f_iDi, master_task) + call broadcast_scalar (f_iki, master_task) + call broadcast_scalar (f_bgc_S, master_task) + call broadcast_scalar (f_zfswin, master_task) + call broadcast_scalar (f_PPnet, master_task) + call broadcast_scalar (f_algalpeak, master_task) + call broadcast_scalar (f_zbgc_frac, master_task) + call broadcast_scalar (f_peakval, master_task) + call broadcast_scalar (f_grownet, master_task) + call broadcast_scalar (f_chlnet, master_task) + call broadcast_scalar (f_Nitnet, master_task) + call broadcast_scalar (f_Nnet, master_task) + call broadcast_scalar (f_Cnet, master_task) + call broadcast_scalar (f_DOCnet, master_task) + call broadcast_scalar (f_DICnet, master_task) + call broadcast_scalar (f_Amnet, master_task) + call broadcast_scalar (f_Silnet, master_task) + call broadcast_scalar (f_humnet, master_task) + call broadcast_scalar (f_DMSPpnet, master_task) + call broadcast_scalar (f_DMSPdnet, master_task) + call broadcast_scalar (f_DMSnet, master_task) + call broadcast_scalar (f_PONnet, master_task) + call broadcast_scalar (f_DONnet, master_task) + call broadcast_scalar (f_Fednet, master_task) + call broadcast_scalar (f_Fepnet, master_task) + call broadcast_scalar (f_zaeronet, master_task) + call broadcast_scalar (f_chlsnow, master_task) + call broadcast_scalar (f_Nitsnow, master_task) + call broadcast_scalar (f_Nsnow, master_task) + call broadcast_scalar (f_Csnow, master_task) + call broadcast_scalar (f_DOCsnow, master_task) + call broadcast_scalar (f_DICsnow, 
master_task) + call broadcast_scalar (f_Amsnow, master_task) + call broadcast_scalar (f_Silsnow, master_task) + call broadcast_scalar (f_humsnow, master_task) + call broadcast_scalar (f_DMSPpsnow, master_task) + call broadcast_scalar (f_DMSPdsnow, master_task) + call broadcast_scalar (f_DMSsnow, master_task) + call broadcast_scalar (f_PONsnow, master_task) + call broadcast_scalar (f_DONsnow, master_task) + call broadcast_scalar (f_Fedsnow, master_task) + call broadcast_scalar (f_Fepsnow, master_task) + call broadcast_scalar (f_zaerosnow, master_task) + call broadcast_scalar (f_chlfrac, master_task) + call broadcast_scalar (f_Nitfrac, master_task) + call broadcast_scalar (f_Nfrac, master_task) + call broadcast_scalar (f_DOCfrac, master_task) + call broadcast_scalar (f_DICfrac, master_task) + call broadcast_scalar (f_Amfrac, master_task) + call broadcast_scalar (f_Silfrac, master_task) + call broadcast_scalar (f_humfrac, master_task) + call broadcast_scalar (f_DMSPpfrac, master_task) + call broadcast_scalar (f_DMSPdfrac, master_task) + call broadcast_scalar (f_DMSfrac, master_task) + call broadcast_scalar (f_PONfrac, master_task) + call broadcast_scalar (f_DONfrac, master_task) + call broadcast_scalar (f_Fedfrac, master_task) + call broadcast_scalar (f_Fepfrac, master_task) + call broadcast_scalar (f_zaerofrac, master_task) ! 2D variables @@ -820,28 +820,28 @@ subroutine init_hist_bgc_2D enddo endif - ! zsalinity - + ! zsalinity + call define_hist_field(n_fzsal,"fzsal","kg/m^2/s",tstr2D, tcstr, & "prognostic salt flux ice to ocn (cpl)", & "if positive, ocean gains salt", c1, c0, & ns, f_fzsal) - + call define_hist_field(n_fzsal_ai,"fzsal_ai","kg/m^2/s",tstr2D, tcstr, & "prognostic salt flux ice to ocean", & "weighted by ice area", c1, c0, & ns, f_fzsal_ai) - + call define_hist_field(n_fzsal_g,"fzsal_g","kg/m^2/s",tstr2D, tcstr, & "Gravity drainage salt flux ice to ocn (cpl)", & "if positive, ocean gains salt", c1, c0, & ns, f_fzsal_g) - + call define_hist_field(n_fzsal_g_ai,"fzsal_g_ai","kg/m^2/s",tstr2D, tcstr, & "Gravity drainage salt flux ice to ocean", & "weighted by ice area", c1, c0, & ns, f_fzsal_g_ai) - + call define_hist_field(n_zsal,"zsal_tot","g/m^2",tstr2D, tcstr, & "Total Salt content", & "In ice volume*fbri", c1, c0, & @@ -971,8 +971,8 @@ subroutine init_hist_bgc_2D "skeletal layer: bottom 3 cm", c1, c0, & ns, f_bgc_Fep ) enddo - endif !f_bgc_Fe - + endif !f_bgc_Fe + if (f_bgc_Nit(1:1) /= 'x') & call define_hist_field(n_bgc_Nit,"Nit","mmol/m^2",tstr2D, tcstr, & "Bulk skeletal nutrient (nitrate)", & @@ -1013,7 +1013,7 @@ subroutine init_hist_bgc_2D "Bulk dissolved skl trace gas (DMS)", & "skeletal layer: bottom 3 cm", c1, c0, & ns, f_bgc_DMS) - + endif !skl_bgc ! 
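!-----------------------------------------------------------------
! A minimal serial sketch of the broadcast pattern above: each
! namelist-driven flag is known only on master_task after the
! namelist read, so it is broadcast to all ranks before any task
! defines its fields. broadcast_scalar below is a do-nothing
! stand-in for the ice_broadcast routine (which wraps MPI_Bcast in
! MPI runs).
!-----------------------------------------------------------------
program demo_broadcast
  implicit none
  integer, parameter :: master_task = 0
  integer :: my_task
  character (len=5) :: f_upNO
  my_task = 0                                ! serial: this rank is master
  if (my_task == master_task) f_upNO = 'x'   ! as read from the namelist
  call broadcast_scalar(f_upNO, master_task)
  print *, 'f_upNO on this task: ', f_upNO
contains
  subroutine broadcast_scalar(scalar, root)
    character (len=*), intent(inout) :: scalar
    integer,           intent(in)    :: root
    ! serial stand-in: nothing to do on one task
  end subroutine broadcast_scalar
end program demo_broadcast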
vertical and skeletal layer biogeochemistry @@ -1049,7 +1049,7 @@ subroutine init_hist_bgc_2D enddo endif if (f_bgc_Fed_ml (1:1) /= 'x') then - do n = 1, n_fed + do n = 1, n_fed write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'ml_dFe', trim(nchar) call define_hist_field(n_bgc_Fed_ml (n,:),vname_in,"nM",tstr2D, tcstr, & @@ -1059,7 +1059,7 @@ subroutine init_hist_bgc_2D enddo endif if (f_bgc_Fep_ml (1:1) /= 'x') then - do n = 1, n_fep + do n = 1, n_fep write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'ml_pFe', trim(nchar) call define_hist_field(n_bgc_Fep_ml (n,:),vname_in,"nM",tstr2D, tcstr, & @@ -1097,7 +1097,7 @@ subroutine init_hist_bgc_2D call define_hist_field(n_bgc_hum_ml,"ml_hum","mmol/m^3",tstr2D, tcstr, & "mixed layer humic material (carbon)", & "upper ocean", c1, c0, & - ns, f_bgc_hum_ml) + ns, f_bgc_hum_ml) if (f_bgc_DMSP_ml(1:1) /= 'x') & call define_hist_field(n_bgc_DMSP_ml,"ml_DMSP","mmol/m^3",tstr2D, tcstr, & "mixed layer precursor (DMSP)", & @@ -1108,30 +1108,30 @@ subroutine init_hist_bgc_2D "mixed layer trace gas (DMS)", & "upper ocean", c1, c0, & ns, f_bgc_DMS_ml) - + if (f_fNit(1:1) /= 'x') & call define_hist_field(n_fNit,"fNit","mmol/m^2/s",tstr2D, tcstr, & "nitrate flux ice to ocn (cpl)", & "if positive, ocean gains nitrate", c1, c0, & ns, f_fNit) - + if (f_fNit_ai(1:1) /= 'x') & call define_hist_field(n_fNit_ai,"fNit_ai","mmol/m^2/s",tstr2D, tcstr, & "nitrate flux ice to ocean", & "weighted by ice area", c1, c0, & ns, f_fNit_ai) - + if (f_fAm(1:1) /= 'x') & call define_hist_field(n_fAm,"fAm","mmol/m^2/s",tstr2D, tcstr, & "ammonium flux ice to ocn (cpl)", & "if positive, ocean gains ammonium", c1, c0, & ns, f_fAm) - + if (f_fAm_ai(1:1) /= 'x') & call define_hist_field(n_fAm_ai,"fAm_ai","mmol/m^2/s",tstr2D, tcstr, & "ammonium flux ice to ocean", & "weighted by ice area", c1, c0, & - ns, f_fAm_ai) + ns, f_fAm_ai) if (f_fN(1:1) /= 'x') then do n = 1, n_algae write(nchar,'(i3.3)') n @@ -1171,7 +1171,7 @@ subroutine init_hist_bgc_2D "weighted by ice area", c1, c0, & ns, f_fDOC_ai) enddo - endif + endif if (f_fDIC(1:1) /= 'x') then do n = 1, n_dic write(nchar,'(i3.3)') n @@ -1191,7 +1191,7 @@ subroutine init_hist_bgc_2D "weighted by ice area", c1, c0, & ns, f_fDIC_ai) enddo - endif + endif if (f_fDON(1:1) /= 'x') then do n = 1, n_don write(nchar,'(i3.3)') n @@ -1211,7 +1211,7 @@ subroutine init_hist_bgc_2D "weighted by ice area", c1, c0, & ns, f_fDON_ai) enddo - endif + endif if (f_fFed(1:1) /= 'x') then do n = 1, n_fed write(nchar,'(i3.3)') n @@ -1221,9 +1221,9 @@ subroutine init_hist_bgc_2D "positive to ocean", c1, c0, & ns, f_fFed ) enddo - endif + endif if (f_fFed_ai (1:1) /= 'x') then - do n = 1, n_fed + do n = 1, n_fed write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'fdFe_ai', trim(nchar) call define_hist_field(n_fFed_ai (n,:),vname_in,"umol/m^2/s",tstr2D, tcstr, & @@ -1231,7 +1231,7 @@ subroutine init_hist_bgc_2D "weighted by ice area", c1, c0, & ns, f_fFed_ai ) enddo - endif + endif if (f_fFep(1:1) /= 'x') then do n = 1, n_fep write(nchar,'(i3.3)') n @@ -1243,7 +1243,7 @@ subroutine init_hist_bgc_2D enddo endif if (f_fFep_ai (1:1) /= 'x') then - do n = 1, n_fep + do n = 1, n_fep write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'fpFe_ai', trim(nchar) call define_hist_field(n_fFep_ai (n,:),vname_in,"umol/m^2/s",tstr2D, tcstr, & @@ -1251,25 +1251,25 @@ subroutine init_hist_bgc_2D "weighted by ice area", c1, c0, & ns, f_fFep_ai ) enddo - endif + endif if (f_fSil(1:1) /= 'x') & call define_hist_field(n_fSil,"fSil","mmol/m^2/s",tstr2D, tcstr, & "silicate flux ice to ocn 
(cpl)", & "positive into ocean", c1, c0, & ns, f_fSil) - + if (f_fSil_ai(1:1) /= 'x') & call define_hist_field(n_fSil_ai,"fSil_ai","mmol/m^2/s",tstr2D, tcstr, & "silicate flux ice to ocean", & "weighted by ice area", c1, c0, & ns, f_fSil_ai) - + if (f_fhum(1:1) /= 'x') & call define_hist_field(n_fhum,"fhum","mmol/m^2/s",tstr2D, tcstr, & "humic matter (carbon) flux ice to ocn (cpl)", & "positive into ocean", c1, c0, & ns, f_fhum) - + if (f_fhum_ai(1:1) /= 'x') & call define_hist_field(n_fhum_ai,"fhum_ai","mmol/m^2/s",tstr2D, tcstr, & "humic matter (carbon) flux ice to ocean", & @@ -1336,19 +1336,19 @@ subroutine init_hist_bgc_2D "weighted by brine or skl volume ", c1, c0, & ns, f_grownet) - if (f_upNO(1:1) /= 'x') & + if (f_upNO(1:1) /= 'x') & call define_hist_field(n_upNO,"upNO","mmol/m^2/d",tstr2D, tcstr, & "Tot algal Nit uptake rate", & "weighted by ice area", c1, c0, & ns, f_upNO) - if (f_upNH(1:1) /= 'x') & + if (f_upNH(1:1) /= 'x') & call define_hist_field(n_upNH,"upNH","mmol/m^2/d",tstr2D, tcstr, & "Tot algal Am uptake rate", & "weighted by ice area", c1, c0,& ns, f_upNH) - ! vertical biogeochemistry + ! vertical biogeochemistry if (z_tracers) then if (f_fzaero(1:1) /= 'x') then @@ -1463,7 +1463,7 @@ subroutine init_hist_bgc_2D enddo endif !f_DONnet if (f_Fednet (1:1) /= 'x') then - do n = 1, n_fed + do n = 1, n_fed write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'dFe_net', trim(nchar) call define_hist_field(n_Fednet (n,:),vname_in,"umol/m^2",tstr2D, tcstr, & @@ -1471,9 +1471,9 @@ subroutine init_hist_bgc_2D "weighted by ice area ", c1, c0, & ns, f_Fednet ) enddo - endif !f_Fednet + endif !f_Fednet if (f_Fepnet (1:1) /= 'x') then - do n = 1, n_fep + do n = 1, n_fep write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'pFe_net', trim(nchar) call define_hist_field(n_Fepnet (n,:),vname_in,"umol/m^2",tstr2D, tcstr, & @@ -1481,7 +1481,7 @@ subroutine init_hist_bgc_2D "weighted by ice area ", c1, c0, & ns, f_Fepnet ) enddo - endif !f_Fepnet + endif !f_Fepnet if (f_Nitnet(1:1) /= 'x') & call define_hist_field(n_Nitnet,"Nit_net","mmol/m^2",tstr2D, tcstr, & "Net Nitrate", & @@ -1501,7 +1501,7 @@ subroutine init_hist_bgc_2D call define_hist_field(n_humnet,"hum_net","mmol/m^2",tstr2D, tcstr, & "Net humic material (carbon)", & "weighted by ice area", c1, c0, & - ns, f_humnet) + ns, f_humnet) if (f_DMSPpnet(1:1) /= 'x') & call define_hist_field(n_DMSPpnet,"DMSPp_net","mmol/m^2",tstr2D, tcstr, & "Net DMSPp", & @@ -1524,7 +1524,7 @@ subroutine init_hist_bgc_2D ns, f_PONnet) if (f_zaerosnow(1:1) /= 'x') then - do n = 1, n_zaero + do n = 1, n_zaero write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'zaero_snow', trim(nchar) call define_hist_field(n_zaerosnow(n,:),vname_in,"kg/m^2",tstr2D, tcstr, & @@ -1594,7 +1594,7 @@ subroutine init_hist_bgc_2D enddo endif !f_DONsnow if (f_Fedsnow (1:1) /= 'x') then - do n = 1, n_fed + do n = 1, n_fed write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'dFe_snow', trim(nchar) call define_hist_field(n_Fedsnow (n,:),vname_in,"umol/m^2",tstr2D, tcstr, & @@ -1602,9 +1602,9 @@ subroutine init_hist_bgc_2D "weighted by ice area ", c1, c0, & ns, f_Fedsnow ) enddo - endif !f_Fedsnow + endif !f_Fedsnow if (f_Fepsnow (1:1) /= 'x') then - do n = 1, n_fed + do n = 1, n_fed write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'pFe_snow', trim(nchar) call define_hist_field(n_Fepsnow (n,:),vname_in,"umol/m^2",tstr2D, tcstr, & @@ -1612,7 +1612,7 @@ subroutine init_hist_bgc_2D "weighted by ice area ", c1, c0, & ns, f_Fepsnow ) enddo - endif !f_Fepsnow + endif !f_Fepsnow if (f_Nitsnow(1:1) /= 'x') & 
call define_hist_field(n_Nitsnow,"Nit_snow","mmol/m^2",tstr2D, tcstr, & "Snow Nitrate", & @@ -1632,7 +1632,7 @@ subroutine init_hist_bgc_2D call define_hist_field(n_humsnow,"hum_snow","mmol/m^2",tstr2D, tcstr, & "Snow humic material (carbon)", & "weighted by ice area", c1, c0, & - ns, f_humsnow) + ns, f_humsnow) if (f_DMSPpsnow(1:1) /= 'x') & call define_hist_field(n_DMSPpsnow,"DMSPp_snow","mmol/m^2",tstr2D, tcstr, & "Snow DMSPp", & @@ -1655,7 +1655,7 @@ subroutine init_hist_bgc_2D ns, f_PONsnow) if (f_zaerofrac(1:1) /= 'x') then - do n = 1, n_zaero + do n = 1, n_zaero write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'zaero_frac', trim(nchar) call define_hist_field(n_zaerofrac(n,:),vname_in,"1",tstr2D, tcstr, & @@ -1715,7 +1715,7 @@ subroutine init_hist_bgc_2D enddo endif !f_DONfrac if (f_Fedfrac (1:1) /= 'x') then - do n = 1, n_fed + do n = 1, n_fed write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'dFe_frac', trim(nchar) call define_hist_field(n_Fedfrac (n,:),vname_in,"1",tstr2D, tcstr, & @@ -1723,9 +1723,9 @@ subroutine init_hist_bgc_2D "averaged over depth ", c1, c0, & ns, f_Fedfrac ) enddo - endif !f_Fedfrac + endif !f_Fedfrac if (f_Fepfrac (1:1) /= 'x') then - do n = 1, n_fep + do n = 1, n_fep write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'pFe_frac', trim(nchar) call define_hist_field(n_Fepfrac (n,:),vname_in,"1",tstr2D, tcstr, & @@ -1733,7 +1733,7 @@ subroutine init_hist_bgc_2D "averaged over depth ", c1, c0, & ns, f_Fepfrac ) enddo - endif !f_Fepfrac + endif !f_Fepfrac if (f_Nitfrac(1:1) /= 'x') & call define_hist_field(n_Nitfrac,"Nit_frac","1",tstr2D, tcstr, & "Mobile frac Nitrate", & @@ -1753,7 +1753,7 @@ subroutine init_hist_bgc_2D call define_hist_field(n_humfrac,"hum_frac","1",tstr2D, tcstr, & "Mobile frac humic material", & "averaged over depth", c1, c0, & - ns, f_humfrac) + ns, f_humfrac) if (f_DMSPpfrac(1:1) /= 'x') & call define_hist_field(n_DMSPpfrac,"DMSPp_frac","1",tstr2D, tcstr, & "Mobile frac DMSPp", & @@ -1787,8 +1787,8 @@ subroutine init_hist_bgc_2D endif ! histfreq(ns) /= 'x' enddo ! nstreams - endif ! tr_aero, etc - + endif ! tr_aero, etc + end subroutine init_hist_bgc_2D !======================================================================= @@ -1834,7 +1834,7 @@ subroutine init_hist_bgc_3Db real (kind=dbl_kind) :: secday logical (kind=log_kind) :: solve_zsal, z_tracers character(len=*), parameter :: subname = '(init_hist_bgc_3Db)' - + ! 
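!-----------------------------------------------------------------
! A minimal sketch of how the per-species history names above
! (ml_dFe001, dFe_net002, pFe_frac001, ...) are built: an i3.3 edit
! descriptor yields a zero-padded species counter that is appended
! to the base name before define_hist_field is called.
!-----------------------------------------------------------------
program demo_vname
  implicit none
  integer :: n
  character (len=3)  :: nchar
  character (len=40) :: vname_in
  do n = 1, 2
     write(nchar,'(i3.3)') n
     write(vname_in,'(a,a)') 'ml_dFe', trim(nchar)
     print *, trim(vname_in)        ! ml_dFe001, then ml_dFe002
  enddo
end program demo_vname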
biology vertical grid call icepack_query_parameters(secday_out=secday) @@ -1848,7 +1848,7 @@ subroutine init_hist_bgc_3Db do ns = 1, nstreams if (histfreq(ns) /= 'x') then - + if (f_bTin(1:1) /= 'x') & call define_hist_field(n_bTin,"bTizn","C",tstr3Db, tcstr, & "ice internal temperatures on bio grid", & @@ -1859,27 +1859,27 @@ subroutine init_hist_bgc_3Db call define_hist_field(n_bphi,"bphizn","%",tstr3Db, tcstr, & "porosity", "brine volume fraction", c100, c0, & ns, f_bphi) - - if (f_iDi(1:1) /= 'x') & + + if (f_iDi(1:1) /= 'x') & call define_hist_field(n_iDi,"iDin","m^2/d",tstr3Db, tcstr, & "interface diffusivity", "on bio interface grid", secday, c0, & ns, f_iDi) - - if (f_iki(1:1) /= 'x') & + + if (f_iki(1:1) /= 'x') & call define_hist_field(n_iki,"ikin","mm^2",tstr3Db, tcstr, & "permeability", "on bio interface grid", 1.0e6_dbl_kind, c0, & ns, f_iki) - + if (f_bgc_S(1:1) /= 'x') & call define_hist_field(n_bgc_S,"bgc_S","ppt",tstr3Db, tcstr, & "bulk salinity", "on bio grid", c1, c0, & ns, f_bgc_S) - + if (f_zfswin(1:1) /= 'x') & call define_hist_field(n_zfswin,"zfswin","W/m^2",tstr3Db, tcstr, & "internal ice PAR", "on bio interface grid", c1, c0, & ns, f_zfswin) - + endif ! histfreq(ns) /= 'x' enddo ! ns @@ -1903,8 +1903,8 @@ subroutine accum_hist_bgc (iblk) use ice_flux, only: sss use ice_flux_bgc, only: fiso_atm, fiso_ocn, faero_atm, faero_ocn, & flux_bio, flux_bio_ai, fzsal_ai, fzsal_g_ai - use ice_history_shared, only: n2D, a2D, a3Dc, & - n3Dzcum, n3Dbcum, a3Db, a3Da, & + use ice_history_shared, only: n2D, a2D, a3Dc, & + n3Dzcum, n3Dbcum, a3Db, a3Da, & ncat_hist, accum_hist_field, nzblyr, nzalyr use ice_state, only: trcrn, trcr, aicen, aice, vicen @@ -1914,24 +1914,24 @@ subroutine accum_hist_bgc (iblk) ! local variables integer (kind=int_kind) :: & - i, j, n, k, & ! loop indices + i, j, n, k, & ! loop indices ilo,ihi,jlo,jhi ! beginning and end of physical domain - real (kind=dbl_kind), dimension (nx_block,ny_block,nblyr+4) :: & + real (kind=dbl_kind), dimension (nx_block,ny_block,nblyr+4) :: & workz, workz2 - real (kind=dbl_kind) :: & + real (kind=dbl_kind) :: & maxv, rhos, rhoi, rhow, puny, sk_l - real (kind=dbl_kind), dimension (nblyr+1) :: & + real (kind=dbl_kind), dimension (nblyr+1) :: & workv - real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & workni, worknj - integer (kind=int_kind), dimension (1) :: & + integer (kind=int_kind), dimension (1) :: & worki - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & workii logical (kind=log_kind) :: & @@ -1949,9 +1949,9 @@ subroutine accum_hist_bgc (iblk) integer (kind=int_kind), dimension(icepack_max_aero) :: & nlt_zaero_sw ! points to aerosol in trcrn_sw - + integer (kind=int_kind), dimension(icepack_max_algae) :: & - nt_bgc_N, nlt_bgc_N, & ! algae + nt_bgc_N, nlt_bgc_N, & ! algae nt_bgc_C, nlt_bgc_C, & ! nt_bgc_chl, nlt_bgc_chl ! @@ -2009,8 +2009,8 @@ subroutine accum_hist_bgc (iblk) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - - this_block = get_block(blocks_ice(iblk),iblk) + + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -2026,15 +2026,15 @@ subroutine accum_hist_bgc (iblk) if (tr_iso .or. tr_aero .or. tr_brine .or. solve_zsal .or. skl_bgc) then ! 
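!-----------------------------------------------------------------
! A minimal sketch of the cona scaling visible in the 3Db field
! definitions above: the written value is cona*field + conb, so the
! interface diffusivity iDin (stored per second) is scaled by
! secday to m^2/d, the brine fraction bphizn by c100 to percent,
! and the permeability ikin by 1.0e6 to mm^2 (the field values used
! here are assumptions).
!-----------------------------------------------------------------
program demo_hist_scaling
  implicit none
  integer, parameter :: dbl_kind = kind(1.0d0)
  real (kind=dbl_kind), parameter :: secday = 86400.0_dbl_kind
  real (kind=dbl_kind), parameter :: c100 = 100.0_dbl_kind
  real (kind=dbl_kind) :: iDi, bphi, iki
  iDi  = 2.0e-9_dbl_kind    ! interface diffusivity (m^2/s)
  bphi = 0.15_dbl_kind      ! brine volume fraction
  iki  = 3.0e-10_dbl_kind   ! permeability (m^2)
  print *, 'iDin   (m^2/d) =', secday*iDi
  print *, 'bphizn (%)     =', c100*bphi
  print *, 'ikin   (mm^2)  =', 1.0e6_dbl_kind*iki
end program demo_hist_scaling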
zsalinity - if (f_fzsal (1:1) /= 'x') & + if (f_fzsal (1:1) /= 'x') & call accum_hist_field(n_fzsal, iblk, fzsal(:,:,iblk), a2D) - if (f_fzsal_ai(1:1)/= 'x') & + if (f_fzsal_ai(1:1)/= 'x') & call accum_hist_field(n_fzsal_ai, iblk, fzsal_ai(:,:,iblk), a2D) - if (f_fzsal_g (1:1) /= 'x') & + if (f_fzsal_g (1:1) /= 'x') & call accum_hist_field(n_fzsal_g, iblk, fzsal_g(:,:,iblk), a2D) - if (f_fzsal_g_ai(1:1)/= 'x') & + if (f_fzsal_g_ai(1:1)/= 'x') & call accum_hist_field(n_fzsal_g_ai,iblk, fzsal_g_ai(:,:,iblk), a2D) - if (f_zsal (1:1) /= 'x') & + if (f_zsal (1:1) /= 'x') & call accum_hist_field(n_zsal, iblk, zsal_tot(:,:,iblk), a2D) ! isotopes @@ -2120,13 +2120,13 @@ subroutine accum_hist_bgc (iblk) enddo endif if (f_bgc_Fed (1:1)/= 'x') then - do n=1,n_fed + do n=1,n_fed call accum_hist_field(n_bgc_Fed (n,:), iblk, & sk_l*trcr(:,:,nt_bgc_Fed (n), iblk), a2D) enddo endif if (f_bgc_Fep (1:1)/= 'x') then - do n=1,n_fep + do n=1,n_fep call accum_hist_field(n_bgc_Fep (n,:), iblk, & sk_l*trcr(:,:,nt_bgc_Fep (n), iblk), a2D) enddo @@ -2139,32 +2139,32 @@ subroutine accum_hist_bgc (iblk) endif if (f_bgc_Nit(1:1)/= 'x') & call accum_hist_field(n_bgc_Nit, iblk, & - sk_l*trcr(:,:,nt_bgc_Nit, iblk), a2D) + sk_l*trcr(:,:,nt_bgc_Nit, iblk), a2D) if (f_bgc_Am(1:1)/= 'x') & call accum_hist_field(n_bgc_Am, iblk, & - sk_l*trcr(:,:,nt_bgc_Am, iblk), a2D) + sk_l*trcr(:,:,nt_bgc_Am, iblk), a2D) if (f_bgc_Sil(1:1)/= 'x') & call accum_hist_field(n_bgc_Sil, iblk, & - sk_l*trcr(:,:,nt_bgc_Sil, iblk), a2D) + sk_l*trcr(:,:,nt_bgc_Sil, iblk), a2D) if (f_bgc_hum(1:1)/= 'x') & call accum_hist_field(n_bgc_hum, iblk, & - sk_l*trcr(:,:,nt_bgc_hum, iblk), a2D) + sk_l*trcr(:,:,nt_bgc_hum, iblk), a2D) if (f_bgc_PON(1:1)/= 'x') & call accum_hist_field(n_bgc_PON, iblk, & - sk_l*trcr(:,:,nt_bgc_PON, iblk), a2D) + sk_l*trcr(:,:,nt_bgc_PON, iblk), a2D) if (f_bgc_DMSPp(1:1)/= 'x') & call accum_hist_field(n_bgc_DMSPp,iblk, & - sk_l*trcr(:,:,nt_bgc_DMSPp,iblk), a2D) + sk_l*trcr(:,:,nt_bgc_DMSPp,iblk), a2D) if (f_bgc_DMSPd(1:1)/= 'x') & call accum_hist_field(n_bgc_DMSPd,iblk, & - sk_l*trcr(:,:,nt_bgc_DMSPd,iblk), a2D) + sk_l*trcr(:,:,nt_bgc_DMSPd,iblk), a2D) if (f_bgc_DMS(1:1)/= 'x') & call accum_hist_field(n_bgc_DMS, iblk, & - sk_l*trcr(:,:,nt_bgc_DMS, iblk), a2D) + sk_l*trcr(:,:,nt_bgc_DMS, iblk), a2D) - endif !skl_bgc + endif !skl_bgc - ! skeletal layer and vertical bgc + ! 
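!-----------------------------------------------------------------
! A minimal sketch of the sk_l factor in the skeletal-layer
! accumulation calls above: skl tracers hold concentrations in the
! bottom skeletal layer (the "bottom 3 cm" of the field comments),
! so multiplying by the layer thickness sk_l converts mmol/m^3 to
! the mmol/m^2 written to history. sk_l is queried from icepack;
! the 0.03 m used here is an assumption matching the comments.
!-----------------------------------------------------------------
program demo_skl_scaling
  implicit none
  integer, parameter :: dbl_kind = kind(1.0d0)
  real (kind=dbl_kind) :: sk_l, nit_conc
  sk_l     = 0.03_dbl_kind    ! skeletal layer thickness (m), assumed
  nit_conc = 10.0_dbl_kind    ! nitrate concentration (mmol/m^3)
  print *, 'Nit (mmol/m^2) =', sk_l*nit_conc
end program demo_skl_scaling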
skeletal layer and vertical bgc if (f_bgc_DOC_ml(1:1)/= 'x') then do n=1,n_doc @@ -2185,13 +2185,13 @@ subroutine accum_hist_bgc (iblk) enddo endif if (f_bgc_Fed_ml (1:1)/= 'x') then - do n=1,n_fed + do n=1,n_fed call accum_hist_field(n_bgc_Fed_ml (n,:), iblk, & ocean_bio(:,:,nlt_bgc_Fed (n), iblk), a2D) enddo endif if (f_bgc_Fep_ml (1:1)/= 'x') then - do n=1,n_fep + do n=1,n_fep call accum_hist_field(n_bgc_Fep_ml (n,:), iblk, & ocean_bio(:,:,nlt_bgc_Fep (n), iblk), a2D) enddo @@ -2204,22 +2204,22 @@ subroutine accum_hist_bgc (iblk) endif if (f_bgc_Nit_ml(1:1)/= 'x') & call accum_hist_field(n_bgc_Nit_ml, iblk, & - ocean_bio(:,:,nlt_bgc_Nit, iblk), a2D) + ocean_bio(:,:,nlt_bgc_Nit, iblk), a2D) if (f_bgc_Am_ml(1:1)/= 'x') & call accum_hist_field(n_bgc_Am_ml, iblk, & - ocean_bio(:,:,nlt_bgc_Am, iblk), a2D) + ocean_bio(:,:,nlt_bgc_Am, iblk), a2D) if (f_bgc_Sil_ml(1:1)/= 'x') & call accum_hist_field(n_bgc_Sil_ml, iblk, & - ocean_bio(:,:,nlt_bgc_Sil, iblk), a2D) + ocean_bio(:,:,nlt_bgc_Sil, iblk), a2D) if (f_bgc_hum_ml(1:1)/= 'x') & call accum_hist_field(n_bgc_hum_ml, iblk, & - ocean_bio(:,:,nlt_bgc_hum, iblk), a2D) + ocean_bio(:,:,nlt_bgc_hum, iblk), a2D) if (f_bgc_DMSP_ml(1:1)/= 'x') & call accum_hist_field(n_bgc_DMSP_ml, iblk, & - ocean_bio(:,:,nlt_bgc_DMSPd, iblk), a2D) + ocean_bio(:,:,nlt_bgc_DMSPd, iblk), a2D) if (f_bgc_DMS_ml(1:1)/= 'x') & call accum_hist_field(n_bgc_DMS_ml, iblk, & - ocean_bio(:,:,nlt_bgc_DMS, iblk), a2D) + ocean_bio(:,:,nlt_bgc_DMS, iblk), a2D) if (f_fNit (1:1) /= 'x') & call accum_hist_field(n_fNit, iblk, & @@ -2283,25 +2283,25 @@ subroutine accum_hist_bgc (iblk) enddo endif if (f_fFed (1:1)/= 'x') then - do n=1,n_fed + do n=1,n_fed call accum_hist_field(n_fFed (n,:), iblk, & flux_bio(:,:,nlt_bgc_Fed (n),iblk), a2D) enddo endif if (f_fFed_ai (1:1)/= 'x') then - do n=1,n_fed + do n=1,n_fed call accum_hist_field(n_fFed_ai (n,:), iblk, & flux_bio_ai(:,:,nlt_bgc_Fed (n),iblk), a2D) enddo endif if (f_fFep (1:1)/= 'x') then - do n=1,n_fep + do n=1,n_fep call accum_hist_field(n_fFep (n,:), iblk, & flux_bio(:,:,nlt_bgc_Fep (n),iblk), a2D) enddo endif if (f_fFep_ai (1:1)/= 'x') then - do n=1,n_fep + do n=1,n_fep call accum_hist_field(n_fFep_ai (n,:), iblk, & flux_bio_ai(:,:,nlt_bgc_Fep (n),iblk), a2D) enddo @@ -2347,7 +2347,7 @@ subroutine accum_hist_bgc (iblk) PP_net(:,:,iblk), a2D) if (f_grownet (1:1) /= 'x') & call accum_hist_field(n_grownet, iblk, & - grow_net(:,:,iblk), a2D) + grow_net(:,:,iblk), a2D) if (f_upNO (1:1) /= 'x') & call accum_hist_field(n_upNO, iblk, & upNO(:,:,iblk), a2D) @@ -2355,7 +2355,7 @@ subroutine accum_hist_bgc (iblk) call accum_hist_field(n_upNH, iblk, & upNH(:,:,iblk), a2D) - ! vertical biogeochemistry + ! vertical biogeochemistry if (z_tracers) then @@ -2396,7 +2396,7 @@ subroutine accum_hist_bgc (iblk) enddo ! n endif !f_algalpeak - ! + ! ! ice_bio_net ! 
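!-----------------------------------------------------------------
! An illustrative sketch of the "_ai" naming convention seen in the
! flux accumulation above: the plain variant (e.g. flux_bio) is the
! per-ice-area value sent to the coupler, while the _ai variant has
! been weighted by the ice area fraction so it averages correctly
! over partly open cells. The product below is only a conceptual
! relation; in CICE the weighting is applied where the fluxes are
! merged, not in this routine.
!-----------------------------------------------------------------
program demo_ai_weighting
  implicit none
  integer, parameter :: dbl_kind = kind(1.0d0)
  real (kind=dbl_kind) :: fNit, fNit_ai, aice
  fNit = 2.0_dbl_kind        ! mmol/m^2/s over the ice-covered fraction
  aice = 0.4_dbl_kind        ! ice area fraction of the grid cell
  fNit_ai = fNit*aice        ! grid-cell mean flux for history
  print *, 'fNit_ai =', fNit_ai
end program demo_ai_weighting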
if (f_zaeronet (1:1) /= 'x') then @@ -2424,35 +2424,35 @@ subroutine accum_hist_bgc (iblk) enddo endif !f_Cnet if (f_DOCnet (1:1) /= 'x') then - do n=1,n_doc + do n=1,n_doc call accum_hist_field(n_DOCnet(n,:), iblk, & ice_bio_net(:,:,nlt_bgc_DOC(n), iblk), a2D) enddo endif !f_DOCnet if (f_DICnet (1:1) /= 'x') then - do n=1,n_dic + do n=1,n_dic call accum_hist_field(n_DICnet(n,:), iblk, & ice_bio_net(:,:,nlt_bgc_DIC(n), iblk), a2D) enddo endif !f_DICnet if (f_DONnet (1:1) /= 'x') then - do n=1,n_don + do n=1,n_don call accum_hist_field(n_DONnet(n,:), iblk, & ice_bio_net(:,:,nlt_bgc_DON(n), iblk), a2D) enddo endif !f_DONnet if (f_Fednet (1:1) /= 'x') then - do n=1,n_fed + do n=1,n_fed call accum_hist_field(n_Fednet (n,:), iblk, & ice_bio_net(:,:,nlt_bgc_Fed (n), iblk), a2D) enddo - endif !f_Fednet + endif !f_Fednet if (f_Fepnet (1:1) /= 'x') then - do n=1,n_fep + do n=1,n_fep call accum_hist_field(n_Fepnet (n,:), iblk, & ice_bio_net(:,:,nlt_bgc_Fep (n), iblk), a2D) enddo - endif !f_Fepnet + endif !f_Fepnet if (f_Nitnet (1:1) /= 'x') & call accum_hist_field(n_Nitnet, iblk, & @@ -2480,7 +2480,7 @@ subroutine accum_hist_bgc (iblk) ice_bio_net(:,:,nlt_bgc_PON, iblk), a2D) ! ! snow_bio_net - ! + ! if (f_zaerosnow (1:1) /= 'x') then do n=1,n_zaero call accum_hist_field(n_zaerosnow(n,:), iblk, & @@ -2506,35 +2506,35 @@ subroutine accum_hist_bgc (iblk) enddo endif !f_Csnow if (f_DOCsnow (1:1) /= 'x') then - do n=1,n_doc + do n=1,n_doc call accum_hist_field(n_DOCsnow(n,:), iblk, & snow_bio_net(:,:,nlt_bgc_DOC(n), iblk), a2D) enddo endif !f_DOCsnow if (f_DICsnow (1:1) /= 'x') then - do n=1,n_dic + do n=1,n_dic call accum_hist_field(n_DICsnow(n,:), iblk, & snow_bio_net(:,:,nlt_bgc_DIC(n), iblk), a2D) enddo endif !f_DICsnow if (f_DONsnow (1:1) /= 'x') then - do n=1,n_don + do n=1,n_don call accum_hist_field(n_DONsnow(n,:), iblk, & snow_bio_net(:,:,nlt_bgc_DON(n), iblk), a2D) enddo endif !f_DONsnow if (f_Fedsnow (1:1) /= 'x') then - do n=1,n_fed + do n=1,n_fed call accum_hist_field(n_Fedsnow (n,:), iblk, & snow_bio_net(:,:,nlt_bgc_Fed (n), iblk), a2D) enddo - endif !f_Fedsnow + endif !f_Fedsnow if (f_Fepsnow (1:1) /= 'x') then - do n=1,n_fep + do n=1,n_fep call accum_hist_field(n_Fepsnow (n,:), iblk, & snow_bio_net(:,:,nlt_bgc_Fep (n), iblk), a2D) enddo - endif !f_Fepsnow + endif !f_Fepsnow if (f_Nitsnow (1:1) /= 'x') & call accum_hist_field(n_Nitsnow, iblk, & @@ -2562,7 +2562,7 @@ subroutine accum_hist_bgc (iblk) snow_bio_net(:,:,nlt_bgc_PON, iblk), a2D) ! ! mobile frac - ! + ! 
if (f_zaerofrac (1:1) /= 'x') then do n=1,n_zaero call accum_hist_field(n_zaerofrac(n,:), iblk, & @@ -2582,35 +2582,35 @@ subroutine accum_hist_bgc (iblk) enddo endif !f_Nfrac if (f_DOCfrac (1:1) /= 'x') then - do n=1,n_doc + do n=1,n_doc call accum_hist_field(n_DOCfrac(n,:), iblk, & trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_DOC(n), iblk), a2D) enddo endif !f_DOCfrac if (f_DICfrac (1:1) /= 'x') then - do n=1,n_dic + do n=1,n_dic call accum_hist_field(n_DICfrac(n,:), iblk, & trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_DIC(n), iblk), a2D) enddo endif !f_DICfrac if (f_DONfrac (1:1) /= 'x') then - do n=1,n_don + do n=1,n_don call accum_hist_field(n_DONfrac(n,:), iblk, & trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_DON(n), iblk), a2D) enddo endif !f_DONfrac if (f_Fedfrac (1:1) /= 'x') then - do n=1,n_fed + do n=1,n_fed call accum_hist_field(n_Fedfrac (n,:), iblk, & trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_Fed (n), iblk), a2D) enddo - endif !f_Fedfrac + endif !f_Fedfrac if (f_Fepfrac (1:1) /= 'x') then - do n=1,n_fep + do n=1,n_fep call accum_hist_field(n_Fepfrac (n,:), iblk, & trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_Fep (n), iblk), a2D) enddo - endif !f_Fepfrac + endif !f_Fepfrac if (f_Nitfrac (1:1) /= 'x') & call accum_hist_field(n_Nitfrac, iblk, & @@ -2623,7 +2623,7 @@ subroutine accum_hist_bgc (iblk) trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_Sil, iblk), a2D) if (f_humfrac (1:1) /= 'x') & call accum_hist_field(n_humfrac, iblk, & - trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_hum, iblk), a2D) + trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_hum, iblk), a2D) if (f_DMSPpfrac (1:1) /= 'x') & call accum_hist_field(n_DMSPpfrac, iblk, & trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_DMSPp, iblk), a2D) @@ -2736,11 +2736,11 @@ subroutine accum_hist_bgc (iblk) do i = ilo, ihi if (aicen(i,j,n,iblk) > c0) then workz(i,j,k) = workz(i,j,k) + iDi(i,j,k,n,iblk)*vicen(i,j,n,iblk)**2/aicen(i,j,n,iblk) - workz(i,j,nzblyr) = workz(i,j,nzblyr-1) + workz(i,j,nzblyr) = workz(i,j,nzblyr-1) endif enddo ! i enddo ! j - enddo ! k + enddo ! k enddo ! n call accum_hist_field(n_iDi-n3Dzcum, iblk, nzblyr, & workz(:,:,1:nzblyr), a3Db) @@ -2760,7 +2760,7 @@ subroutine accum_hist_bgc (iblk) endif enddo ! i enddo ! j - enddo ! k + enddo ! k enddo ! n call accum_hist_field(n_iki-n3Dzcum, iblk, nzblyr, & workz(:,:,1:nzblyr), a3Db) @@ -2778,7 +2778,7 @@ subroutine accum_hist_bgc (iblk) workz(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow trcr(i,j,nt_zaero(k)+nblyr+1:nt_zaero(k)+nblyr+2,iblk)/rhos workz(i,j,3:nblyr+3) = & !ice @@ -2786,7 +2786,7 @@ subroutine accum_hist_bgc (iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_zaero(k),iblk)/rhow !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_zaeros(k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) enddo !k @@ -2797,14 +2797,14 @@ subroutine accum_hist_bgc (iblk) workz2(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_N(k)+nblyr+1:nt_bgc_N(k)+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice trcr(i,j,nt_bgc_N(k):nt_bgc_N(k)+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_N(k),iblk) !ocean endif - if (aicen(i,j,1,iblk) > puny) then + if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_N(k)+nblyr+1:nt_bgc_N(k)+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice @@ -2812,7 +2812,7 @@ subroutine accum_hist_bgc (iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_N(k),iblk) !ocean endif enddo ! i - enddo ! j + enddo ! 
j call accum_hist_field(n_bgc_N(k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_N_cat1(k,:)-n3Dbcum, iblk, nzalyr, & @@ -2824,7 +2824,7 @@ subroutine accum_hist_bgc (iblk) workz(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow R_C2N(k)*trcr(i,j,nt_bgc_N(k)+nblyr+1:nt_bgc_N(k)+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice @@ -2832,25 +2832,25 @@ subroutine accum_hist_bgc (iblk) workz(i,j,nblyr+4) = R_C2N(k)*ocean_bio(i,j,nlt_bgc_N(k),iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_C(k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) enddo !k endif !f_bgc_C if (f_bgc_DOC (1:1) /= 'x') then - do k = 1,n_doc + do k = 1,n_doc workz(:,:,:) = c0 workz2(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_DOC(k)+nblyr+1:nt_bgc_DOC(k)+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice trcr(i,j,nt_bgc_DOC(k):nt_bgc_DOC(k)+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DOC(k),iblk) !ocean endif - if (aicen(i,j,1,iblk) > puny) then + if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_DOC(k)+nblyr+1:nt_bgc_DOC(k)+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice @@ -2858,7 +2858,7 @@ subroutine accum_hist_bgc (iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DOC(k),iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_DOC(k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_DOC_cat1(k,:)-n3Dbcum, iblk, nzalyr, & @@ -2866,19 +2866,19 @@ subroutine accum_hist_bgc (iblk) enddo !k endif !f_bgc_DOC if (f_bgc_DIC (1:1) /= 'x') then - do k = 1,n_dic + do k = 1,n_dic workz(:,:,:) = c0 workz2(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_DIC(k)+nblyr+1:nt_bgc_DIC(k)+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice trcr(i,j,nt_bgc_DIC(k):nt_bgc_DIC(k)+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DIC(k),iblk) !ocean endif - if (aicen(i,j,1,iblk) > puny) then + if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_DIC(k)+nblyr+1:nt_bgc_DIC(k)+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice @@ -2886,7 +2886,7 @@ subroutine accum_hist_bgc (iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DIC(k),iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_DIC(k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_DIC_cat1(k,:)-n3Dbcum, iblk, nzalyr, & @@ -2894,19 +2894,19 @@ subroutine accum_hist_bgc (iblk) enddo !k endif !f_bgc_DIC if (f_bgc_DON (1:1) /= 'x') then - do k = 1,n_don + do k = 1,n_don workz(:,:,:) = c0 workz2(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_DON(k)+nblyr+1:nt_bgc_DON(k)+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice trcr(i,j,nt_bgc_DON(k):nt_bgc_DON(k)+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DON(k),iblk) !ocean endif - if (aicen(i,j,1,iblk) > puny) then + if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_DON(k)+nblyr+1:nt_bgc_DON(k)+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice @@ -2914,7 +2914,7 @@ subroutine accum_hist_bgc (iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DON(k),iblk) !ocean endif enddo ! i - enddo ! 
j + enddo ! j call accum_hist_field(n_bgc_DON(k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_DON_cat1(k,:)-n3Dbcum, iblk, nzalyr, & @@ -2922,19 +2922,19 @@ subroutine accum_hist_bgc (iblk) enddo !k endif !f_bgc_DON if (f_bgc_Fed (1:1) /= 'x') then - do k = 1,n_fed + do k = 1,n_fed workz(:,:,:) = c0 workz2(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_Fed (k)+nblyr+1:nt_bgc_Fed (k)+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice trcr(i,j,nt_bgc_Fed (k):nt_bgc_Fed (k)+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Fed (k),iblk) !ocean endif - if (aicen(i,j,1,iblk) > puny) then + if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_Fed (k)+nblyr+1:nt_bgc_Fed (k)+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice @@ -2942,27 +2942,27 @@ subroutine accum_hist_bgc (iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Fed (k),iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_Fed (k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_Fed_cat1 (k,:)-n3Dbcum, iblk, nzalyr, & workz2(:,:,1:nzalyr), a3Da) enddo !k - endif !f_bgc_Fed + endif !f_bgc_Fed if (f_bgc_Fep (1:1) /= 'x') then - do k = 1,n_fep + do k = 1,n_fep workz(:,:,:) = c0 workz2(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_Fep (k)+nblyr+1:nt_bgc_Fep (k)+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice trcr(i,j,nt_bgc_Fep (k):nt_bgc_Fep (k)+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Fep (k),iblk) !ocean endif - if (aicen(i,j,1,iblk) > puny) then + if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_Fep (k)+nblyr+1:nt_bgc_Fep (k)+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice @@ -2970,19 +2970,19 @@ subroutine accum_hist_bgc (iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Fep (k),iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_Fep (k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_Fep_cat1 (k,:)-n3Dbcum, iblk, nzalyr, & workz2(:,:,1:nzalyr), a3Da) enddo !k - endif !f_bgc_Fep + endif !f_bgc_Fep if (f_bgc_chl (1:1) /= 'x') then do k = 1,n_algae workz(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_chl(k)+nblyr+1:nt_bgc_chl(k)+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice @@ -2990,12 +2990,12 @@ subroutine accum_hist_bgc (iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_chl(k),iblk) !ocean endif enddo ! i - enddo ! j + enddo ! 
j call accum_hist_field(n_bgc_chl(k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) enddo !k endif !f_bgc_chl - + if (f_bgc_Nit (1:1) /= 'x') then workz(:,:,:) = c0 workz2(:,:,:) = c0 @@ -3005,18 +3005,18 @@ subroutine accum_hist_bgc (iblk) workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_Nit+nblyr+2:nt_bgc_Nit+nblyr+3,iblk) workz(i,j,3:nblyr+3) = & !ice - trcr(i,j,nt_bgc_Nit:nt_bgc_Nit+nblyr,iblk) + trcr(i,j,nt_bgc_Nit:nt_bgc_Nit+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Nit,iblk) !ocean endif if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_Nit+nblyr+2:nt_bgc_Nit+nblyr+3,1,iblk) workz2(i,j,3:nblyr+3) = & !ice - trcrn(i,j,nt_bgc_Nit:nt_bgc_Nit+nblyr,1,iblk) + trcrn(i,j,nt_bgc_Nit:nt_bgc_Nit+nblyr,1,iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Nit,iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_Nit-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_Nit_cat1-n3Dbcum, iblk, nzalyr, & @@ -3032,18 +3032,18 @@ subroutine accum_hist_bgc (iblk) workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_Am+nblyr+1:nt_bgc_Am+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice - trcr(i,j,nt_bgc_Am:nt_bgc_Am+nblyr,iblk) + trcr(i,j,nt_bgc_Am:nt_bgc_Am+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Am,iblk) !ocean endif if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_Am+nblyr+1:nt_bgc_Am+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice - trcrn(i,j,nt_bgc_Am:nt_bgc_Am+nblyr,1,iblk) + trcrn(i,j,nt_bgc_Am:nt_bgc_Am+nblyr,1,iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Am,iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_Am-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_Am_cat1-n3Dbcum, iblk, nzalyr, & @@ -3059,24 +3059,24 @@ subroutine accum_hist_bgc (iblk) workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_Sil+nblyr+1:nt_bgc_Sil+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice - trcr(i,j,nt_bgc_Sil:nt_bgc_Sil+nblyr,iblk) + trcr(i,j,nt_bgc_Sil:nt_bgc_Sil+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Sil,iblk) !ocean endif if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_Sil+nblyr+1:nt_bgc_Sil+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice - trcrn(i,j,nt_bgc_Sil:nt_bgc_Sil+nblyr,1,iblk) + trcrn(i,j,nt_bgc_Sil:nt_bgc_Sil+nblyr,1,iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Sil,iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_Sil-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_Sil_cat1-n3Dbcum, iblk, nzalyr, & workz2(:,:,1:nzalyr), a3Da) endif - + if (f_bgc_hum (1:1) /= 'x') then workz(:,:,:) = c0 @@ -3087,24 +3087,24 @@ subroutine accum_hist_bgc (iblk) workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_hum+nblyr+1:nt_bgc_hum+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice - trcr(i,j,nt_bgc_hum:nt_bgc_hum+nblyr,iblk) + trcr(i,j,nt_bgc_hum:nt_bgc_hum+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_hum,iblk) !ocean endif if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_hum+nblyr+1:nt_bgc_hum+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice - trcrn(i,j,nt_bgc_hum:nt_bgc_hum+nblyr,1,iblk) + trcrn(i,j,nt_bgc_hum:nt_bgc_hum+nblyr,1,iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_hum,iblk) !ocean endif enddo ! i - enddo ! j + enddo ! 
j call accum_hist_field(n_bgc_hum-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_hum_cat1-n3Dbcum, iblk, nzalyr, & workz2(:,:,1:nzalyr), a3Da) endif - + if (f_bgc_DMSPd (1:1) /= 'x') then workz(:,:,:) = c0 workz2(:,:,:) = c0 @@ -3114,23 +3114,23 @@ subroutine accum_hist_bgc (iblk) workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_DMSPd+nblyr+1:nt_bgc_DMSPd+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice - trcr(i,j,nt_bgc_DMSPd:nt_bgc_DMSPd+nblyr,iblk) + trcr(i,j,nt_bgc_DMSPd:nt_bgc_DMSPd+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DMSPd,iblk) !ocean endif if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_DMSPd+nblyr+1:nt_bgc_DMSPd+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice - trcrn(i,j,nt_bgc_DMSPd:nt_bgc_DMSPd+nblyr,1,iblk) + trcrn(i,j,nt_bgc_DMSPd:nt_bgc_DMSPd+nblyr,1,iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DMSPd,iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_DMSPd-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_DMSPd_cat1-n3Dbcum, iblk, nzalyr, & workz2(:,:,1:nzalyr), a3Da) - endif + endif if (f_bgc_DMSPp (1:1) /= 'x') then workz(:,:,:) = c0 @@ -3140,11 +3140,11 @@ subroutine accum_hist_bgc (iblk) workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_DMSPp+nblyr+1:nt_bgc_DMSPp+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice - trcr(i,j,nt_bgc_DMSPp:nt_bgc_DMSPp+nblyr,iblk) + trcr(i,j,nt_bgc_DMSPp:nt_bgc_DMSPp+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DMSPp,iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_DMSPp-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) endif @@ -3158,18 +3158,18 @@ subroutine accum_hist_bgc (iblk) workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_DMS+nblyr+1:nt_bgc_DMS+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice - trcr(i,j,nt_bgc_DMS:nt_bgc_DMS+nblyr,iblk) + trcr(i,j,nt_bgc_DMS:nt_bgc_DMS+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DMS,iblk) !ocean endif if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_DMS+nblyr+1:nt_bgc_DMS+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice - trcrn(i,j,nt_bgc_DMS:nt_bgc_DMS+nblyr,1,iblk) + trcrn(i,j,nt_bgc_DMS:nt_bgc_DMS+nblyr,1,iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DMS,iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_DMS-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_DMS_cat1-n3Dbcum, iblk, nzalyr, & @@ -3185,18 +3185,18 @@ subroutine accum_hist_bgc (iblk) workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_PON+nblyr+1:nt_bgc_PON+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice - trcr(i,j,nt_bgc_PON:nt_bgc_PON+nblyr,iblk) + trcr(i,j,nt_bgc_PON:nt_bgc_PON+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_PON,iblk) !ocean endif if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_PON+nblyr+1:nt_bgc_PON+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice - trcrn(i,j,nt_bgc_PON:nt_bgc_PON+nblyr,1,iblk) + trcrn(i,j,nt_bgc_PON:nt_bgc_PON+nblyr,1,iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_PON,iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_PON-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_PON_cat1-n3Dbcum, iblk, nzalyr, & @@ -3220,19 +3220,19 @@ subroutine init_hist_bgc_3Da character (len=3) :: nchar character (len=16):: vname_in ! 
variable name character(len=*), parameter :: subname = '(init_hist_bgc_3Da)' - + call icepack_query_parameters(z_tracers_out=z_tracers) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) ! snow+bio grid - + if (z_tracers) then do ns = 1, nstreams if (histfreq(ns) /= 'x') then - + !---------------------------------------------------------------------------- ! snow+bio grid ==> ! 1:2 snow (surface layer +interior), 3:nblyr+2 ice (bio grid), nblyr+3 ocean @@ -3247,12 +3247,12 @@ subroutine init_hist_bgc_3Da ns, f_zaero) enddo endif - - if (f_bgc_Nit(1:1) /= 'x') & + + if (f_bgc_Nit(1:1) /= 'x') & call define_hist_field(n_bgc_Nit,"bgc_Nit","mmol/m^3",tstr3Da, tcstr, & "bulk nitrate ", "snow+bio grid", c1, c0, & ns, f_bgc_Nit) - + if (f_bgc_Am(1:1) /= 'x') & call define_hist_field(n_bgc_Am,"bgc_Am","mmol/m^3",tstr3Da, tcstr, & "bulk ammonia/um ", "snow+bio grid", c1, c0, & @@ -3313,7 +3313,7 @@ subroutine init_hist_bgc_3Da enddo endif if (f_bgc_Fed (1:1) /= 'x') then - do n=1,n_fed + do n=1,n_fed write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'bgc_Fed', trim(nchar) call define_hist_field(n_bgc_Fed (n,:),vname_in,"umol/m^3",tstr3Da, tcstr, & @@ -3322,7 +3322,7 @@ subroutine init_hist_bgc_3Da enddo endif if (f_bgc_Fep (1:1) /= 'x') then - do n=1,n_fep + do n=1,n_fep write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'bgc_Fep', trim(nchar) call define_hist_field(n_bgc_Fep (n,:),vname_in,"umol/m^3",tstr3Da, tcstr, & @@ -3330,32 +3330,32 @@ subroutine init_hist_bgc_3Da ns, f_bgc_Fep ) enddo endif - + if (f_bgc_Sil(1:1) /= 'x') & call define_hist_field(n_bgc_Sil,"bgc_Sil","mmol/m^3",tstr3Da, tcstr, & "bulk silicate ", "snow+bio grid", c1, c0, & ns, f_bgc_Sil) - + if (f_bgc_hum(1:1) /= 'x') & call define_hist_field(n_bgc_hum,"bgc_hum","mmol/m^3",tstr3Da, tcstr, & "bulk humic (carbon) material ", "snow+bio grid", c1, c0, & ns, f_bgc_hum) - + if (f_bgc_DMSPp(1:1) /= 'x') & call define_hist_field(n_bgc_DMSPp,"bgc_DMSPp","mmol/m^3",tstr3Da, tcstr, & "bulk algal DMSP ", "snow+bio grid", c1, c0,& ns, f_bgc_DMSPp) - + if (f_bgc_DMSPd(1:1) /= 'x') & call define_hist_field(n_bgc_DMSPd,"bgc_DMSPd","mmol/m^3",tstr3Da, tcstr, & "bulk dissolved DMSP ", "snow+bio grid", c1, c0, & ns, f_bgc_DMSPd) - + if (f_bgc_DMS(1:1) /= 'x') & call define_hist_field(n_bgc_DMS,"bgc_DMS","mmol/m^3",tstr3Da, tcstr, & "bulk DMS gas ", "snow+bio grid", c1, c0, & ns, f_bgc_DMS) - + if (f_bgc_PON(1:1) /= 'x') & call define_hist_field(n_bgc_PON,"bgc_PON","mmol/m^3",tstr3Da, tcstr, & "other bulk nitrogen pool ", "snow+bio grid", c1, c0, & @@ -3365,11 +3365,11 @@ subroutine init_hist_bgc_3Da ! 
Category 1 BGC !---------------------------------------------- - if (f_bgc_Nit_cat1(1:1) /= 'x') & + if (f_bgc_Nit_cat1(1:1) /= 'x') & call define_hist_field(n_bgc_Nit_cat1,"bgc_Nit_cat1","mmol/m^3",tstr3Da, tcstr, & "bulk nitrate in cat 1 ", "snow+bio grid", c1, c0, & ns, f_bgc_Nit_cat1) - + if (f_bgc_Am_cat1(1:1) /= 'x') & call define_hist_field(n_bgc_Am_cat1,"bgc_Am_cat1","mmol/m^3",tstr3Da, tcstr, & "bulk ammonia/um in cat 1", "snow+bio grid", c1, c0, & @@ -3412,7 +3412,7 @@ subroutine init_hist_bgc_3Da enddo endif if (f_bgc_Fed_cat1 (1:1) /= 'x') then - do n=1,n_fed + do n=1,n_fed write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'bgc_Fed_cat1', trim(nchar) call define_hist_field(n_bgc_Fed_cat1 (n,:),vname_in,"umol/m^3",tstr3Da, tcstr, & @@ -3421,7 +3421,7 @@ subroutine init_hist_bgc_3Da enddo endif if (f_bgc_Fep_cat1 (1:1) /= 'x') then - do n=1,n_fep + do n=1,n_fep write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'bgc_Fep_cat1', trim(nchar) call define_hist_field(n_bgc_Fep_cat1 (n,:),vname_in,"umol/m^3",tstr3Da, tcstr, & @@ -3429,32 +3429,32 @@ subroutine init_hist_bgc_3Da ns, f_bgc_Fep_cat1 ) enddo endif - + if (f_bgc_Sil_cat1(1:1) /= 'x') & call define_hist_field(n_bgc_Sil_cat1,"bgc_Sil_cat1","mmol/m^3",tstr3Da, tcstr, & "bulk silicate in cat 1", "snow+bio grid", c1, c0, & ns, f_bgc_Sil_cat1) - + if (f_bgc_hum_cat1(1:1) /= 'x') & call define_hist_field(n_bgc_hum,"bgc_hum_cat1","mmol/m^3",tstr3Da, tcstr, & "bulk humic (carbon) material in cat 1", "snow+bio grid", c1, c0, & ns, f_bgc_hum_cat1) - + if (f_bgc_DMSPd_cat1(1:1) /= 'x') & call define_hist_field(n_bgc_DMSPd_cat1,"bgc_DMSPd_cat1","mmol/m^3",tstr3Da, tcstr, & "bulk dissolved DMSP in cat 1", "snow+bio grid", c1, c0, & ns, f_bgc_DMSPd_cat1) - + if (f_bgc_DMS_cat1(1:1) /= 'x') & call define_hist_field(n_bgc_DMS_cat1,"bgc_DMS_cat1","mmol/m^3",tstr3Da, tcstr, & "bulk DMS gas in cat 1", "snow+bio grid", c1, c0, & ns, f_bgc_DMS_cat1) - + if (f_bgc_PON_cat1(1:1) /= 'x') & call define_hist_field(n_bgc_PON_cat1,"bgc_PON_cat1","mmol/m^3",tstr3Da, tcstr, & "other bulk nitrogen pool in cat 1", "snow+bio grid", c1, c0, & ns, f_bgc_PON_cat1) - + endif ! histfreq(ns) /= 'x' enddo !ns @@ -3473,7 +3473,7 @@ subroutine init_history_bgc use ice_arrays_column, only: PP_net, grow_net, hbri, & ice_bio_net, snow_bio_net, fbio_snoice, fbio_atmice, & - fzsal, fzsal_g, zfswin + fzsal, fzsal_g, zfswin use ice_flux_bgc, only: flux_bio, flux_bio_ai, fnit, fsil, & famm, fdmsp, fdms, fhum, fdust, falgalN, fdoc, fdic, & fdon, ffep, ffed diff --git a/cicecore/cicedynB/analysis/ice_history_drag.F90 b/cicecore/cicedynB/analysis/ice_history_drag.F90 index c0a1f99bd..fba19b364 100644 --- a/cicecore/cicedynB/analysis/ice_history_drag.F90 +++ b/cicecore/cicedynB/analysis/ice_history_drag.F90 @@ -1,7 +1,7 @@ !======================================================================= ! 2013 module for form drag parameters -! authors Michel Tsamados, David Schroeder, CPOM +! authors Michel Tsamados, David Schroeder, CPOM module ice_history_drag @@ -17,7 +17,7 @@ module ice_history_drag implicit none private public :: accum_hist_drag, init_hist_drag_2D - + !--------------------------------------------------------------- ! 
flags: write to output file if true or histfreq value !--------------------------------------------------------------- @@ -31,7 +31,7 @@ module ice_history_drag !--------------------------------------------------------------- namelist / icefields_drag_nml / & - f_Cdn_atm, f_Cdn_ocn , & + f_Cdn_atm, f_Cdn_ocn , & f_drag !--------------------------------------------------------------- @@ -47,7 +47,7 @@ module ice_history_drag n_Cdn_atm_skin , n_Cdn_atm_floe, & n_Cdn_atm_pond , n_Cdn_atm_rdg, & n_Cdn_ocn_skin , n_Cdn_ocn_floe, & - n_Cdn_ocn_keel , n_Cdn_atm_ratio + n_Cdn_ocn_keel , n_Cdn_atm_ratio !======================================================================= @@ -124,43 +124,43 @@ subroutine init_hist_drag_2D "hdraft: draught", & "none", c1, c0, & ns, f_drag) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_hridge,"hridge","m",tstr2D, tcstr, & "hridge: ridge height", & "none", c1, c0, & ns, f_drag) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_distrdg,"distrdg","m",tstr2D, tcstr, & "distrdg: distance between ridges", & "none", c1, c0, & - ns, f_drag) + ns, f_drag) if (f_drag(1:1) /= 'x') & call define_hist_field(n_hkeel,"hkeel","m",tstr2D, tcstr, & "hkeel: keel depth", & "none", c1, c0, & ns, f_drag) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_dkeel,"dkeel","m",tstr2D, tcstr, & "dkeel: distance between keels", & "none", c1, c0, & - ns, f_drag) + ns, f_drag) if (f_drag(1:1) /= 'x') & call define_hist_field(n_lfloe,"lfloe","m",tstr2D, tcstr, & "lfloe: floe length", & "none", c1, c0, & ns, f_drag) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_dfloe,"dfloe","m",tstr2D, tcstr, & "dfloe: distance between floes", & "none", c1, c0, & - ns, f_drag) - + ns, f_drag) + if (f_Cdn_atm(1:1) /= 'x') & call define_hist_field(n_Cdn_atm,"Cdn_atm","none",tstr2D, tcstr, & "Ca: total ice-atm drag coefficient", & @@ -172,49 +172,49 @@ subroutine init_hist_drag_2D "Cdn_ocn: total ice-ocn drag coefficient", & "none", c1, c0, & ns, f_Cdn_ocn) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_Cdn_atm_skin,"Cdn_atm_skin","none", & tstr2D, tcstr, & "Cdn_atm_skin: neutral skin ice-atm drag coefficient", & "none", c1, c0, & ns, f_drag) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_Cdn_atm_floe,"Cdn_atm_floe","none", & tstr2D, tcstr, & "Cdn_atm_floe: neutral floe edge ice-atm drag coefficient", & "none", c1, c0, & - ns, f_drag) - + ns, f_drag) + if (f_drag(1:1) /= 'x') & call define_hist_field(n_Cdn_atm_pond,"Cdn_atm_pond","none", & tstr2D, tcstr, & "Cdn_atm_pond: neutral pond edge ice-atm drag coefficient", & "none", c1, c0, & ns, f_drag) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_Cdn_atm_rdg,"Cdn_atm_rdg","none", & tstr2D, tcstr, & "Cdn_atm_rdg: neutral ridge ice-atm drag coefficient", & "none", c1, c0, & ns, f_drag) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_Cdn_ocn_skin,"Cdn_ocn_skin","none", & tstr2D, tcstr, & "Cdn_ocn_skin: neutral skin ice-ocn drag coefficient", & "none", c1, c0, & ns, f_drag) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_Cdn_ocn_floe,"Cdn_ocn_floe","none", & tstr2D, tcstr, & "Cdn_ocn_floe: neutral floe edge ice-ocn drag coefficient", & "none", c1, c0, & - ns, f_drag) - + ns, f_drag) + if (f_drag(1:1) /= 'x') & call define_hist_field(n_Cdn_ocn_keel,"Cdn_ocn_keel","none", & tstr2D, tcstr, & @@ -281,21 +281,21 @@ subroutine accum_hist_drag (iblk) call accum_hist_field(n_lfloe, iblk, lfloe(:,:,iblk), a2D) call accum_hist_field(n_dfloe, iblk, dfloe(:,:,iblk), a2D) call accum_hist_field(n_Cdn_atm_rdg, & - iblk, 
Cdn_atm_rdg(:,:,iblk), a2D) + iblk, Cdn_atm_rdg(:,:,iblk), a2D) call accum_hist_field(n_Cdn_atm_floe, & iblk, Cdn_atm_floe(:,:,iblk), a2D) call accum_hist_field(n_Cdn_atm_pond, & iblk, Cdn_atm_pond(:,:,iblk), a2D) call accum_hist_field(n_Cdn_atm_skin, & - iblk, Cdn_atm_skin(:,:,iblk), a2D) + iblk, Cdn_atm_skin(:,:,iblk), a2D) call accum_hist_field(n_Cdn_atm_ratio, & iblk, Cdn_atm_ratio(:,:,iblk), a2D) call accum_hist_field(n_Cdn_ocn_keel, & - iblk, Cdn_ocn_keel(:,:,iblk), a2D) + iblk, Cdn_ocn_keel(:,:,iblk), a2D) call accum_hist_field(n_Cdn_ocn_floe, & iblk, Cdn_ocn_floe(:,:,iblk), a2D) call accum_hist_field(n_Cdn_ocn_skin, & - iblk, Cdn_ocn_skin(:,:,iblk), a2D) + iblk, Cdn_ocn_skin(:,:,iblk), a2D) end if endif ! if(allocated(a2D)) diff --git a/cicecore/cicedynB/analysis/ice_history_fsd.F90 b/cicecore/cicedynB/analysis/ice_history_fsd.F90 index c64ecbefa..50fee99e7 100644 --- a/cicecore/cicedynB/analysis/ice_history_fsd.F90 +++ b/cicecore/cicedynB/analysis/ice_history_fsd.F90 @@ -21,7 +21,7 @@ module ice_history_fsd private public :: accum_hist_fsd, init_hist_fsd_2D, init_hist_fsd_3Df, & init_hist_fsd_4Df - + !--------------------------------------------------------------- ! flags: write to output file if true or histfreq value !--------------------------------------------------------------- @@ -79,7 +79,6 @@ subroutine init_hist_fsd_2D integer (kind=int_kind) :: ns integer (kind=int_kind) :: nml_error ! namelist i/o error flag - real (kind=dbl_kind) :: secday logical (kind=log_kind) :: tr_fsd, wave_spec character(len=*), parameter :: subname = '(init_hist_fsd_2D)' @@ -273,12 +272,12 @@ subroutine init_hist_fsd_4Df if (histfreq(ns) /= 'x') then if (f_afsdn(1:1) /= 'x') & - call define_hist_field(n_afsdn,"afsdn","1",tstr4Df, tcstr, & + call define_hist_field(n_afsdn,"afsdn","1",tstr4Df, tcstr, & "areal floe size and thickness distribution", & "per unit bin width", c1, c0, ns, f_afsdn) endif ! if (histfreq(ns) /= 'x') then - enddo ! ns + enddo ! ns endif ! tr_fsd @@ -398,7 +397,7 @@ subroutine accum_hist_fsd (iblk) if (f_fsdrad(1:1) /= 'x') then do j = 1, ny_block do i = 1, nx_block - worka(i,j) = c0 + worka(i,j) = c0 if (aice_init(i,j,iblk) > puny) then do k = 1, nfsd_hist do n = 1, ncat_hist @@ -450,7 +449,7 @@ subroutine accum_hist_fsd (iblk) end do call accum_hist_field(n_afsd-n3Dacum, iblk, nfsd_hist, worke, a3Df) endif - + if (f_dafsd_newi(1:1)/= 'x') & call accum_hist_field(n_dafsd_newi-n3Dacum, iblk, nfsd_hist, & d_afsd_newi(:,:,1:nfsd_hist,iblk), a3Df) @@ -473,7 +472,7 @@ subroutine accum_hist_fsd (iblk) if (f_afsdn(1:1) /= 'x') then do n = 1, ncat_hist - do k = 1, nfsd_hist + do k = 1, nfsd_hist do j = 1, ny_block do i = 1, nx_block workd(i,j,k,n) = trcrn(i,j,nt_fsd+k-1,n,iblk) & diff --git a/cicecore/cicedynB/analysis/ice_history_mechred.F90 b/cicecore/cicedynB/analysis/ice_history_mechred.F90 index 920a83b47..98c58bc39 100644 --- a/cicecore/cicedynB/analysis/ice_history_mechred.F90 +++ b/cicecore/cicedynB/analysis/ice_history_mechred.F90 @@ -20,7 +20,7 @@ module ice_history_mechred implicit none private public :: accum_hist_mechred, init_hist_mechred_2D, init_hist_mechred_3Dc - + !--------------------------------------------------------------- ! 
flags: write to output file if true or histfreq value !--------------------------------------------------------------- @@ -188,13 +188,13 @@ subroutine init_hist_mechred_2D "ice area ridging rate", & "none", secday*c100, c0, & ns, f_dardg1dt) - + if (f_dardg2dt(1:1) /= 'x') & call define_hist_field(n_dardg2dt,"dardg2dt","%/day",tstr2D, tcstr, & "ridge area formation rate", & "none", secday*c100, c0, & ns, f_dardg2dt) - + if (f_dvirdgdt(1:1) /= 'x') & call define_hist_field(n_dvirdgdt,"dvirdgdt","cm/day",tstr2D, tcstr, & "ice volume ridging rate", & diff --git a/cicecore/cicedynB/analysis/ice_history_pond.F90 b/cicecore/cicedynB/analysis/ice_history_pond.F90 index ef9a5237e..f6e4b8737 100644 --- a/cicecore/cicedynB/analysis/ice_history_pond.F90 +++ b/cicecore/cicedynB/analysis/ice_history_pond.F90 @@ -20,7 +20,7 @@ module ice_history_pond implicit none private public :: accum_hist_pond, init_hist_pond_2D, init_hist_pond_3Dc - + !--------------------------------------------------------------- ! flags: write to output file if true or histfreq value !--------------------------------------------------------------- @@ -40,9 +40,9 @@ module ice_history_pond namelist / icefields_pond_nml / & f_apondn, f_apeffn , & f_hpondn, & - f_apond, f_apond_ai , & - f_hpond, f_hpond_ai , & - f_ipond, f_ipond_ai , & + f_apond, f_apond_ai , & + f_hpond, f_hpond_ai , & + f_ipond, f_ipond_ai , & f_apeff, f_apeff_ai !--------------------------------------------------------------- @@ -50,7 +50,7 @@ module ice_history_pond !--------------------------------------------------------------- integer (kind=int_kind), dimension(max_nstrm) :: & - n_apondn , n_apeffn , & + n_apondn , n_apeffn , & n_hpondn , & n_apond , n_apond_ai, & n_hpond , n_hpond_ai, & @@ -147,7 +147,7 @@ subroutine init_hist_pond_2D ns, f_apond) if (f_apond_ai(1:1) /= 'x') & - call define_hist_field(n_apond_ai,"apond_ai","1",tstr2D, tcstr, & + call define_hist_field(n_apond_ai,"apond_ai","1",tstr2D, tcstr, & "melt pond fraction of grid cell", & "weighted by ice area", c1, c0, & ns, f_apond_ai) @@ -159,7 +159,7 @@ subroutine init_hist_pond_2D ns, f_hpond) if (f_hpond_ai(1:1) /= 'x') & - call define_hist_field(n_hpond_ai,"hpond_ai","m",tstr2D, tcstr, & + call define_hist_field(n_hpond_ai,"hpond_ai","m",tstr2D, tcstr, & "mean melt pond depth over grid cell", & "weighted by ice area", c1, c0, & ns, f_hpond) @@ -171,7 +171,7 @@ subroutine init_hist_pond_2D ns, f_ipond) if (f_ipond_ai(1:1) /= 'x') & - call define_hist_field(n_ipond_ai,"ipond_ai","m",tstr2D, tcstr, & + call define_hist_field(n_ipond_ai,"ipond_ai","m",tstr2D, tcstr, & "mean pond ice thickness over grid cell", & "weighted by ice area", c1, c0, & ns, f_ipond_ai) @@ -192,7 +192,7 @@ subroutine init_hist_pond_2D enddo ! nstreams endif ! tr_pond - + end subroutine init_hist_pond_2D !======================================================================= @@ -212,14 +212,14 @@ subroutine init_hist_pond_3Dc file=__FILE__, line=__LINE__) if (tr_pond) then - + ! 3D (category) variables must be looped separately do ns = 1, nstreams if (histfreq(ns) /= 'x') then if (f_apondn(1:1) /= 'x') & call define_hist_field(n_apondn,"apondn","1",tstr3Dc, tcstr, & - "melt pond fraction, category","none", c1, c0, & + "melt pond fraction, category","none", c1, c0, & ns, f_apondn) if (f_hpondn(1:1) /= 'x') & @@ -376,7 +376,7 @@ subroutine accum_hist_pond (iblk) * trcr(:,:,nt_ipnd,iblk), a2D) endif ! 
ponds

-      this_block = get_block(blocks_ice(iblk),iblk)         
+      this_block = get_block(blocks_ice(iblk),iblk)
       ilo = this_block%ilo
       ihi = this_block%ihi
       jlo = this_block%jlo
diff --git a/cicecore/cicedynB/analysis/ice_history_shared.F90 b/cicecore/cicedynB/analysis/ice_history_shared.F90
index 66c4401c7..ee48a9996 100644
--- a/cicecore/cicedynB/analysis/ice_history_shared.F90
+++ b/cicecore/cicedynB/analysis/ice_history_shared.F90
@@ -2,17 +2,17 @@
 !
 ! Output files: netCDF or binary data, Fortran unformatted dumps
 !
-! The following variables are currently hard-wired as snapshots 
+! The following variables are currently hard-wired as snapshots
 ! (instantaneous rather than time-averages):
 ! divu, shear, sig1, sig2, sigP, trsig, mlt_onset, frz_onset, hisnap, aisnap
 !
 ! Options for histfreq: '1','h','d','m','y','x', where x means that
-! output stream will not be used (recommended for efficiency). 
-! histfreq_n can be any nonnegative integer, where 0 means that the 
+! output stream will not be used (recommended for efficiency).
+! histfreq_n can be any nonnegative integer, where 0 means that the
 ! corresponding histfreq frequency will not be used.
 ! The flags (f_<var>) can be set to '1','h','d','m','y' or 'x', where
 ! 'x' means the field will not be written. To output the same field at
-! more than one frequency, for instance monthy and daily, set 
+! more than one frequency, for instance monthly and daily, set
 ! f_<var> = 'md'.
 !
 ! authors Tony Craig and Bruce Briegleb, NCAR
@@ -34,7 +34,7 @@ module ice_history_shared

   private
   public :: define_hist_field, accum_hist_field, icefields_nml, construct_filename
- 
+
   integer (kind=int_kind), public :: history_precision

   logical (kind=log_kind), public :: &
@@ -142,7 +142,7 @@ module ice_history_shared
      a4Di(:,:,:,:,:,:), & ! field accumulations/averages, 4D categories,vertical, ice
      a4Ds(:,:,:,:,:,:), & ! field accumulations/averages, 4D categories,vertical, snow
      a4Df(:,:,:,:,:,:)   ! field accumulations/averages, 4D floe size, thickness categories
- 
+
   real (kind=dbl_kind), allocatable, public :: &
      Tinz4d (:,:,:,:) , & ! array for Tin
      Tsnz4d (:,:,:,:) , & ! array for Tsn
@@ -199,7 +199,7 @@ module ice_history_shared
         nstr4Df = 'NLON NLAT NFSD NCAT', & ! vcoord for N cell, 4D, fsd
         estr4Df = 'ELON ELAT NFSD NCAT'    ! vcoord for E cell, 4D, fsd
 !ferret
-!        tstr4Di = 'TLON TLAT VGRDi NCAT time', & ! ferret can not handle time 
+!        tstr4Di = 'TLON TLAT VGRDi NCAT time', & ! ferret can not handle time
 !        ustr4Di = 'ULON ULAT VGRDi NCAT time', & ! index on 4D variables.
 !        tstr4Ds = 'TLON TLAT VGRDs NCAT time', & ! Use 'ferret' lines instead
 !        ustr4Ds = 'ULON ULAT VGRDs NCAT time', & ! 
(below also) @@ -367,10 +367,10 @@ module ice_history_shared f_keffn_top = 'x', & f_Tinz = 'x', f_Sinz = 'x', & f_Tsnz = 'x', & - f_a11 = 'x', f_a12 = 'x', & - f_e11 = 'x', f_e12 = 'x', & + f_a11 = 'x', f_a12 = 'x', & + f_e11 = 'x', f_e12 = 'x', & f_e22 = 'x', & - f_s11 = 'x', f_s12 = 'x', & + f_s11 = 'x', f_s12 = 'x', & f_s22 = 'x', & f_yieldstress11 = 'x', & f_yieldstress12 = 'x', & @@ -411,7 +411,7 @@ module ice_history_shared f_atmspd, f_atmdir , & f_fswup, & f_fswdn, f_flwdn , & - f_snow, f_snow_ai , & + f_snow, f_snow_ai , & f_rain, f_rain_ai , & f_sst, f_sss , & f_uocn, f_vocn , & @@ -436,8 +436,8 @@ module ice_history_shared f_snoice, f_dsnow , & f_meltt, f_melts , & f_meltb, f_meltl , & - f_fresh, f_fresh_ai , & - f_fsalt, f_fsalt_ai , & + f_fresh, f_fresh_ai , & + f_fsalt, f_fsalt_ai , & f_fbot, & f_fhocn, f_fhocn_ai , & f_fswthru, f_fswthru_ai,& @@ -715,7 +715,7 @@ module ice_history_shared n_trsig , n_icepresent , & n_iage , n_FY , & n_fsurf_ai , & - n_fcondtop_ai, n_fmeltt_ai , & + n_fcondtop_ai, n_fmeltt_ai , & n_aicen , n_vicen , & n_fsurfn_ai , & n_fcondtopn_ai, & @@ -765,7 +765,7 @@ subroutine construct_filename(ncfile,suffix,ns) iyear = myear imonth = mmonth iday = mday - isec = msec - dt + isec = int(msec - dt,int_kind) ! construct filename if (write_ic) then @@ -863,7 +863,7 @@ subroutine define_hist_field(id, vname, vunit, vcoord, vcellmeas, & character (len=*), intent(in) :: & vhistfreq ! history frequency - + integer (kind=int_kind), intent(in) :: & ns ! history file stream index @@ -970,7 +970,7 @@ subroutine accum_hist_field_2D(id, iblk, field_accum, field) integer (int_kind), dimension(:), intent(in) :: & ! max_nstrm id ! location in avail_fields array for use in ! later routines - + integer (kind=int_kind), intent(in) :: iblk real (kind=dbl_kind), intent(in) :: & @@ -1030,7 +1030,7 @@ subroutine accum_hist_field_3D(id, iblk, ndim, field_accum, field) integer (int_kind), dimension(:), intent(in) :: & ! max_nstrm id ! location in avail_fields array for use in ! later routines - + integer (kind=int_kind), intent(in) :: iblk integer (kind=int_kind), intent(in) :: & @@ -1095,7 +1095,7 @@ subroutine accum_hist_field_4D(id, iblk, ndim3, ndim4, field_accum, field) integer (int_kind), dimension(:), intent(in) :: & ! max_nstrm id ! location in avail_fields array for use in ! later routines - + integer (kind=int_kind), intent(in) :: iblk integer (kind=int_kind), intent(in) :: & diff --git a/cicecore/cicedynB/analysis/ice_history_snow.F90 b/cicecore/cicedynB/analysis/ice_history_snow.F90 index 090759759..0ec4144bf 100644 --- a/cicecore/cicedynB/analysis/ice_history_snow.F90 +++ b/cicecore/cicedynB/analysis/ice_history_snow.F90 @@ -18,7 +18,7 @@ module ice_history_snow implicit none private public :: accum_hist_snow, init_hist_snow_2D, init_hist_snow_3Dc - + !--------------------------------------------------------------- ! flags: write to output file if true or histfreq value !--------------------------------------------------------------- @@ -193,7 +193,7 @@ subroutine init_hist_snow_2D (dt) endif ! histfreq(ns) /= 'x' enddo ! nstreams endif ! 
tr_snow - + end subroutine init_hist_snow_2D !======================================================================= @@ -206,7 +206,7 @@ subroutine init_hist_snow_3Dc integer (kind=int_kind) :: ns logical (kind=log_kind) :: tr_snow character(len=*), parameter :: subname = '(init_hist_pond_3Dc)' - + call icepack_query_tracer_flags(tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & @@ -263,7 +263,6 @@ subroutine accum_hist_snow (iblk) use ice_arrays_column, only: meltsliq use ice_blocks, only: block, nx_block, ny_block - use ice_domain, only: blocks_ice use ice_flux, only: fsloss use ice_history_shared, only: n2D, a2D, a3Dc, ncat_hist, & accum_hist_field, nzslyr @@ -275,7 +274,7 @@ subroutine accum_hist_snow (iblk) ! local variables integer (kind=int_kind) :: & - i, j, k, n + k, n integer (kind=int_kind) :: & nt_smice, nt_smliq, nt_rhos, nt_rsnw @@ -356,7 +355,7 @@ subroutine accum_hist_snow (iblk) if (f_fsloss(1:1)/= 'x') & call accum_hist_field(n_fsloss, iblk, & fsloss(:,:,iblk), a2D) - + endif ! allocated(a2D) ! 3D category fields @@ -422,7 +421,7 @@ subroutine accum_hist_snow (iblk) endif ! allocated(a3Dc) endif ! tr_snow - + end subroutine accum_hist_snow !======================================================================= diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index 5cf0b5dbc..f71d959da 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -1271,7 +1271,7 @@ subroutine stress_eap (nx_block, ny_block, & csigmne, csigmnw, csigmse, csigmsw , & csig12ne, csig12nw, csig12se, csig12sw , & str12ew, str12we, str12ns, str12sn , & - strp_tmp, strm_tmp + strp_tmp, strm_tmp real (kind=dbl_kind) :: & alpharne, alpharnw, alpharsw, alpharse, & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index ecd283642..c2060285a 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -98,12 +98,12 @@ subroutine evp (dt) stress12_1, stress12_2, stress12_3, stress12_4, & stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U - use ice_grid, only: hm, tmask, umask, umaskCD, nmask, emask, uvm, epm, npm, & + use ice_grid, only: tmask, umask, umaskCD, nmask, emask, uvm, epm, npm, & iceumask, iceemask, icenmask, & dxE, dxN, dxT, dxU, dyE, dyN, dyT, dyU, & ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & dxhy, dyhx, cxp, cyp, cxm, cym, & - tarear, uarear, earear, narear, grid_average_X2Y, tarea, uarea, & + tarear, uarear, earear, narear, grid_average_X2Y, uarea, & grid_type, grid_ice, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, vice, vsno, uvel, vvel, uvelN, vvelN, & @@ -871,7 +871,7 @@ subroutine evp (dt) shearU (:,:,iblk), & shear (:,:,iblk), divu (:,:,iblk), & rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) - + endif enddo !$OMP END PARALLEL DO @@ -1408,7 +1408,7 @@ subroutine stress (nx_block, ny_block, & csigmne, csigmnw, csigmse, csigmsw , & csig12ne, csig12nw, csig12se, csig12sw , & str12ew, str12we, str12ns, str12sn , & - strp_tmp, strm_tmp, tmp + strp_tmp, strm_tmp character(len=*), parameter :: subname = '(stress)' diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 index 2f5389d06..fe04a3d63 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 @@ -779,8 +779,7 
@@ subroutine stepu_last(NA_len, rhow, lb, ub, Cw, aiu, uocn, vocn, &

       use ice_kinds_mod
       use ice_constants, only : c0, c1
-      use ice_dyn_shared, only : brlx, revp, u0, cosw, sinw, &
-         seabed_stress
+      use ice_dyn_shared, only : brlx, revp, u0, cosw, sinw

       implicit none
diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
index 237861c60..95d2eedb1 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90
@@ -1896,7 +1896,7 @@ subroutine deformationsC_T (nx_block, ny_block, &
       !-----------------------------------------------------------------
       ! deformations for mechanical redistribution
       !-----------------------------------------------------------------
- 
+
          shearTsqr = (shearU(i  ,j  )**2 * uarea(i  ,j  )  &
                     + shearU(i  ,j-1)**2 * uarea(i  ,j-1)  &
                     + shearU(i-1,j-1)**2 * uarea(i-1,j-1)  &
@@ -2326,7 +2326,7 @@ end subroutine visc_replpress
 subroutine dyn_haloUpdate1(halo_info, halo_info_mask, field_loc, field_type, fld1)

       use ice_boundary, only: ice_halo, ice_HaloUpdate
-      use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle
+      use ice_domain, only: maskhalo_dyn, halo_dynbundle
       use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound

       type (ice_halo), intent(in) :: &
@@ -2342,12 +2342,6 @@ subroutine dyn_haloUpdate1(halo_info, halo_info_mask, field_loc, field_type, fld

       ! local variables

-      integer (kind=int_kind) :: &
-         iblk            ! iblock
-
-      real (kind=dbl_kind), dimension (nx_block,ny_block,1,max_blocks) :: &
-         fldbundle       ! work array for boundary updates
-
       character(len=*), parameter :: subname = '(dyn_haloUpdate1)'

       call ice_timer_start(timer_bound)
@@ -2370,7 +2364,7 @@ end subroutine dyn_haloUpdate1
 subroutine dyn_haloUpdate2(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2)

       use ice_boundary, only: ice_halo, ice_HaloUpdate
-      use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle
+      use ice_domain, only: maskhalo_dyn, halo_dynbundle
       use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound

       type (ice_halo), intent(in) :: &
@@ -2387,9 +2381,6 @@ subroutine dyn_haloUpdate2(halo_info, halo_info_mask, field_loc, field_type, fld

       ! local variables

-      integer (kind=int_kind) :: &
-         iblk            ! iblock
-
       real (kind=dbl_kind), dimension (nx_block,ny_block,2,max_blocks) :: &
          fldbundle       ! work array for boundary updates
@@ -2434,7 +2425,7 @@ end subroutine dyn_haloUpdate2
 subroutine dyn_haloUpdate3(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2, fld3)

       use ice_boundary, only: ice_halo, ice_HaloUpdate
-      use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle
+      use ice_domain, only: maskhalo_dyn, halo_dynbundle
       use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound

       type (ice_halo), intent(in) :: &
@@ -2452,9 +2443,6 @@ subroutine dyn_haloUpdate3(halo_info, halo_info_mask, field_loc, field_type, fld

       ! local variables

-      integer (kind=int_kind) :: &
-         iblk            ! iblock
-
       real (kind=dbl_kind), dimension (nx_block,ny_block,3,max_blocks) :: &
          fldbundle       ! work array for boundary updates
@@ -2503,7 +2491,7 @@ end subroutine dyn_haloUpdate3
 subroutine dyn_haloUpdate4(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2, fld3, fld4)

       use ice_boundary, only: ice_halo, ice_HaloUpdate
-      use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle
+      use ice_domain, only: maskhalo_dyn, halo_dynbundle
       use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound

       type (ice_halo), intent(in) :: &
@@ -2522,9 +2510,6 @@ subroutine dyn_haloUpdate4(halo_info, halo_info_mask, field_loc, field_type, fld

       ! local variables

-      integer (kind=int_kind) :: &
-         iblk            ! iblock
-
       real (kind=dbl_kind), dimension (nx_block,ny_block,4,max_blocks) :: &
          fldbundle       ! work array for boundary updates
@@ -2577,7 +2562,7 @@ end subroutine dyn_haloUpdate4
 subroutine dyn_haloUpdate5(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2, fld3, fld4, fld5)

       use ice_boundary, only: ice_halo, ice_HaloUpdate
-      use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle
+      use ice_domain, only: maskhalo_dyn, halo_dynbundle
       use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound

       type (ice_halo), intent(in) :: &
@@ -2597,9 +2582,6 @@ subroutine dyn_haloUpdate5(halo_info, halo_info_mask, field_loc, field_type, fld

       ! local variables

-      integer (kind=int_kind) :: &
-         iblk            ! iblock
-
       real (kind=dbl_kind), dimension (nx_block,ny_block,5,max_blocks) :: &
          fldbundle       ! work array for boundary updates
diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
index 7e0bdb745..17fd0b73f 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90
@@ -69,7 +69,8 @@ module ice_dyn_vp
       dim_pgmres     , & ! size of pgmres Krylov subspace
       maxits_fgmres  , & ! max nb of iteration for fgmres
       maxits_pgmres  , & ! max nb of iteration for pgmres
-      fpfunc_andacc  , & ! fixed point function for Anderson acceleration: 1: g(x) = FMGRES(A(x),b(x)), 2: g(x) = x - A(x)x + b(x)
+      fpfunc_andacc  , & ! fixed point function for Anderson acceleration:
+                         ! 1: g(x) = FGMRES(A(x),b(x)), 2: g(x) = x - A(x)x + b(x)
       dim_andacc     , & ! size of Anderson minimization matrix (number of saved previous residuals)
       start_andacc       ! acceleration delay factor (acceleration starts at this iteration)
@@ -87,7 +88,8 @@ module ice_dyn_vp
       reltol_andacc      ! relative tolerance for Anderson acceleration

    character (len=char_len), public :: &
-      precond        , & ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), 'pgmres' (Jacobi-preconditioned GMRES)
+      precond        , & ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal),
+                         ! 'pgmres' (Jacobi-preconditioned GMRES)
       algo_nonlin    , & ! nonlinear algorithm: 'picard' (Picard iteration), 'anderson' (Anderson acceleration)
       ortho_type         ! type of orthogonalization for FGMRES ('cgs' or 'mgs')
@@ -1095,7 +1097,8 @@ subroutine anderson_solver (icellt , icellu , &
       endif
 #else
 ! 
Anderson solver is not usable without LAPACK; abort - call abort_ice(error_message=subname // " CICE was not compiled with LAPACK, and Anderson solver was chosen (algo_nonlin = 'anderson')" , & + call abort_ice(error_message=subname // " CICE was not compiled with LAPACK, "// & + "and Anderson solver was chosen (algo_nonlin = 'anderson')" , & file=__FILE__, line=__LINE__) #endif endif diff --git a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 index 390631eaa..43fe5af13 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 @@ -19,6 +19,7 @@ module ice_transport_driver field_type_scalar, field_type_vector, & field_loc_NEcorner, & field_loc_Nface, field_loc_Eface + use ice_diagnostics, only: diagnostic_abort use ice_fileunits, only: nu_diag use ice_exit, only: abort_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted @@ -690,7 +691,7 @@ subroutine transport_remap (dt) if (ckflag) then write (nu_diag,*) 'istep1, my_task, iblk, cat =', & istep1, my_task, iblk, n - call abort_ice(subname//'ERROR: monotonicity error') + call diagnostic_abort(istop,jstop,iblk,' monotonicity error') endif enddo ! n diff --git a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 index 330816529..6fd037b7b 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 @@ -30,11 +30,13 @@ module ice_transport_remap use ice_kinds_mod use ice_blocks, only: nx_block, ny_block + use ice_calendar, only: istep1 use ice_communicate, only: my_task use ice_constants, only: c0, c1, c2, c12, p333, p4, p5, p6, & eps13, eps16, & field_loc_center, field_type_scalar, & field_loc_NEcorner, field_type_vector + use ice_diagnostics, only: diagnostic_abort use ice_domain_size, only: max_blocks, ncat use ice_fileunits, only: nu_diag use ice_exit, only: abort_ice @@ -329,7 +331,6 @@ subroutine horizontal_remap (dt, ntrace, & tarear, hm, & xav, yav, xxav, yyav ! xyav, xxxav, xxyav, xyyav, yyyav - use ice_calendar, only: istep1 use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound real (kind=dbl_kind), intent(in) :: & @@ -556,14 +557,7 @@ subroutine horizontal_remap (dt, ntrace, & istop, jstop) if (l_stop) then - write(nu_diag,*) 'istep1, my_task, iblk =', & - istep1, my_task, iblk - write (nu_diag,*) 'Global block:', this_block%block_id - if (istop > 0 .and. jstop > 0) & - write(nu_diag,*) 'Global i and j:', & - this_block%i_glob(istop), & - this_block%j_glob(jstop) - call abort_ice(subname//'ERROR: bad departure points') + call diagnostic_abort(istop,jstop,iblk,'bad departure points') endif enddo ! iblk @@ -832,15 +826,7 @@ subroutine horizontal_remap (dt, ntrace, & mm (:,:,0,iblk)) if (l_stop) then - this_block = get_block(blocks_ice(iblk),iblk) - write (nu_diag,*) 'istep1, my_task, iblk, cat =', & - istep1, my_task, iblk, '0' - write (nu_diag,*) 'Global block:', this_block%block_id - if (istop > 0 .and. jstop > 0) & - write(nu_diag,*) 'Global i and j:', & - this_block%i_glob(istop), & - this_block%j_glob(jstop) - call abort_ice (subname//'ERROR: negative area (open water)') + call diagnostic_abort(istop,jstop,iblk,'negative area (open water)') endif ! 
ice categories @@ -860,12 +846,7 @@ subroutine horizontal_remap (dt, ntrace, & if (l_stop) then write (nu_diag,*) 'istep1, my_task, iblk, cat =', & istep1, my_task, iblk, n - write (nu_diag,*) 'Global block:', this_block%block_id - if (istop > 0 .and. jstop > 0) & - write(nu_diag,*) 'Global i and j:', & - this_block%i_glob(istop), & - this_block%j_glob(jstop) - call abort_ice (subname//'ERROR: negative area (ice)') + call diagnostic_abort(istop,jstop,iblk,'negative area (ice)') endif enddo ! n diff --git a/cicecore/cicedynB/general/ice_flux.F90 b/cicecore/cicedynB/general/ice_flux.F90 index 845491d2a..a7e5aa584 100644 --- a/cicecore/cicedynB/general/ice_flux.F90 +++ b/cicecore/cicedynB/general/ice_flux.F90 @@ -311,7 +311,7 @@ module ice_flux mlt_onset, &! day of year that sfc melting begins frz_onset, &! day of year that freezing begins (congel or frazil) frazil_diag ! frazil ice growth diagnostic (m/step-->cm/day) - + real (kind=dbl_kind), & dimension (:,:,:,:), allocatable, public :: & fsurfn, & ! category fsurf @@ -330,7 +330,7 @@ module ice_flux ! ice diagnostics and history files as these are more accurate. ! (The others suffer from problem of incorrect values at grid boxes ! that change from an ice free state to an icy state.) - + real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & fresh_ai, & ! fresh water flux to ocean (kg/m^2/s) fsalt_ai, & ! salt flux to ocean (kg/m^2/s) @@ -344,7 +344,7 @@ module ice_flux real (kind=dbl_kind), dimension (:,:,:,:), allocatable, public :: & fswthrun_ai ! per-category fswthru * ai (W/m^2) - + logical (kind=log_kind), public :: send_i2x_per_cat = .false. !----------------------------------------------------------------- @@ -360,7 +360,7 @@ module ice_flux coszen , & ! cosine solar zenith angle, < 0 for sun below horizon rdg_conv, & ! convergence term for ridging (1/s) rdg_shear ! shear term for ridging (1/s) - + real (kind=dbl_kind), dimension(:,:,:,:), allocatable, public :: & salinz ,& ! initial salinity profile (ppt) Tmltz ! initial melting temperature (^oC) @@ -448,7 +448,8 @@ subroutine alloc_flux Tf (nx_block,ny_block,max_blocks), & ! freezing temperature (C) qdp (nx_block,ny_block,max_blocks), & ! deep ocean heat flux (W/m^2), negative upward hmix (nx_block,ny_block,max_blocks), & ! mixed layer depth (m) - daice_da (nx_block,ny_block,max_blocks), & ! data assimilation concentration increment rate (concentration s-1)(only used in hadgem drivers) + daice_da (nx_block,ny_block,max_blocks), & ! data assimilation concentration increment rate (concentration s-1) + ! (only used in hadgem drivers) fsens (nx_block,ny_block,max_blocks), & ! sensible heat flux (W/m^2) flat (nx_block,ny_block,max_blocks), & ! latent heat flux (W/m^2) fswabs (nx_block,ny_block,max_blocks), & ! shortwave flux absorbed in ice and ocean (W/m^2) @@ -791,7 +792,7 @@ subroutine init_coupler_flux fdon (:,:,:,:)= c0 ffep (:,:,:,:)= c0 ffed (:,:,:,:)= c0 - + allocate(fswthrun_ai(nx_block,ny_block,ncat,max_blocks)) fswthrun_ai(:,:,:,:) = c0 @@ -1278,7 +1279,7 @@ subroutine scale_fluxes (nx_block, ny_block, & ! Scale fluxes for history output if (present(fsurf) .and. present(fcondtop) ) then - + do j = 1, ny_block do i = 1, nx_block if (tmask(i,j) .and. aice(i,j) > c0) then @@ -1291,9 +1292,9 @@ subroutine scale_fluxes (nx_block, ny_block, & endif ! tmask and aice > 0 enddo ! i enddo ! j - + endif ! 
present(fsurf & fcondtop) - + end subroutine scale_fluxes !======================================================================= diff --git a/cicecore/cicedynB/general/ice_flux_bgc.F90 b/cicecore/cicedynB/general/ice_flux_bgc.F90 index 56e644431..0d9184fb7 100644 --- a/cicecore/cicedynB/general/ice_flux_bgc.F90 +++ b/cicecore/cicedynB/general/ice_flux_bgc.F90 @@ -26,13 +26,13 @@ module ice_flux_bgc real (kind=dbl_kind), & ! coupling variable for both tr_aero and tr_zaero dimension (:,:,:,:), allocatable, public :: & fiso_atm, & ! isotope deposition rate (kg/m^2 s) - faero_atm ! aerosol deposition rate (kg/m^2 s) + faero_atm ! aerosol deposition rate (kg/m^2 s) real (kind=dbl_kind), & dimension (:,:,:,:), allocatable, public :: & flux_bio_atm ! all bio fluxes to ice from atmosphere - ! out to ocean + ! out to ocean real (kind=dbl_kind), & dimension (:,:,:,:), allocatable, public :: & @@ -45,8 +45,8 @@ module ice_flux_bgc flux_bio_ai ! all bio fluxes to ocean, averaged over grid cell real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - fzsal_ai, & ! salt flux to ocean from zsalinity (kg/m^2/s) - fzsal_g_ai ! gravity drainage salt flux to ocean (kg/m^2/s) + fzsal_ai, & ! salt flux to ocean from zsalinity (kg/m^2/s) + fzsal_g_ai ! gravity drainage salt flux to ocean (kg/m^2/s) ! internal @@ -58,7 +58,7 @@ module ice_flux_bgc dsnown ! change in snow thickness in category n (m) real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - nit , & ! ocean nitrate (mmol/m^3) + nit , & ! ocean nitrate (mmol/m^3) amm , & ! ammonia/um (mmol/m^3) sil , & ! silicate (mmol/m^3) dmsp , & ! dmsp (mmol/m^3) @@ -85,15 +85,15 @@ module ice_flux_bgc fdon ! ice-ocean don flux (mmol/m^2/s) (proteins and amino acids) real (kind=dbl_kind), dimension (:,:,:,:), allocatable, public :: & - dic , & ! ocean dic (mmol/m^3) - fdic ! ice-ocean dic flux (mmol/m^2/s) + dic , & ! ocean dic (mmol/m^3) + fdic ! ice-ocean dic flux (mmol/m^2/s) real (kind=dbl_kind), dimension (:,:,:,:), allocatable, public :: & - fed, fep , & ! ocean dissolved and particulate fe (nM) - ffed, ffep ! ice-ocean dissolved and particulate fe flux (umol/m^2/s) + fed, fep , & ! ocean dissolved and particulate fe (nM) + ffed, ffep ! ice-ocean dissolved and particulate fe flux (umol/m^2/s) real (kind=dbl_kind), dimension (:,:,:,:), allocatable, public :: & - zaeros ! ocean aerosols (mmol/m^3) + zaeros ! ocean aerosols (mmol/m^3) ! isotopes real (kind=dbl_kind), & ! coupling variable for tr_iso @@ -114,16 +114,16 @@ module ice_flux_bgc !======================================================================= ! -! Allocate space for all variables +! Allocate space for all variables ! subroutine alloc_flux_bgc integer (int_kind) :: ierr allocate( & - fzsal_ai (nx_block,ny_block,max_blocks), & ! salt flux to ocean from zsalinity (kg/m^2/s) - fzsal_g_ai (nx_block,ny_block,max_blocks), & ! gravity drainage salt flux to ocean (kg/m^2/s) - nit (nx_block,ny_block,max_blocks), & ! ocean nitrate (mmol/m^3) + fzsal_ai (nx_block,ny_block,max_blocks), & ! salt flux to ocean from zsalinity (kg/m^2/s) + fzsal_g_ai (nx_block,ny_block,max_blocks), & ! gravity drainage salt flux to ocean (kg/m^2/s) + nit (nx_block,ny_block,max_blocks), & ! ocean nitrate (mmol/m^3) amm (nx_block,ny_block,max_blocks), & ! ammonia/um (mmol/m^3) sil (nx_block,ny_block,max_blocks), & ! silicate (mmol/m^3) dmsp (nx_block,ny_block,max_blocks), & ! dmsp (mmol/m^3) @@ -138,32 +138,32 @@ subroutine alloc_flux_bgc fdust (nx_block,ny_block,max_blocks), & ! 
ice-ocean dust flux (kg/m^2/s), positive to ocean hin_old (nx_block,ny_block,ncat,max_blocks), & ! old ice thickness dsnown (nx_block,ny_block,ncat,max_blocks), & ! change in snow thickness in category n (m) - HDO_ocn (nx_block,ny_block,max_blocks), & ! seawater concentration of HDO (kg/kg) - H2_16O_ocn (nx_block,ny_block,max_blocks), & ! seawater concentration of H2_16O (kg/kg) - H2_18O_ocn (nx_block,ny_block,max_blocks), & ! seawater concentration of H2_18O (kg/kg) - Qa_iso (nx_block,ny_block,icepack_max_iso,max_blocks), & ! isotope specific humidity (kg/kg) - Qref_iso (nx_block,ny_block,icepack_max_iso,max_blocks), & ! 2m atm reference isotope spec humidity (kg/kg) - fiso_atm (nx_block,ny_block,icepack_max_iso,max_blocks), & ! isotope deposition rate (kg/m^2 s) - fiso_evap (nx_block,ny_block,icepack_max_iso,max_blocks), & ! isotope evaporation rate (kg/m^2 s) - fiso_ocn (nx_block,ny_block,icepack_max_iso,max_blocks), & ! isotope flux to ocean (kg/m^2/s) - faero_atm (nx_block,ny_block,icepack_max_aero,max_blocks), & ! aerosol deposition rate (kg/m^2 s) + HDO_ocn (nx_block,ny_block,max_blocks), & ! seawater concentration of HDO (kg/kg) + H2_16O_ocn (nx_block,ny_block,max_blocks), & ! seawater concentration of H2_16O (kg/kg) + H2_18O_ocn (nx_block,ny_block,max_blocks), & ! seawater concentration of H2_18O (kg/kg) + Qa_iso (nx_block,ny_block,icepack_max_iso,max_blocks), & ! isotope specific humidity (kg/kg) + Qref_iso (nx_block,ny_block,icepack_max_iso,max_blocks), & ! 2m atm reference isotope spec humidity (kg/kg) + fiso_atm (nx_block,ny_block,icepack_max_iso,max_blocks), & ! isotope deposition rate (kg/m^2 s) + fiso_evap (nx_block,ny_block,icepack_max_iso,max_blocks), & ! isotope evaporation rate (kg/m^2 s) + fiso_ocn (nx_block,ny_block,icepack_max_iso,max_blocks), & ! isotope flux to ocean (kg/m^2/s) + faero_atm (nx_block,ny_block,icepack_max_aero,max_blocks), & ! aerosol deposition rate (kg/m^2 s) faero_ocn (nx_block,ny_block,icepack_max_aero,max_blocks), & ! aerosol flux to ocean (kg/m^2/s) - zaeros (nx_block,ny_block,icepack_max_aero,max_blocks), & ! ocean aerosols (mmol/m^3) + zaeros (nx_block,ny_block,icepack_max_aero,max_blocks), & ! ocean aerosols (mmol/m^3) flux_bio_atm(nx_block,ny_block,icepack_max_nbtrcr,max_blocks), & ! all bio fluxes to ice from atmosphere flux_bio (nx_block,ny_block,icepack_max_nbtrcr,max_blocks), & ! all bio fluxes to ocean flux_bio_ai (nx_block,ny_block,icepack_max_nbtrcr,max_blocks), & ! all bio fluxes to ocean, averaged over grid cell algalN (nx_block,ny_block,icepack_max_algae,max_blocks), & ! ocean algal nitrogen (mmol/m^3) (diatoms, pico, phaeo) - falgalN (nx_block,ny_block,icepack_max_algae,max_blocks), & ! ice-ocean algal nitrogen flux (mmol/m^2/s) (diatoms, pico, phaeo) + falgalN (nx_block,ny_block,icepack_max_algae,max_blocks), & ! ice-ocn algalN flux (mmol/m^2/s) (diatoms, pico, phaeo) doc (nx_block,ny_block,icepack_max_doc,max_blocks), & ! ocean doc (mmol/m^3) (saccharids, lipids, tbd ) fdoc (nx_block,ny_block,icepack_max_doc,max_blocks), & ! ice-ocean doc flux (mmol/m^2/s) (saccharids, lipids, tbd) don (nx_block,ny_block,icepack_max_don,max_blocks), & ! ocean don (mmol/m^3) (proteins and amino acids) fdon (nx_block,ny_block,icepack_max_don,max_blocks), & ! ice-ocean don flux (mmol/m^2/s) (proteins and amino acids) - dic (nx_block,ny_block,icepack_max_dic,max_blocks), & ! ocean dic (mmol/m^3) - fdic (nx_block,ny_block,icepack_max_dic,max_blocks), & ! ice-ocean dic flux (mmol/m^2/s) - fed (nx_block,ny_block,icepack_max_fe, max_blocks), & ! 
ocean dissolved fe (nM) - fep (nx_block,ny_block,icepack_max_fe, max_blocks), & ! ocean particulate fe (nM) - ffed (nx_block,ny_block,icepack_max_fe, max_blocks), & ! ice-ocean dissolved fe flux (umol/m^2/s) - ffep (nx_block,ny_block,icepack_max_fe, max_blocks), & ! ice-ocean particulate fe flux (umol/m^2/s) + dic (nx_block,ny_block,icepack_max_dic,max_blocks), & ! ocean dic (mmol/m^3) + fdic (nx_block,ny_block,icepack_max_dic,max_blocks), & ! ice-ocean dic flux (mmol/m^2/s) + fed (nx_block,ny_block,icepack_max_fe, max_blocks), & ! ocean dissolved fe (nM) + fep (nx_block,ny_block,icepack_max_fe, max_blocks), & ! ocean particulate fe (nM) + ffed (nx_block,ny_block,icepack_max_fe, max_blocks), & ! ice-ocean dissolved fe flux (umol/m^2/s) + ffep (nx_block,ny_block,icepack_max_fe, max_blocks), & ! ice-ocean particulate fe flux (umol/m^2/s) stat=ierr) if (ierr/=0) call abort_ice('(alloc_flux_bgc): Out of memory') @@ -214,10 +214,10 @@ subroutine bgcflux_ice_to_ocn(nx_block, & ! local variables - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & i,j , & ! horizontal indices k ! tracer index - + logical (kind=log_kind) :: & skl_bgc, solve_zbgc, & tr_bgc_Nit, tr_bgc_N, & @@ -226,14 +226,14 @@ subroutine bgcflux_ice_to_ocn(nx_block, & integer (kind=int_kind) :: & nlt_bgc_Nit, nlt_bgc_Am, & - nlt_bgc_Sil, nlt_bgc_DMSPd, nlt_bgc_DMS, nlt_bgc_hum + nlt_bgc_Sil, nlt_bgc_DMSPd, nlt_bgc_DMS, nlt_bgc_hum integer (kind=int_kind), dimension(icepack_max_algae) :: & nlt_bgc_N, nlt_bgc_C ! algae integer (kind=int_kind), dimension(icepack_max_doc) :: & nlt_bgc_DOC ! disolved organic carbon integer (kind=int_kind), dimension(icepack_max_don) :: & - nlt_bgc_DON ! + nlt_bgc_DON ! integer (kind=int_kind), dimension(icepack_max_dic) :: & nlt_bgc_DIC ! disolved inorganic carbon integer (kind=int_kind), dimension(icepack_max_fe) :: & diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90 index 36dbfe88c..edff03b9f 100755 --- a/cicecore/cicedynB/general/ice_forcing.F90 +++ b/cicecore/cicedynB/general/ice_forcing.F90 @@ -76,7 +76,7 @@ module ice_forcing sst_file, & sss_file, & sublim_file, & - snow_file + snow_file character (char_len_long), dimension(:), allocatable, public :: & ! input data file names topmelt_file, & @@ -106,7 +106,7 @@ module ice_forcing rhoa_data, & flw_data, & sst_data, & - sss_data, & + sss_data, & uocn_data, & vocn_data, & sublim_data, & @@ -116,7 +116,7 @@ module ice_forcing topmelt_data, & botmelt_data - character(char_len), public :: & + character(char_len), public :: & atm_data_format, & ! 'bin'=binary or 'nc'=netcdf ocn_data_format, & ! 'bin'=binary or 'nc'=netcdf atm_data_type, & ! 'default', 'monthly', 'ncar', 'box2001' @@ -132,15 +132,15 @@ module ice_forcing logical (kind=log_kind), public :: & rotate_wind ! rotate wind/stress to computational grid from true north directed - - character(char_len_long), public :: & + + character(char_len_long), public :: & atm_data_dir , & ! top directory for atmospheric data ocn_data_dir , & ! top directory for ocean data wave_spec_dir, & ! dir name for wave spectrum wave_spec_file,& ! file name for wave spectrum oceanmixed_file ! file name for ocean forcing data - integer (kind=int_kind), parameter :: & + integer (kind=int_kind), parameter :: & nfld = 8 ! number of fields to search for in forcing file ! as in the dummy atm (latm) @@ -159,7 +159,7 @@ module ice_forcing integer (kind=int_kind), public :: & trestore ! 
restoring time scale (days) - real (kind=dbl_kind), public :: & + real (kind=dbl_kind), public :: & trest ! restoring time scale (sec) logical (kind=log_kind), public :: & @@ -196,7 +196,7 @@ module ice_forcing !======================================================================= ! -! Allocate space for all variables +! Allocate space for all variables ! subroutine alloc_forcing integer (int_kind) :: ierr @@ -288,7 +288,7 @@ subroutine init_forcing_atmo endif !------------------------------------------------------------------- - ! Get filenames for input forcing data + ! Get filenames for input forcing data !------------------------------------------------------------------- ! default forcing values from init_flux_atm @@ -310,7 +310,7 @@ subroutine init_forcing_atmo call monthly_files(fyear) elseif (trim(atm_data_type) == 'oned') then call oned_files - elseif (trim(atm_data_type) == 'ISPOL') then + elseif (trim(atm_data_type) == 'ISPOL') then call ISPOL_files elseif (trim(atm_data_type) == 'box2001') then call box2001_data_atm @@ -331,7 +331,8 @@ subroutine init_forcing_atmo elseif (trim(atm_data_type) == 'default') then ! don't need to do anything more else - call abort_ice (error_message=subname//' ERROR atm_data_type unknown = '//trim(atm_data_type), file=__FILE__, line=__LINE__) + call abort_ice (error_message=subname//' ERROR atm_data_type unknown = '// & + trim(atm_data_type), file=__FILE__, line=__LINE__) endif end subroutine init_forcing_atmo @@ -340,13 +341,13 @@ end subroutine init_forcing_atmo subroutine init_forcing_ocn(dt) -! Set sea surface salinity and freezing temperature to annual mean value +! Set sea surface salinity and freezing temperature to annual mean value ! using a 12-month climatology. ! Read sst data for current month, and adjust sst based on freezing ! temperature. No interpolation in time. -! Note: SST is subsequently prognosed if CICE is run -! with a mixed layer ocean (oceanmixed_ice = T), and can be +! Note: SST is subsequently prognosed if CICE is run +! with a mixed layer ocean (oceanmixed_ice = T), and can be ! restored to data (restore_ocn = T). use ice_blocks, only: nx_block, ny_block @@ -362,14 +363,14 @@ subroutine init_forcing_ocn(dt) integer (kind=int_kind) :: & i, j, iblk , & ! horizontal indices k , & ! month index - fid , & ! file id for netCDF file + fid , & ! file id for netCDF file nbits logical (kind=log_kind) :: diag real (kind=dbl_kind) :: secday - character (char_len) :: & + character (char_len) :: & fieldname ! field name in netcdf file real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & @@ -488,7 +489,7 @@ subroutine init_forcing_ocn(dt) elseif (trim(ocn_data_type) == 'hadgem_sst' .or. & trim(ocn_data_type) == 'hadgem_sst_uvocn') then - diag = .true. ! write diagnostic information + diag = .true. ! write diagnostic information sst_file = trim (ocn_data_dir)//'/MONTHLY/sst.1997.nc' @@ -500,11 +501,11 @@ subroutine init_forcing_ocn(dt) call ice_open_nc(sst_file,fid) endif - + fieldname='sst' call ice_read_nc(fid,mmonth,fieldname,sst,diag) - if (my_task == master_task) call ice_close_nc(fid) + if (my_task == master_task) call ice_close_nc(fid) ! Make sure sst is not less than freezing temperature Tf !$OMP PARALLEL DO PRIVATE(iblk,i,j) @@ -539,7 +540,8 @@ subroutine init_forcing_ocn(dt) elseif (trim(ocn_data_type) == 'default') then ! 
don't need to do anything more else - call abort_ice (error_message=subname//' ERROR ocn_data_type unknown = '//trim(ocn_data_type), file=__FILE__, line=__LINE__) + call abort_ice (error_message=subname//' ERROR ocn_data_type unknown = '// & + trim(ocn_data_type), file=__FILE__, line=__LINE__) endif end subroutine init_forcing_ocn @@ -694,7 +696,7 @@ subroutine get_forcing_atmo ilo, ihi, jlo, jhi, & hm (:,:,iblk), & Tair (:,:,iblk), & - fsw (:,:,iblk), & + fsw (:,:,iblk), & cldf (:,:,iblk), & flw (:,:,iblk), & frain (:,:,iblk), & @@ -761,10 +763,10 @@ subroutine get_forcing_ocn (dt) call ocn_data_clim(dt) elseif (trim(ocn_data_type) == 'ncar' .or. & trim(ocn_data_type) == 'ISPOL') then - call ocn_data_ncar(dt) + call ocn_data_ncar(dt) elseif (trim(ocn_data_type) == 'hadgem_sst' .or. & trim(ocn_data_type) == 'hadgem_sst_uvocn') then - call ocn_data_hadgem(dt) + call ocn_data_hadgem(dt) elseif (trim(ocn_data_type) == 'oned') then call ocn_data_oned elseif (trim(ocn_data_type) == 'hycom') then @@ -1039,7 +1041,7 @@ subroutine read_data_nc (flag, recd, yr, ixm, ixx, ixp, & arg = 1 nrec = recd + n2 - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(:,:,arg,:), debug_forcing, & field_loc, field_type) @@ -1053,7 +1055,7 @@ subroutine read_data_nc (flag, recd, yr, ixm, ixx, ixp, & arg = arg + 1 nrec = recd + ixx - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(:,:,arg,:), debug_forcing, & field_loc, field_type) @@ -1079,7 +1081,7 @@ subroutine read_data_nc (flag, recd, yr, ixm, ixx, ixp, & arg = arg + 1 nrec = recd + n4 - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(:,:,arg,:), debug_forcing, & field_loc, field_type) endif ! ixp /= -99 @@ -1311,21 +1313,21 @@ subroutine read_clim_data_nc (readflag, recd, ixm, ixx, ixp, & if (ixm /= -99) then arg = 1 nrec = recd + ixm - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(:,:,arg,:), & debug_forcing, field_loc, field_type) endif arg = arg + 1 nrec = recd + ixx - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(:,:,arg,:), & debug_forcing, field_loc, field_type) if (ixp /= -99) then arg = arg + 1 nrec = recd + ixp - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(:,:,arg,:), & debug_forcing, field_loc, field_type) endif @@ -1449,7 +1451,7 @@ subroutine interp_coeff (recnum, recslot, secint, dataloc) else ! recslot = 1 if (dataloc==1) then ! data located at middle of interval t1 = (rcnum-p5)*secint - else + else t1 = rcnum*secint ! data located at end of interval endif t2 = t1 + secint ! + 1 interval @@ -1574,7 +1576,7 @@ end subroutine file_year subroutine prepare_forcing (nx_block, ny_block, & ilo, ihi, jlo, jhi, & hm, & - Tair, fsw, & + Tair, fsw, & cldf, flw, & frain, fsnow, & Qa, rhoa, & @@ -1597,7 +1599,7 @@ subroutine prepare_forcing (nx_block, ny_block, & sst , & ! sea surface temperature aice , & ! ice area fraction hm ! land mask - + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(inout) :: & fsw , & ! incoming shortwave radiation (W/m^2) cldf , & ! cloud fraction @@ -1654,7 +1656,7 @@ subroutine prepare_forcing (nx_block, ny_block, & rhoa (i,j) = max(rhoa(i,j),c0) Qa (i,j) = max(Qa(i,j),c0) -! if (rhoa(i,j) .lt. puny) rhoa(i,j) = 1.3_dbl_kind +! if (rhoa(i,j) .lt. puny) rhoa(i,j) = 1.3_dbl_kind ! if (Tair(i,j) .lt. puny) Tair(i,j) = Tffresh ! if (Qa(i,j) .lt. puny) Qa(i,j) = 0.0035_dbl_kind enddo ! 
i @@ -1699,12 +1701,12 @@ subroutine prepare_forcing (nx_block, ny_block, & enddo enddo #endif - elseif (trim(atm_data_type) == 'oned') then ! rectangular grid + elseif (trim(atm_data_type) == 'oned') then ! rectangular grid ! precip is in kg/m^2/s zlvl0 = c10 - + do j = jlo, jhi do i = ilo, ihi @@ -1736,7 +1738,7 @@ subroutine prepare_forcing (nx_block, ny_block, & elseif (trim(precip_units) == 'mm_per_day') then precip_factor = c1/secday elseif (trim(precip_units) == 'mm_per_sec' .or. & - trim(precip_units) == 'mks') then + trim(precip_units) == 'mks') then precip_factor = c1 ! mm/sec = kg/m^2 s elseif (trim(precip_units) == 'm_per_sec') then precip_factor = c1000 @@ -1753,20 +1755,20 @@ subroutine prepare_forcing (nx_block, ny_block, & swvdf(i,j) = fsw(i,j)*frcvdf ! visible diffuse swidr(i,j) = fsw(i,j)*frcidr ! near IR direct swidf(i,j) = fsw(i,j)*frcidf ! near IR diffuse - + ! convert precipitation units to kg/m^2 s fsnow(i,j) = fsnow(i,j) * precip_factor enddo ! i enddo ! j ! determine whether precip is rain or snow - ! HadGEM forcing provides separate snowfall and rainfall rather + ! HadGEM forcing provides separate snowfall and rainfall rather ! than total precipitation if (trim(atm_data_type) /= 'hadgem') then do j = jlo, jhi do i = ilo, ihi - frain(i,j) = c0 + frain(i,j) = c0 if (Tair(i,j) >= Tffresh) then frain(i,j) = fsnow(i,j) fsnow(i,j) = c0 @@ -1789,8 +1791,8 @@ subroutine prepare_forcing (nx_block, ny_block, & ! then interpolate to the U-cell centers (otherwise we ! interpolate across the pole). ! Use ANGLET which is on the T grid ! - ! Atmo variables are needed in T cell centers in subroutine - ! atmo_boundary_layer, and are interpolated to the U grid later as + ! Atmo variables are needed in T cell centers in subroutine + ! atmo_boundary_layer, and are interpolated to the U grid later as ! necessary. !----------------------------------------------------------------- workx = uatm(i,j) ! wind velocity, m/s @@ -1838,12 +1840,12 @@ subroutine longwave_parkinson_washington(Tair, cldf, flw) ! (for now) ! Parkinson, C. L. and W. M. Washington (1979), ! Large-scale numerical-model of sea ice, - ! JGR, 84, 311-337, doi:10.1029/JC084iC01p00311 + ! JGR, 84, 311-337, doi:10.1029/JC084iC01p00311 real(kind=dbl_kind), intent(in) :: & Tair , & ! air temperature (K) cldf ! cloud fraction - + real(kind=dbl_kind), intent(out) :: & flw ! incoming longwave radiation (W/m^2) @@ -1859,12 +1861,12 @@ subroutine longwave_parkinson_washington(Tair, cldf, flw) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - + flw = stefan_boltzmann*Tair**4 & * (c1 - 0.261_dbl_kind & * exp(-7.77e-4_dbl_kind*(Tffresh - Tair)**2)) & * (c1 + 0.275_dbl_kind*cldf) - + end subroutine longwave_parkinson_washington !======================================================================= @@ -1874,11 +1876,11 @@ subroutine longwave_rosati_miyakoda(cldf, Tsfc, & Qa, Tair, & hm, flw) - ! based on - ! Rosati, A. and K. Miyakoda (1988), - ! A general-circulation model for upper ocean simulation, - ! J. Physical Oceanography, 18, 1601-1626, - ! doi:10.1175/1520-0485(1988)018<1601:AGCMFU>2.0.CO;2 + ! based on + ! Rosati, A. and K. Miyakoda (1988), + ! A general-circulation model for upper ocean simulation, + ! J. Physical Oceanography, 18, 1601-1626, + ! doi:10.1175/1520-0485(1988)018<1601:AGCMFU>2.0.CO;2 real(kind=dbl_kind), intent(in) :: & cldf , & ! 
cloud fraction @@ -1897,7 +1899,7 @@ subroutine longwave_rosati_miyakoda(cldf, Tsfc, & sstk , & ! ice/ocean surface temperature (K) rtea , & ! square root of the vapour pressure ptem , & ! potential air temperature (K) - qlwm + qlwm real(kind=dbl_kind) :: & Tffresh, stefan_boltzmann, emissivity @@ -1924,7 +1926,7 @@ subroutine longwave_rosati_miyakoda(cldf, Tsfc, & + c4*(sstk-ptem) ) flw = emissivity*stefan_boltzmann * ( sstk**4 - qlwm ) flw = flw * hm ! land mask - + end subroutine longwave_rosati_miyakoda !======================================================================= @@ -2068,7 +2070,7 @@ subroutine ncar_data else call abort_ice (error_message=subname//'nonbinary atm_data_format unavailable', & file=__FILE__, line=__LINE__) -! The routine exists, for example: +! The routine exists, for example: ! call read_data_nc (readm, 0, fyear, ixm, mmonth, ixp, & ! maxrec, fsw_file, 'fsw', fsw_data, & ! field_loc_center, field_type_scalar) @@ -2197,7 +2199,7 @@ subroutine LY_files (yr) if (my_task == master_task) then write (nu_diag,*) ' ' - write (nu_diag,*) 'Forcing data year = ', fyear + write (nu_diag,*) 'Forcing data year = ', fyear write (nu_diag,*) 'Atmospheric data files:' write (nu_diag,*) trim(flw_file) write (nu_diag,*) trim(rain_file) @@ -2287,7 +2289,7 @@ subroutine LY_data use ice_grid, only: hm, tlon, tlat, tmask, umask use ice_state, only: aice - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & i, j , & ixm,ixx,ixp , & ! record numbers for neighboring months recnum , & ! record number @@ -2321,9 +2323,9 @@ subroutine LY_data file=__FILE__, line=__LINE__) !------------------------------------------------------------------- - ! monthly data + ! monthly data ! - ! Assume that monthly data values are located in the middle of the + ! Assume that monthly data values are located in the middle of the ! month. !------------------------------------------------------------------- @@ -2348,7 +2350,7 @@ subroutine LY_data ! Find interpolation coefficients call interp_coeff_monthly (recslot) - ! Read 2 monthly values + ! Read 2 monthly values readm = .false. if (istep==1 .or. (mday==midmonth .and. msec==0)) readm = .true. @@ -2362,7 +2364,7 @@ subroutine LY_data !------------------------------------------------------------------- ! 6-hourly data - ! + ! ! Assume that the 6-hourly value is located at the end of the ! 6-hour period. This is the convention for NCEP reanalysis data. ! E.g. record 1 gives conditions at 6 am GMT on 1 January. 
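! For reference, a minimal sketch of the record arithmetic that this
! end-of-interval convention implies (illustrative only -- it mirrors the
! 4X-daily handling shown in ISPOL_data further below; the code in
! LY_data itself is authoritative):
!
!   sec6hr  = secday/c4                        ! seconds in 6 hours
!   maxrec  = 1460                             ! 365 days * 4 records/day
!   recnum  = 4*int(yday) - 3 + int(real(msec,kind=dbl_kind)/sec6hr)
!   dataloc = 2                                ! data valid at end of interval
!   recslot = 2
!   call interp_coeff (recnum, recslot, sec6hr, dataloc)
!
! e.g. at 03:00 GMT on 1 January (yday = 1, msec = 10800 < sec6hr) this
! gives recnum = 1, the record valid at 6 am GMT on 1 January, matching
! the NCEP convention described above.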
@@ -2464,29 +2466,29 @@ subroutine LY_data if (debug_forcing) then if (my_task == master_task) write (nu_diag,*) 'LY_bulk_data' vmin = global_minval(fsw,distrb_info,tmask) - + vmax = global_maxval(fsw,distrb_info,tmask) if (my_task.eq.master_task) & - write (nu_diag,*) 'fsw',vmin,vmax + write (nu_diag,*) 'fsw',vmin,vmax vmin = global_minval(cldf,distrb_info,tmask) vmax = global_maxval(cldf,distrb_info,tmask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'cldf',vmin,vmax vmin =global_minval(fsnow,distrb_info,tmask) vmax =global_maxval(fsnow,distrb_info,tmask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'fsnow',vmin,vmax vmin = global_minval(Tair,distrb_info,tmask) vmax = global_maxval(Tair,distrb_info,tmask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'Tair',vmin,vmax vmin = global_minval(uatm,distrb_info,umask) vmax = global_maxval(uatm,distrb_info,umask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'uatm',vmin,vmax vmin = global_minval(vatm,distrb_info,umask) vmax = global_maxval(vatm,distrb_info,umask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'vatm',vmin,vmax vmin = global_minval(Qa,distrb_info,tmask) vmax = global_maxval(Qa,distrb_info,tmask) @@ -2503,9 +2505,9 @@ subroutine JRA55_data use ice_blocks, only: block, get_block use ice_global_reductions, only: global_minval, global_maxval - use ice_domain, only: nblocks, distrb_info, blocks_ice + use ice_domain, only: nblocks, distrb_info use ice_flux, only: fsnow, Tair, uatm, vatm, Qa, fsw, flw - use ice_grid, only: hm, tlon, tlat, tmask, umask + use ice_grid, only: hm, tmask, umask use ice_state, only: aice use ice_calendar, only: days_per_year @@ -2782,7 +2784,7 @@ subroutine compute_shortwave(nx_block, ny_block, & secday , & pi , & lontmp , & - deg2rad + deg2rad integer (kind=int_kind) :: & i, j @@ -2823,7 +2825,7 @@ subroutine compute_shortwave(nx_block, ny_block, & sw0 = max(sw0,c0) ! total downward shortwave for cice - Fsw(i,j) = sw0*(c1-p6*cldf(i,j)**3) + Fsw(i,j) = sw0*(c1-p6*cldf(i,j)**3) Fsw(i,j) = Fsw(i,j)*hm(i,j) enddo enddo @@ -2865,7 +2867,7 @@ subroutine Qa_fixLY(nx_block, ny_block, Tair, Qa) /(c1 + 0.00412_dbl_kind*worka) & ! 2+ converts ea mb -> Pa + 0.00422_dbl_kind*worka ! for ice ! vapor pressure - worka = (c10**worka) ! saturated + worka = (c10**worka) ! saturated worka = max(worka,puny) ! puny over land to prevent division by zero ! specific humidity worka = 0.622_dbl_kind*worka/(1.e5_dbl_kind-0.378_dbl_kind*worka) @@ -2981,13 +2983,13 @@ subroutine hadgem_files (yr) endif ! calc_strair ! -------------------------------------------------------------- - ! Atmosphere properties. Even if these fields are not + ! Atmosphere properties. Even if these fields are not ! being used to force the ice (i.e. calc_Tsfc=.false.), they ! are still needed to generate forcing for mixed layer model or ! to calculate wind stress ! -------------------------------------------------------------- - if (calc_Tsfc .or. oceanmixed_ice .or. calc_strair) then + if (calc_Tsfc .or. oceanmixed_ice .or. calc_strair) then fsw_file = & trim(atm_data_dir)//'/MONTHLY/SW_incoming.1996.nc' @@ -3032,14 +3034,14 @@ subroutine hadgem_files (yr) trim(atm_data_dir)//'/MONTHLY/topmeltn',n,'.1996.nc' call file_year(topmelt_file(n),yr) - ! 'botmelt' = fcondtop. + ! 'botmelt' = fcondtop. 
write(botmelt_file(n), '(a,i1,a)') & trim(atm_data_dir)//'/MONTHLY/botmeltn',n,'.1996.nc' call file_year(botmelt_file(n),yr) enddo - ! 'sublim' = - flat / Lsub. + ! 'sublim' = - flat / Lsub. sublim_file = & trim(atm_data_dir)//'/MONTHLY/sublim.1996.nc' call file_year(sublim_file,yr) @@ -3085,7 +3087,7 @@ subroutine hadgem_data botmelt, & sublim - character (char_len) :: & + character (char_len) :: & fieldname ! field name in netcdf file real (kind=dbl_kind) :: & @@ -3212,15 +3214,15 @@ subroutine hadgem_data endif ! calc_strair ! ----------------------------------------------------------- - ! SW incoming, LW incoming, air temperature, density and - ! humidity at 10m. + ! SW incoming, LW incoming, air temperature, density and + ! humidity at 10m. ! - ! Even if these fields are not being used to force the ice - ! (i.e. calc_Tsfc=.false.), they are still needed to generate + ! Even if these fields are not being used to force the ice + ! (i.e. calc_Tsfc=.false.), they are still needed to generate ! forcing for mixed layer model or to calculate wind stress ! ----------------------------------------------------------- - if (calc_Tsfc .or. oceanmixed_ice .or. calc_strair) then + if (calc_Tsfc .or. oceanmixed_ice .or. calc_strair) then fieldname='SW_incoming' call read_data_nc (readm, 0, fyear, ixm, mmonth, ixp, & @@ -3287,7 +3289,7 @@ subroutine hadgem_data ! botmelt = fcondtop (as zero layer) ! ! Convert UM sublimation data into CICE LH flux - ! (sublim = - flatn / Lsub) and have same value for all + ! (sublim = - flatn / Lsub) and have same value for all ! categories !-------------------------------------------------------- @@ -3296,7 +3298,7 @@ subroutine hadgem_data do j = 1, ny_block do i = 1, nx_block fcondtopn_f(i,j,n,iblk) = botmelt(i,j,iblk) - fsurfn_f(i,j,n,iblk) = topmelt(i,j,iblk) & + fsurfn_f(i,j,n,iblk) = topmelt(i,j,iblk) & + botmelt(i,j,iblk) flatn_f(i,j,n,iblk) = - sublim(i,j,iblk)*Lsub enddo @@ -3306,12 +3308,12 @@ subroutine hadgem_data enddo ! ncat - endif ! .not. calc_Tsfc + endif ! .not. calc_Tsfc end subroutine hadgem_data !======================================================================= -! monthly forcing +! monthly forcing !======================================================================= subroutine monthly_files (yr) @@ -3359,7 +3361,7 @@ subroutine monthly_files (yr) if (my_task == master_task) then write (nu_diag,*) ' ' - write (nu_diag,*) 'Forcing data year = ', fyear + write (nu_diag,*) 'Forcing data year = ', fyear write (nu_diag,*) 'Atmospheric data files:' write (nu_diag,*) trim(flw_file) write (nu_diag,*) trim(rain_file) @@ -3382,7 +3384,7 @@ subroutine monthly_data use ice_flux, only: fsnow, Tair, Qa, wind, strax, stray, fsw use ice_grid, only: hm, tlon, tlat, tmask, umask - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & i, j , & ixm,ixp , & ! record numbers for neighboring months maxrec , & ! maximum record number @@ -3398,15 +3400,15 @@ subroutine monthly_data type (block) :: & this_block ! block information for current block - + character(len=*), parameter :: subname = '(monthly_data)' if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' !------------------------------------------------------------------- - ! monthly data + ! monthly data ! - ! Assume that monthly data values are located in the middle of the + ! Assume that monthly data values are located in the middle of the ! month. !------------------------------------------------------------------- @@ -3431,7 +3433,7 @@ subroutine monthly_data ! 
Find interpolation coefficients call interp_coeff_monthly (recslot) - ! Read 2 monthly values + ! Read 2 monthly values readm = .false. if (istep==1 .or. (mday==midmonth .and. msec==0)) readm = .true. @@ -3505,30 +3507,30 @@ subroutine monthly_data vmin = global_minval(fsw,distrb_info,tmask) vmax = global_maxval(fsw,distrb_info,tmask) if (my_task.eq.master_task) & - write (nu_diag,*) 'fsw',vmin,vmax + write (nu_diag,*) 'fsw',vmin,vmax vmin = global_minval(cldf,distrb_info,tmask) vmax = global_maxval(cldf,distrb_info,tmask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'cldf',vmin,vmax vmin =global_minval(fsnow,distrb_info,tmask) vmax =global_maxval(fsnow,distrb_info,tmask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'fsnow',vmin,vmax vmin = global_minval(Tair,distrb_info,tmask) vmax = global_maxval(Tair,distrb_info,tmask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'Tair',vmin,vmax vmin = global_minval(wind,distrb_info,umask) vmax = global_maxval(wind,distrb_info,umask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'wind',vmin,vmax vmin = global_minval(strax,distrb_info,umask) vmax = global_maxval(strax,distrb_info,umask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'strax',vmin,vmax vmin = global_minval(stray,distrb_info,umask) vmax = global_maxval(stray,distrb_info,umask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'stray',vmin,vmax vmin = global_minval(Qa,distrb_info,tmask) vmax = global_maxval(Qa,distrb_info,tmask) @@ -3549,7 +3551,7 @@ subroutine oned_data ! local parameters - character (char_len_long) :: & + character (char_len_long) :: & met_file, & ! netcdf filename fieldname ! field name in netcdf file @@ -3570,79 +3572,79 @@ subroutine oned_data Psat , & ! saturation vapour pressure (hPa) ws ! saturation mixing ratio - real (kind=dbl_kind), parameter :: & ! coefficients for Hyland-Wexler Qa - ps1 = 0.58002206e4_dbl_kind, & ! (K) + real (kind=dbl_kind), parameter :: & ! coefficients for Hyland-Wexler Qa + ps1 = 0.58002206e4_dbl_kind, & ! (K) ps2 = 1.3914993_dbl_kind, & ! - ps3 = 0.48640239e-1_dbl_kind, & ! (K^-1) + ps3 = 0.48640239e-1_dbl_kind, & ! (K^-1) ps4 = 0.41764768e-4_dbl_kind, & ! (K^-2) ps5 = 0.14452093e-7_dbl_kind, & ! (K^-3) ps6 = 6.5459673_dbl_kind, & ! - ws1 = 621.97_dbl_kind, & ! for saturation mixing ratio - Pair = 1020._dbl_kind ! Sea level pressure (hPa) - + ws1 = 621.97_dbl_kind, & ! for saturation mixing ratio + Pair = 1020._dbl_kind ! Sea level pressure (hPa) + character(len=*), parameter :: subname = '(oned_data)' if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' - diag = .false. ! write diagnostic information - + diag = .false. ! write diagnostic information + if (trim(atm_data_format) == 'nc') then ! read nc file - ! hourly data beginning Jan 1, 1989, 01:00 + ! hourly data beginning Jan 1, 1989, 01:00 ! HARDWIRED for dt = 1 hour! 
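! Because the file holds one record per hour and dt is hardwired to
! 3600 s, the step counter istep1 doubles as the netCDF record number in
! the ice_read_nc calls below.  A sketch of what a generalization to
! other time steps could look like (nrec is a hypothetical local, not
! part of this code; timesecs is from ice_calendar):
!
!   nrec = int(timesecs/3600._dbl_kind)        ! whole hours since run start
!   call ice_read_nc(fid, nrec, fieldname, work, diag)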
met_file = uwind_file call ice_open_nc(met_file,fid) - fieldname='Uatm' - call ice_read_nc(fid,istep1,fieldname,work,diag) + fieldname='Uatm' + call ice_read_nc(fid,istep1,fieldname,work,diag) uatm(:,:,:) = work - fieldname='Vatm' - call ice_read_nc(fid,istep1,fieldname,work,diag) + fieldname='Vatm' + call ice_read_nc(fid,istep1,fieldname,work,diag) vatm(:,:,:) = work - fieldname='Tair' - call ice_read_nc(fid,istep1,fieldname,work,diag) + fieldname='Tair' + call ice_read_nc(fid,istep1,fieldname,work,diag) Temp = work - Tair(:,:,:) = Temp + Tair(:,:,:) = Temp call ice_close_nc(fid) - ! hourly solar data beginning Jan 1, 1989, 01:00 + ! hourly solar data beginning Jan 1, 1989, 01:00 met_file = fsw_file call ice_open_nc(met_file,fid) - fieldname='fsw' - call ice_read_nc(fid,istep1,fieldname,work,diag) + fieldname='fsw' + call ice_read_nc(fid,istep1,fieldname,work,diag) fsw(:,:,:) = work call ice_close_nc(fid) - ! hourly interpolated monthly data beginning Jan 1, 1989, 01:00 + ! hourly interpolated monthly data beginning Jan 1, 1989, 01:00 met_file = humid_file call ice_open_nc(met_file,fid) - fieldname='rh' - call ice_read_nc(fid,istep1,fieldname,work,diag) + fieldname='rh' + call ice_read_nc(fid,istep1,fieldname,work,diag) rh = work - - fieldname='fsnow' - call ice_read_nc(fid,istep1,fieldname,work,diag) + + fieldname='fsnow' + call ice_read_nc(fid,istep1,fieldname,work,diag) fsnow(:,:,:) = work call ice_close_nc(fid) !------------------------------------------------------------------- ! Find specific humidity using Hyland-Wexler formulation - ! Hyland, R.W. and A. Wexler, Formulations for the Thermodynamic - ! Properties of the saturated phases of H20 from 173.15K to 473.15K, + ! Hyland, R.W. and A. Wexler, Formulations for the Thermodynamic + ! Properties of the saturated phases of H20 from 173.15K to 473.15K, ! ASHRAE Trans, 89(2A), 500-519, 1983 !------------------------------------------------------------------- - - Psat = exp(-ps1/Temp + ps2 - ps3*Temp + ps4*Temp**2 - ps5 * Temp**3 & + + Psat = exp(-ps1/Temp + ps2 - ps3*Temp + ps4*Temp**2 - ps5 * Temp**3 & + ps6 * log(Temp))*p01 ! saturation vapour pressure ws = ws1 * Psat/(Pair - Psat) ! saturation mixing ratio - Qa(:,:,:) = rh * ws * p01/(c1 + rh * ws * p01) * p001 + Qa(:,:,:) = rh * ws * p01/(c1 + rh * ws * p01) * p001 ! specific humidity (kg/kg) endif ! atm_data_format @@ -3650,7 +3652,7 @@ subroutine oned_data rhoa (:,:,:) = 1.3_dbl_kind ! air density (kg/m^3) cldf (:,:,:) = p25 ! cloud fraction frain(:,:,:) = c0 ! this is available in hourlymet_rh file - + end subroutine oned_data !======================================================================= @@ -3831,19 +3833,19 @@ end subroutine ocn_data_clim subroutine ocn_data_ncar_init ! Reads NCAR pop ocean forcing data set 'pop_frc_gx1v3_010815.nc' -! +! ! List of ocean forcing fields: Note that order is important! ! (order is determined by field list in vname). -! -! For ocean mixed layer-----------------------------units -! -! 1 sst------temperature---------------------------(C) -! 2 sss------salinity------------------------------(ppt) -! 3 hbl------depth---------------------------------(m) -! 4 u--------surface u current---------------------(m/s) -! 5 v--------surface v current---------------------(m/s) -! 6 dhdx-----surface tilt x direction--------------(m/m) -! 7 dhdy-----surface tilt y direction--------------(m/m) +! +! For ocean mixed layer-----------------------------units +! +! 1 sst------temperature---------------------------(C) +! 
2 sss------salinity------------------------------(ppt) +! 3 hbl------depth---------------------------------(m) +! 4 u--------surface u current---------------------(m/s) +! 5 v--------surface v current---------------------(m/s) +! 6 dhdx-----surface tilt x direction--------------(m/m) +! 7 dhdy-----surface tilt y direction--------------(m/m) ! 8 qdp------ocean sub-mixed layer heat flux-------(W/m2) ! ! Fields 4, 5, 6, 7 are on the U-grid; 1, 2, 3, and 8 are @@ -3858,7 +3860,7 @@ subroutine ocn_data_ncar_init use netcdf #endif - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & n , & ! field index m , & ! month index nrec, & ! record number for direct access @@ -3870,12 +3872,10 @@ subroutine ocn_data_ncar_init 'T', 'S', 'hblt', 'U', 'V', & 'dhdx', 'dhdy', 'qdp' / - integer (kind=int_kind) :: & - fid , & ! file id - dimid ! dimension id - integer (kind=int_kind) :: & status , & ! status flag + fid , & ! file id + dimid , & ! dimension id nlat , & ! number of longitudes of data nlon ! number of latitudes of data @@ -3894,7 +3894,7 @@ subroutine ocn_data_ncar_init write (nu_diag,*) 'WARNING: Alter ice_dyn_evp.F90 if desired.' if (restore_ocn) write (nu_diag,*) & - 'SST restoring timescale = ',trestore,' days' + 'SST restoring timescale = ',trestore,' days' sst_file = trim(ocn_data_dir)//'/'//trim(oceanmixed_file) ! not just sst @@ -3914,7 +3914,7 @@ subroutine ocn_data_ncar_init ! status = nf90_inq_dimid(fid,'nlon',dimid) status = nf90_inq_dimid(fid,'ni',dimid) status = nf90_inquire_dimension(fid,dimid,len=nlon) - + ! status = nf90_inq_dimid(fid,'nlat',dimid) status = nf90_inq_dimid(fid,'nj',dimid) status = nf90_inquire_dimension(fid,dimid,len=nlat) @@ -3933,7 +3933,7 @@ subroutine ocn_data_ncar_init ! Read in ocean forcing data for all 12 months do n=1,nfld do m=1,12 - + ! Note: netCDF does single to double conversion if necessary ! if (n >= 4 .and. n <= 7) then ! call ice_read_nc(fid, m, vname(n), work1, debug_forcing, & @@ -3989,19 +3989,19 @@ end subroutine ocn_data_ncar_init subroutine ocn_data_ncar_init_3D ! Reads NCAR pop ocean forcing data set 'oceanmixed_ice_depth.nc' -! +! ! List of ocean forcing fields: Note that order is important! ! (order is determined by field list in vname). -! -! For ocean mixed layer-----------------------------units -! -! 1 sst------temperature---------------------------(C) -! 2 sss------salinity------------------------------(ppt) -! 3 hbl------depth---------------------------------(m) -! 4 u--------surface u current---------------------(m/s) -! 5 v--------surface v current---------------------(m/s) -! 6 dhdx-----surface tilt x direction--------------(m/m) -! 7 dhdy-----surface tilt y direction--------------(m/m) +! +! For ocean mixed layer-----------------------------units +! +! 1 sst------temperature---------------------------(C) +! 2 sss------salinity------------------------------(ppt) +! 3 hbl------depth---------------------------------(m) +! 4 u--------surface u current---------------------(m/s) +! 5 v--------surface v current---------------------(m/s) +! 6 dhdx-----surface tilt x direction--------------(m/m) +! 7 dhdy-----surface tilt y direction--------------(m/m) ! 8 qdp------ocean sub-mixed layer heat flux-------(W/m2) ! ! All fields are on the T-grid. @@ -4018,7 +4018,7 @@ subroutine ocn_data_ncar_init_3D #endif #ifdef USE_NETCDF - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & n , & ! field index m , & ! month index nzlev ! 
z level of currents @@ -4030,8 +4030,8 @@ subroutine ocn_data_ncar_init_3D 'dhdx', 'dhdy', 'qdp' / integer (kind=int_kind) :: & - fid , & ! file id - dimid ! dimension id + fid , & ! file id + dimid ! dimension id integer (kind=int_kind) :: & status , & ! status flag @@ -4054,7 +4054,7 @@ subroutine ocn_data_ncar_init_3D write (nu_diag,*) 'WARNING: Alter ice_dyn_evp.F if desired.' if (restore_ocn) write (nu_diag,*) & - 'SST restoring timescale = ',trestore,' days' + 'SST restoring timescale = ',trestore,' days' sst_file = trim(ocn_data_dir)//'/'//trim(oceanmixed_file) ! not just sst @@ -4075,7 +4075,7 @@ subroutine ocn_data_ncar_init_3D ! status = nf90_inq_dimid(fid,'nlon',dimid) status = nf90_inq_dimid(fid,'ni',dimid) status = nf90_inquire_dimension(fid,dimid,len=nlon) - + ! status = nf90_inq_dimid(fid,'nlat',dimid) status = nf90_inq_dimid(fid,'nj',dimid) status = nf90_inquire_dimension(fid,dimid,len=nlat) @@ -4094,7 +4094,7 @@ subroutine ocn_data_ncar_init_3D ! Read in ocean forcing data for all 12 months do n=1,nfld do m=1,12 - + ! Note: netCDF does single to double conversion if necessary if (n == 4 .or. n == 5) then ! 3D currents nzlev = 1 ! surface currents @@ -4105,7 +4105,7 @@ subroutine ocn_data_ncar_init_3D field_loc_center, field_type_scalar) endif - ! the land mask used in ocean_mixed_depth.nc does not + ! the land mask used in ocean_mixed_depth.nc does not ! match our gx1v3 mask (hm) where (work1(:,:,:) < -900.) work1(:,:,:) = c0 @@ -4168,7 +4168,7 @@ subroutine ocn_data_ncar(dt) real (kind=dbl_kind), intent(in) :: & dt ! time step - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & i, j, n, iblk , & ixm,ixp , & ! record numbers for neighboring months maxrec , & ! maximum record number @@ -4186,12 +4186,12 @@ subroutine ocn_data_ncar(dt) if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' !------------------------------------------------------------------- - ! monthly data + ! monthly data ! - ! Assume that monthly data values are located in the middle of the + ! Assume that monthly data values are located in the middle of the ! month. !------------------------------------------------------------------- - + midmonth = 15 ! data is given on 15th of every month ! midmonth = fix(p5 * real(daymo(mmonth),kind=dbl_kind)) ! exact middle @@ -4228,8 +4228,8 @@ subroutine ocn_data_ncar(dt) call interpolate_data (sst_data,work1) ! masking by hm is necessary due to NaNs in the data file - do j = 1, ny_block - do i = 1, nx_block + do j = 1, ny_block + do i = 1, nx_block if (n == 2) sss (i,j,:) = c0 if (n == 3) hmix (i,j,:) = c0 if (n == 4) uocn (i,j,:) = c0 @@ -4252,21 +4252,21 @@ subroutine ocn_data_ncar(dt) enddo enddo - do j = 1, ny_block - do i = 1, nx_block - sss (i,j,:) = max (sss(i,j,:), c0) - hmix(i,j,:) = max(hmix(i,j,:), c0) - enddo - enddo + do j = 1, ny_block + do i = 1, nx_block + sss (i,j,:) = max (sss(i,j,:), c0) + hmix(i,j,:) = max(hmix(i,j,:), c0) + enddo + enddo call ocn_freezing_temperature if (restore_ocn) then - do j = 1, ny_block - do i = 1, nx_block - sst(i,j,:) = sst(i,j,:) + (work1(i,j,:)-sst(i,j,:))*dt/trest - enddo - enddo + do j = 1, ny_block + do i = 1, nx_block + sst(i,j,:) = sst(i,j,:) + (work1(i,j,:)-sst(i,j,:))*dt/trest + enddo + enddo ! 
else sst is only updated in ice_ocean.F endif @@ -4275,16 +4275,16 @@ subroutine ocn_data_ncar(dt) call interpolate_data (sst_data,sst) !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block + do j = 1, ny_block + do i = 1, nx_block if (hm(i,j,iblk) == c1) then - sst(i,j,iblk) = max (sst(i,j,iblk), Tf(i,j,iblk)) + sst(i,j,iblk) = max (sst(i,j,iblk), Tf(i,j,iblk)) else sst(i,j,iblk) = c0 endif - enddo - enddo - enddo + enddo + enddo + enddo !$OMP END PARALLEL DO endif @@ -4365,12 +4365,13 @@ subroutine ocn_data_hadgem(dt) ! Reads in HadGEM ocean forcing data as required from netCDF files ! Current options (selected by ocn_data_type) -! hadgem_sst: read in sst only +! hadgem_sst: read in sst only ! hadgem_sst_uvocn: read in sst plus uocn and vocn ! authors: Ann Keen, Met Office use ice_domain, only: nblocks + use ice_domain_size, only: max_blocks use ice_flux, only: sst, uocn, vocn use ice_grid, only: grid_average_X2Y, ANGLET @@ -4387,17 +4388,14 @@ subroutine ocn_data_hadgem(dt) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: & sstdat ! data value toward which SST is restored - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1 ! temporary array - real (kind=dbl_kind) :: workx, worky logical (kind=log_kind) :: readm - character (char_len) :: & + character (char_len) :: & fieldname ! field name in netcdf file - character (char_len_long) :: & + character (char_len_long) :: & filename ! name of netCDF file character(len=*), parameter :: subname = '(ocn_data_hadgem)' @@ -4458,7 +4456,7 @@ subroutine ocn_data_hadgem(dt) call read_data_nc (readm, 0, fyear, ixm, mmonth, ixp, & maxrec, sst_file, fieldname, sst_data, & field_loc_center, field_type_scalar) - + ! Interpolate to current time step call interpolate_data (sst_data, sstdat) @@ -4474,14 +4472,14 @@ subroutine ocn_data_hadgem(dt) enddo enddo !$OMP END PARALLEL DO - endif + endif ! ----------------------------------------------------------- ! Ocean currents ! -------------- - ! Values read in are on T grid and oriented geographically, hence + ! Values read in are on T grid and oriented geographically, hence ! vectors need to be rotated to model grid and then interpolated - ! to U grid. + ! to U grid. ! Also need to be converted from cm s-1 (UM) to m s-1 (CICE) ! ----------------------------------------------------------- @@ -4492,7 +4490,7 @@ subroutine ocn_data_hadgem(dt) call read_data_nc (readm, 0, fyear, ixm, mmonth, ixp, & maxrec, filename, fieldname, uocn_data, & field_loc_center, field_type_vector) - + ! Interpolate to current time step call interpolate_data (uocn_data, uocn) @@ -4501,25 +4499,25 @@ subroutine ocn_data_hadgem(dt) call read_data_nc (readm, 0, fyear, ixm, mmonth, ixp, & maxrec, filename, fieldname, vocn_data, & field_loc_center, field_type_vector) - + ! Interpolate to current time step call interpolate_data (vocn_data, vocn) - !----------------------------------------------------------------- - ! Rotate zonal/meridional vectors to local coordinates, + !----------------------------------------------------------------- + ! Rotate zonal/meridional vectors to local coordinates, ! 
and change units - !----------------------------------------------------------------- + !----------------------------------------------------------------- !$OMP PARALLEL DO PRIVATE(iblk,i,j,workx,worky) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - workx = uocn(i,j,iblk) + workx = uocn(i,j,iblk) worky = vocn(i,j,iblk) - uocn(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & - + worky*sin(ANGLET(i,j,iblk)) - vocn(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) & + uocn(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & + + worky*sin(ANGLET(i,j,iblk)) + vocn(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) & - workx*sin(ANGLET(i,j,iblk)) uocn(i,j,iblk) = uocn(i,j,iblk) * cm_to_m @@ -4530,15 +4528,11 @@ subroutine ocn_data_hadgem(dt) enddo ! nblocks !$OMP END PARALLEL DO - !----------------------------------------------------------------- - ! Interpolate to U grid - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! Interpolate to U grid + !----------------------------------------------------------------- ! tcraig, this is now computed in dynamics for consistency - !work1 = uocn - !call grid_average_X2Y('F',work1,'T',uocn,'U') - !work1 = vocn - !call grid_average_X2Y('F',work1,'T',vocn,'U') endif ! ocn_data_type = hadgem_sst_uvocn @@ -4688,7 +4682,7 @@ subroutine hycom_atm_data call icepack_query_parameters(Tffresh_out=Tffresh) call icepack_query_parameters(secday_out=secday) - ! current time in HYCOM jday units (HYCOM ref year: 1900,12,31,000000) + ! current time in HYCOM jday units (HYCOM ref year: 1900,12,31,000000) hcdate = real(compute_days_between(1900,12,31,myear,mmonth,mday)) + msec/secday ! Init recnum try @@ -4897,13 +4891,13 @@ subroutine read_data_nc_point (flag, recd, yr, ixm, ixx, ixp, & ! write(nu_diag,*) 'ixm, ixx, ixp ', ixm, ixx, ixp ! write(nu_diag,*) 'maxrec ', maxrec ! write(nu_diag,*) 'fieldname ', fieldname - + call ice_open_nc (data_file, fid) arg = 1 nrec = recd + n2 - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(arg), debug_forcing, & field_loc, field_type) @@ -4918,7 +4912,7 @@ subroutine read_data_nc_point (flag, recd, yr, ixm, ixx, ixp, & arg = arg + 1 nrec = recd + ixx - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(arg), debug_forcing, & field_loc, field_type) @@ -4944,7 +4938,7 @@ subroutine read_data_nc_point (flag, recd, yr, ixm, ixx, ixp, & arg = arg + 1 nrec = recd + n4 - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(arg), debug_forcing, & field_loc, field_type) endif ! ixp /= -99 @@ -4966,7 +4960,7 @@ subroutine ISPOL_files if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' fsw_file = & - trim(atm_data_dir)//'/fsw_sfc_4Xdaily.nc' + trim(atm_data_dir)//'/fsw_sfc_4Xdaily.nc' flw_file = & trim(atm_data_dir)//'/flw_sfc_4Xdaily.nc' @@ -4978,10 +4972,10 @@ subroutine ISPOL_files trim(atm_data_dir)//'/uatm_10m_daily.nc' vwind_file = & - trim(atm_data_dir)//'/vatm_10m_daily.nc' + trim(atm_data_dir)//'/vatm_10m_daily.nc' tair_file = & - trim(atm_data_dir)//'/Tair_2m_daily.nc' + trim(atm_data_dir)//'/Tair_2m_daily.nc' humid_file = & trim(atm_data_dir)//'/Qa_2m_daily.nc' @@ -5004,7 +4998,7 @@ end subroutine ISPOL_files subroutine ISPOL_data -! Defines atmospheric data fields for Antarctic Weddell sea location +! Defines atmospheric data fields for Antarctic Weddell sea location ! authors: Nicole Jeffery, LANL ! 
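! For reference, the Hyland-Wexler relations evaluated with the ps1-ps6,
! ws1 and Pair constants declared below (the same formulation and
! constants appear in oned_data above; p01 = 0.01 and p001 = 0.001 are
! unit-conversion factors):
!
!   Psat = p01 * exp( -ps1/T + ps2 - ps3*T + ps4*T**2 - ps5*T**3
!                     + ps6*log(T) )               ! sat. vapour pressure (hPa)
!   ws   = ws1 * Psat / (Pair - Psat)              ! sat. mixing ratio (g/kg)
!   Qa   = p001 * (rh*ws*p01) / (c1 + rh*ws*p01)   ! specific humidity (kg/kg)
!
! with T the 2 m air temperature (K) and rh the relative humidity (%).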
@@ -5013,7 +5007,7 @@ subroutine ISPOL_data !local parameters - character (char_len_long) :: & + character (char_len_long) :: & met_file, & ! netcdf filename fieldname ! field name in netcdf file @@ -5022,19 +5016,19 @@ subroutine ISPOL_data Qa_data_p, fsnow_data_p, & fsw_data_p, flw_data_p, & uatm_data_p, vatm_data_p - - real (kind=dbl_kind), parameter :: & ! coefficients for Hyland-Wexler Qa - ps1 = 0.58002206e4_dbl_kind, & ! (K) + + real (kind=dbl_kind), parameter :: & ! coefficients for Hyland-Wexler Qa + ps1 = 0.58002206e4_dbl_kind, & ! (K) ps2 = 1.3914993_dbl_kind, & ! - ps3 = 0.48640239e-1_dbl_kind, & ! (K^-1) + ps3 = 0.48640239e-1_dbl_kind, & ! (K^-1) ps4 = 0.41764768e-4_dbl_kind, & ! (K^-2) ps5 = 0.14452093e-7_dbl_kind, & ! (K^-3) ps6 = 6.5459673_dbl_kind, & ! - ws1 = 621.97_dbl_kind, & ! for saturation mixing ratio - Pair = 1020._dbl_kind, & ! Sea level pressure (hPa) + ws1 = 621.97_dbl_kind, & ! for saturation mixing ratio + Pair = 1020._dbl_kind, & ! Sea level pressure (hPa) lapse_rate = 0.0065_dbl_kind ! (K/m) lapse rate over sea level - - ! for interpolation of hourly data + + ! for interpolation of hourly data integer (kind=int_kind) :: & ixm,ixx,ixp , & ! record numbers for neighboring months maxrec , & ! maximum record number @@ -5043,7 +5037,7 @@ subroutine ISPOL_data ! = 2 for date located at end of time interval real (kind=dbl_kind) :: & secday , & - Qa_pnt + Qa_pnt real (kind=dbl_kind) :: & sec1hr ! number of seconds in 1 hour @@ -5062,20 +5056,20 @@ subroutine ISPOL_data call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - + if (trim(atm_data_format) == 'nc') then ! read nc file - + !------------------------------------------------------------------- ! data from NCEP_DOE Reanalysis 2 and Bareiss et al 2008 - ! daily data located at the end of the 24-hour period. + ! daily data located at the end of the 24-hour period. !------------------------------------------------------------------- dataloc = 2 ! data located at end of interval sec1hr = secday ! seconds in day - maxrec = 366 ! + maxrec = 366 ! ! current record number - recnum = int(yday) + recnum = int(yday) ! Compute record numbers for surrounding data (2 on each side) ixm = mod(recnum+maxrec-2,maxrec) + 1 @@ -5092,11 +5086,11 @@ subroutine ISPOL_data read1 = .false. if (istep==1 .or. oldrecnum .ne. recnum) read1 = .true. - + ! 
Daily 2m Air temperature 1991 - - met_file = tair_file - fieldname='Tair' + + met_file = tair_file + fieldname='Tair' call read_data_nc_point(read1, 0, fyear, ixm, ixx, ixp, & maxrec, met_file, fieldname, Tair_data_p, & @@ -5106,7 +5100,7 @@ subroutine ISPOL_data + c2intp * Tair_data_p(2) & - lapse_rate*8.0_dbl_kind - met_file = humid_file + met_file = humid_file fieldname='Qa' call read_data_nc_point(read1, 0, fyear, ixm, ixx, ixp, & @@ -5114,7 +5108,7 @@ subroutine ISPOL_data field_loc_center, field_type_scalar) Qa_pnt= c1intp * Qa_data_p(1) & - + c2intp * Qa_data_p(2) + + c2intp * Qa_data_p(2) Qa(:,:,:) = Qa_pnt met_file = uwind_file @@ -5125,19 +5119,19 @@ subroutine ISPOL_data field_loc_center, field_type_scalar) uatm(:,:,:) = c1intp * uatm_data_p(1) & - + c2intp * uatm_data_p(2) + + c2intp * uatm_data_p(2) met_file = vwind_file fieldname='vatm' - + call read_data_nc_point(read1, 0, fyear, ixm, ixx, ixp, & maxrec, met_file, fieldname, vatm_data_p, & field_loc_center, field_type_scalar) vatm(:,:,:) = c1intp * vatm_data_p(1) & + c2intp * vatm_data_p(2) - - met_file = rain_file + + met_file = rain_file fieldname='fsnow' call read_data_nc_point(read1, 0, fyear, ixm, ixx, ixp, & @@ -5145,7 +5139,7 @@ subroutine ISPOL_data field_loc_center, field_type_scalar) fsnow(:,:,:) = (c1intp * fsnow_data_p(1) + & - c2intp * fsnow_data_p(2)) + c2intp * fsnow_data_p(2)) !----------------------------- !fsw and flw are every 6 hours @@ -5155,7 +5149,7 @@ subroutine ISPOL_data maxrec = 1460 ! 366*4 ! current record number - recnum4X = 4*int(yday) - 3 + int(real(msec,kind=dbl_kind)/sec1hr) + recnum4X = 4*int(yday) - 3 + int(real(msec,kind=dbl_kind)/sec1hr) ! Compute record numbers for surrounding data (2 on each side) ixm = mod(recnum4X+maxrec-2,maxrec) + 1 @@ -5183,14 +5177,14 @@ subroutine ISPOL_data + c2intp * fsw_data_p(2) met_file = flw_file - fieldname='flw' + fieldname='flw' call read_data_nc_point(read1, 0, fyear, ixm, ixx, ixp, & maxrec, met_file, fieldname, flw_data_p, & field_loc_center, field_type_scalar) flw(:,:,:) = c1intp * flw_data_p(1) & - + c2intp * flw_data_p(2) + + c2intp * flw_data_p(2) endif !nc !flw given cldf and Tair calculated in prepare_forcing @@ -5202,7 +5196,7 @@ subroutine ISPOL_data rhoa (:,:,:) = 1.3_dbl_kind ! air density (kg/m^3) cldf(:,:,:) = c1 !0.25_dbl_kind ! cloud fraction frain(:,:,:) = c0 ! this is available in hourlymet_rh file - + ! Save record number for next time step oldrecnum = recnum oldrecnum4X = recnum4X @@ -5211,20 +5205,20 @@ end subroutine ISPOL_data !======================================================================= - subroutine ocn_data_ispol_init + subroutine ocn_data_ispol_init ! Reads NCAR pop ocean forcing data set 'pop_frc_gx1v3_010815.nc' ! at the ISPOL location -67.4677N, 310.4375E ! -! For ocean mixed layer-----------------------------units -! -! 1 sst------temperature---------------------------(C) -! 2 sss------salinity------------------------------(ppt) -! 3 hbl------depth---------------------------------(m) -! 4 u--------surface u current---------------------(m/s) +! For ocean mixed layer-----------------------------units +! +! 1 sst------temperature---------------------------(C) +! 2 sss------salinity------------------------------(ppt) +! 3 hbl------depth---------------------------------(m) +! 4 u--------surface u current---------------------(m/s) ! 5 v--------surface v current---------------------(m/s) -! 6 dhdx-----surface tilt x direction--------------(m/m) -! 7 dhdy-----surface tilt y direction--------------(m/m) +! 
6 dhdx-----surface tilt x direction--------------(m/m) +! 7 dhdy-----surface tilt y direction--------------(m/m) ! 8 qdp------ocean sub-mixed layer heat flux-------(W/m2) ! ! Fields 4, 5, 6, 7 are on the U-grid; 1, 2, 3, and 8 are @@ -5235,7 +5229,7 @@ subroutine ocn_data_ispol_init use ice_gather_scatter use ice_read_write - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & n , & ! field index m ! month index @@ -5246,13 +5240,10 @@ subroutine ocn_data_ispol_init 'dhdx', 'dhdy', 'qdp' / real (kind=dbl_kind) :: & - work - - integer (kind=int_kind) :: & - fid ! file id + work integer (kind=int_kind) :: & - status ! status flag + fid ! file id character(len=*), parameter :: subname = '(ocn_data_ispol_init)' @@ -5261,7 +5252,7 @@ subroutine ocn_data_ispol_init if (my_task == master_task) then if (restore_ocn) write (nu_diag,*) & - 'SST restoring timescale = ',trestore,' days' + 'SST restoring timescale = ',trestore,' days' sst_file = trim(ocn_data_dir)//'/'//trim(oceanmixed_file) ! not just sst @@ -5280,14 +5271,14 @@ subroutine ocn_data_ispol_init ! Read in ocean forcing data for all 12 months do n=1,nfld - do m=1,12 + do m=1,12 ! Note: netCDF does single to double conversion if necessary if (n >= 4 .and. n <= 7) then call ice_read_nc(fid, m, vname(n), work, debug_forcing, & field_loc_NEcorner, field_type_vector) else call ice_read_nc(fid, m, vname(n), work, debug_forcing, & - field_loc_center, field_type_scalar) + field_loc_center, field_type_scalar) endif ocn_frc_m(:,:,:,n,m) = work enddo ! month loop @@ -5316,7 +5307,6 @@ subroutine box2001_data_atm ! authors: Elizabeth Hunke, LANL use ice_domain, only: nblocks, blocks_ice - use ice_domain_size, only: max_blocks use ice_calendar, only: timesecs use ice_blocks, only: block, get_block, nx_block, ny_block, nghost use ice_flux, only: uatm, vatm, wind, rhoa, strax, stray @@ -5347,8 +5337,8 @@ subroutine box2001_data_atm period = c4*secday do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block + do j = 1, ny_block + do i = 1, nx_block this_block = get_block(blocks_ice(iblk),iblk) iglob = this_block%i_glob @@ -5396,8 +5386,8 @@ subroutine box2001_data_atm ! / real(ny_global,kind=dbl_kind) ! initialization test - enddo - enddo + enddo + enddo enddo ! nblocks end subroutine box2001_data_atm @@ -5411,8 +5401,6 @@ subroutine box2001_data_ocn ! authors: Elizabeth Hunke, LANL use ice_domain, only: nblocks, blocks_ice - use ice_domain_size, only: max_blocks - use ice_calendar, only: timesecs use ice_blocks, only: block, get_block, nx_block, ny_block, nghost use ice_flux, only: uocn, vocn use ice_grid, only: uvm @@ -5429,16 +5417,13 @@ subroutine box2001_data_ocn type (block) :: & this_block ! block information for current block - real (kind=dbl_kind) :: & - secday, pi , puny, period, pi2, tau - character(len=*), parameter :: subname = '(box2001_data_ocn)' if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block + do j = 1, ny_block + do i = 1, nx_block this_block = get_block(blocks_ice(iblk),iblk) iglob = this_block%i_glob @@ -5454,8 +5439,8 @@ subroutine box2001_data_ocn uocn(i,j,iblk) = uocn(i,j,iblk) * uvm(i,j,iblk) vocn(i,j,iblk) = vocn(i,j,iblk) * uvm(i,j,iblk) - enddo - enddo + enddo + enddo enddo ! nblocks end subroutine box2001_data_ocn @@ -5466,7 +5451,6 @@ subroutine uniform_data_atm(dir,spd) ! 
uniform wind fields in some direction use ice_domain, only: nblocks - use ice_domain_size, only: max_blocks use ice_blocks, only: nx_block, ny_block, nghost use ice_flux, only: uatm, vatm, wind, rhoa, strax, stray use ice_state, only: aice @@ -5516,17 +5500,17 @@ subroutine uniform_data_atm(dir,spd) endif do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block + do j = 1, ny_block + do i = 1, nx_block ! wind stress wind(i,j,iblk) = sqrt(uatm(i,j,iblk)**2 + vatm(i,j,iblk)**2) tau = rhoa(i,j,iblk) * 0.0012_dbl_kind * wind(i,j,iblk) strax(i,j,iblk) = aice(i,j,iblk) * tau * uatm(i,j,iblk) stray(i,j,iblk) = aice(i,j,iblk) * tau * vatm(i,j,iblk) - + + enddo enddo - enddo enddo ! nblocks end subroutine uniform_data_atm @@ -5537,25 +5521,19 @@ subroutine uniform_data_ocn(dir,spd) ! uniform current fields in some direction - use ice_domain, only: nblocks - use ice_domain_size, only: max_blocks - use ice_blocks, only: nx_block, ny_block, nghost use ice_flux, only: uocn, vocn character(len=*), intent(in) :: dir - real(kind=dbl_kind), intent(in), optional :: spd ! velocity + real(kind=dbl_kind), intent(in), optional :: spd ! velocity ! local parameters - integer (kind=int_kind) :: & - iblk, i,j ! loop indices - real(kind=dbl_kind) :: & ocn_val ! value to use for ocean currents character(len=*), parameter :: subname = '(uniform_data_ocn)' - + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' if (present(spd)) then @@ -5583,9 +5561,9 @@ end subroutine uniform_data_ocn !======================================================================= subroutine get_wave_spec - + use ice_read_write, only: ice_read_nc_xyf - use ice_arrays_column, only: wave_spectrum, wave_sig_ht, & + use ice_arrays_column, only: wave_spectrum, & dwavefreq, wavefreq use ice_constants, only: c0 use ice_domain_size, only: nfreq @@ -5593,8 +5571,7 @@ subroutine get_wave_spec ! local variables integer (kind=int_kind) :: & - fid, & ! file id for netCDF routines - k + fid ! file id for netCDF routines real(kind=dbl_kind), dimension(nfreq) :: & wave_spectrum_profile ! wave spectrum @@ -5686,9 +5663,6 @@ subroutine init_snowtable snw_aging_table, & ! aging table setting fieldname ! field name in netcdf file - integer (kind=int_kind) :: & - j, k ! 
indices - character(len=*), parameter :: subname = '(init_snowtable)' !----------------------------------------------------------------- @@ -5816,7 +5790,8 @@ subroutine init_snowtable write(nu_diag,*) subname,' snoage_tau (1,1,1) = ',snowage_tau (1,1,1) write(nu_diag,*) subname,' snoage_kappa (1,1,1) = ',snowage_kappa(1,1,1) write(nu_diag,*) subname,' snoage_drdt0 (1,1,1) = ',snowage_drdt0(1,1,1) - write(nu_diag,*) subname,' Data at rhos, Tgrd, T = ',snowage_rhos(idx_rhos_max),snowage_Tgrd(idx_Tgrd_max),snowage_T(idx_T_max) + write(nu_diag,*) subname,' Data at rhos, Tgrd, T = ', & + snowage_rhos(idx_rhos_max),snowage_Tgrd(idx_Tgrd_max),snowage_T(idx_T_max) write(nu_diag,*) subname,' snoage_tau (max,max,max) = ',snowage_tau (idx_rhos_max, idx_Tgrd_max, idx_T_max) write(nu_diag,*) subname,' snoage_kappa (max,max,max) = ',snowage_kappa(idx_rhos_max, idx_Tgrd_max, idx_T_max) write(nu_diag,*) subname,' snoage_drdt0 (max,max,max) = ',snowage_drdt0(idx_rhos_max, idx_Tgrd_max, idx_T_max) diff --git a/cicecore/cicedynB/general/ice_forcing_bgc.F90 b/cicecore/cicedynB/general/ice_forcing_bgc.F90 index 383d388de..fc440834c 100644 --- a/cicecore/cicedynB/general/ice_forcing_bgc.F90 +++ b/cicecore/cicedynB/general/ice_forcing_bgc.F90 @@ -192,7 +192,7 @@ subroutine get_forcing_bgc ! Read two monthly silicate values and interpolate. ! Restore toward interpolated value. !------------------------------------------------------------------- - + if (trim(bgc_data_type)=='clim' .AND. tr_bgc_Sil) then ! call read_clim_data (readm, 0, ixm, mmonth, ixp, & ! sil_file, sil_data, & @@ -202,7 +202,7 @@ subroutine get_forcing_bgc sil_file, fieldname, sil_data, & field_loc_center, field_type_scalar) call interpolate_data (sil_data, sildat) - + if (istep == 1 .or. .NOT. restore_bgc) then !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) @@ -275,7 +275,7 @@ subroutine get_forcing_bgc ! Restore toward interpolated value. !------------------------------------------------------------------- - if (trim(bgc_data_type)=='clim' .AND. tr_bgc_Nit) then + if (trim(bgc_data_type)=='clim' .AND. tr_bgc_Nit) then ! call read_clim_data (readm, 0, ixm, mmonth, ixp, & ! nit_file, nit_data, & ! field_loc_center, field_type_scalar) @@ -321,7 +321,7 @@ subroutine get_forcing_bgc do i = ilo, ihi nit(i,j,iblk) = nit(i,j,iblk) & - + (nitdat(i,j,iblk)-nit(i,j,iblk))*dt/trest + + (nitdat(i,j,iblk)-nit(i,j,iblk))*dt/trest ks = icepack_max_algae + 1 ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !nit ks = 2*icepack_max_algae + icepack_max_doc + 7 + icepack_max_dic @@ -332,7 +332,7 @@ subroutine get_forcing_bgc !$OMP END PARALLEL DO endif !restore_bgc -! elseif (trim(nit_data_type) == 'sss' .AND. tr_bgc_Nit) then +! elseif (trim(nit_data_type) == 'sss' .AND. tr_bgc_Nit) then ! !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) ! do iblk = 1, nblocks @@ -345,11 +345,11 @@ subroutine get_forcing_bgc ! do j = jlo, jhi ! do i = ilo, ihi -! nit(i,j,iblk) = sss(i,j,iblk) +! nit(i,j,iblk) = sss(i,j,iblk) ! ks = icepack_max_algae + 1 -! ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !nit +! ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !nit ! ks = 2*icepack_max_algae + icepack_max_doc + 7 + icepack_max_dic -! ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !PON +! ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !PON ! enddo ! enddo ! 
enddo @@ -367,12 +367,12 @@ subroutine get_forcing_bgc do j = jlo, jhi do i = ilo, ihi - + nit(i,j,iblk) = 12.0_dbl_kind ks = icepack_max_algae + 1 - ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !nit + ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !nit ks = 2*icepack_max_algae + icepack_max_doc + 7 + icepack_max_dic - ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !PON + ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !PON enddo enddo enddo @@ -381,15 +381,15 @@ subroutine get_forcing_bgc endif !tr_bgc_Nit !------------------------------------------------------------------- - ! Data from Papdimitrious et al., 2007, Limnol. Oceanogr. - ! and WOA at 68oS, 304.5oE : - ! daily data located at the end of the 24-hour period. + ! Data from Papadimitriou et al., 2007, Limnol. Oceanogr. + ! and WOA at 68oS, 304.5oE : + ! daily data located at the end of the 24-hour period. !------------------------------------------------------------------- if (trim(bgc_data_type) == 'ISPOL') then nit_file = trim(bgc_data_dir)//'nutrients_daily_ISPOL_WOA_field3.nc' - sil_file = trim(bgc_data_dir)//'nutrients_daily_ISPOL_WOA_field3.nc' + sil_file = trim(bgc_data_dir)//'nutrients_daily_ISPOL_WOA_field3.nc' if (my_task == master_task .and. istep == 1) then if (tr_bgc_Sil) then @@ -408,45 +408,45 @@ subroutine get_forcing_bgc dataloc = 2 ! data located at end of interval sec1hr = secday ! seconds in day - maxrec = 365 ! + maxrec = 365 ! ! current record number - recnum = int(yday) + recnum = int(yday) ! Compute record numbers for surrounding data (2 on each side) ixm = mod(recnum+maxrec-2,maxrec) + 1 ixx = mod(recnum-1, maxrec) + 1 - recslot = 2 ixp = -99 call interp_coeff (recnum, recslot, sec1hr, dataloc) read1 = .false. if (istep==1 .or. bgcrecnum .ne. recnum) read1 = .true. - - + + if (tr_bgc_Sil) then met_file = sil_file - fieldname= 'silicate' + fieldname= 'silicate' call read_data_nc_point(read1, 0, fyear, ixm, ixx, ixp, & maxrec, met_file, fieldname, sil_data_p, & field_loc_center, field_type_scalar) - sil(:,:,:) = c1intp * sil_data_p(1) & + c2intp * sil_data_p(2) endif if (tr_bgc_Nit) then met_file = nit_file - fieldname= 'nitrate' + fieldname= 'nitrate' call read_data_nc_point(read1, 0, fyear, ixm, ixx, ixp, & maxrec, met_file, fieldname, nit_data_p, & field_loc_center, field_type_scalar) - nit(:,:,:) = c1intp * nit_data_p(1) & + c2intp * nit_data_p(2) endif - !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks @@ -458,13 +458,13 @@ subroutine get_forcing_bgc do j = jlo, jhi do i = ilo, ihi - ks = 2*icepack_max_algae + icepack_max_doc + 3 + icepack_max_dic - ocean_bio_all(i,j,ks,iblk) = sil(i,j,iblk) !Sil + ocean_bio_all(i,j,ks,iblk) = sil(i,j,iblk) !Sil ks = icepack_max_algae + 1 ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !nit ks = 2*icepack_max_algae + icepack_max_doc + 7 + icepack_max_dic - ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !PON + ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !PON enddo enddo enddo @@ -480,11 +480,11 @@ end subroutine get_forcing_bgc ! ! author: Nicole Jeffery, LANL - subroutine get_atm_bgc + subroutine get_atm_bgc use ice_blocks, only: block, get_block use ice_domain, only: nblocks, blocks_ice - use ice_domain_size, only: n_zaero + use ice_domain_size, only: n_zaero use ice_flux_bgc, only: flux_bio_atm, faero_atm ! local variables @@ -492,7 +492,7 @@ subroutine get_atm_bgc integer (kind=int_kind) :: & i, j, nn , & ! horizontal indices ilo,ihi,jlo,jhi, & ! beginning and end of physical domain - iblk ! block index + iblk !
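! A worked example of the wrap-around record indexing above in
! get_forcing_bgc (a sketch; maxrec = 365 daily records, each valid at
! the end of its 24-hour period):
!   yday =   1:  recnum =   1, ixm = mod(1+365-2,365)+1 = 365, ixx =   1
!   yday = 200:  recnum = 200, ixm = 199,                      ixx = 200
! so the first day of the year interpolates from the final record of
! the daily climatology toward record 1, with interp_coeff weighting
! the two values by the elapsed fraction of the current day.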
block index logical (kind=log_kind) :: & tr_zaero @@ -520,15 +520,15 @@ subroutine get_atm_bgc !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,nn) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - - do nn = 1, n_zaero + + do nn = 1, n_zaero do j = jlo, jhi - do i = ilo, ihi + do i = ilo, ihi flux_bio_atm(i,j,nlt_zaero(nn),iblk) = faero_atm(i,j,nn,iblk) enddo enddo @@ -569,10 +569,10 @@ subroutine faero_default faero_atm(:,:,1,:) = 1.e-12_dbl_kind ! kg/m^2 s faero_atm(:,:,2,:) = 1.e-13_dbl_kind - faero_atm(:,:,3,:) = 1.e-14_dbl_kind - faero_atm(:,:,4,:) = 1.e-14_dbl_kind - faero_atm(:,:,5,:) = 1.e-14_dbl_kind - faero_atm(:,:,6,:) = 1.e-14_dbl_kind + faero_atm(:,:,3,:) = 1.e-14_dbl_kind + faero_atm(:,:,4,:) = 1.e-14_dbl_kind + faero_atm(:,:,5,:) = 1.e-14_dbl_kind + faero_atm(:,:,6,:) = 1.e-14_dbl_kind end subroutine faero_default @@ -598,11 +598,11 @@ subroutine faero_data aero2_data , & ! field values at 2 temporal data points aero3_data ! field values at 2 temporal data points - character (char_len_long) :: & + character (char_len_long) :: & aero_file, & ! netcdf filename fieldname ! field name in netcdf file - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ixm,ixp , & ! record numbers for neighboring months maxrec , & ! maximum record number recslot , & ! spline slot for current record @@ -618,9 +618,9 @@ subroutine faero_data !------------------------------------------------------------------- - ! monthly data + ! monthly data ! - ! Assume that monthly data values are located in the middle of the + ! Assume that monthly data values are located in the middle of the ! month. !------------------------------------------------------------------- @@ -645,12 +645,12 @@ subroutine faero_data ! Find interpolation coefficients call interp_coeff_monthly (recslot) - ! Read 2 monthly values + ! Read 2 monthly values readm = .false. if (istep==1 .or. (mday==midmonth .and. msec==0)) readm = .true. -! aero_file = trim(atm_data_dir)//'faero.nc' - aero_file = '/usr/projects/climate/eclare/DATA/gx1v3/faero.nc' +! aero_file = trim(atm_data_dir)//'faero.nc' + aero_file = '/usr/projects/climate/eclare/DATA/gx1v3/faero.nc' fieldname='faero_atm001' call read_clim_data_nc (readm, 0, ixm, mmonth, ixp, & @@ -695,11 +695,11 @@ subroutine fzaero_data save :: & aero_data ! field values at 2 temporal data points - character (char_len_long) :: & + character (char_len_long) :: & aero_file, & ! netcdf filename fieldname ! field name in netcdf file - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ixm,ixp , & ! record numbers for neighboring months maxrec , & ! maximum record number recslot , & ! spline slot for current record @@ -720,9 +720,9 @@ subroutine fzaero_data allocate( aero_data(nx_block,ny_block,2,max_blocks) ) !------------------------------------------------------------------- - ! monthly data + ! monthly data ! - ! Assume that monthly data values are located in the middle of the + ! Assume that monthly data values are located in the middle of the ! month. !------------------------------------------------------------------- @@ -747,13 +747,13 @@ subroutine fzaero_data ! Find interpolation coefficients call interp_coeff_monthly (recslot) - ! Read 2 monthly values + ! Read 2 monthly values readm = .false. if (istep==1 .or. (mday==midmonth .and. msec==0)) readm = .true. -! aero_file = trim(atm_data_dir)//'faero.nc' +! 
aero_file = trim(atm_data_dir)//'faero.nc' ! Cam5 monthly total black carbon deposition on the gx1 grid" - aero_file = '/usr/projects/climate/njeffery/DATA/CAM/Hailong_Wang/Cam5_bc_monthly_popgrid.nc' + aero_file = '/usr/projects/climate/njeffery/DATA/CAM/Hailong_Wang/Cam5_bc_monthly_popgrid.nc' fieldname='bcd' call read_clim_data_nc (readm, 0, ixm, mmonth, ixp, & @@ -786,11 +786,11 @@ subroutine init_bgc_data (fed1,fep1) ! local parameters integer (kind=int_kind) :: & - fid ! file id for netCDF file + fid ! file id for netCDF file logical (kind=log_kind) :: diag - character (char_len_long) :: & + character (char_len_long) :: & iron_file, & ! netcdf filename fieldname ! field name in netcdf file @@ -802,7 +802,7 @@ subroutine init_bgc_data (fed1,fep1) !------------------------------------------------------------------- if (trim(fe_data_type) == 'clim') then - diag = .true. ! write diagnostic information + diag = .true. ! write diagnostic information iron_file = trim(bgc_data_dir)//'dFe_50m_annual_Tagliabue_gx1.nc' if (my_task == master_task) then @@ -814,12 +814,12 @@ subroutine init_bgc_data (fed1,fep1) fieldname='dFe' ! Currently only first fed value is read - call ice_read_nc(fid,1,fieldname,fed1,diag) - where ( fed1(:,:,:) > 1.e20) fed1(:,:,:) = p1 + call ice_read_nc(fid,1,fieldname,fed1,diag) + where ( fed1(:,:,:) > 1.e20) fed1(:,:,:) = p1 - if (my_task == master_task) call ice_close_nc(fid) + if (my_task == master_task) call ice_close_nc(fid) - diag = .true. ! write diagnostic information + diag = .true. ! write diagnostic information iron_file = trim(bgc_data_dir)//'pFe_bathy_gx1.nc' if (my_task == master_task) then @@ -831,13 +831,13 @@ subroutine init_bgc_data (fed1,fep1) fieldname='pFe' ! Currently only first fep value is read - call ice_read_nc(fid,1,fieldname,fep1,diag) - where ( fep1(:,:,:) > 1.e20) fep1(:,:,:) = p1 + call ice_read_nc(fid,1,fieldname,fep1,diag) + where ( fep1(:,:,:) > 1.e20) fep1(:,:,:) = p1 + + if (my_task == master_task) call ice_close_nc(fid) - if (my_task == master_task) call ice_close_nc(fid) - endif - + end subroutine init_bgc_data !======================================================================= @@ -871,7 +871,7 @@ subroutine faero_optics logical (kind=log_kind) :: modal_aero - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & varid , & ! variable id status , & ! status output from netcdf routines n, k ! index @@ -880,9 +880,9 @@ subroutine faero_optics amin, amax, asum ! min, max values and sum of input array integer (kind=int_kind) :: & - fid ! file id for netCDF file + fid ! file id for netCDF file - character (char_len_long) :: & + character (char_len_long) :: & fieldname ! 
field name in netcdf file character(len=*), parameter :: subname = '(faero_optics)' @@ -972,12 +972,12 @@ subroutine faero_optics fieldname=optics_file_fieldname status = nf90_inq_varid(fid, trim(fieldname), varid) - + if (status /= nf90_noerr) then call abort_ice (subname//'ERROR: Cannot find variable '//trim(fieldname)) endif status = nf90_get_var( fid, varid, bcenh, & - start=(/1,1,1,1/), & + start=(/1,1,1,1/), & count=(/3,10,8,1/) ) do n=1,10 amin = minval(bcenh(:,n,:)) @@ -985,13 +985,13 @@ subroutine faero_optics asum = sum (bcenh(:,n,:)) write(nu_diag,*) ' min, max, sum =', amin, amax, asum enddo - call ice_close_nc(fid) + call ice_close_nc(fid) endif !master_task do n=1,3 do k=1,8 call broadcast_array(bcenh(n,:,k), master_task) enddo - enddo + enddo #else call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & file=__FILE__, line=__LINE__) diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 0c368a413..c2cc986f8 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -5,7 +5,7 @@ ! authors Elizabeth C. Hunke and William H. Lipscomb, LANL ! C. M. Bitz, UW ! -! 2004 WHL: Block structure added +! 2004 WHL: Block structure added ! 2006 ECH: Added namelist variables, warnings. ! Replaced old default initial ice conditions with 3.14 version. ! Converted to free source form (F90). @@ -97,7 +97,7 @@ subroutine input_data atm_data_format, ocn_data_format, & bgc_data_type, & ocn_data_type, ocn_data_dir, wave_spec_file, & - oceanmixed_file, restore_ocn, trestore, & + oceanmixed_file, restore_ocn, trestore, & ice_data_type, ice_data_conc, ice_data_dist, & snw_filename, & snw_tau_fname, snw_kappa_fname, snw_drdt0_fname, & @@ -108,7 +108,7 @@ subroutine input_data bathymetry_format, kmt_type, & grid_type, grid_format, & grid_ice, grid_ice_thrm, grid_ice_dynu, grid_ice_dynv, & - grid_ocn, grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv, & + grid_ocn, grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv, & grid_atm, grid_atm_thrm, grid_atm_dynu, grid_atm_dynv, & dxrect, dyrect, & pgl_global_ext @@ -140,7 +140,9 @@ subroutine input_data nml_error, & ! namelist i/o error flag n ! loop index +#ifdef CESMCOUPLED logical :: exists +#endif real (kind=dbl_kind) :: ustar_min, albicev, albicei, albsnowv, albsnowi, & ahmax, R_ice, R_pnd, R_snw, dT_mlt, rsnw_mlt, emissivity, & @@ -171,13 +173,15 @@ subroutine input_data integer (kind=int_kind) :: numin, numax ! unit number limits #ifdef UNDEPRECATE_CESMPONDS - integer (kind=int_kind) :: rpcesm, rplvl, rptopo + integer (kind=int_kind) :: rpcesm, rplvl, rptopo #else - integer (kind=int_kind) :: rplvl, rptopo + integer (kind=int_kind) :: rplvl, rptopo #endif real (kind=dbl_kind) :: Cf, ksno, puny character (len=char_len) :: abort_list +#ifdef CESMCOUPLED character (len=64) :: tmpstr +#endif character (len=128) :: tmpstr2 character(len=*), parameter :: subname='(input_data)' @@ -249,7 +253,7 @@ subroutine input_data k1, k2, alphab, threshold_hw, & deltaminEVP, deltaminVP, capping_method, & Cf, Pstar, Cstar, Ktens - + namelist /shortwave_nml/ & shortwave, albedo_type, & albicev, albicei, albsnowv, albsnowi, & @@ -304,11 +308,11 @@ subroutine input_data istep0 = 0 ! no. of steps taken in previous integrations, ! real (dumped) or imagined (to set calendar) #ifndef CESMCOUPLED - dt = 3600.0_dbl_kind ! time step, s + dt = 3600.0_dbl_kind ! time step, s #endif numin = 11 ! min allowed unit number numax = 99 ! max allowed unit number - npt = 99999 ! 
total number of time steps (dt) + npt = 99999 ! total number of time steps (dt) npt_unit = '1' ! units of npt 'y', 'm', 'd', 's', '1' diagfreq = 24 ! how often diag output is written debug_model = .false. ! debug output @@ -329,7 +333,7 @@ subroutine input_data histfreq(3) = 'd' ! output frequency option for different streams histfreq(4) = 'm' ! output frequency option for different streams histfreq(5) = 'y' ! output frequency option for different streams - histfreq_n(:) = 1 ! output frequency + histfreq_n(:) = 1 ! output frequency histfreq_base = 'zero' ! output frequency reference date hist_avg = .true. ! if true, write time-averages (not snapshots) history_format = 'default' ! history file format @@ -377,20 +381,20 @@ subroutine input_data kdyn = 1 ! type of dynamics (-1, 0 = off, 1 = evp, 2 = eap, 3 = vp) ndtd = 1 ! dynamic time steps per thermodynamic time step ndte = 120 ! subcycles per dynamics timestep: ndte=dt_dyn/dte - evp_algorithm = 'standard_2d' ! EVP kernel (=standard_2d: standard cice evp; =shared_mem_1d: 1d shared memory and no mpi. if more mpi processors then executed on master + evp_algorithm = 'standard_2d' ! EVP kernel (standard_2d=standard cice evp; shared_mem_1d=1d shared memory and no mpi elasticDamp = 0.36_dbl_kind ! coefficient for calculating the parameter E pgl_global_ext = .false. ! if true, init primary grid lengths (global ext.) brlx = 300.0_dbl_kind ! revised_evp values. Otherwise overwritten in ice_dyn_shared arlx = 300.0_dbl_kind ! revised_evp values. Otherwise overwritten in ice_dyn_shared revised_evp = .false. ! if true, use revised procedure for evp dynamics - yield_curve = 'ellipse' ! yield curve + yield_curve = 'ellipse' ! yield curve kstrength = 1 ! 1 = Rothrock 75 strength, 0 = Hibler 79 Pstar = 2.75e4_dbl_kind ! constant in Hibler strength formula (kstrength = 0) Cstar = 20._dbl_kind ! constant in Hibler strength formula (kstrength = 0) krdg_partic = 1 ! 1 = new participation, 0 = Thorndike et al 75 krdg_redist = 1 ! 1 = new redistribution, 0 = Hibler 80 mu_rdg = 3 ! e-folding scale of ridged ice, krdg_partic=1 (m^0.5) - Cf = 17.0_dbl_kind ! ratio of ridging work to PE change in ridging + Cf = 17.0_dbl_kind ! ratio of ridging work to PE change in ridging ksno = 0.3_dbl_kind ! snow thermal conductivity dxrect = 0.0_dbl_kind ! user defined grid spacing in cm in x direction dyrect = 0.0_dbl_kind ! user defined grid spacing in cm in y direction @@ -402,14 +406,15 @@ subroutine input_data alphab = 20.0_dbl_kind ! alphab=Cb factor in Lemieux et al 2015 threshold_hw = 30.0_dbl_kind ! max water depth for grounding Ktens = 0.0_dbl_kind ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) - e_yieldcurve = 2.0_dbl_kind ! VP aspect ratio of elliptical yield curve + e_yieldcurve = 2.0_dbl_kind ! VP aspect ratio of elliptical yield curve e_plasticpot = 2.0_dbl_kind ! VP aspect ratio of elliptical plastic potential visc_method = 'avg_zeta' ! calc viscosities at U point: avg_strength, avg_zeta deltaminEVP = 1e-11_dbl_kind ! minimum delta for viscosities (EVP, Hunke 2001) deltaminVP = 2e-9_dbl_kind ! minimum delta for viscosities (VP, Hibler 1979) capping_method = 'max' ! method for capping of viscosities (max=Hibler 1979,sum=Kreyscher2000) maxits_nonlin = 4 ! max nb of iteration for nonlinear solver - precond = 'pgmres' ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), 'pgmres' (Jacobi-preconditioned GMRES) + precond = 'pgmres' ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), + ! 
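! An illustrative namelist fragment exercising the implicit (VP) solver
! options initialized above (a sketch: values are examples only, and the
! group name assumes the standard CICE dynamics_nml):
!   &dynamics_nml
!     kdyn          = 3           ! implicit VP dynamics
!     algo_nonlin   = 'anderson'  ! Anderson-accelerated fixed point
!     maxits_nonlin = 10
!     precond       = 'pgmres'    ! Jacobi-preconditioned GMRES
!     dim_fgmres    = 50
!   /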
'pgmres' (Jacobi-preconditioned GMRES) dim_fgmres = 50 ! size of fgmres Krylov subspace dim_pgmres = 5 ! size of pgmres Krylov subspace maxits_fgmres = 50 ! max nb of iteration for fgmres @@ -422,7 +427,8 @@ subroutine input_data reltol_fgmres = 1e-2_dbl_kind ! fgmres stopping criterion: reltol_fgmres*res(k) reltol_pgmres = 1e-6_dbl_kind ! pgmres stopping criterion: reltol_pgmres*res(k) algo_nonlin = 'picard' ! nonlinear algorithm: 'picard' (Picard iteration), 'anderson' (Anderson acceleration) - fpfunc_andacc = 1 ! fixed point function for Anderson acceleration: 1: g(x) = FMGRES(A(x),b(x)), 2: g(x) = x - A(x)x + b(x) + fpfunc_andacc = 1 ! fixed point function for Anderson acceleration: + ! 1: g(x) = FMGRES(A(x),b(x)), 2: g(x) = x - A(x)x + b(x) dim_andacc = 5 ! size of Anderson minimization matrix (number of saved previous residuals) reltol_andacc = 1e-6_dbl_kind ! relative tolerance for Anderson acceleration damping_andacc = 0 ! damping factor for Anderson acceleration @@ -463,7 +469,7 @@ subroutine input_data hp1 = 0.01_dbl_kind ! critical pond lid thickness for topo ponds hs0 = 0.03_dbl_kind ! snow depth for transition to bare sea ice (m) hs1 = 0.03_dbl_kind ! snow depth for transition to bare pond ice (m) - dpscale = c1 ! alter e-folding time scale for flushing + dpscale = c1 ! alter e-folding time scale for flushing frzpnd = 'cesm' ! melt pond refreezing parameterization rfracmin = 0.15_dbl_kind ! minimum retained fraction of meltwater rfracmax = 0.85_dbl_kind ! maximum retained fraction of meltwater @@ -544,7 +550,7 @@ subroutine input_data restart_age = .false. ! ice age restart tr_FY = .false. ! ice age restart_FY = .false. ! ice age restart - tr_lvl = .false. ! level ice + tr_lvl = .false. ! level ice restart_lvl = .false. ! level ice restart #ifdef UNDEPRECATE_CESMPONDS tr_pond_cesm = .false. ! CESM melt ponds @@ -818,7 +824,7 @@ subroutine input_data call broadcast_scalar(diag_file, master_task) do n = 1, max_nstrm call broadcast_scalar(histfreq(n), master_task) - enddo + enddo call broadcast_array(histfreq_n, master_task) call broadcast_scalar(histfreq_base, master_task) call broadcast_scalar(hist_avg, master_task) @@ -1229,7 +1235,7 @@ subroutine input_data abort_list = trim(abort_list)//":45" endif endif - + #ifdef UNDEPRECATE_CESMPONDS rpcesm = 0 #endif @@ -1493,7 +1499,7 @@ subroutine input_data if (my_task == master_task) write(nu_diag,*) subname//' ERROR: formdrag=F and fbot_xfer_type=Cdn_ocn' abort_list = trim(abort_list)//":19" endif - + if(history_precision .ne. 4 .and. history_precision .ne. 8) then write (nu_diag,*) subname//' ERROR: bad value for history_precision, allowed values: 4, 8' abort_list = trim(abort_list)//":22" @@ -1530,12 +1536,12 @@ subroutine input_data endif abort_list = trim(abort_list)//":60" endif - + if (trim(algo_nonlin) == 'picard') then ! Picard solver is implemented in the Anderson solver; reset number of saved residuals to zero dim_andacc = 0 endif - + if (.not. (trim(precond) == 'ident' .or. trim(precond) == 'diag' .or. trim(precond) == 'pgmres')) then if (my_task == master_task) then write(nu_diag,*) subname//' ERROR: unknown precond: '//precond @@ -1543,7 +1549,7 @@ subroutine input_data endif abort_list = trim(abort_list)//":61" endif - + if (.not. (trim(ortho_type) == 'cgs' .or. 
trim(ortho_type) == 'mgs')) then if (my_task == master_task) then write(nu_diag,*) subname//' ERROR: unknown ortho_type: '//ortho_type @@ -1738,7 +1744,7 @@ subroutine input_data tmpstr2 = ' : revised EVP formulation not used' endif write(nu_diag,1010) ' revised_evp = ', revised_evp,trim(tmpstr2) - + if (evp_algorithm == 'standard_2d') then tmpstr2 = ' : standard 2d EVP solver' elseif (evp_algorithm == 'shared_mem_1d') then @@ -1807,7 +1813,7 @@ subroutine input_data tmpstr2 = ' : no seabed stress parameterization' endif write(nu_diag,1010) ' seabed_stress = ', seabed_stress,trim(tmpstr2) - if (seabed_stress) then + if (seabed_stress) then write(nu_diag,1030) ' seabed method = ',trim(seabed_stress_method) if (seabed_stress_method == 'LKD') then write(nu_diag,1002) ' k1 = ', k1, ' : free parameter for landfast ice' @@ -1821,7 +1827,7 @@ subroutine input_data if (grid_ice == 'C' .or. grid_ice == 'CD') then write(nu_diag,1030) ' visc_method= ', trim(visc_method),' : viscosities method (U point)' endif - + write(nu_diag,1002) ' Ktens = ', Ktens, ' : tensile strength factor' if (kdyn == 3) then @@ -2316,11 +2322,11 @@ subroutine input_data trim(ocn_data_type) /= 'default') then write(nu_diag,1031) ' ocn_data_dir = ', trim(ocn_data_dir) write(nu_diag,1011) ' restore_ocn = ', restore_ocn - endif + endif write(nu_diag,1011) ' restore_ice = ', restore_ice if (restore_ice .or. restore_ocn) & write(nu_diag,1021) ' trestore = ', trestore - + write(nu_diag,*) ' ' write(nu_diag,'(a31,2f8.2)') 'Diagnostic point 1: lat, lon =', & latpnt(1), lonpnt(1) @@ -2392,9 +2398,9 @@ subroutine input_data if (kdyn == 1 .and. & evp_algorithm /= 'standard_2d' .and. & - evp_algorithm /= 'shared_mem_1d') then + evp_algorithm /= 'shared_mem_1d') then if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown evp_algorithm=',trim(evp_algorithm) - abort_list = trim(abort_list)//":21" + abort_list = trim(abort_list)//":21" endif if (abort_list /= "") then @@ -2548,7 +2554,7 @@ subroutine init_state !----------------------------------------------------------------- if (my_task == master_task) then - + if (nilyr < 1) then write(nu_diag,*) subname//' ERROR: Must have at least one ice layer' write(nu_diag,*) subname//' ERROR: nilyr =', nilyr @@ -2649,11 +2655,11 @@ subroutine init_state do it = 1, ntrcr ! mask for base quantity on which tracers are carried if (trcr_depend(it) == 0) then ! area - trcr_base(it,1) = c1 + trcr_base(it,1) = c1 elseif (trcr_depend(it) == 1) then ! ice volume - trcr_base(it,2) = c1 + trcr_base(it,2) = c1 elseif (trcr_depend(it) == 2) then ! snow volume - trcr_base(it,3) = c1 + trcr_base(it,3) = c1 else trcr_base(it,1) = c1 ! default: ice area trcr_base(it,2) = c0 @@ -2698,7 +2704,7 @@ subroutine init_state !$OMP iglob,jglob) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -2736,13 +2742,13 @@ subroutine init_state call grid_average_X2Y('S',vvel,'U',vvelN,'N') call grid_average_X2Y('S',uvel,'U',uvelE,'E') call grid_average_X2Y('S',vvel,'U',vvelE,'E') - + ! 
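! For example (a sketch of how the tracer dependency masks set above in
! init_state are used, assuming trcr_base was zero-initialized
! beforehand): an ice age tracer carried on ice volume has
! trcr_depend(it) = 1, so only trcr_base(it,2) = c1 is set and
! aggregation/advection weight it by vicen; a surface temperature
! tracer (trcr_depend = 0) is weighted by aicen instead.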
Halo update on North, East faces call ice_HaloUpdate(uvelN, halo_info, & field_loc_Nface, field_type_scalar) call ice_HaloUpdate(vvelN, halo_info, & field_loc_Nface, field_type_scalar) - + call ice_HaloUpdate(uvelE, halo_info, & field_loc_Eface, field_type_scalar) call ice_HaloUpdate(vvelE, halo_info, & @@ -2821,7 +2827,7 @@ subroutine set_state_var (nx_block, ny_block, & use ice_arrays_column, only: hin_max use ice_domain_size, only: nilyr, nslyr, nx_global, ny_global, ncat - use ice_grid, only: grid_type, dxrect, dyrect + use ice_grid, only: dxrect, dyrect use ice_forcing, only: ice_data_type, ice_data_conc, ice_data_dist integer (kind=int_kind), intent(in) :: & @@ -2831,7 +2837,7 @@ subroutine set_state_var (nx_block, ny_block, & iglob(nx_block) , & ! global indices jglob(ny_block) ! - character(len=char_len_long), intent(in) :: & + character(len=char_len_long), intent(in) :: & ice_ic ! method of ice cover initialization logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: & @@ -2843,8 +2849,8 @@ subroutine set_state_var (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & Tair , & ! air temperature (K) - Tf , & ! freezing temperature (C) - sst ! sea surface temperature (C) + Tf , & ! freezing temperature (C) + sst ! sea surface temperature (C) real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr), intent(in) :: & salinz , & ! initial salinity profile @@ -2861,7 +2867,7 @@ subroutine set_state_var (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & uvel , & ! ice velocity B grid - vvel ! + vvel ! ! local variables integer (kind=int_kind) :: & @@ -2902,7 +2908,7 @@ subroutine set_state_var (nx_block, ny_block, & real (kind=dbl_kind), parameter :: & hsno_init = 0.20_dbl_kind , & ! initial snow thickness (m) - edge_init_nh = 70._dbl_kind, & ! initial ice edge, N.Hem. (deg) + edge_init_nh = 70._dbl_kind, & ! initial ice edge, N.Hem. (deg) edge_init_sh = -60._dbl_kind ! initial ice edge, S.Hem. (deg) real (kind=dbl_kind) :: & ! boxslotcyl @@ -2950,7 +2956,7 @@ subroutine set_state_var (nx_block, ny_block, & vicen(i,j,n) = c0 vsnon(i,j,n) = c0 if (tmask(i,j)) then - trcrn(i,j,nt_Tsfc,n) = Tf(i,j) ! surface temperature + trcrn(i,j,nt_Tsfc,n) = Tf(i,j) ! surface temperature else trcrn(i,j,nt_Tsfc,n) = c0 ! at land grid cells (for clean history/restart files) endif @@ -3022,9 +3028,9 @@ subroutine set_state_var (nx_block, ny_block, & ! initial category areas in cells with ice hbar = c3 ! initial ice thickness with greatest area - ! Note: the resulting average ice thickness + ! Note: the resulting average ice thickness ! tends to be less than hbar due to the - ! nonlinear distribution of ice thicknesses + ! nonlinear distribution of ice thicknesses sum = c0 do n = 1, ncat if (n < ncat) then @@ -3083,7 +3089,7 @@ subroutine set_state_var (nx_block, ny_block, & if (tmask(i,j)) then ! check if grid point is inside slotted cylinder in_slot = (dxrect*real(iglob(i)-1, kind=dbl_kind) >= center_x - width/c2) .and. & - (dxrect*real(iglob(i)-1, kind=dbl_kind) <= center_x + width/c2) .and. & + (dxrect*real(iglob(i)-1, kind=dbl_kind) <= center_x + width/c2) .and. & (dyrect*real(jglob(j)-1, kind=dbl_kind) >= center_y - radius) .and. & (dyrect*real(jglob(j)-1, kind=dbl_kind) <= center_y + (length - radius)) @@ -3254,7 +3260,7 @@ subroutine set_state_var (nx_block, ny_block, & ! surface temperature trcrn(i,j,nt_Tsfc,n) = Tsfc ! deg C - ! ice enthalpy, salinity + ! 
ice enthalpy, salinity do k = 1, nilyr trcrn(i,j,nt_qice+k-1,n) = qin(k) trcrn(i,j,nt_sice+k-1,n) = salinz(i,j,k) enddo @@ -3268,7 +3274,7 @@ subroutine set_state_var (nx_block, ny_block, & enddo ! ij enddo ! ncat - + !--------------------------------------------------------- ! ice velocity ! these velocities are defined on B-grid diff --git a/cicecore/cicedynB/general/ice_state.F90 b/cicecore/cicedynB/general/ice_state.F90 index e07eca209..d5c115a0c 100644 --- a/cicecore/cicedynB/general/ice_state.F90 +++ b/cicecore/cicedynB/general/ice_state.F90 @@ -10,7 +10,7 @@ ! aicen(i,j,n) aice(i,j) --- ! vicen(i,j,n) vice(i,j) m ! vsnon(i,j,n) vsno(i,j) m -! trcrn(i,j,it,n) trcr(i,j,it) +! trcrn(i,j,it,n) trcr(i,j,it) ! ! Area is dimensionless because aice is the fractional area ! (normalized so that the sum over all categories, including open @@ -118,7 +118,7 @@ module ice_state strength ! ice strength (N/m) !----------------------------------------------------------------- - ! ice state at start of time step, saved for later in the step + ! ice state at start of time step, saved for later in the step !----------------------------------------------------------------- real (kind=dbl_kind), dimension(:,:,:), allocatable, & @@ -129,7 +129,7 @@ module ice_state dimension(:,:,:,:), allocatable, public :: & aicen_init , & ! initial ice concentration, for linear ITD vicen_init , & ! initial ice volume (m), for linear ITD - vsnon_init ! initial snow volume (m), for aerosol + vsnon_init ! initial snow volume (m), for aerosol !======================================================================= @@ -137,7 +137,7 @@ module ice_state !======================================================================= ! -! Allocate space for all state variables +! Allocate space for all state variables ! subroutine alloc_state integer (int_kind) :: ntrcr, ierr @@ -168,7 +168,7 @@ subroutine alloc_state vsnon (nx_block,ny_block,ncat,max_blocks) , & ! volume per unit area of snow (m) aicen_init(nx_block,ny_block,ncat,max_blocks) , & ! initial ice concentration, for linear ITD vicen_init(nx_block,ny_block,ncat,max_blocks) , & ! initial ice volume (m), for linear ITD - vsnon_init(nx_block,ny_block,ncat,max_blocks) , & ! initial snow volume (m), for aerosol + vsnon_init(nx_block,ny_block,ncat,max_blocks) , & ! initial snow volume (m), for aerosol trcr (nx_block,ny_block,ntrcr,max_blocks) , & ! ice tracers: 1: surface temperature of ice/snow (C) trcrn (nx_block,ny_block,ntrcr,ncat,max_blocks) , & ! tracers: 1: surface temperature of ice/snow (C) stat=ierr) @@ -181,7 +181,7 @@ subroutine alloc_state trcr_base(ntrcr,3) , & !
= 0 or 1 depending on tracer dependency, (1) aice, (2) vice, (3) vsno stat=ierr) if (ierr/=0) call abort_ice('(alloc_state): Out of memory2') - + trcr_depend = 0 n_trcr_strata = 0 nt_strata = 0 diff --git a/cicecore/cicedynB/general/ice_step_mod.F90 b/cicecore/cicedynB/general/ice_step_mod.F90 index 3f9b9abeb..b6f8741c0 100644 --- a/cicecore/cicedynB/general/ice_step_mod.F90 +++ b/cicecore/cicedynB/general/ice_step_mod.F90 @@ -128,7 +128,7 @@ subroutine prep_radiation (iblk) alidr_init(:,:,iblk) = c0 alidf_init(:,:,iblk) = c0 - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -188,7 +188,10 @@ subroutine step_therm1 (dt, iblk) hfreebd, hdraft, hridge, distrdg, hkeel, dkeel, lfloe, dfloe, & fswsfcn, fswintn, Sswabsn, Iswabsn, meltsliqn, meltsliq, & fswthrun, fswthrun_vdr, fswthrun_vdf, fswthrun_idr, fswthrun_idf - use ice_blocks, only: block, get_block, nx_block, ny_block + use ice_blocks, only: block, get_block +#ifdef CICE_IN_NEMO + use ice_blocks, only: nx_block, ny_block +#endif use ice_calendar, only: yday use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, n_iso, n_aero @@ -205,13 +208,16 @@ subroutine step_therm1 (dt, iblk) use ice_flux_bgc, only: dsnown, faero_atm, faero_ocn, fiso_atm, fiso_ocn, & Qa_iso, Qref_iso, fiso_evap, HDO_ocn, H2_16O_ocn, H2_18O_ocn use ice_grid, only: lmask_n, lmask_s, tmask - use ice_state, only: aice, aicen, aice_init, aicen_init, vicen_init, & + use ice_state, only: aice, aicen, aicen_init, vicen_init, & vice, vicen, vsno, vsnon, trcrn, uvel, vvel, vsnon_init +#ifdef CICE_IN_NEMO + use ice_state, only: aice_init +#endif #ifdef CESMCOUPLED use ice_prescribed_mod, only: prescribed_ice #else - logical (kind=log_kind) :: & + logical (kind=log_kind) :: & prescribed_ice ! if .true., use prescribed ice instead of computed #endif real (kind=dbl_kind), intent(in) :: & @@ -222,7 +228,7 @@ subroutine step_therm1 (dt, iblk) ! local variables #ifdef CICE_IN_NEMO - real (kind=dbl_kind) :: & + real (kind=dbl_kind) :: & raice ! reciprocal of ice concentration #endif integer (kind=int_kind) :: & @@ -321,12 +327,12 @@ subroutine step_therm1 (dt, iblk) enddo ! 
j #endif - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j = jlo, jhi do i = ilo, ihi @@ -388,16 +394,16 @@ subroutine step_therm1 (dt, iblk) uvel = uvel_center , & vvel = vvel_center , & Tsfc = trcrn (i,j,nt_Tsfc,:,iblk), & - zqsn = trcrn (i,j,nt_qsno:nt_qsno+nslyr-1,:,iblk), & - zqin = trcrn (i,j,nt_qice:nt_qice+nilyr-1,:,iblk), & - zSin = trcrn (i,j,nt_sice:nt_sice+nilyr-1,:,iblk), & - alvl = trcrn (i,j,nt_alvl,:,iblk), & - vlvl = trcrn (i,j,nt_vlvl,:,iblk), & - apnd = trcrn (i,j,nt_apnd,:,iblk), & - hpnd = trcrn (i,j,nt_hpnd,:,iblk), & - ipnd = trcrn (i,j,nt_ipnd,:,iblk), & + zqsn = trcrn (i,j,nt_qsno:nt_qsno+nslyr-1,:,iblk), & + zqin = trcrn (i,j,nt_qice:nt_qice+nilyr-1,:,iblk), & + zSin = trcrn (i,j,nt_sice:nt_sice+nilyr-1,:,iblk), & + alvl = trcrn (i,j,nt_alvl,:,iblk), & + vlvl = trcrn (i,j,nt_vlvl,:,iblk), & + apnd = trcrn (i,j,nt_apnd,:,iblk), & + hpnd = trcrn (i,j,nt_hpnd,:,iblk), & + ipnd = trcrn (i,j,nt_ipnd,:,iblk), & iage = trcrn (i,j,nt_iage,:,iblk), & - FY = trcrn (i,j,nt_FY ,:,iblk), & + FY = trcrn (i,j,nt_FY ,:,iblk), & rsnwn = rsnwn (:,:), & smicen = smicen (:,:), & smliqn = smliqn (:,:), & @@ -601,7 +607,7 @@ subroutine step_therm2 (dt, iblk) use ice_blocks, only: block, get_block use ice_calendar, only: yday use ice_domain, only: blocks_ice - use ice_domain_size, only: ncat, nilyr, nslyr, n_aero, nblyr, nfsd + use ice_domain_size, only: ncat, nilyr, nslyr, nblyr, nfsd use ice_flux, only: fresh, frain, fpond, frzmlt, frazil, frz_onset, & update_ocn_f, fsalt, Tf, sss, salinz, fhocn, rside, fside, & meltl, frazil_diag @@ -651,7 +657,7 @@ subroutine step_therm2 (dt, iblk) nltrcr = 0 endif - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -668,7 +674,7 @@ subroutine step_therm2 (dt, iblk) call icepack_step_therm2(dt=dt, ncat=ncat, & nltrcr=nltrcr, nilyr=nilyr, nslyr=nslyr, nblyr=nblyr, & - hin_max = hin_max (:), & + hin_max = hin_max (:), & aicen = aicen (i,j,:,iblk), & vicen = vicen (i,j,:,iblk), & vsnon = vsnon (i,j,:,iblk), & @@ -760,8 +766,8 @@ subroutine update_state (dt, daidt, dvidt, dagedt, offset) real (kind=dbl_kind), intent(in), optional :: & offset ! d(age)/dt time offset = dt for thermo, 0 for dyn - integer (kind=int_kind) :: & - iblk, & ! block index + integer (kind=int_kind) :: & + iblk, & ! block index i,j, & ! horizontal indices ntrcr, & ! nt_iage ! @@ -795,9 +801,9 @@ subroutine update_state (dt, daidt, dvidt, dagedt, offset) do i = 1, nx_block !----------------------------------------------------------------- - ! Aggregate the updated state variables (includes ghost cells). - !----------------------------------------------------------------- - + ! Aggregate the updated state variables (includes ghost cells). + !----------------------------------------------------------------- + ! if (tmask(i,j,iblk)) & call icepack_aggregate(ncat = ncat, & aicen = aicen(i,j,:,iblk), & @@ -856,7 +862,7 @@ end subroutine update_state subroutine step_dyn_wave (dt) - use ice_arrays_column, only: wave_spectrum, wave_sig_ht, & + use ice_arrays_column, only: wave_spectrum, & d_afsd_wave, floe_rad_l, floe_rad_c, wavefreq, dwavefreq use ice_blocks, only: block, get_block use ice_domain, only: blocks_ice, nblocks @@ -876,9 +882,7 @@ subroutine step_dyn_wave (dt) integer (kind=int_kind) :: & ilo,ihi,jlo,jhi, & ! 
beginning and end of physical domain iblk, & ! block index - i, j, & ! horizontal indices - ntrcr, & ! - nbtrcr ! + i, j ! horizontal indices character (len=char_len) :: wave_spec_type @@ -1000,14 +1004,14 @@ subroutine step_dyn_ridge (dt, ndtd, iblk) integer (kind=int_kind), intent(in) :: & ndtd, & ! number of dynamics subcycles - iblk ! block index + iblk ! block index ! local variables type (block) :: & this_block ! block information for current block - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ilo,ihi,jlo,jhi, & ! beginning and end of physical domain i, j, & ! horizontal indices ntrcr, & ! @@ -1127,9 +1131,7 @@ subroutine step_snow (dt, iblk) integer (kind=int_kind) :: & ilo,ihi,jlo,jhi, & ! beginning and end of physical domain i, j, & ! horizontal indices - n, & ! category index - ns, & ! history streams index - ipoint ! index for print diagnostic + ns ! history streams index real (kind=dbl_kind) :: & puny @@ -1142,7 +1144,7 @@ subroutine step_snow (dt, iblk) type (block) :: & this_block ! block information for current block - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -1182,7 +1184,7 @@ subroutine step_snow (dt, iblk) trcrn(i,j,nt_qsno:nt_qsno+nslyr-1,:,iblk), & trcrn(i,j,nt_alvl,:,iblk), & trcrn(i,j,nt_vlvl,:,iblk), & - trcrn(i,j,nt_smice:nt_smice+nslyr-1,:,iblk), & + trcrn(i,j,nt_smice:nt_smice+nslyr-1,:,iblk), & trcrn(i,j,nt_smliq:nt_smliq+nslyr-1,:,iblk), & trcrn(i,j,nt_rsnw:nt_rsnw+nslyr-1,:,iblk), & trcrn(i,j,nt_rhos:nt_rhos+nslyr-1,:,iblk), & @@ -1297,7 +1299,7 @@ subroutine step_radiation (dt, iblk) allocate(ztrcr_sw(nbtrcr_sw,ncat)) allocate(rsnow(nslyr,ncat)) - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -1377,7 +1379,7 @@ subroutine step_radiation (dt, iblk) dhsn =dhsn (i,j,: ,iblk), ffracn =ffracn(i,j,:,iblk), & rsnow =rsnow (:,:), l_print_point=l_print_point) endif - + if (dEdd_algae .and. (tr_zaero .or. tr_bgc_N)) then do n = 1, ncat do k = 1, nbtrcr_sw @@ -1495,24 +1497,24 @@ subroutine ocean_mixed_layer (dt, iblk) j = indxj(ij) call icepack_atm_boundary(sfctype = 'ocn', & - Tsf = sst (i,j,iblk), & + Tsf = sst (i,j,iblk), & potT = potT (i,j,iblk), & uatm = uatmT (i,j,iblk), & vatm = vatmT (i,j,iblk), & - wind = wind (i,j,iblk), & - zlvl = zlvl (i,j,iblk), & - Qa = Qa (i,j,iblk), & + wind = wind (i,j,iblk), & + zlvl = zlvl (i,j,iblk), & + Qa = Qa (i,j,iblk), & rhoa = rhoa (i,j,iblk), & - strx = strairx_ocn(i,j,iblk), & - stry = strairy_ocn(i,j,iblk), & - Tref = Tref_ocn (i,j,iblk), & - Qref = Qref_ocn (i,j,iblk), & - delt = delt (i,j), & + strx = strairx_ocn(i,j,iblk), & + stry = strairy_ocn(i,j,iblk), & + Tref = Tref_ocn (i,j,iblk), & + Qref = Qref_ocn (i,j,iblk), & + delt = delt (i,j), & delq = delq (i,j), & lhcoef = lhcoef (i,j), & shcoef = shcoef (i,j), & - Cdn_atm = Cdn_atm (i,j,iblk), & - Cdn_atm_ratio_n = Cdn_atm_ratio(i,j,iblk)) + Cdn_atm = Cdn_atm (i,j,iblk), & + Cdn_atm_ratio_n = Cdn_atm_ratio(i,j,iblk)) enddo ! 
ij call icepack_warnings_flush(nu_diag) @@ -1576,10 +1578,10 @@ subroutine biogeochemistry (dt, iblk) n_doc, n_dic, n_don, n_fed, n_fep use ice_flux, only: meltbn, melttn, congeln, snoicen, & sst, sss, fsnow, meltsn - use ice_flux_bgc, only: hin_old, flux_bio, flux_bio_atm, faero_atm, & + use ice_flux_bgc, only: hin_old, flux_bio, flux_bio_atm, faero_atm, & nit, amm, sil, dmsp, dms, algalN, doc, don, dic, fed, fep, zaeros, hum use ice_state, only: aicen_init, vicen_init, aicen, vicen, vsnon, & - trcrn, vsnon_init, aice0 + trcrn, vsnon_init, aice0 use ice_timers, only: timer_bgc, ice_timer_start, ice_timer_stop real (kind=dbl_kind), intent(in) :: & @@ -1626,7 +1628,7 @@ subroutine biogeochemistry (dt, iblk) call ice_timer_start(timer_bgc,iblk) ! biogeochemistry - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -1634,7 +1636,7 @@ subroutine biogeochemistry (dt, iblk) ! Define ocean concentrations for tracers used in simulation do j = jlo, jhi - do i = ilo, ihi + do i = ilo, ihi call icepack_load_ocean_bio_array(max_nbtrcr = icepack_max_nbtrcr, & max_algae = icepack_max_algae, max_don = icepack_max_don, & @@ -1650,8 +1652,8 @@ subroutine biogeochemistry (dt, iblk) ocean_bio_all = ocean_bio_all(i,j,:,iblk)) do mm = 1,nbtrcr - ocean_bio(i,j,mm,iblk) = ocean_bio_all(i,j,bio_index_o(mm),iblk) - enddo ! mm + ocean_bio(i,j,mm,iblk) = ocean_bio_all(i,j,bio_index_o(mm),iblk) + enddo ! mm if (tr_zaero) then do mm = 1, n_zaero ! update aerosols flux_bio_atm(i,j,nlt_zaero(mm),iblk) = faero_atm(i,j,mm,iblk) @@ -1686,13 +1688,13 @@ subroutine biogeochemistry (dt, iblk) snow_bio_net = snow_bio_net(i,j,1:nbtrcr, iblk), & fswthrun = fswthrun (i,j,:, iblk), & sice_rho = sice_rho (i,j,:, iblk), & - fzsal = fzsal (i,j, iblk), & + fzsal = fzsal (i,j, iblk), & fzsal_g = fzsal_g (i,j, iblk), & meltbn = meltbn (i,j,:, iblk), & melttn = melttn (i,j,:, iblk), & congeln = congeln (i,j,:, iblk), & - snoicen = snoicen (i,j,:, iblk), & - sst = sst (i,j, iblk), & + snoicen = snoicen (i,j,:, iblk), & + sst = sst (i,j, iblk), & sss = sss (i,j, iblk), & fsnow = fsnow (i,j, iblk), & meltsn = meltsn (i,j,:, iblk), & diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 index 76a7659a6..2b64f8932 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 @@ -7,8 +7,8 @@ module ice_boundary ! ! 2007-07-19: Phil Jones, Yoshi Yoshida, John Dennis ! new naming conventions, optimizations during -! initialization, true multi-dimensional updates -! (rather than serial call to two-dimensional updates), +! initialization, true multi-dimensional updates +! (rather than serial call to two-dimensional updates), ! fixes for non-existent blocks ! 2008-01-28: Elizabeth Hunke replaced old routines with new POP ! infrastructure @@ -98,7 +98,7 @@ module ice_boundary !----------------------------------------------------------------------- ! ! to prevent frequent allocate-deallocate for 2d halo updates, create -! a static 2d buffer to be allocated once at creation. if future +! a static 2d buffer to be allocated once at creation. if future ! creation needs larger buffer, resize during the creation. ! !----------------------------------------------------------------------- @@ -177,9 +177,9 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & seBlock, swBlock, &! 
block id southeast, southwest nbrs srcProc, dstProc, &! source, dest processor locations srcLocalID, dstLocalID, &! local block index of src,dst blocks - maxTmp, &! temp for global maxval - blockSizeX, &! size of default physical domain in X - blockSizeY, &! size of default physical domain in Y + maxTmp, &! temp for global maxval + blockSizeX, &! size of default physical domain in X + blockSizeY, &! size of default physical domain in Y maxSizeSend, maxSizeRecv, &! max buffer sizes numMsgSend, numMsgRecv, &! number of messages for this halo eastMsgSize, westMsgSize, &! nominal sizes for e-w msgs @@ -305,7 +305,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & srcProc, dstProc, msgSize) !*** if a tripole boundary block, also create a local - !*** message into and out of tripole buffer + !*** message into and out of tripole buffer if (tripoleBlock) then !*** copy out of tripole buffer - includes halo @@ -316,7 +316,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & !*** copy in only required if dstProc not same as srcProc if (dstProc /= srcProc) then call ice_HaloIncrementMsgCount(sendCount, recvCount, & - srcProc, srcProc, & + srcProc, srcProc, & msgSize) endif endif @@ -393,7 +393,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & ewBoundaryType, nsBoundaryType) if (neBlock > 0) then - msgSize = cornerMsgSize ! normal corner message + msgSize = cornerMsgSize ! normal corner message call ice_distributionGetBlockLoc(dist, neBlock, dstProc, & dstLocalID) @@ -557,7 +557,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & !----------------------------------------------------------------------- ! -! if messages are received from the same processor, the message is +! if messages are received from the same processor, the message is ! actually a local copy - count them and reset to zero ! !----------------------------------------------------------------------- @@ -585,7 +585,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & ! check to see if they need to be re-sized ! !----------------------------------------------------------------------- - + maxTmp = maxval(sendCount) maxSizeSend = global_maxval(maxTmp, dist) maxTmp = maxval(recvCount) @@ -733,7 +733,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & 'north') !*** if a tripole boundary block, also create a local - !*** message into and out of tripole buffer + !*** message into and out of tripole buffer if (tripoleBlock) then !*** copy out of tripole buffer - includes halo @@ -1102,7 +1102,7 @@ subroutine ice_HaloMask(halo, basehalo, mask) elseif (mask(icel,jcel,abs(nblock)) /= 0) then tmpflag = .true. endif - + if (tmpflag) then scnt = scnt + 1 if (scnt == 1) then @@ -1117,7 +1117,7 @@ subroutine ice_HaloMask(halo, basehalo, mask) endif enddo enddo - halo%numMsgSend = numMsgSend + halo%numMsgSend = numMsgSend numMsgRecv = 0 do nmsg=1,basehalo%numMsgRecv @@ -1134,7 +1134,7 @@ subroutine ice_HaloMask(halo, basehalo, mask) elseif (mask(icel,jcel,abs(nblock)) /= 0) then tmpflag = .true. endif - + if (tmpflag) then scnt = scnt + 1 if (scnt == 1) then @@ -1149,7 +1149,7 @@ subroutine ice_HaloMask(halo, basehalo, mask) endif enddo enddo - halo%numMsgRecv = numMsgRecv + halo%numMsgRecv = numMsgRecv !----------------------------------------------------------------------- @@ -1312,7 +1312,7 @@ subroutine ice_HaloUpdate2DR8(array, halo, & !----------------------------------------------------------------------- ! ! 
while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -1339,7 +1339,7 @@ subroutine ice_HaloUpdate2DR8(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -1409,7 +1409,7 @@ subroutine ice_HaloUpdate2DR8(array, halo, & ! ! take care of northern boundary in tripole case ! bufTripole array contains the top nghost+1 rows (u-fold) or nghost+2 rows -! (T-fold) of physical domain for entire (global) top row +! (T-fold) of physical domain for entire (global) top row ! !----------------------------------------------------------------------- @@ -1430,13 +1430,13 @@ subroutine ice_HaloUpdate2DR8(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 2,nxGlobal/2 iDst = nxGlobal - i + 2 x1 = bufTripoleR8(i ,halo%tripoleRows) @@ -1445,20 +1445,20 @@ subroutine ice_HaloUpdate2DR8(array, halo, & bufTripoleR8(i ,halo%tripoleRows) = xavg bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleR8(i ,halo%tripoleRows) @@ -1467,12 +1467,12 @@ subroutine ice_HaloUpdate2DR8(array, halo, & bufTripoleR8(i ,halo%tripoleRows) = xavg bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -1481,18 +1481,18 @@ subroutine ice_HaloUpdate2DR8(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i x1 = bufTripoleR8(i ,halo%tripoleRows) @@ -1501,20 +1501,20 @@ subroutine ice_HaloUpdate2DR8(array, halo, & bufTripoleR8(i ,halo%tripoleRows) = xavg bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
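! A worked example of the degenerate-row symmetrization above (a
! sketch): for a cell-centered field on the T-fold with nxGlobal = 8,
! the loop pairs i = 2 with iDst = nxGlobal - i + 2 = 8, i = 3 with 7,
! and i = 4 with 6, assigning both members of each pair
!   xavg = p5*(abs(x1) + abs(x2))
! with the mirrored point multiplied by isign (negative for vector
! field types), so the folded row comes out symmetric for scalars and
! antisymmetric for velocity components.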
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleR8(i ,halo%tripoleRows) @@ -1523,7 +1523,7 @@ subroutine ice_HaloUpdate2DR8(array, halo, & bufTripoleR8(i ,halo%tripoleRows) = xavg bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -1730,7 +1730,7 @@ subroutine ice_HaloUpdate2DR4(array, halo, & !----------------------------------------------------------------------- ! ! while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -1752,7 +1752,7 @@ subroutine ice_HaloUpdate2DR4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -1828,13 +1828,13 @@ subroutine ice_HaloUpdate2DR4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 2,nxGlobal/2 iDst = nxGlobal - i + 2 x1 = bufTripoleR4(i ,halo%tripoleRows) @@ -1843,20 +1843,20 @@ subroutine ice_HaloUpdate2DR4(array, halo, & bufTripoleR4(i ,halo%tripoleRows) = xavg bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleR4(i ,halo%tripoleRows) @@ -1865,32 +1865,32 @@ subroutine ice_HaloUpdate2DR4(array, halo, & bufTripoleR4(i ,halo%tripoleRows) = xavg bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select + end select else ! tripole u-fold - + select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i x1 = bufTripoleR4(i ,halo%tripoleRows) @@ -1899,20 +1899,20 @@ subroutine ice_HaloUpdate2DR4(array, halo, & bufTripoleR4(i ,halo%tripoleRows) = xavg bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleR4(i ,halo%tripoleRows) @@ -1921,7 +1921,7 @@ subroutine ice_HaloUpdate2DR4(array, halo, & bufTripoleR4(i ,halo%tripoleRows) = xavg bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -2128,7 +2128,7 @@ subroutine ice_HaloUpdate2DI4(array, halo, & !----------------------------------------------------------------------- ! ! while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -2150,7 +2150,7 @@ subroutine ice_HaloUpdate2DI4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -2226,13 +2226,13 @@ subroutine ice_HaloUpdate2DI4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 2,nxGlobal/2 iDst = nxGlobal - i + 2 x1 = bufTripoleI4(i ,halo%tripoleRows) @@ -2241,20 +2241,20 @@ subroutine ice_HaloUpdate2DI4(array, halo, & bufTripoleI4(i ,halo%tripoleRows) = xavg bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleI4(i ,halo%tripoleRows) @@ -2263,32 +2263,32 @@ subroutine ice_HaloUpdate2DI4(array, halo, & bufTripoleI4(i ,halo%tripoleRows) = xavg bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - else ! tripole u-fold - + else ! tripole u-fold + select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i x1 = bufTripoleI4(i ,halo%tripoleRows) @@ -2297,20 +2297,20 @@ subroutine ice_HaloUpdate2DI4(array, halo, & bufTripoleI4(i ,halo%tripoleRows) = xavg bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleI4(i ,halo%tripoleRows) @@ -2319,7 +2319,7 @@ subroutine ice_HaloUpdate2DI4(array, halo, & bufTripoleI4(i ,halo%tripoleRows) = xavg bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -2451,7 +2451,7 @@ subroutine ice_HaloUpdate3DR8(array, halo, & real (dbl_kind), dimension(:,:,:), allocatable :: & bufTripole ! 3d tripole buffer - integer (int_kind) :: len ! length of message + integer (int_kind) :: len ! length of message character(len=*), parameter :: subname = '(ice_HaloUpdate3DR8)' @@ -2554,7 +2554,7 @@ subroutine ice_HaloUpdate3DR8(array, halo, & !----------------------------------------------------------------------- ! ! while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -2576,7 +2576,7 @@ subroutine ice_HaloUpdate3DR8(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -2665,10 +2665,10 @@ subroutine ice_HaloUpdate3DR8(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -2682,20 +2682,20 @@ subroutine ice_HaloUpdate3DR8(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -2706,32 +2706,32 @@ subroutine ice_HaloUpdate3DR8(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - + end select + else ! tripole u-fold - + select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i @@ -2742,20 +2742,20 @@ subroutine ice_HaloUpdate3DR8(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -2766,7 +2766,7 @@ subroutine ice_HaloUpdate3DR8(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -2908,7 +2908,7 @@ subroutine ice_HaloUpdate3DR4(array, halo, & real (real_kind), dimension(:,:,:), allocatable :: & bufTripole ! 3d tripole buffer - integer (int_kind) :: len ! length of message + integer (int_kind) :: len ! length of message character(len=*), parameter :: subname = '(ice_HaloUpdate3DR4)' @@ -3011,7 +3011,7 @@ subroutine ice_HaloUpdate3DR4(array, halo, & !----------------------------------------------------------------------- ! ! while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -3033,7 +3033,7 @@ subroutine ice_HaloUpdate3DR4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -3122,10 +3122,10 @@ subroutine ice_HaloUpdate3DR4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -3139,20 +3139,20 @@ subroutine ice_HaloUpdate3DR4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -3163,32 +3163,32 @@ subroutine ice_HaloUpdate3DR4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - - else ! tripole u-fold - + end select + + else ! tripole u-fold + select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i @@ -3199,20 +3199,20 @@ subroutine ice_HaloUpdate3DR4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -3223,7 +3223,7 @@ subroutine ice_HaloUpdate3DR4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -3468,7 +3468,7 @@ subroutine ice_HaloUpdate3DI4(array, halo, & !----------------------------------------------------------------------- ! ! while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -3490,7 +3490,7 @@ subroutine ice_HaloUpdate3DI4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -3563,7 +3563,7 @@ subroutine ice_HaloUpdate3DI4(array, halo, & !----------------------------------------------------------------------- if (nxGlobal > 0) then - + select case (fieldKind) case (field_type_scalar) isign = 1 @@ -3579,10 +3579,10 @@ subroutine ice_HaloUpdate3DI4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -3596,20 +3596,20 @@ subroutine ice_HaloUpdate3DI4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -3620,32 +3620,32 @@ subroutine ice_HaloUpdate3DI4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - - else ! tripole u-fold + + else ! tripole u-fold select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i @@ -3656,20 +3656,20 @@ subroutine ice_HaloUpdate3DI4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -3680,11 +3680,11 @@ subroutine ice_HaloUpdate3DI4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - + endif !*** copy out of global tripole buffer into local @@ -3929,7 +3929,7 @@ subroutine ice_HaloUpdate4DR8(array, halo, & !----------------------------------------------------------------------- ! ! while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -3951,7 +3951,7 @@ subroutine ice_HaloUpdate4DR8(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -4034,7 +4034,7 @@ subroutine ice_HaloUpdate4DR8(array, halo, & !----------------------------------------------------------------------- if (nxGlobal > 0) then - + select case (fieldKind) case (field_type_scalar) isign = 1 @@ -4050,10 +4050,10 @@ subroutine ice_HaloUpdate4DR8(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -4069,17 +4069,17 @@ subroutine ice_HaloUpdate4DR8(array, halo, & end do end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -4095,32 +4095,32 @@ subroutine ice_HaloUpdate4DR8(array, halo, & end do end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - - else ! tripole u-fold + end select + + else ! tripole u-fold select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 - 1 @@ -4133,20 +4133,20 @@ subroutine ice_HaloUpdate4DR8(array, halo, & end do end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 @@ -4159,11 +4159,11 @@ subroutine ice_HaloUpdate4DR8(array, halo, & end do end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - + endif !*** copy out of global tripole buffer into local @@ -4410,7 +4410,7 @@ subroutine ice_HaloUpdate4DR4(array, halo, & !----------------------------------------------------------------------- ! ! while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -4432,7 +4432,7 @@ subroutine ice_HaloUpdate4DR4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -4515,7 +4515,7 @@ subroutine ice_HaloUpdate4DR4(array, halo, & !----------------------------------------------------------------------- if (nxGlobal > 0) then - + select case (fieldKind) case (field_type_scalar) isign = 1 @@ -4531,10 +4531,10 @@ subroutine ice_HaloUpdate4DR4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -4550,17 +4550,17 @@ subroutine ice_HaloUpdate4DR4(array, halo, & end do end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -4576,32 +4576,32 @@ subroutine ice_HaloUpdate4DR4(array, halo, & end do end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - - else ! tripole u-fold + end select + + else ! tripole u-fold select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 - 1 @@ -4614,20 +4614,20 @@ subroutine ice_HaloUpdate4DR4(array, halo, & end do end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 @@ -4640,11 +4640,11 @@ subroutine ice_HaloUpdate4DR4(array, halo, & end do end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - + endif !*** copy out of global tripole buffer into local @@ -4891,7 +4891,7 @@ subroutine ice_HaloUpdate4DI4(array, halo, & !----------------------------------------------------------------------- ! ! while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -4913,7 +4913,7 @@ subroutine ice_HaloUpdate4DI4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -5012,10 +5012,10 @@ subroutine ice_HaloUpdate4DI4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -5031,17 +5031,17 @@ subroutine ice_HaloUpdate4DI4(array, halo, & end do end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -5057,32 +5057,32 @@ subroutine ice_HaloUpdate4DI4(array, halo, & end do end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - - else ! tripole u-fold + end select + + else ! tripole u-fold select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 - 1 @@ -5095,20 +5095,20 @@ subroutine ice_HaloUpdate4DI4(array, halo, & end do end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 @@ -5121,11 +5121,11 @@ subroutine ice_HaloUpdate4DI4(array, halo, & end do end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - + endif !*** copy out of global tripole buffer into local @@ -5354,7 +5354,7 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! 
if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -5404,7 +5404,7 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & ! ! take care of northern boundary in tripole case ! bufTripole array contains the top haloWidth+1 rows of physical -! domain for entire (global) top row +! domain for entire (global) top row ! !----------------------------------------------------------------------- @@ -5432,12 +5432,12 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & ioffset = 1 joffset = 1 - case (field_loc_Eface) + case (field_loc_Eface) ioffset = 1 joffset = 0 - case (field_loc_Nface) + case (field_loc_Nface) ioffset = 0 joffset = 1 @@ -5468,7 +5468,7 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & iSrc = iSrc - ioffset jSrc = jSrc - joffset if (iSrc == 0) iSrc = nxGlobal - + !*** for center and Eface, do not need to replace !*** top row of physical domain, so jSrc should be !*** out of range and skipped @@ -5537,7 +5537,7 @@ subroutine ice_HaloIncrementMsgCount(sndCounter, rcvCounter, & !----------------------------------------------------------------------- ! -! if destination all land or outside closed boundary (dstProc = 0), +! if destination all land or outside closed boundary (dstProc = 0), ! then no send is necessary, so do the rest only for dstProc /= 0 ! !----------------------------------------------------------------------- @@ -5546,7 +5546,7 @@ subroutine ice_HaloIncrementMsgCount(sndCounter, rcvCounter, & !----------------------------------------------------------------------- ! -! if the current processor is the source, must send data +! if the current processor is the source, must send data ! local copy if dstProc = srcProc ! !----------------------------------------------------------------------- @@ -5556,14 +5556,14 @@ subroutine ice_HaloIncrementMsgCount(sndCounter, rcvCounter, & !----------------------------------------------------------------------- ! -! if the current processor is the destination, must receive data +! if the current processor is the destination, must receive data ! local copy if dstProc = srcProc ! !----------------------------------------------------------------------- if (dstProc == my_task + 1) then - if (srcProc > 0) then + if (srcProc > 0) then !*** the source block has ocean points !*** count as a receive from srcProc @@ -5631,7 +5631,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & !----------------------------------------------------------------------- ! -! if destination all land or outside closed boundary (dstProc = 0), +! if destination all land or outside closed boundary (dstProc = 0), ! then no send is necessary, so do the rest only for dstProc /= 0 ! !----------------------------------------------------------------------- @@ -5673,7 +5673,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & !----------------------------------------------------------------------- if (srcProc == my_task+1 .and. 
& - dstProc == my_task+1) then + dstProc == my_task+1) then !*** compute addresses based on direction @@ -5764,7 +5764,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & if (jeSrc - jbSrc + 1 < halo%tripoleRows) then call abort_ice(subname//'ERROR: not enough points in block for tripole') return - endif + endif do j=1,halo%tripoleRows do i=1,ieSrc-ibSrc+1 @@ -5784,7 +5784,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & else if (srcBlock < 0 .and. dstBlock > 0) then - !*** tripole grid - set up for copying out of + !*** tripole grid - set up for copying out of !*** tripole buffer into ghost cell domains !*** include e-w ghost cells @@ -5950,12 +5950,12 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & !----------------------------------------------------------------------- ! -! if dest block is local and source block does not exist, create a +! if dest block is local and source block does not exist, create a ! local copy to fill halo with a fill value ! !----------------------------------------------------------------------- - else if (srcProc == 0 .and. dstProc == my_task+1) then + else if (srcProc == 0 .and. dstProc == my_task+1) then msgIndx = halo%numLocalCopies @@ -6184,7 +6184,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & bufSize = halo%sizeSend(n) exit srchSend endif - end do srchSend + end do srchSend if (msgIndx == 0) then msgIndx = halo%numMsgSend + 1 @@ -6255,7 +6255,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & halo%sizeSend(msgIndx) = bufSize - else + else !*** tripole block - send top halo%tripoleRows rows of phys domain @@ -6318,7 +6318,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & halo%sizeSend(msgIndx) = bufSize - else + else !*** tripole block - send top halo%tripoleRows rows of phys domain @@ -6361,7 +6361,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & halo%sizeSend(msgIndx) = bufSize - else + else !*** tripole block - send top halo%tripoleRows rows of phys domain @@ -6447,7 +6447,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & bufSize = halo%sizeRecv(n) exit srchRecv endif - end do srchRecv + end do srchRecv if (msgIndx == 0) then msgIndx = halo%numMsgRecv + 1 @@ -6705,14 +6705,14 @@ end subroutine ice_HaloMsgCreate subroutine ice_HaloExtrapolate2DR8(ARRAY,dist,ew_bndy_type,ns_bndy_type) -! This subroutine extrapolates ARRAY values into the first row or column -! of ghost cells, and is intended for grid variables whose ghost cells -! would otherwise be set using the default boundary conditions (Dirichlet +! This subroutine extrapolates ARRAY values into the first row or column +! of ghost cells, and is intended for grid variables whose ghost cells +! would otherwise be set using the default boundary conditions (Dirichlet ! or Neumann). ! Note: This routine will need to be modified for nghost > 1. ! We assume padding occurs only on east and north edges. ! -! This is the specific interface for double precision arrays +! This is the specific interface for double precision arrays ! 
corresponding to the generic interface ice_HaloExtrapolate use ice_blocks, only: block, nblocks_x, nblocks_y, get_block diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_communicate.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_communicate.F90 index 00f427144..fab0c9218 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_communicate.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_communicate.F90 @@ -72,7 +72,7 @@ subroutine init_communicate(mpicom) if (present(mpicom)) then ice_comm = mpicom else - ice_comm = MPI_COMM_WORLD ! Global communicator + ice_comm = MPI_COMM_WORLD ! Global communicator endif call MPI_INITIALIZED(flag,ierr) diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_exit.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_exit.F90 index 061fd63c5..eafb3228f 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_exit.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_exit.F90 @@ -1,6 +1,6 @@ !======================================================================= ! -! Exit the model. +! Exit the model. ! authors William H. Lipscomb (LANL) ! Elizabeth C. Hunke (LANL) ! 2006 ECH: separated serial and mpi functionality diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_gather_scatter.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_gather_scatter.F90 index 0a58769db..030deabca 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_gather_scatter.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_gather_scatter.F90 @@ -55,13 +55,13 @@ module ice_gather_scatter module procedure gather_global_dbl, & gather_global_real, & gather_global_int - end interface + end interface interface scatter_global module procedure scatter_global_dbl, & scatter_global_real, & scatter_global_int - end interface + end interface !----------------------------------------------------------------------- ! @@ -80,7 +80,7 @@ subroutine gather_global_dbl(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) ! This subroutine gathers a distributed array to a global-sized ! array on the processor dst_task. ! -! This is the specific inteface for double precision arrays +! This is the specific inteface for double precision arrays ! corresponding to the generic interface gather_global. It is shown ! to provide information on the generic interface (the generic ! interface is identical, but chooses a specific inteface based @@ -141,7 +141,7 @@ subroutine gather_global_dbl(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) !----------------------------------------------------------------------- ! -! if this task is the dst_task, copy local blocks into the global +! if this task is the dst_task, copy local blocks into the global ! array and post receives for non-local blocks. ! !----------------------------------------------------------------------- @@ -308,7 +308,7 @@ subroutine gather_global_real(ARRAY_G, ARRAY, dst_task, src_dist) !----------------------------------------------------------------------- ! -! if this task is the dst_task, copy local blocks into the global +! if this task is the dst_task, copy local blocks into the global ! array and post receives for non-local blocks. ! !----------------------------------------------------------------------- @@ -475,7 +475,7 @@ subroutine gather_global_int(ARRAY_G, ARRAY, dst_task, src_dist) !----------------------------------------------------------------------- ! -! if this task is the dst_task, copy local blocks into the global +! if this task is the dst_task, copy local blocks into the global ! 
array and post receives for non-local blocks. ! !----------------------------------------------------------------------- @@ -597,7 +597,7 @@ subroutine gather_global_ext_dbl(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) real (dbl_kind), intent(in), optional :: & spc_val - + !----------------------------------------------------------------------- ! ! local variables @@ -643,7 +643,7 @@ subroutine gather_global_ext_dbl(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) !----------------------------------------------------------------------- ! -! if this task is the dst_task, copy local blocks into the global +! if this task is the dst_task, copy local blocks into the global ! array and post receives for non-local blocks. ! !----------------------------------------------------------------------- @@ -907,7 +907,7 @@ subroutine gather_global_ext_int(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) integer (int_kind), intent(in), optional :: & spc_val - + !----------------------------------------------------------------------- ! ! local variables @@ -953,7 +953,7 @@ subroutine gather_global_ext_int(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) !----------------------------------------------------------------------- ! -! if this task is the dst_task, copy local blocks into the global +! if this task is the dst_task, copy local blocks into the global ! array and post receives for non-local blocks. ! !----------------------------------------------------------------------- @@ -1217,7 +1217,7 @@ subroutine gather_global_ext_log(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) logical (log_kind), intent(in), optional :: & spc_val - + !----------------------------------------------------------------------- ! ! local variables @@ -1263,7 +1263,7 @@ subroutine gather_global_ext_log(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) !----------------------------------------------------------------------- ! -! if this task is the dst_task, copy local blocks into the global +! if this task is the dst_task, copy local blocks into the global ! array and post receives for non-local blocks. ! !----------------------------------------------------------------------- @@ -1513,7 +1513,7 @@ subroutine scatter_global_dbl(ARRAY, ARRAY_G, src_task, dst_dist, & ! This subroutine scatters a global-sized array to a distributed array. ! -! This is the specific interface for double precision arrays +! This is the specific interface for double precision arrays ! corresponding to the generic interface scatter_global. integer (int_kind), intent(in) :: & @@ -1552,9 +1552,6 @@ subroutine scatter_global_dbl(ARRAY, ARRAY_G, src_task, dst_dist, & type (block) :: & this_block ! block info for current block - integer (int_kind), dimension(MPI_STATUS_SIZE) :: & - status - integer (int_kind), dimension(:), allocatable :: & rcv_request ! request array for receives @@ -1628,7 +1625,7 @@ subroutine scatter_global_dbl(ARRAY, ARRAY_G, src_task, dst_dist, & !----------------------------------------------------------------------- ! -! if this task is the src_task, copy blocks of global array into +! if this task is the src_task, copy blocks of global array into ! message buffer and send to other processors. also copy local blocks ! !----------------------------------------------------------------------- @@ -1941,9 +1938,6 @@ subroutine scatter_global_real(ARRAY, ARRAY_G, src_task, dst_dist, & type (block) :: & this_block ! 
block info for current block - integer (int_kind), dimension(MPI_STATUS_SIZE) :: & - status - integer (int_kind), dimension(:), allocatable :: & rcv_request ! request array for receives @@ -2017,7 +2011,7 @@ subroutine scatter_global_real(ARRAY, ARRAY_G, src_task, dst_dist, & !----------------------------------------------------------------------- ! -! if this task is the src_task, copy blocks of global array into +! if this task is the src_task, copy blocks of global array into ! message buffer and send to other processors. also copy local blocks ! !----------------------------------------------------------------------- @@ -2330,9 +2324,6 @@ subroutine scatter_global_int(ARRAY, ARRAY_G, src_task, dst_dist, & type (block) :: & this_block ! block info for current block - integer (int_kind), dimension(MPI_STATUS_SIZE) :: & - status - integer (int_kind), dimension(:), allocatable :: & rcv_request ! request array for receives @@ -2406,7 +2397,7 @@ subroutine scatter_global_int(ARRAY, ARRAY_G, src_task, dst_dist, & !----------------------------------------------------------------------- ! -! if this task is the src_task, copy blocks of global array into +! if this task is the src_task, copy blocks of global array into ! message buffer and send to other processors. also copy local blocks ! !----------------------------------------------------------------------- @@ -2666,7 +2657,7 @@ subroutine scatter_global_ext(ARRAY, ARRAY_G, src_task, dst_dist) ! This subroutine scatters a global-sized array to a distributed array. ! -! This is the specific interface for double precision arrays +! This is the specific interface for double precision arrays ! corresponding to the generic interface scatter_global. integer (int_kind), intent(in) :: & @@ -2698,9 +2689,6 @@ subroutine scatter_global_ext(ARRAY, ARRAY_G, src_task, dst_dist) type (block) :: & this_block ! block info for current block - integer (int_kind), dimension(MPI_STATUS_SIZE) :: & - status - integer (int_kind), dimension(:), allocatable :: & rcv_request ! request array for receives @@ -2722,7 +2710,7 @@ subroutine scatter_global_ext(ARRAY, ARRAY_G, src_task, dst_dist) !----------------------------------------------------------------------- ! -! if this task is the src_task, copy blocks of global array into +! if this task is the src_task, copy blocks of global array into ! message buffer and send to other processors. also copy local blocks ! !----------------------------------------------------------------------- @@ -3034,9 +3022,6 @@ subroutine scatter_global_stress(ARRAY, ARRAY_G1, ARRAY_G2, & type (block) :: & this_block ! block info for current block - integer (int_kind), dimension(MPI_STATUS_SIZE) :: & - status - integer (int_kind), dimension(:), allocatable :: & rcv_request ! request array for receives @@ -3058,17 +3043,17 @@ subroutine scatter_global_stress(ARRAY, ARRAY_G1, ARRAY_G2, & this_block = get_block(1,1) ! for the tripoleTflag - all blocks have it if (this_block%tripoleTFlag) then - xoffset = 2 ! treat stresses as cell-centered scalars (they are not + xoffset = 2 ! treat stresses as cell-centered scalars (they are not yoffset = 0 ! shared with neighboring grid cells) else - xoffset = 1 ! treat stresses as cell-centered scalars (they are not + xoffset = 1 ! treat stresses as cell-centered scalars (they are not yoffset = 1 ! shared with neighboring grid cells) endif isign = 1 !----------------------------------------------------------------------- ! -! if this task is the src_task, copy blocks of global array into +! 
if this task is the src_task, copy blocks of global array into ! message buffer and send to other processors. also copy local blocks ! !----------------------------------------------------------------------- diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 index 0728ac105..a5fed760b 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 @@ -533,7 +533,6 @@ function global_sum_scalar_dbl(scalar, dist) & !----------------------------------------------------------------------- integer (int_kind) :: & - ierr, &! mpi error flag numProcs, &! number of processor participating numBlocks, &! number of local blocks communicator ! communicator for this distribution @@ -603,7 +602,6 @@ function global_sum_scalar_real(scalar, dist) & !----------------------------------------------------------------------- integer (int_kind) :: & - ierr, &! mpi error flag numProcs, &! number of processor participating numBlocks, &! number of local blocks communicator ! communicator for this distribution @@ -714,7 +712,7 @@ end function global_sum_scalar_int function global_allreduce_sum_vector_dbl(vector, dist) & result(globalSums) -! Computes the global sums of sets of scalars (elements of 'vector') +! Computes the global sums of sets of scalars (elements of 'vector') ! distributed across a parallel machine. ! ! This is actually the specific interface for the generic global_allreduce_sum @@ -737,7 +735,6 @@ function global_allreduce_sum_vector_dbl(vector, dist) & !----------------------------------------------------------------------- integer (int_kind) :: & - ierr, &! mpi error flag numProcs, &! number of processor participating numBlocks, &! number of local blocks communicator, &! communicator for this distribution @@ -781,9 +778,9 @@ function global_sum_prod_dbl (array1, array2, dist, field_loc, & ! Computes the global sum of the physical domain of a product of ! two 2-d arrays. ! -! This is actually the specific interface for the generic +! This is actually the specific interface for the generic ! global_sum_prod function corresponding to double precision arrays. -! The generic interface is identical but will handle real and integer +! The generic interface is identical but will handle real and integer ! 2-d slabs. real (dbl_kind), dimension(:,:,:), intent(in) :: & @@ -919,9 +916,9 @@ function global_sum_prod_real (array1, array2, dist, field_loc, & ! Computes the global sum of the physical domain of a product of ! two 2-d arrays. ! -! This is actually the specific interface for the generic +! This is actually the specific interface for the generic ! global_sum_prod function corresponding to single precision arrays. -! The generic interface is identical but will handle real and integer +! The generic interface is identical but will handle real and integer ! 2-d slabs. real (real_kind), dimension(:,:,:), intent(in) :: & @@ -1057,9 +1054,9 @@ function global_sum_prod_int (array1, array2, dist, field_loc, & ! Computes the global sum of the physical domain of a product of ! two 2-d arrays. ! -! This is actually the specific interface for the generic +! This is actually the specific interface for the generic ! global_sum_prod function corresponding to integer arrays. -! The generic interface is identical but will handle real and integer +! The generic interface is identical but will handle real and integer ! 2-d slabs. 
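The three global_sum_prod_* specific interfaces above all reduce to the same shape: a masked local sum of the elementwise product, followed by a single reduction across the distribution. Below is a minimal standalone sketch of that shape, assuming a plain MPI_SUM Allreduce in place of CICE's distribution-aware machinery; the program and variable names (sum_prod_sketch, lMask, nx, ny) are illustrative, not CICE identifiers.

   program sum_prod_sketch
      use mpi
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      integer, parameter :: nx = 4, ny = 4
      real (dbl) :: array1(nx,ny), array2(nx,ny), localSum, globalSum
      logical    :: lMask(nx,ny)
      integer    :: ierr

      call MPI_Init(ierr)
      array1 = 2.0_dbl
      array2 = 0.5_dbl
      lMask  = .true.      ! stands in for the physical-domain mask
      ! local masked sum of products, then one floating-point reduction
      localSum = sum(array1*array2, mask=lMask)
      call MPI_Allreduce(localSum, globalSum, 1, MPI_DOUBLE_PRECISION, &
                         MPI_SUM, MPI_COMM_WORLD, ierr)
      print *, 'global sum of products =', globalSum
      call MPI_Finalize(ierr)
   end program sum_prod_sketch

A floating-point MPI_SUM like this is cheap but not bit-for-bit reproducible across decompositions, which is the motivation for the reprosum and ddpdd options selected in compute_sums_dbl later in this file.
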
integer (int_kind), dimension(:,:,:), intent(in) :: & @@ -1198,7 +1195,7 @@ function global_maxval_dbl (array, dist, lMask) & ! Computes the global maximum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to double precision arrays. +! function corresponding to double precision arrays. real (dbl_kind), dimension(:,:,:), intent(in) :: & array ! array for which max value needed @@ -1305,7 +1302,7 @@ function global_maxval_real (array, dist, lMask) & ! Computes the global maximum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to single precision arrays. +! function corresponding to single precision arrays. real (real_kind), dimension(:,:,:), intent(in) :: & array ! array for which max value needed @@ -1412,7 +1409,7 @@ function global_maxval_int (array, dist, lMask) & ! Computes the global maximum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to integer arrays. +! function corresponding to integer arrays. integer (int_kind), dimension(:,:,:), intent(in) :: & array ! array for which max value needed @@ -1520,7 +1517,7 @@ function global_maxval_scalar_dbl (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to double precision scalars. +! function corresponding to double precision scalars. real (dbl_kind), intent(in) :: & scalar ! scalar for which max value needed @@ -1578,7 +1575,7 @@ function global_maxval_scalar_real (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to single precision scalars. +! function corresponding to single precision scalars. real (real_kind), intent(in) :: & scalar ! scalar for which max value needed @@ -1636,7 +1633,7 @@ function global_maxval_scalar_int (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to single precision scalars. +! function corresponding to single precision scalars. integer (int_kind), intent(in) :: & scalar ! scalar for which max value needed @@ -1694,7 +1691,7 @@ function global_maxval_scalar_int_nodist (scalar, communicator) & ! a communicator. This method supports testing. ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to single precision scalars. +! function corresponding to single precision scalars. integer (int_kind), intent(in) :: & scalar ! scalar for which max value needed @@ -1743,7 +1740,7 @@ function global_minval_dbl (array, dist, lMask) & ! Computes the global minimum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_minval -! function corresponding to double precision arrays. +! function corresponding to double precision arrays. real (dbl_kind), dimension(:,:,:), intent(in) :: & array ! array for which min value needed @@ -1850,7 +1847,7 @@ function global_minval_real (array, dist, lMask) & ! Computes the global minimum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_minval -! function corresponding to single precision arrays. +! function corresponding to single precision arrays. 
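The global_maxval_* and global_minval_* specific interfaces follow the companion pattern: a masked local extremum combined across tasks with MPI_MAX or MPI_MIN. A minimal sketch follows, again with illustrative (non-CICE) names; it shows the empty-mask behavior that makes a sentinel test possible, since Fortran's maxval over an all-false mask returns -huge(field), so a caller can screen out regions with no points by testing against a large negative threshold (minval is symmetric, returning +huge(field)).

   program maxval_sketch
      use mpi
      implicit none
      integer, parameter :: dbl = selected_real_kind(13)
      real (dbl) :: field(8), localMax, globalMax
      logical    :: lMask(8)
      integer    :: ierr

      call MPI_Init(ierr)
      field = 1.0_dbl
      lMask = .false.                       ! e.g. this hemisphere is absent
      localMax = maxval(field, mask=lMask)  ! empty mask => -huge(field)
      call MPI_Allreduce(localMax, globalMax, 1, MPI_DOUBLE_PRECISION, &
                         MPI_MAX, MPI_COMM_WORLD, ierr)
      ! screen the undefined-region sentinel before using the result
      if (globalMax < -0.9_dbl*huge(0.0_dbl)) globalMax = 0.0_dbl
      call MPI_Finalize(ierr)
   end program maxval_sketch
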
real (real_kind), dimension(:,:,:), intent(in) :: & array ! array for which min value needed @@ -1957,7 +1954,7 @@ function global_minval_int (array, dist, lMask) & ! Computes the global minimum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_minval -! function corresponding to integer arrays. +! function corresponding to integer arrays. integer (int_kind), dimension(:,:,:), intent(in) :: & array ! array for which min value needed @@ -2065,7 +2062,7 @@ function global_minval_scalar_dbl (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_minval -! function corresponding to double precision scalars. +! function corresponding to double precision scalars. real (dbl_kind), intent(in) :: & scalar ! scalar for which min value needed @@ -2123,7 +2120,7 @@ function global_minval_scalar_real (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_minval -! function corresponding to single precision scalars. +! function corresponding to single precision scalars. real (real_kind), intent(in) :: & scalar ! scalar for which min value needed @@ -2181,7 +2178,7 @@ function global_minval_scalar_int (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_minval -! function corresponding to single precision scalars. +! function corresponding to single precision scalars. integer (int_kind), intent(in) :: & scalar ! scalar for which min value needed @@ -2239,7 +2236,7 @@ function global_minval_scalar_int_nodist (scalar, communicator) & ! a communicator. This method supports testing. ! ! This is actually the specific interface for the generic global_minval -! function corresponding to single precision scalars. +! function corresponding to single precision scalars. integer (int_kind), intent(in) :: & scalar ! scalar for which min value needed @@ -2299,7 +2296,7 @@ subroutine compute_sums_dbl(array2,sums8,mpicomm,numprocs) ! reprosum = fixed point method based on ordered double integer sums. ! that requires two scalar reductions per global sum. ! This is extremely likely to be bfb. -! (See Mirin and Worley, 2012, IJHPCA, 26, 1730, +! (See Mirin and Worley, 2012, IJHPCA, 26, 1730, ! https://journals.sagepub.com/doi/10.1177/1094342011412630) ! ddpdd = parallel double-double algorithm using single scalar reduction. ! This is very likely to be bfb. diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_reprosum.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_reprosum.F90 index 27f66f712..8c6f90363 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_reprosum.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_reprosum.F90 @@ -3,34 +3,34 @@ MODULE ice_reprosum -!----------------------------------------------------------------------- -! -! Purpose: -!> Compute reproducible global sums of a set of arrays across an MPI +!----------------------------------------------------------------------- +! +! Purpose: +!> Compute reproducible global sums of a set of arrays across an MPI !> subcommunicator ! -! Methods: +! 
Methods:
 !> Compute using either or both a scalable, reproducible algorithm and a
 !> scalable, nonreproducible algorithm:
-!> * Reproducible (scalable): 
+!> * Reproducible (scalable):
 !> Convert to fixed point (integer vector representation) to enable
 !> reproducibility when using MPI_Allreduce
 !> * Alternative usually reproducible (scalable):
-!> Use parallel double-double algorithm due to Helen He and 
+!> Use parallel double-double algorithm due to Helen He and
 !> Chris Ding, based on David Bailey's/Don Knuth's DDPDD algorithm
-!> * Nonreproducible (scalable): 
+!> * Nonreproducible (scalable):
 !> Floating point and MPI_Allreduce based.
-!> If computing both reproducible and nonreproducible sums, compare 
+!> If computing both reproducible and nonreproducible sums, compare
 !> these and report relative difference (if absolute difference
 !> less than sum) or absolute difference back to calling routine.
 !
-! Author: P. Worley (based on suggestions from J. White for fixed 
-! point algorithm and on He/Ding paper for ddpdd 
+! Author: P. Worley (based on suggestions from J. White for fixed
+! point algorithm and on He/Ding paper for ddpdd
 ! algorithm)
 !
 ! Modified by T.Craig for CICE, March 2019 based on the public version in
 ! Oasis3-MCT_4.0.
-! 
+!
 !-----------------------------------------------------------------------
 !-----------------------------------------------------------------------
@@ -65,7 +65,7 @@ MODULE ice_reprosum
 public :: &
 ice_reprosum_setopts, &! set runtime options
 ice_reprosum_calc, &! calculate distributed sum
- ice_reprosum_tolExceeded ! utility function to check relative 
+ ice_reprosum_tolExceeded ! utility function to check relative
 ! differences against the tolerance
 !-----------------------------------------------------------------------
@@ -87,14 +87,14 @@ MODULE ice_reprosum
 !-----------------------------------------------------------------------
 logical :: repro_sum_use_ddpdd = .false.
- logical :: detailed_timing = .false.
+! logical :: detailed_timing = .false.
 character(len=char_len_long) :: tmpstr
 CONTAINS
 !========================================================================
-!----------------------------------------------------------------------- 
-! Purpose: 
+!-----------------------------------------------------------------------
+! Purpose:
 !> Set runtime options
 ! Author: P. Worley
 !-----------------------------------------------------------------------
@@ -109,11 +109,11 @@ subroutine ice_reprosum_setopts(repro_sum_use_ddpdd_in, &
 logical, intent(in), optional :: repro_sum_use_ddpdd_in
 !< Use DDPDD algorithm instead of fixed precision algorithm
 real(r8), intent(in), optional :: repro_sum_rel_diff_max_in
- !< maximum permissible difference between reproducible and 
+ !< maximum permissible difference between reproducible and
 !< nonreproducible sums
 logical, intent(in), optional :: repro_sum_recompute_in
- !< recompute using different algorithm when difference between 
- !< reproducible and nonreproducible sums is too great 
+ !< recompute using different algorithm when difference between
+ !< reproducible and nonreproducible sums is too great
 logical, intent(in), optional :: repro_sum_master
 !< flag indicating whether this process should output
 !< log messages
@@ -187,10 +187,10 @@ end subroutine ice_reprosum_setopts
 !========================================================================
-! 
-! Purpose: 
-!> Compute the global sum of each field in "arr" using the indicated 
-!> communicator with a reproducible yet scalable implementation based 
+!
+! Purpose:
+!> Compute the global sum of each field in "arr" using the indicated
+!> communicator with a reproducible yet scalable implementation based
 !> on a fixed point algorithm. An alternative is to use an "almost
 !> always reproducible" floating point algorithm.
 !
@@ -198,65 +198,65 @@ end subroutine ice_reprosum_setopts
 ! number of "levels" of integer expansion. The algorithm will calculate
 ! the number of levels that is required for the sum to be essentially
 ! exact. The optional parameter arr_max_levels can be used to override
-! the calculated value. The optional parameter arr_max_levels_out can be 
+! the calculated value. The optional parameter arr_max_levels_out can be
 ! used to return the values used.
 !
-! The algorithm also requires an upper bound on 
-! the maximum summand (in absolute value) for each field, and will 
+! The algorithm also requires an upper bound on
+! the maximum summand (in absolute value) for each field, and will
 ! calculate this internally. However, if the optional parameters
 ! arr_max_levels and arr_gbl_max are both set, then the algorithm will
 ! use the values in arr_gbl_max for the upper bounds instead. If these
 ! are not upper bounds, or if the upper bounds are not tight enough
 ! to achieve the requisite accuracy, and if the optional parameter
 ! repro_sum_validate is NOT set to .false., the algorithm will repeat the
-! computation with appropriate upper bounds. If only arr_gbl_max is present, 
+! computation with appropriate upper bounds. If only arr_gbl_max is present,
 ! then the maxima are computed internally (and the specified values are
-! ignored). The optional parameter arr_gbl_max_out can be 
+! ignored). The optional parameter arr_gbl_max_out can be
 ! used to return the values used.
 !
 ! Finally, the algorithm requires an upper bound on the number of
-! local summands across all processes. This will be calculated internally, 
-! using an MPI collective, but the value in the optional argument 
+! local summands across all processes. This will be calculated internally,
+! using an MPI collective, but the value in the optional argument
 ! gbl_max_nsummands will be used instead if (1) it is present, (2)
-! it is > 0, and (3) the maximum value and required number of levels 
-! are also specified. (If the maximum value is calculated, the same 
-! MPI collective is used to determine the maximum number of local 
-! summands.) The accuracy of the user-specified value is not checked. 
-! However, if set to < 1, the value will instead be calculated. If the 
-! optional parameter gbl_max_nsummands_out is present, then the value 
-! used (gbl_max_nsummands if >= 1; calculated otherwise) will be 
+! it is > 0, and (3) the maximum value and required number of levels
+! are also specified. (If the maximum value is calculated, the same
+! MPI collective is used to determine the maximum number of local
+! summands.) The accuracy of the user-specified value is not checked.
+! However, if set to < 1, the value will instead be calculated. If the
+! optional parameter gbl_max_nsummands_out is present, then the value
+! used (gbl_max_nsummands if >= 1; calculated otherwise) will be
 ! returned.
 !
 ! If requested (by setting ice_reprosum_reldiffmax >= 0.0 and passing in
-! the optional rel_diff parameter), results are compared with a 
-! nonreproducible floating point algorithm. 
+! the optional rel_diff parameter), results are compared with a
+! nonreproducible floating point algorithm.
 !
-! Note that the cost of the algorithm is not strongly correlated with 
 ! the number of levels, which primarily shows up as a (modest) increase
-! in cost of the MPI_Allreduce as a function of vector length. Rather the 
-! cost is more a function of (a) the number of integers required to 
+! in cost of the MPI_Allreduce as a function of vector length. Rather the
+! cost is more a function of (a) the number of integers required to
 ! represent an individual summand and (b) the number of MPI_Allreduce
-! calls. The number of integers required to represent an individual 
-! summand is 1 or 2 when using 8-byte integers for 8-byte real summands 
-! when the number of local summands is not too large. As the number of 
-! local summands increases, the number of integers required increases. 
+! calls. The number of integers required to represent an individual
+! summand is 1 or 2 when using 8-byte integers for 8-byte real summands
+! when the number of local summands is not too large. As the number of
+! local summands increases, the number of integers required increases.
 ! The number of MPI_Allreduce calls is either 2 (specifying nothing) or
-! 1 (specifying gbl_max_nsummands, arr_max_levels, and arr_gbl_max 
-! correctly). When specifying arr_max_levels and arr_gbl_max 
+! 1 (specifying gbl_max_nsummands, arr_max_levels, and arr_gbl_max
+! correctly). When specifying arr_max_levels and arr_gbl_max
 ! incorrectly, 3 or 4 MPI_Allreduce calls will be required.
 !
 ! The alternative algorithm is a minor modification of a parallel
 ! implementation of David Bailey's routine DDPDD by Helen He
 ! and Chris Ding. Bailey uses the Knuth trick to implement quadruple
-! precision summation of double precision values with 10 double 
-! precision operations. The advantage of this algorithm is that 
+! precision summation of double precision values with 10 double
+! precision operations. The advantage of this algorithm is that
 ! it requires a single MPI_Allreduce and is less expensive per summand
-! than is the fixed precision algorithm. The disadvantage is that it 
-! is not guaranteed to be reproducible (though it is reproducible 
-! much more often than is the standard algorithm). This alternative 
-! is used when the optional parameter ddpdd_sum is set to .true. It is 
-! also used if the fixed precision algorithm radix assumption does not 
-! hold. 
+! than is the fixed precision algorithm. The disadvantage is that it
+! is not guaranteed to be reproducible (though it is reproducible
+! much more often than is the standard algorithm). This alternative
+! is used when the optional parameter ddpdd_sum is set to .true. It is
+! also used if the fixed precision algorithm radix assumption does not
+! hold.
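!-----------------------------------------------------------------------
! [Editor's illustrative sketch -- not part of the patch] A minimal,
! serial Fortran rendering of the Knuth two-sum trick that underlies the
! DDPDD alternative described above: the running sum is carried as a
! (leading, error) pair, and each addition recovers the rounding error
! exactly and folds it back in on the next step. The program name and
! demo values are hypothetical; in ice_reprosum_ddpdd the pair is packed
! into a complex(r8) and combined across processes in a single
! MPI_Allreduce, with subroutine DDPDD (further below) supplying the
! pairwise addition ddb(i) = dda(i) + ddb(i).
!-----------------------------------------------------------------------
program dd_sum_sketch
   integer, parameter :: r8 = selected_real_kind(13)
   real(r8) :: arr(4)            ! summands chosen to defeat naive summation
   real(r8) :: hi, lo            ! double-double accumulator (sum, error)
   real(r8) :: t1, t2, e
   integer  :: i
   arr = (/ 1.0e16_r8, 1.0_r8, -1.0e16_r8, 1.0_r8 /)
   hi = 0.0_r8
   lo = 0.0_r8
   do i = 1, size(arr)
      t1 = arr(i) + hi                            ! rounded two-sum
      e  = t1 - arr(i)                            ! what was actually added
      t2 = ((hi - e) + (arr(i) - (t1 - e))) + lo  ! exact rounding error
      hi = t1 + t2                                ! renormalize the pair
      lo = t2 - (hi - t1)
   enddo
   print *, 'naive sum =', sum(arr)  ! typically 1.0: the small terms vanish
   print *, 'dd sum    =', hi + lo   ! 2.0: the errors were carried along
end program dd_sum_sketch
!-----------------------------------------------------------------------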
!---------------------------------------------------------------------- subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & @@ -273,10 +273,10 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & integer, intent(in) :: nsummands !< number of local summands integer, intent(in) :: dsummands !< declared first dimension integer, intent(in) :: nflds !< number of fields - real(r8), intent(in) :: arr(dsummands,nflds) + real(r8), intent(in) :: arr(dsummands,nflds) !< input array - real(r8), intent(out):: arr_gsum(nflds) + real(r8), intent(out):: arr_gsum(nflds) !< global means logical, intent(in), optional :: ddpdd_sum @@ -287,7 +287,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & !< upper bound on max(abs(arr)) real(r8), intent(out), optional :: arr_gbl_max_out(nflds) - !< calculated upper bound on + !< calculated upper bound on !< max(abs(arr)) integer, intent(in), optional :: arr_max_levels(nflds) @@ -306,13 +306,13 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & !< calculated maximum nsummands !< over all processes - integer, intent(in), optional :: gbl_count + integer, intent(in), optional :: gbl_count !< was total number of summands; - !< now is ignored; use + !< now is ignored; use !< gbl_max_nsummands instead logical, intent(in), optional :: repro_sum_validate - !< flag enabling/disabling testing that gmax and max_levels are + !< flag enabling/disabling testing that gmax and max_levels are !< accurate/sufficient. Default is enabled. integer, intent(inout), optional :: repro_sum_stats(5) @@ -325,21 +325,21 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & real(r8), intent(out), optional :: rel_diff(2,nflds) !< relative and absolute - !< differences between fixed + !< differences between fixed !< and floating point sums - integer, intent(in), optional :: commid + integer, intent(in), optional :: commid !< MPI communicator ! Local workspace logical :: use_ddpdd_sum ! flag indicating whether to ! use ice_reprosum_ddpdd or not - logical :: recompute ! flag indicating need to - ! determine gmax/gmin before + logical :: recompute ! flag indicating need to + ! determine gmax/gmin before ! computing sum - logical :: validate ! flag indicating need to - ! verify gmax and max_levels + logical :: validate ! flag indicating need to + ! verify gmax and max_levels ! are accurate/sufficient integer :: omp_nthreads ! number of OpenMP threads integer :: mpi_comm ! MPI subcommunicator @@ -364,12 +364,12 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & integer :: arr_lextremes(0:nflds,2)! local exponent extrema integer :: arr_gextremes(0:nflds,2)! global exponent extrema - integer :: arr_gmax_exp(nflds) ! global exponents maxima + integer :: arr_gmax_exp(nflds) ! global exponents maxima integer :: arr_gmin_exp(nflds) ! global exponents minima - integer :: arr_max_shift ! maximum safe exponent for + integer :: arr_max_shift ! maximum safe exponent for ! value < 1 (so that sum does ! not overflow) - integer :: max_levels(nflds) ! maximum number of levels of + integer :: max_levels(nflds) ! maximum number of levels of ! integer expansion to use integer :: max_level ! maximum value in max_levels integer :: gbl_max_red ! global max local sum reduction? (0/1) @@ -380,16 +380,16 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & real(r8) :: xmax_nsummands ! dble of max_nsummands real(r8) :: arr_lsum(nflds) ! local sums - real(r8) :: arr_gsum_fast(nflds) ! 
global sum calculated using - ! fast, nonreproducible, + real(r8) :: arr_gsum_fast(nflds) ! global sum calculated using + ! fast, nonreproducible, ! floating point alg. - real(r8) :: abs_diff ! absolute difference between - ! fixed and floating point + real(r8) :: abs_diff ! absolute difference between + ! fixed and floating point ! sums #ifdef _OPENMP integer omp_get_max_threads external omp_get_max_threads -#endif +#endif character(len=*),parameter :: subname = '(ice_reprosum_calc)' !----------------------------------------------------------------------- @@ -483,7 +483,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & if (.not. recompute) then -! determine maximum number of summands in local phases of the +! determine maximum number of summands in local phases of the ! algorithm ! if (detailed_timing) call xicex_timer_start("repro_sum_allr_max") if ( present(gbl_max_nsummands) ) then @@ -509,7 +509,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & endif ! if (detailed_timing) call xicex_timer_stop("repro_sum_allr_max") -! determine maximum shift. Shift needs to be small enough that summation +! determine maximum shift. Shift needs to be small enough that summation ! does not exceed maximum number of digits in i8. ! if requested, return max_nsummands before it is redefined @@ -544,7 +544,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & repro_sum_fast = 1 if (recompute) then repro_sum_both = 1 - else + else ! if requested, return specified levels and upper bounds on maxima if ( present(arr_max_levels_out) ) then do ifld=1,nflds @@ -560,7 +560,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & endif endif -! do not have sufficient information; calculate global max/min and +! do not have sufficient information; calculate global max/min and ! use to compute required number of levels if (recompute) then @@ -622,13 +622,13 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & arr_gmin_exp(:) = arr_gextremes(1:nflds,2) ! if a field is identically zero, arr_gmin_exp still equals MAXEXPONENT -! and arr_gmax_exp still equals MINEXPONENT. In this case, set +! and arr_gmax_exp still equals MINEXPONENT. In this case, set ! arr_gmin_exp = arr_gmax_exp = MINEXPONENT do ifld=1,nflds arr_gmin_exp(ifld) = min(arr_gmax_exp(ifld),arr_gmin_exp(ifld)) enddo -! if requested, return upper bounds on observed maxima +! if requested, return upper bounds on observed maxima if ( present(arr_gbl_max_out) ) then do ifld=1,nflds arr_gbl_max_out(ifld) = scale(1.0_r8,arr_gmax_exp(ifld)) @@ -641,7 +641,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & endif ! determine maximum shift (same as in previous branch, but with calculated -! max_nsummands). Shift needs to be small enough that summation does not +! max_nsummands). Shift needs to be small enough that summation does not ! exceed maximum number of digits in i8. ! summing within each thread first @@ -763,14 +763,14 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & end subroutine ice_reprosum_calc !======================================================================== -! -! Purpose: -!> Compute the global sum of each field in "arr" using the indicated -!> communicator with a reproducible yet scalable implementation based +! +! Purpose: +!> Compute the global sum of each field in "arr" using the indicated +!> communicator with a reproducible yet scalable implementation based !> on a fixed point algorithm. 
The accuracy of the fixed point algorithm !> is controlled by the number of "levels" of integer expansion, the -!> maximum value of which is specified by max_level. -! +!> maximum value of which is specified by max_level. +! !---------------------------------------------------------------------- subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & @@ -785,29 +785,29 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & integer, intent(in) :: nsummands !< number of local summands integer, intent(in) :: dsummands !< declared first dimension integer, intent(in) :: nflds !< number of fields - integer, intent(in) :: arr_max_shift !< maximum safe exponent for - !< value < 1 (so that sum + integer, intent(in) :: arr_max_shift !< maximum safe exponent for + !< value < 1 (so that sum !< does not overflow) - integer, intent(in) :: arr_gmax_exp(nflds) + integer, intent(in) :: arr_gmax_exp(nflds) !< exponents of global maxima - integer, intent(in) :: max_levels(nflds) - !< maximum number of levels + integer, intent(in) :: max_levels(nflds) + !< maximum number of levels !< of integer expansion - integer, intent(in) :: max_level !< maximum value in + integer, intent(in) :: max_level !< maximum value in !< max_levels integer, intent(in) :: omp_nthreads !< number of OpenMP threads integer, intent(in) :: mpi_comm !< MPI subcommunicator - real(r8), intent(in) :: arr(dsummands,nflds) + real(r8), intent(in) :: arr(dsummands,nflds) !< input array logical, intent(in):: validate !< flag indicating that accuracy of solution generated from !< arr_gmax_exp and max_levels should be tested - logical, intent(out):: recompute + logical, intent(out):: recompute !< flag indicating that either the upper bounds are inaccurate, - !< or max_levels and arr_gmax_exp do not generate accurate + !< or max_levels and arr_gmax_exp do not generate accurate !< enough sums real(r8), intent(out):: arr_gsum(nflds) !< global means @@ -817,27 +817,27 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & integer, parameter :: max_jlevel = & 1 + (digits(0_i8)/digits(0.0_r8)) - integer(i8) :: i8_arr_tlsum_level(0:max_level,nflds,omp_nthreads) - ! integer vector representing local + integer(i8) :: i8_arr_tlsum_level(0:max_level,nflds,omp_nthreads) + ! integer vector representing local ! sum (per thread, per field) - integer(i8) :: i8_arr_lsum_level((max_level+3)*nflds) - ! integer vector representing local + integer(i8) :: i8_arr_lsum_level((max_level+3)*nflds) + ! integer vector representing local ! sum integer(i8) :: i8_arr_level ! integer part of summand for current ! expansion level - integer(i8) :: i8_arr_gsum_level((max_level+3)*nflds) + integer(i8) :: i8_arr_gsum_level((max_level+3)*nflds) ! integer vector representing global ! sum - integer(i8) :: IX_8 ! integer representation of current - ! jlevels of X_8 ('part' of + integer(i8) :: IX_8 ! integer representation of current + ! jlevels of X_8 ('part' of ! i8_arr_gsum_level) integer(i8) :: i8_sign ! sign global sum integer(i8) :: i8_radix ! radix for i8 variables - integer :: max_error(nflds,omp_nthreads) + integer :: max_error(nflds,omp_nthreads) ! accurate upper bound on data? - integer :: not_exact(nflds,omp_nthreads) - ! max_levels sufficient to + integer :: not_exact(nflds,omp_nthreads) + ! max_levels sufficient to ! capture all digits? integer :: isum_beg(omp_nthreads), isum_end(omp_nthreads) ! 
range of summand indices for each @@ -845,16 +845,16 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & integer :: ifld, isum, ithread ! loop variables integer :: arr_exp ! exponent of summand - integer :: arr_shift ! exponent used to generate integer + integer :: arr_shift ! exponent used to generate integer ! for current expansion level integer :: ilevel ! current integer expansion level - integer :: offset(nflds) ! beginning location in - ! i8_arr_{g,l}sum_level for integer + integer :: offset(nflds) ! beginning location in + ! i8_arr_{g,l}sum_level for integer ! expansion of current ifld - integer :: voffset ! modification to offset used to - ! include validation metrics + integer :: voffset ! modification to offset used to + ! include validation metrics integer :: ioffset ! offset(ifld) - integer :: jlevel ! number of floating point 'pieces' + integer :: jlevel ! number of floating point 'pieces' ! extracted from a given i8 integer integer :: ierr ! MPI error return integer :: LX(max_jlevel) ! exponent of X_8 (see below) @@ -867,13 +867,13 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & ! reconstruction from integer vector real(r8) :: arr_frac ! fraction of summand - real(r8) :: arr_remainder ! part of summand remaining after + real(r8) :: arr_remainder ! part of summand remaining after ! current level of integer expansion - real(r8) :: X_8(max_jlevel) ! r8 vector representation of current + real(r8) :: X_8(max_jlevel) ! r8 vector representation of current ! i8_arr_gsum_level - real(r8) :: RX_8 ! r8 representation of difference + real(r8) :: RX_8 ! r8 representation of difference ! between current i8_arr_gsum_level - ! and current jlevels of X_8 + ! and current jlevels of X_8 ! (== IX_8). Also used in final ! scaling step @@ -940,7 +940,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & ! calculate first shift arr_shift = arr_max_shift - (arr_gmax_exp(ifld)-arr_exp) -! determine first (probably) nonzero level (assuming initial fraction is +! determine first (probably) nonzero level (assuming initial fraction is ! 'normal' - algorithm still works if this is not true) ! NOTE: this is critical; scale will set to zero if min exponent is too small. if (arr_shift < 1) then @@ -956,7 +956,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & endif if (ilevel .le. max_levels(ifld)) then -! apply first shift/truncate, add it to the relevant running +! apply first shift/truncate, add it to the relevant running ! sum, and calculate the remainder. arr_remainder = scale(arr_frac,arr_shift) i8_arr_level = int(arr_remainder,i8) @@ -964,7 +964,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & i8_arr_tlsum_level(ilevel,ifld,ithread) + i8_arr_level arr_remainder = arr_remainder - i8_arr_level -! while the remainder is non-zero, continue to shift, truncate, +! while the remainder is non-zero, continue to shift, truncate, ! sum, and calculate new remainder do while ((arr_remainder .ne. 0.0_r8) & .and. (ilevel < max_levels(ifld))) @@ -986,9 +986,9 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & enddo ! postprocess integer vector to eliminate potential for overlap in the following -! sums over threads and processes: if value larger than or equal to -! (radix(IX_8)**arr_max_shift), add this 'overlap' to next larger integer in -! vector, resulting in nonoverlapping ranges for each component. Note that +! 
sums over threads and processes: if value larger than or equal to +! (radix(IX_8)**arr_max_shift), add this 'overlap' to next larger integer in +! vector, resulting in nonoverlapping ranges for each component. Note that ! "ilevel-1==0" corresponds to an extra level used to guarantee that the sums ! over threads and processes do not overflow for ilevel==1. do ilevel=max_levels(ifld),1,-1 @@ -1035,12 +1035,12 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & #if defined (NO_I8) ! Workaround for when i8 is not supported. ! if (detailed_timing) call xicex_timer_start("repro_sum_allr_i4") - call mpi_allreduce (i8_arr_lsum_level, i8_arr_gsum_level, & + call mpi_allreduce (i8_arr_lsum_level, i8_arr_gsum_level, & veclth, MPI_INTEGER, MPI_SUM, mpi_comm, ierr) ! if (detailed_timing) call xicex_timer_stop("repro_sum_allr_i4") #else ! if (detailed_timing) call xicex_timer_start("repro_sum_allr_i8") - call mpi_allreduce (i8_arr_lsum_level, i8_arr_gsum_level, & + call mpi_allreduce (i8_arr_lsum_level, i8_arr_gsum_level, & veclth, MPI_INTEGER8, MPI_SUM, mpi_comm, ierr) ! if (detailed_timing) call xicex_timer_stop("repro_sum_allr_i8") #endif @@ -1055,10 +1055,10 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & ! alternate. To avoid this, do some arithmetic with integer vectors so that all ! components have the same sign. This should keep relative difference between ! using different integer sizes (e.g. i8 and i4) to machine epsilon -! 3) assignment to X_8 will usually lose accuracy since maximum integer -! size is greater than the max number of 'digits' in r8 value (if xmax_nsummands -! correction not very large). Calculate remainder and add in first (since -! smaller). One correction is sufficient for r8 (53 digits) and i8 (63 digits). +! 3) assignment to X_8 will usually lose accuracy since maximum integer +! size is greater than the max number of 'digits' in r8 value (if xmax_nsummands +! correction not very large). Calculate remainder and add in first (since +! smaller). One correction is sufficient for r8 (53 digits) and i8 (63 digits). ! For r4 (24 digits) may need to correct twice. Code is written in a general ! fashion, to work no matter how many corrections are necessary (assuming ! max_jlevel parameter calculation is correct). @@ -1079,7 +1079,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & if (.not. recompute) then ! preprocess integer vector: -! a) if value larger than or equal to (radix(IX_8)**arr_max_shift), add this 'overlap' +! a) if value larger than or equal to (radix(IX_8)**arr_max_shift), add this 'overlap' ! to next larger integer in vector, resulting in nonoverlapping ranges for each ! component. Note that have "ilevel-1=0" level here as described above. do ilevel=max_levels(ifld),1,-1 @@ -1093,9 +1093,9 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & - IX_8 endif enddo -! b) subtract +/- 1 from larger and add +/- 1 to smaller when necessary -! so that all vector components have the same sign (eliminating loss -! of accuracy arising from difference of large values when +! b) subtract +/- 1 from larger and add +/- 1 to smaller when necessary +! so that all vector components have the same sign (eliminating loss +! of accuracy arising from difference of large values when ! reconstructing r8 sum from integer vector) ilevel = 0 do while ((i8_arr_gsum_level(ioffset+ilevel) .eq. 
0_i8) & @@ -1117,7 +1117,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & i8_arr_gsum_level(ioffset+jlevel+1) = i8_arr_gsum_level(ioffset+jlevel+1) & + i8_sign*(i8_radix**arr_max_shift) endif - enddo + enddo endif ! start with maximum shift, and work up to larger values @@ -1130,7 +1130,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & if (i8_arr_gsum_level(ioffset+ilevel) .ne. 0_i8) then jlevel = 1 -! r8 representation of higher order bits in integer +! r8 representation of higher order bits in integer X_8(jlevel) = i8_arr_gsum_level(ioffset+ilevel) LX(jlevel) = exponent(X_8(jlevel)) @@ -1148,7 +1148,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & enddo ! add in contributions, smaller to larger, rescaling for each -! addition to guarantee that exponent of working summand is always +! addition to guarantee that exponent of working summand is always ! larger than minexponent do while (jlevel > 0) if (first) then @@ -1172,7 +1172,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & ! apply final exponent correction, scaling first if exponent is too small ! to apply directly corr_exp = curr_exp + exponent(arr_gsum(ifld)) - if (corr_exp .ge. MINEXPONENT(1._r8)) then + if (corr_exp .ge. MINEXPONENT(1._r8)) then arr_gsum(ifld) = set_exponent(arr_gsum(ifld),corr_exp) else RX_8 = set_exponent(arr_gsum(ifld), & @@ -1180,9 +1180,9 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & arr_gsum(ifld) = scale(RX_8,MINEXPONENT(1._r8)) endif -! if validate is .true. and some precision lost, test whether 'too much' +! if validate is .true. and some precision lost, test whether 'too much' ! was lost, due to too loose an upper bound, too stringent a limit on number -! of levels of expansion, cancellation, .... Calculated by comparing lower +! of levels of expansion, cancellation, .... Calculated by comparing lower ! bound on number of sigificant digits with number of digits in 1.0_r8 . if (validate) then if (i8_arr_gsum_level(ioffset-voffset+2) .ne. 0_i8) then @@ -1216,11 +1216,11 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & end subroutine ice_reprosum_int !======================================================================== -! -! Purpose: +! +! Purpose: !> Test whether distributed sum exceeds tolerance and print out a !> warning message. -! +! !---------------------------------------------------------------------- logical function ice_reprosum_tolExceeded (name, nflds, master, & @@ -1233,11 +1233,11 @@ logical function ice_reprosum_tolExceeded (name, nflds, master, & integer, intent(in) :: nflds !< number of fields logical, intent(in) :: master !< process that will write !< warning messages? - integer, optional, intent(in) :: logunit!< unit warning messages + integer, optional, intent(in) :: logunit!< unit warning messages !< written to real(r8), intent(in) :: rel_diff(2,nflds) !< relative and absolute - !< differences between fixed + !< differences between fixed !< and floating point sums ! Local workspace @@ -1301,12 +1301,12 @@ logical function ice_reprosum_tolExceeded (name, nflds, master, & end function ice_reprosum_tolExceeded !======================================================================== -! -! Purpose: -!> Compute the global sum of each field in "arr" using the indicated -!> communicator with a reproducible yet scalable implementation based +! +! 
Purpose: +!> Compute the global sum of each field in "arr" using the indicated +!> communicator with a reproducible yet scalable implementation based !> on He and Ding's implementation of the double-double algorithm. -! +! !---------------------------------------------------------------------- subroutine ice_reprosum_ddpdd (arr, arr_gsum, nsummands, dsummands, & @@ -1318,11 +1318,11 @@ subroutine ice_reprosum_ddpdd (arr, arr_gsum, nsummands, dsummands, & integer, intent(in) :: nsummands !< number of local summands integer, intent(in) :: dsummands !< declared first dimension integer, intent(in) :: nflds !< number of fields - real(r8), intent(in) :: arr(dsummands,nflds) + real(r8), intent(in) :: arr(dsummands,nflds) !< input array integer, intent(in) :: mpi_comm !< MPI subcommunicator - real(r8), intent(out):: arr_gsum(nflds) + real(r8), intent(out):: arr_gsum(nflds) !< global sums @@ -1360,8 +1360,8 @@ subroutine ice_reprosum_ddpdd (arr, arr_gsum, nsummands, dsummands, & arr_lsum_dd(ifld) = (0.0_r8,0.0_r8) do isum=1,nsummands - - ! Compute arr(isum,ifld) + arr_lsum_dd(ifld) using Knuth''s + + ! Compute arr(isum,ifld) + arr_lsum_dd(ifld) using Knuth''s ! trick. t1 = arr(isum,ifld) + real(arr_lsum_dd(ifld)) e = t1 - arr(isum,ifld) @@ -1393,11 +1393,11 @@ end subroutine ice_reprosum_ddpdd subroutine DDPDD (dda, ddb, len, itype) !---------------------------------------------------------------------- -! -! Purpose: -! Modification of original codes written by David H. Bailey +! +! Purpose: +! Modification of original codes written by David H. Bailey ! This subroutine computes ddb(i) = dda(i)+ddb(i) -! +! !---------------------------------------------------------------------- ! Arguments @@ -1433,10 +1433,10 @@ end subroutine DDPDD subroutine split_indices(total,num_pieces,ibeg,iend) !---------------------------------------------------------------------- -! -! Purpose: +! +! Purpose: ! Split range into 'num_pieces' -! +! !---------------------------------------------------------------------- ! Arguments diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 index abec3758f..baab6f49b 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 @@ -223,7 +223,7 @@ end subroutine init_ice_timers subroutine get_ice_timer(timer_id, name_choice, num_blocks, num_nodes) -! This routine initializes a timer with a given name and returns a +! This routine initializes a timer with a given name and returns a ! timer id. character (*), intent(in) :: & @@ -236,7 +236,7 @@ subroutine get_ice_timer(timer_id, name_choice, num_blocks, num_nodes) ! threaded region) integer (int_kind), intent(out) :: & - timer_id ! timer number assigned to this timer + timer_id ! timer number assigned to this timer !----------------------------------------------------------------------- ! @@ -267,7 +267,7 @@ subroutine get_ice_timer(timer_id, name_choice, num_blocks, num_nodes) all_timers(n)%name = name_choice all_timers(n)%in_use = .true. 
all_timers(n)%num_blocks = num_blocks - all_timers(n)%num_nodes = num_nodes + all_timers(n)%num_nodes = num_nodes allocate(all_timers(n)%block_started (num_blocks), & all_timers(n)%block_cycles1 (num_blocks), & @@ -285,7 +285,7 @@ subroutine get_ice_timer(timer_id, name_choice, num_blocks, num_nodes) if (srch_error /= 0) & call abort_ice(subname//'ERROR: Exceeded maximum number of timers') - + !----------------------------------------------------------------------- @@ -326,7 +326,7 @@ subroutine ice_timer_clear(timer_id) all_timers(timer_id)%block_accum_time(:) = c0 else call abort_ice(subname//'ERROR: attempt to reset undefined timer') - + endif !----------------------------------------------------------------------- @@ -386,7 +386,7 @@ subroutine ice_timer_start(timer_id, block_id) !*** another thread. if already started, keep track !*** of number of start requests in order to match !*** start and stop requests - + !$OMP CRITICAL if (.not. all_timers(timer_id)%node_started) then @@ -419,18 +419,18 @@ subroutine ice_timer_start(timer_id, block_id) endif else call abort_ice(subname//'ERROR: attempt to start undefined timer') - + endif !----------------------------------------------------------------------- end subroutine ice_timer_start - + !*********************************************************************** subroutine ice_timer_stop(timer_id, block_id) -! This routine stops a given node timer if appropriate. If block +! This routine stops a given node timer if appropriate. If block ! information is available the appropriate block timer is also stopped. integer (int_kind), intent(in) :: & @@ -491,7 +491,7 @@ subroutine ice_timer_stop(timer_id, block_id) !*** stop node timer if number of requested stops !*** matches the number of starts (to avoid stopping !*** a node timer started by multiple threads) - + cycles1 = all_timers(timer_id)%node_cycles1 !$OMP CRITICAL @@ -530,13 +530,13 @@ subroutine ice_timer_stop(timer_id, block_id) endif else call abort_ice(subname//'ERROR: attempt to stop undefined timer') - + endif !----------------------------------------------------------------------- end subroutine ice_timer_stop - + !*********************************************************************** subroutine ice_timer_print(timer_id,stats) @@ -560,7 +560,7 @@ subroutine ice_timer_print(timer_id,stats) integer (int_kind) :: & n,icount, & ! dummy loop index and counter - nBlocks + nBlocks logical (log_kind) :: & lrestart_timer ! flag to restart timer if timer is running @@ -613,7 +613,7 @@ subroutine ice_timer_print(timer_id,stats) local_time = c0 endif max_time = global_maxval(local_time,distrb_info) - + if (my_task == master_task) then write (nu_diag,timer_format) timer_id, & trim(all_timers(timer_id)%name),max_time @@ -735,7 +735,7 @@ end subroutine ice_timer_print_all subroutine ice_timer_check(timer_id,block_id) ! This routine checks a given timer by stopping and restarting the -! timer. This is primarily used to periodically accumulate time in +! timer. This is primarily used to periodically accumulate time in ! the timer to prevent timer cycles from wrapping around max_cycles. integer (int_kind), intent(in) :: & diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 index eb8f5d948..cafe4dc05 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 @@ -3,12 +3,12 @@ module ice_boundary ! 
This module contains data types and routines for updating halo -! regions (ghost cells) +! regions (ghost cells) ! ! 2007-07-19: Phil Jones, Yoshi Yoshida, John Dennis ! new naming conventions, optimizations during -! initialization, true multi-dimensional updates -! (rather than serial call to two-dimensional updates), +! initialization, true multi-dimensional updates +! (rather than serial call to two-dimensional updates), ! fixes for non-existent blocks ! 2008-01-28: Elizabeth Hunke replaced old routines with new POP ! infrastructure @@ -140,8 +140,8 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & seBlock, swBlock, &! block id southeast, southwest nbrs srcProc, dstProc, &! source, dest processor locations srcLocalID, dstLocalID, &! local block index of src,dst blocks - blockSizeX, &! size of default physical domain in X - blockSizeY, &! size of default physical domain in Y + blockSizeX, &! size of default physical domain in X + blockSizeY, &! size of default physical domain in Y eastMsgSize, westMsgSize, &! nominal sizes for e-w msgs northMsgSize, southMsgSize, &! nominal sizes for n-s msgs tripoleRows, &! number of rows in tripole buffer @@ -258,7 +258,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & srcProc, dstProc, northMsgSize) !*** if a tripole boundary block, also create a local - !*** message into and out of tripole buffer + !*** message into and out of tripole buffer if (tripoleBlock) then !*** copy in @@ -268,7 +268,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & !*** copy out of tripole buffer - includes halo call ice_HaloIncrementMsgCount(sendCount, recvCount, & - srcProc, srcProc, & + srcProc, srcProc, & (nghost+1)*nx_block) endif @@ -346,7 +346,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & ewBoundaryType, nsBoundaryType) if (neBlock > 0) then - msgSize = cornerMsgSize ! normal corner message + msgSize = cornerMsgSize ! normal corner message call ice_distributionGetBlockLoc(dist, neBlock, dstProc, & dstLocalID) @@ -425,7 +425,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & !----------------------------------------------------------------------- ! -! if messages are received from the same processor, the message is +! if messages are received from the same processor, the message is ! actually a local copy - count them and reset to zero ! !----------------------------------------------------------------------- @@ -526,7 +526,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & !*** for tripole grids, send a north tripole message to !*** the west block to make sure enough information is !*** available for tripole manipulations - + if (tripoleBlock) then call ice_HaloMsgCreate(halo, dist, iblock, -westBlock, 'north') endif @@ -752,7 +752,7 @@ subroutine ice_HaloUpdate2DR8(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -793,7 +793,7 @@ subroutine ice_HaloUpdate2DR8(array, halo, & ! ! take care of northern boundary in tripole case ! bufTripole array contains the top nghost+1 rows (u-fold) or nghost+2 rows -! (T-fold) of physical domain for entire (global) top row +! 
(T-fold) of physical domain for entire (global) top row ! !----------------------------------------------------------------------- @@ -814,13 +814,13 @@ subroutine ice_HaloUpdate2DR8(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 2,nxGlobal/2 iDst = nxGlobal - i + 2 x1 = bufTripoleR8(i ,halo%tripoleRows) @@ -829,20 +829,20 @@ subroutine ice_HaloUpdate2DR8(array, halo, & bufTripoleR8(i ,halo%tripoleRows) = xavg bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleR8(i ,halo%tripoleRows) @@ -851,12 +851,12 @@ subroutine ice_HaloUpdate2DR8(array, halo, & bufTripoleR8(i ,halo%tripoleRows) = xavg bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -865,18 +865,18 @@ subroutine ice_HaloUpdate2DR8(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i x1 = bufTripoleR8(i ,halo%tripoleRows) @@ -885,20 +885,20 @@ subroutine ice_HaloUpdate2DR8(array, halo, & bufTripoleR8(i ,halo%tripoleRows) = xavg bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleR8(i ,halo%tripoleRows) @@ -907,7 +907,7 @@ subroutine ice_HaloUpdate2DR8(array, halo, & bufTripoleR8(i ,halo%tripoleRows) = xavg bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -1053,7 +1053,7 @@ subroutine ice_HaloUpdate2DR4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -1106,13 +1106,13 @@ subroutine ice_HaloUpdate2DR4(array, halo, & select case (fieldLoc) case (field_loc_center) ! 
cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 2,nxGlobal/2 iDst = nxGlobal - i + 2 x1 = bufTripoleR4(i ,halo%tripoleRows) @@ -1121,20 +1121,20 @@ subroutine ice_HaloUpdate2DR4(array, halo, & bufTripoleR4(i ,halo%tripoleRows) = xavg bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleR4(i ,halo%tripoleRows) @@ -1143,32 +1143,32 @@ subroutine ice_HaloUpdate2DR4(array, halo, & bufTripoleR4(i ,halo%tripoleRows) = xavg bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select + end select else ! tripole u-fold - + select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i x1 = bufTripoleR4(i ,halo%tripoleRows) @@ -1177,20 +1177,20 @@ subroutine ice_HaloUpdate2DR4(array, halo, & bufTripoleR4(i ,halo%tripoleRows) = xavg bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleR4(i ,halo%tripoleRows) @@ -1199,7 +1199,7 @@ subroutine ice_HaloUpdate2DR4(array, halo, & bufTripoleR4(i ,halo%tripoleRows) = xavg bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -1345,7 +1345,7 @@ subroutine ice_HaloUpdate2DI4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -1398,13 +1398,13 @@ subroutine ice_HaloUpdate2DI4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 2,nxGlobal/2 iDst = nxGlobal - i + 2 x1 = bufTripoleI4(i ,halo%tripoleRows) @@ -1413,20 +1413,20 @@ subroutine ice_HaloUpdate2DI4(array, halo, & bufTripoleI4(i ,halo%tripoleRows) = xavg bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! 
cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleI4(i ,halo%tripoleRows) @@ -1435,32 +1435,32 @@ subroutine ice_HaloUpdate2DI4(array, halo, & bufTripoleI4(i ,halo%tripoleRows) = xavg bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - else ! tripole u-fold - + else ! tripole u-fold + select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i x1 = bufTripoleI4(i ,halo%tripoleRows) @@ -1469,20 +1469,20 @@ subroutine ice_HaloUpdate2DI4(array, halo, & bufTripoleI4(i ,halo%tripoleRows) = xavg bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleI4(i ,halo%tripoleRows) @@ -1491,7 +1491,7 @@ subroutine ice_HaloUpdate2DI4(array, halo, & bufTripoleI4(i ,halo%tripoleRows) = xavg bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -1617,7 +1617,7 @@ subroutine ice_HaloUpdate3DR8(array, halo, & nxGlobal = size(bufTripoleR8,dim=1) allocate(bufTripole(nxGlobal,halo%tripoleRows,nz)) bufTripole = fill - endif + endif !----------------------------------------------------------------------- ! @@ -1644,7 +1644,7 @@ subroutine ice_HaloUpdate3DR8(array, halo, & !----------------------------------------------------------------------- ! ! do local copies -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -1703,10 +1703,10 @@ subroutine ice_HaloUpdate3DR8(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -1720,20 +1720,20 @@ subroutine ice_HaloUpdate3DR8(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -1744,32 +1744,32 @@ subroutine ice_HaloUpdate3DR8(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - + end select + else ! tripole u-fold - + select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i @@ -1780,20 +1780,20 @@ subroutine ice_HaloUpdate3DR8(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -1804,7 +1804,7 @@ subroutine ice_HaloUpdate3DR8(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -1962,7 +1962,7 @@ subroutine ice_HaloUpdate3DR4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -2021,10 +2021,10 @@ subroutine ice_HaloUpdate3DR4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -2038,20 +2038,20 @@ subroutine ice_HaloUpdate3DR4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -2062,32 +2062,32 @@ subroutine ice_HaloUpdate3DR4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - - else ! tripole u-fold - + end select + + else ! tripole u-fold + select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i @@ -2098,20 +2098,20 @@ subroutine ice_HaloUpdate3DR4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -2122,7 +2122,7 @@ subroutine ice_HaloUpdate3DR4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -2280,7 +2280,7 @@ subroutine ice_HaloUpdate3DI4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -2323,7 +2323,7 @@ subroutine ice_HaloUpdate3DI4(array, halo, & !----------------------------------------------------------------------- if (nxGlobal > 0) then - + select case (fieldKind) case (field_type_scalar) isign = 1 @@ -2339,10 +2339,10 @@ subroutine ice_HaloUpdate3DI4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -2356,20 +2356,20 @@ subroutine ice_HaloUpdate3DI4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -2380,32 +2380,32 @@ subroutine ice_HaloUpdate3DI4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - - else ! tripole u-fold + + else ! tripole u-fold select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i @@ -2416,20 +2416,20 @@ subroutine ice_HaloUpdate3DI4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -2440,11 +2440,11 @@ subroutine ice_HaloUpdate3DI4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - + endif !*** copy out of global tripole buffer into local @@ -2599,7 +2599,7 @@ subroutine ice_HaloUpdate4DR8(array, halo, & !----------------------------------------------------------------------- ! ! do local copies -! if srcBlock is zero, that denotes an eliminated land block or a +! 
if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -2648,7 +2648,7 @@ subroutine ice_HaloUpdate4DR8(array, halo, & !----------------------------------------------------------------------- if (nxGlobal > 0) then - + select case (fieldKind) case (field_type_scalar) isign = 1 @@ -2664,10 +2664,10 @@ subroutine ice_HaloUpdate4DR8(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -2683,17 +2683,17 @@ subroutine ice_HaloUpdate4DR8(array, halo, & end do end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -2709,32 +2709,32 @@ subroutine ice_HaloUpdate4DR8(array, halo, & end do end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - - else ! tripole u-fold + end select + + else ! tripole u-fold select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 - 1 @@ -2747,20 +2747,20 @@ subroutine ice_HaloUpdate4DR8(array, halo, & end do end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 @@ -2773,11 +2773,11 @@ subroutine ice_HaloUpdate4DR8(array, halo, & end do end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - + endif !*** copy out of global tripole buffer into local @@ -2934,7 +2934,7 @@ subroutine ice_HaloUpdate4DR4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -2983,7 +2983,7 @@ subroutine ice_HaloUpdate4DR4(array, halo, & !----------------------------------------------------------------------- if (nxGlobal > 0) then - + select case (fieldKind) case (field_type_scalar) isign = 1 @@ -2999,10 +2999,10 @@ subroutine ice_HaloUpdate4DR4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -3018,17 +3018,17 @@ subroutine ice_HaloUpdate4DR4(array, halo, & end do end do end do - + case (field_loc_NEcorner) ! 
cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -3044,32 +3044,32 @@ subroutine ice_HaloUpdate4DR4(array, halo, & end do end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - - else ! tripole u-fold + end select + + else ! tripole u-fold select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 - 1 @@ -3082,20 +3082,20 @@ subroutine ice_HaloUpdate4DR4(array, halo, & end do end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 @@ -3108,11 +3108,11 @@ subroutine ice_HaloUpdate4DR4(array, halo, & end do end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - + endif !*** copy out of global tripole buffer into local @@ -3269,7 +3269,7 @@ subroutine ice_HaloUpdate4DI4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -3334,10 +3334,10 @@ subroutine ice_HaloUpdate4DI4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -3353,17 +3353,17 @@ subroutine ice_HaloUpdate4DI4(array, halo, & end do end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -3379,32 +3379,32 @@ subroutine ice_HaloUpdate4DI4(array, halo, & end do end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - - else ! tripole u-fold + end select + + else ! tripole u-fold select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 - 1 @@ -3417,20 +3417,20 @@ subroutine ice_HaloUpdate4DI4(array, halo, & end do end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 @@ -3443,11 +3443,11 @@ subroutine ice_HaloUpdate4DI4(array, halo, & end do end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - + endif !*** copy out of global tripole buffer into local @@ -3587,7 +3587,7 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & !----------------------------------------------------------------------- ! ! do local copies -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -3616,7 +3616,7 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & ! ! take care of northern boundary in tripole case ! bufTripole array contains the top haloWidth+1 rows of physical -! domain for entire (global) top row +! domain for entire (global) top row ! !----------------------------------------------------------------------- @@ -3644,12 +3644,12 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & ioffset = 1 joffset = 1 - case (field_loc_Eface) + case (field_loc_Eface) ioffset = 1 joffset = 0 - case (field_loc_Nface) + case (field_loc_Nface) ioffset = 0 joffset = 1 @@ -3680,7 +3680,7 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & iSrc = iSrc - ioffset jSrc = jSrc - joffset if (iSrc == 0) iSrc = nxGlobal - + !*** for center and Eface, do not need to replace !*** top row of physical domain, so jSrc should be !*** out of range and skipped @@ -3735,7 +3735,7 @@ subroutine ice_HaloIncrementMsgCount(sndCounter, rcvCounter, & !----------------------------------------------------------------------- ! -! if destination all land or outside closed boundary (dstProc = 0), +! if destination all land or outside closed boundary (dstProc = 0), ! then no send is necessary, so do the rest only for dstProc /= 0 ! !----------------------------------------------------------------------- @@ -3744,7 +3744,7 @@ subroutine ice_HaloIncrementMsgCount(sndCounter, rcvCounter, & !----------------------------------------------------------------------- ! -! if the current processor is the source, must send data +! if the current processor is the source, must send data ! local copy if dstProc = srcProc ! !----------------------------------------------------------------------- @@ -3754,14 +3754,14 @@ subroutine ice_HaloIncrementMsgCount(sndCounter, rcvCounter, & !----------------------------------------------------------------------- ! -! if the current processor is the destination, must receive data +! if the current processor is the destination, must receive data ! local copy if dstProc = srcProc ! !----------------------------------------------------------------------- if (dstProc == my_task + 1) then - if (srcProc > 0) then + if (srcProc > 0) then !*** the source block has ocean points !*** count as a receive from srcProc @@ -3852,7 +3852,7 @@ subroutine ice_HaloMsgCreate(halo, dist, srcBlock, dstBlock, direction) !----------------------------------------------------------------------- ! -! if destination all land or outside closed boundary (dstProc = 0), +! if destination all land or outside closed boundary (dstProc = 0), ! 
then no send is necessary, so do the rest only for dstProc /= 0 ! !----------------------------------------------------------------------- @@ -3894,7 +3894,7 @@ subroutine ice_HaloMsgCreate(halo, dist, srcBlock, dstBlock, direction) !----------------------------------------------------------------------- if (srcProc == my_task+1 .and. & - dstProc == my_task+1) then + dstProc == my_task+1) then !*** compute addresses based on direction @@ -3989,7 +3989,7 @@ subroutine ice_HaloMsgCreate(halo, dist, srcBlock, dstBlock, direction) if (jeSrc - jbSrc + 1 < halo%tripoleRows) then call abort_ice(subname//'ERROR: not enough points in block for tripole') return - endif + endif msgIndx = halo%numLocalCopies @@ -4013,7 +4013,7 @@ subroutine ice_HaloMsgCreate(halo, dist, srcBlock, dstBlock, direction) else if (srcBlock < 0 .and. dstBlock > 0) then - !*** tripole grid - set up for copying out of + !*** tripole grid - set up for copying out of !*** tripole buffer into ghost cell domains !*** include e-w ghost cells @@ -4097,7 +4097,7 @@ subroutine ice_HaloMsgCreate(halo, dist, srcBlock, dstBlock, direction) else - !*** tripole grid - copy entire top halo+1 + !*** tripole grid - copy entire top halo+1 !*** rows into global buffer at src location msgIndx = halo%numLocalCopies @@ -4151,7 +4151,7 @@ subroutine ice_HaloMsgCreate(halo, dist, srcBlock, dstBlock, direction) else - !*** tripole grid - copy entire top halo+1 + !*** tripole grid - copy entire top halo+1 !*** rows into global buffer at src location msgIndx = halo%numLocalCopies @@ -4235,12 +4235,12 @@ subroutine ice_HaloMsgCreate(halo, dist, srcBlock, dstBlock, direction) !----------------------------------------------------------------------- ! -! if dest block is local and source block does not exist, create a +! if dest block is local and source block does not exist, create a ! local copy to fill halo with a fill value ! !----------------------------------------------------------------------- - else if (srcProc == 0 .and. dstProc == my_task+1) then + else if (srcProc == 0 .and. dstProc == my_task+1) then !*** compute addresses based on direction @@ -4481,14 +4481,14 @@ end subroutine ice_HaloMsgCreate subroutine ice_HaloExtrapolate2DR8(ARRAY,dist,ew_bndy_type,ns_bndy_type) -! This subroutine extrapolates ARRAY values into the first row or column -! of ghost cells, and is intended for grid variables whose ghost cells -! would otherwise be set using the default boundary conditions (Dirichlet +! This subroutine extrapolates ARRAY values into the first row or column +! of ghost cells, and is intended for grid variables whose ghost cells +! would otherwise be set using the default boundary conditions (Dirichlet ! or Neumann). ! Note: This routine will need to be modified for nghost > 1. ! We assume padding occurs only on east and north edges. ! -! This is the specific interface for double precision arrays +! This is the specific interface for double precision arrays ! 
corresponding to the generic interface ice_HaloExtrapolate use ice_blocks, only: block, nblocks_x, nblocks_y, get_block diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_gather_scatter.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_gather_scatter.F90 index 4b0bb1f9e..34cca2d03 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_gather_scatter.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_gather_scatter.F90 @@ -346,7 +346,7 @@ subroutine gather_global_ext_dbl(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) real (dbl_kind), optional :: & spc_val - + real (dbl_kind), dimension(:,:), intent(inout) :: & ARRAY_G ! array containing global horizontal field on dst_task @@ -504,7 +504,7 @@ subroutine gather_global_ext_int(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) integer (int_kind), optional :: & spc_val - + integer (int_kind), dimension(:,:), intent(inout) :: & ARRAY_G ! array containing global horizontal field on dst_task @@ -662,7 +662,7 @@ subroutine gather_global_ext_log(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) logical (log_kind), optional :: & spc_val - + logical (log_kind), dimension(:,:), intent(inout) :: & ARRAY_G ! array containing global horizontal field on dst_task @@ -1581,7 +1581,7 @@ subroutine scatter_global_ext(ARRAY, ARRAY_G, src_task, dst_dist) !----------------------------------------------------------------------- ! -! initialize return array to zero +! initialize return array to zero ! !----------------------------------------------------------------------- @@ -1754,10 +1754,10 @@ subroutine scatter_global_stress(ARRAY, ARRAY_G1, ARRAY_G2, & this_block = get_block(1,1) ! for the tripoleTflag - all blocks have it if (this_block%tripoleTFlag) then - xoffset = 2 ! treat stresses as cell-centered scalars (they are not + xoffset = 2 ! treat stresses as cell-centered scalars (they are not yoffset = 0 ! shared with neighboring grid cells) else - xoffset = 1 ! treat stresses as cell-centered scalars (they are not + xoffset = 1 ! treat stresses as cell-centered scalars (they are not yoffset = 1 ! shared with neighboring grid cells) endif isign = 1 diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 index a024698d5..e859ea2bd 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 @@ -534,7 +534,6 @@ function global_sum_scalar_dbl(scalar, dist) & !----------------------------------------------------------------------- integer (int_kind) :: & - ierr, &! mpi error flag numProcs, &! number of processor participating numBlocks, &! number of local blocks communicator ! communicator for this distribution @@ -604,7 +603,6 @@ function global_sum_scalar_real(scalar, dist) & !----------------------------------------------------------------------- integer (int_kind) :: & - ierr, &! mpi error flag numProcs, &! number of processor participating numBlocks, &! number of local blocks communicator ! communicator for this distribution @@ -715,7 +713,7 @@ end function global_sum_scalar_int function global_allreduce_sum_vector_dbl(vector, dist) & result(globalSums) -! Computes the global sums of sets of scalars (elements of 'vector') +! Computes the global sums of sets of scalars (elements of 'vector') ! distributed across a parallel machine. ! ! 
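!
! [editorial sketch, not part of this patch] A minimal illustration of
! the reduction this interface performs, assuming an MPI build; the
! serial versions in this directory reduce to a local copy. The name
! sketch_allreduce_sum and its arguments are hypothetical.
!
      subroutine sketch_allreduce_sum(localSums, globalSums, comm)
         use ice_kinds_mod, only: dbl_kind, int_kind
         use mpi   ! assumed available; provides MPI_REAL8, MPI_SUM
         real (dbl_kind), dimension(:), intent(in)  :: localSums
         real (dbl_kind), dimension(:), intent(out) :: globalSums
         integer (int_kind), intent(in) :: comm   ! MPI communicator
         integer (int_kind) :: ierr
         ! elementwise sum across all tasks; every task gets the result
         call MPI_ALLREDUCE(localSums, globalSums, size(localSums), &
                            MPI_REAL8, MPI_SUM, comm, ierr)
      end subroutine sketch_allreduce_sum
!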
This is actually the specific interface for the generic global_allreduce_sum @@ -738,7 +736,6 @@ function global_allreduce_sum_vector_dbl(vector, dist) & !----------------------------------------------------------------------- integer (int_kind) :: & - ierr, &! mpi error flag numProcs, &! number of processor participating numBlocks, &! number of local blocks communicator, &! communicator for this distribution @@ -782,9 +779,9 @@ function global_sum_prod_dbl (array1, array2, dist, field_loc, & ! Computes the global sum of the physical domain of a product of ! two 2-d arrays. ! -! This is actually the specific interface for the generic +! This is actually the specific interface for the generic ! global_sum_prod function corresponding to double precision arrays. -! The generic interface is identical but will handle real and integer +! The generic interface is identical but will handle real and integer ! 2-d slabs. real (dbl_kind), dimension(:,:,:), intent(in) :: & @@ -920,9 +917,9 @@ function global_sum_prod_real (array1, array2, dist, field_loc, & ! Computes the global sum of the physical domain of a product of ! two 2-d arrays. ! -! This is actually the specific interface for the generic +! This is actually the specific interface for the generic ! global_sum_prod function corresponding to single precision arrays. -! The generic interface is identical but will handle real and integer +! The generic interface is identical but will handle real and integer ! 2-d slabs. real (real_kind), dimension(:,:,:), intent(in) :: & @@ -1058,9 +1055,9 @@ function global_sum_prod_int (array1, array2, dist, field_loc, & ! Computes the global sum of the physical domain of a product of ! two 2-d arrays. ! -! This is actually the specific interface for the generic +! This is actually the specific interface for the generic ! global_sum_prod function corresponding to integer arrays. -! The generic interface is identical but will handle real and integer +! The generic interface is identical but will handle real and integer ! 2-d slabs. integer (int_kind), dimension(:,:,:), intent(in) :: & @@ -1199,7 +1196,7 @@ function global_maxval_dbl (array, dist, lMask) & ! Computes the global maximum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to double precision arrays. +! function corresponding to double precision arrays. real (dbl_kind), dimension(:,:,:), intent(in) :: & array ! array for which max value needed @@ -1306,7 +1303,7 @@ function global_maxval_real (array, dist, lMask) & ! Computes the global maximum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to single precision arrays. +! function corresponding to single precision arrays. real (real_kind), dimension(:,:,:), intent(in) :: & array ! array for which max value needed @@ -1413,7 +1410,7 @@ function global_maxval_int (array, dist, lMask) & ! Computes the global maximum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to integer arrays. +! function corresponding to integer arrays. integer (int_kind), dimension(:,:,:), intent(in) :: & array ! array for which max value needed @@ -1521,7 +1518,7 @@ function global_maxval_scalar_dbl (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_maxval -! 
function corresponding to double precision scalars. +! function corresponding to double precision scalars. real (dbl_kind), intent(in) :: & scalar ! scalar for which max value needed @@ -1579,7 +1576,7 @@ function global_maxval_scalar_real (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to single precision scalars. +! function corresponding to single precision scalars. real (real_kind), intent(in) :: & scalar ! scalar for which max value needed @@ -1637,7 +1634,7 @@ function global_maxval_scalar_int (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to single precision scalars. +! function corresponding to single precision scalars. integer (int_kind), intent(in) :: & scalar ! scalar for which max value needed @@ -1695,7 +1692,7 @@ function global_maxval_scalar_int_nodist (scalar, communicator) & ! a communicator. This method supports testing. ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to single precision scalars. +! function corresponding to single precision scalars. integer (int_kind), intent(in) :: & scalar ! scalar for which max value needed @@ -1744,7 +1741,7 @@ function global_minval_dbl (array, dist, lMask) & ! Computes the global minimum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_minval -! function corresponding to double precision arrays. +! function corresponding to double precision arrays. real (dbl_kind), dimension(:,:,:), intent(in) :: & array ! array for which min value needed @@ -1851,7 +1848,7 @@ function global_minval_real (array, dist, lMask) & ! Computes the global minimum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_minval -! function corresponding to single precision arrays. +! function corresponding to single precision arrays. real (real_kind), dimension(:,:,:), intent(in) :: & array ! array for which min value needed @@ -1958,7 +1955,7 @@ function global_minval_int (array, dist, lMask) & ! Computes the global minimum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_minval -! function corresponding to integer arrays. +! function corresponding to integer arrays. integer (int_kind), dimension(:,:,:), intent(in) :: & array ! array for which min value needed @@ -2066,7 +2063,7 @@ function global_minval_scalar_dbl (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_minval -! function corresponding to double precision scalars. +! function corresponding to double precision scalars. real (dbl_kind), intent(in) :: & scalar ! scalar for which min value needed @@ -2124,7 +2121,7 @@ function global_minval_scalar_real (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_minval -! function corresponding to single precision scalars. +! function corresponding to single precision scalars. real (real_kind), intent(in) :: & scalar ! scalar for which min value needed @@ -2182,7 +2179,7 @@ function global_minval_scalar_int (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_minval -! function corresponding to single precision scalars. +! function corresponding to single precision scalars. 
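!
! [editorial sketch, not part of this patch] For the array forms above,
! the reduction is a masked min/max over the local blocks followed by a
! reduction across tasks; in this serial directory only the local part
! remains. A minimal analogue of the masked local step follows; the
! name sketch_local_maxval is hypothetical.
!
      function sketch_local_maxval(array, lMask) result(localMax)
         use ice_kinds_mod, only: dbl_kind
         real (dbl_kind), dimension(:,:,:), intent(in) :: array
         logical, dimension(:,:,:), intent(in) :: lMask
         real (dbl_kind) :: localMax
         ! an all-false mask yields -HUGE, which callers can detect
         ! as "no valid points in this region"
         localMax = maxval(array, mask=lMask)
      end function sketch_local_maxval
!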
integer (int_kind), intent(in) :: & scalar ! scalar for which min value needed @@ -2240,7 +2237,7 @@ function global_minval_scalar_int_nodist (scalar, communicator) & ! a communicator. This method supports testing. ! ! This is actually the specific interface for the generic global_minval -! function corresponding to single precision scalars. +! function corresponding to single precision scalars. integer (int_kind), intent(in) :: & scalar ! scalar for which min value needed @@ -2300,7 +2297,7 @@ subroutine compute_sums_dbl(array2,sums8,mpicomm,numprocs) ! reprosum = fixed point method based on ordered double integer sums. ! that requires two scalar reductions per global sum. ! This is extremely likely to be bfb. -! (See Mirin and Worley, 2012, IJHPCA, 26, 1730, +! (See Mirin and Worley, 2012, IJHPCA, 26, 1730, ! https://journals.sagepub.com/doi/10.1177/1094342011412630) ! ddpdd = parallel double-double algorithm using single scalar reduction. ! This is very likely to be bfb. diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90 index 13ff6fcb8..2c584bd94 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90 @@ -4,34 +4,34 @@ MODULE ice_reprosum -!----------------------------------------------------------------------- -! -! Purpose: -!> Compute reproducible global sums of a set of arrays across an MPI +!----------------------------------------------------------------------- +! +! Purpose: +!> Compute reproducible global sums of a set of arrays across an MPI !> subcommunicator ! -! Methods: +! Methods: !> Compute using either or both a scalable, reproducible algorithm and a !> scalable, nonreproducible algorithm: -!> * Reproducible (scalable): +!> * Reproducible (scalable): !> Convert to fixed point (integer vector representation) to enable !> reproducibility when using MPI_Allreduce !> * Alternative usually reproducible (scalable): -!> Use parallel double-double algorithm due to Helen He and +!> Use parallel double-double algorithm due to Helen He and !> Chris Ding, based on David Bailey's/Don Knuth's DDPDD algorithm -!> * Nonreproducible (scalable): +!> * Nonreproducible (scalable): !> Floating point and MPI_Allreduce based. -!> If computing both reproducible and nonreproducible sums, compare +!> If computing both reproducible and nonreproducible sums, compare !> these and report relative difference (if absolute difference !> less than sum) or absolute difference back to calling routine. ! -! Author: P. Worley (based on suggestions from J. White for fixed -! point algorithm and on He/Ding paper for ddpdd +! Author: P. Worley (based on suggestions from J. White for fixed +! point algorithm and on He/Ding paper for ddpdd ! algorithm) ! ! Modified by T.Craig for CICE, March 2019 based on the public version in ! Oasis3-MCT_4.0. -! +! !----------------------------------------------------------------------- !----------------------------------------------------------------------- @@ -66,7 +66,7 @@ MODULE ice_reprosum public :: & ice_reprosum_setopts, &! set runtime options ice_reprosum_calc, &! calculate distributed sum - ice_reprosum_tolExceeded ! utility function to check relative + ice_reprosum_tolExceeded ! utility function to check relative ! 
differences against the tolerance !----------------------------------------------------------------------- @@ -94,8 +94,8 @@ MODULE ice_reprosum CONTAINS !======================================================================== -!----------------------------------------------------------------------- -! Purpose: +!----------------------------------------------------------------------- +! Purpose: !> Set runtime options ! Author: P. Worley !----------------------------------------------------------------------- @@ -110,11 +110,11 @@ subroutine ice_reprosum_setopts(repro_sum_use_ddpdd_in, & logical, intent(in), optional :: repro_sum_use_ddpdd_in !< Use DDPDD algorithm instead of fixed precision algorithm real(r8), intent(in), optional :: repro_sum_rel_diff_max_in - !< maximum permissible difference between reproducible and + !< maximum permissible difference between reproducible and !< nonreproducible sums logical, intent(in), optional :: repro_sum_recompute_in - !< recompute using different algorithm when difference between - !< reproducible and nonreproducible sums is too great + !< recompute using different algorithm when difference between + !< reproducible and nonreproducible sums is too great logical, intent(in), optional :: repro_sum_master !< flag indicating whether this process should output !< log messages @@ -188,10 +188,10 @@ end subroutine ice_reprosum_setopts !======================================================================== -! -! Purpose: -!> Compute the global sum of each field in "arr" using the indicated -!> communicator with a reproducible yet scalable implementation based +! +! Purpose: +!> Compute the global sum of each field in "arr" using the indicated +!> communicator with a reproducible yet scalable implementation based !> on a fixed point algorithm. An alternative is to use an "almost !> always reproducible" floating point algorithm. ! @@ -199,65 +199,65 @@ end subroutine ice_reprosum_setopts ! number of "levels" of integer expansion. The algorithm will calculate ! the number of levels that is required for the sum to be essentially ! exact. The optional parameter arr_max_levels can be used to override -! the calculated value. The optional parameter arr_max_levels_out can be +! the calculated value. The optional parameter arr_max_levels_out can be ! used to return the values used. ! -! The algorithm also requires an upper bound on -! the maximum summand (in absolute value) for each field, and will +! The algorithm also requires an upper bound on +! the maximum summand (in absolute value) for each field, and will ! calculate this internally. However, if the optional parameters ! arr_max_levels and arr_gbl_max are both set, then the algorithm will ! use the values in arr_gbl_max for the upper bounds instead. If these ! are not upper bounds, or if the upper bounds are not tight enough ! to achieve the requisite accuracy, and if the optional parameter ! repro_sum_validate is NOT set to .false., the algorithm will repeat the -! computation with appropriate upper bounds. If only arr_gbl_max is present, +! computation with appropriate upper bounds. If only arr_gbl_max is present, ! then the maxima are computed internally (and the specified values are -! ignored). The optional parameter arr_gbl_max_out can be +! ignored). The optional parameter arr_gbl_max_out can be ! used to return the values used. ! ! Finally, the algorithm requires an upper bound on the number of -! local summands across all processes. This will be calculated internally, -! 
using an MPI collective, but the value in the optional argument +! local summands across all processes. This will be calculated internally, +! using an MPI collective, but the value in the optional argument ! gbl_max_nsummands will be used instead if (1) it is present, (2) -! it is > 0, and (3) the maximum value and required number of levels -! are also specified. (If the maximum value is calculated, the same -! MPI collective is used to determine the maximum number of local -! summands.) The accuracy of the user-specified value is not checked. -! However, if set to < 1, the value will instead be calculated. If the -! optional parameter gbl_max_nsummands_out is present, then the value -! used (gbl_max_nsummands if >= 1; calculated otherwise) will be +! it is > 0, and (3) the maximum value and required number of levels +! are also specified. (If the maximum value is calculated, the same +! MPI collective is used to determine the maximum number of local +! summands.) The accuracy of the user-specified value is not checked. +! However, if set to < 1, the value will instead be calculated. If the +! optional parameter gbl_max_nsummands_out is present, then the value +! used (gbl_max_nsummands if >= 1; calculated otherwise) will be ! returned. ! ! If requested (by setting ice_reprosum_reldiffmax >= 0.0 and passing in -! the optional rel_diff parameter), results are compared with a -! nonreproducible floating point algorithm. +! the optional rel_diff parameter), results are compared with a +! nonreproducible floating point algorithm. ! -! Note that the cost of the algorithm is not strongly correlated with +! Note that the cost of the algorithm is not strongly correlated with ! the number of levels, which primarily shows up as a (modest) increase -! in cost of the MPI_Allreduce as a function of vector length. Rather the -! cost is more a function of (a) the number of integers required to +! in cost of the MPI_Allreduce as a function of vector length. Rather the +! cost is more a function of (a) the number of integers required to ! represent an individual summand and (b) the number of MPI_Allreduce -! calls. The number of integers required to represent an individual -! summand is 1 or 2 when using 8-byte integers for 8-byte real summands -! when the number of local summands is not too large. As the number of -! local summands increases, the number of integers required increases. +! calls. The number of integers required to represent an individual +! summand is 1 or 2 when using 8-byte integers for 8-byte real summands +! when the number of local summands is not too large. As the number of +! local summands increases, the number of integers required increases. ! The number of MPI_Allreduce calls is either 2 (specifying nothing) or -! 1 (specifying gbl_max_nsummands, arr_max_levels, and arr_gbl_max -! correctly). When specifying arr_max_levels and arr_gbl_max +! 1 (specifying gbl_max_nsummands, arr_max_levels, and arr_gbl_max +! correctly). When specifying arr_max_levels and arr_gbl_max ! incorrectly, 3 or 4 MPI_Allreduce calls will be required. ! ! The alternative algorithm is a minor modification of a parallel ! implementation of David Bailey's routine DDPDD by Helen He ! and Chris Ding. Bailey uses the Knuth trick to implement quadruple -! precision summation of double precision values with 10 double -! precision operations. The advantage of this algorithm is that +! precision summation of double precision values with 10 double +! precision operations. The advantage of this algorithm is that ! 
it requires a single MPI_Allreduce and is less expensive per summand -! than is the fixed precision algorithm. The disadvantage is that it -! is not guaranteed to be reproducible (though it is reproducible -! much more often than is the standard algorithm). This alternative -! is used when the optional parameter ddpdd_sum is set to .true. It is -! also used if the fixed precision algorithm radix assumption does not -! hold. +! than is the fixed precision algorithm. The disadvantage is that it +! is not guaranteed to be reproducible (though it is reproducible +! much more often than is the standard algorithm). This alternative +! is used when the optional parameter ddpdd_sum is set to .true. It is +! also used if the fixed precision algorithm radix assumption does not +! hold. !---------------------------------------------------------------------- subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & @@ -274,10 +274,10 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & integer, intent(in) :: nsummands !< number of local summands integer, intent(in) :: dsummands !< declared first dimension integer, intent(in) :: nflds !< number of fields - real(r8), intent(in) :: arr(dsummands,nflds) + real(r8), intent(in) :: arr(dsummands,nflds) !< input array - real(r8), intent(out):: arr_gsum(nflds) + real(r8), intent(out):: arr_gsum(nflds) !< global means logical, intent(in), optional :: ddpdd_sum @@ -288,7 +288,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & !< upper bound on max(abs(arr)) real(r8), intent(out), optional :: arr_gbl_max_out(nflds) - !< calculated upper bound on + !< calculated upper bound on !< max(abs(arr)) integer, intent(in), optional :: arr_max_levels(nflds) @@ -307,13 +307,13 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & !< calculated maximum nsummands !< over all processes - integer, intent(in), optional :: gbl_count + integer, intent(in), optional :: gbl_count !< was total number of summands; - !< now is ignored; use + !< now is ignored; use !< gbl_max_nsummands instead logical, intent(in), optional :: repro_sum_validate - !< flag enabling/disabling testing that gmax and max_levels are + !< flag enabling/disabling testing that gmax and max_levels are !< accurate/sufficient. Default is enabled. integer, intent(inout), optional :: repro_sum_stats(5) @@ -326,21 +326,21 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & real(r8), intent(out), optional :: rel_diff(2,nflds) !< relative and absolute - !< differences between fixed + !< differences between fixed !< and floating point sums - integer, intent(in), optional :: commid + integer, intent(in), optional :: commid !< MPI communicator ! Local workspace logical :: use_ddpdd_sum ! flag indicating whether to ! use ice_reprosum_ddpdd or not - logical :: recompute ! flag indicating need to - ! determine gmax/gmin before + logical :: recompute ! flag indicating need to + ! determine gmax/gmin before ! computing sum - logical :: validate ! flag indicating need to - ! verify gmax and max_levels + logical :: validate ! flag indicating need to + ! verify gmax and max_levels ! are accurate/sufficient integer :: omp_nthreads ! number of OpenMP threads integer :: mpi_comm ! MPI subcommunicator @@ -365,12 +365,12 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & integer :: arr_lextremes(0:nflds,2)! local exponent extrema integer :: arr_gextremes(0:nflds,2)! global exponent extrema - integer :: arr_gmax_exp(nflds) ! 
global exponents maxima + integer :: arr_gmax_exp(nflds) ! global exponents maxima integer :: arr_gmin_exp(nflds) ! global exponents minima - integer :: arr_max_shift ! maximum safe exponent for + integer :: arr_max_shift ! maximum safe exponent for ! value < 1 (so that sum does ! not overflow) - integer :: max_levels(nflds) ! maximum number of levels of + integer :: max_levels(nflds) ! maximum number of levels of ! integer expansion to use integer :: max_level ! maximum value in max_levels integer :: gbl_max_red ! global max local sum reduction? (0/1) @@ -381,16 +381,16 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & real(r8) :: xmax_nsummands ! dble of max_nsummands real(r8) :: arr_lsum(nflds) ! local sums - real(r8) :: arr_gsum_fast(nflds) ! global sum calculated using - ! fast, nonreproducible, + real(r8) :: arr_gsum_fast(nflds) ! global sum calculated using + ! fast, nonreproducible, ! floating point alg. - real(r8) :: abs_diff ! absolute difference between - ! fixed and floating point + real(r8) :: abs_diff ! absolute difference between + ! fixed and floating point ! sums #ifdef _OPENMP integer omp_get_max_threads external omp_get_max_threads -#endif +#endif character(len=*),parameter :: subname = '(ice_reprosum_calc)' !----------------------------------------------------------------------- @@ -484,7 +484,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & if (.not. recompute) then -! determine maximum number of summands in local phases of the +! determine maximum number of summands in local phases of the ! algorithm ! if (detailed_timing) call xicex_timer_start("repro_sum_allr_max") if ( present(gbl_max_nsummands) ) then @@ -510,7 +510,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & endif ! if (detailed_timing) call xicex_timer_stop("repro_sum_allr_max") -! determine maximum shift. Shift needs to be small enough that summation +! determine maximum shift. Shift needs to be small enough that summation ! does not exceed maximum number of digits in i8. ! if requested, return max_nsummands before it is redefined @@ -545,7 +545,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & repro_sum_fast = 1 if (recompute) then repro_sum_both = 1 - else + else ! if requested, return specified levels and upper bounds on maxima if ( present(arr_max_levels_out) ) then do ifld=1,nflds @@ -561,7 +561,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & endif endif -! do not have sufficient information; calculate global max/min and +! do not have sufficient information; calculate global max/min and ! use to compute required number of levels if (recompute) then @@ -623,13 +623,13 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & arr_gmin_exp(:) = arr_gextremes(1:nflds,2) ! if a field is identically zero, arr_gmin_exp still equals MAXEXPONENT -! and arr_gmax_exp still equals MINEXPONENT. In this case, set +! and arr_gmax_exp still equals MINEXPONENT. In this case, set ! arr_gmin_exp = arr_gmax_exp = MINEXPONENT do ifld=1,nflds arr_gmin_exp(ifld) = min(arr_gmax_exp(ifld),arr_gmin_exp(ifld)) enddo -! if requested, return upper bounds on observed maxima +! if requested, return upper bounds on observed maxima if ( present(arr_gbl_max_out) ) then do ifld=1,nflds arr_gbl_max_out(ifld) = scale(1.0_r8,arr_gmax_exp(ifld)) @@ -642,7 +642,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & endif ! 
determine maximum shift (same as in previous branch, but with calculated -! max_nsummands). Shift needs to be small enough that summation does not +! max_nsummands). Shift needs to be small enough that summation does not ! exceed maximum number of digits in i8. ! summing within each thread first @@ -764,14 +764,14 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & end subroutine ice_reprosum_calc !======================================================================== -! -! Purpose: -!> Compute the global sum of each field in "arr" using the indicated -!> communicator with a reproducible yet scalable implementation based +! +! Purpose: +!> Compute the global sum of each field in "arr" using the indicated +!> communicator with a reproducible yet scalable implementation based !> on a fixed point algorithm. The accuracy of the fixed point algorithm !> is controlled by the number of "levels" of integer expansion, the -!> maximum value of which is specified by max_level. -! +!> maximum value of which is specified by max_level. +! !---------------------------------------------------------------------- subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & @@ -786,29 +786,29 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & integer, intent(in) :: nsummands !< number of local summands integer, intent(in) :: dsummands !< declared first dimension integer, intent(in) :: nflds !< number of fields - integer, intent(in) :: arr_max_shift !< maximum safe exponent for - !< value < 1 (so that sum + integer, intent(in) :: arr_max_shift !< maximum safe exponent for + !< value < 1 (so that sum !< does not overflow) - integer, intent(in) :: arr_gmax_exp(nflds) + integer, intent(in) :: arr_gmax_exp(nflds) !< exponents of global maxima - integer, intent(in) :: max_levels(nflds) - !< maximum number of levels + integer, intent(in) :: max_levels(nflds) + !< maximum number of levels !< of integer expansion - integer, intent(in) :: max_level !< maximum value in + integer, intent(in) :: max_level !< maximum value in !< max_levels integer, intent(in) :: omp_nthreads !< number of OpenMP threads integer, intent(in) :: mpi_comm !< MPI subcommunicator - real(r8), intent(in) :: arr(dsummands,nflds) + real(r8), intent(in) :: arr(dsummands,nflds) !< input array logical, intent(in):: validate !< flag indicating that accuracy of solution generated from !< arr_gmax_exp and max_levels should be tested - logical, intent(out):: recompute + logical, intent(out):: recompute !< flag indicating that either the upper bounds are inaccurate, - !< or max_levels and arr_gmax_exp do not generate accurate + !< or max_levels and arr_gmax_exp do not generate accurate !< enough sums real(r8), intent(out):: arr_gsum(nflds) !< global means @@ -818,27 +818,27 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & integer, parameter :: max_jlevel = & 1 + (digits(0_i8)/digits(0.0_r8)) - integer(i8) :: i8_arr_tlsum_level(0:max_level,nflds,omp_nthreads) - ! integer vector representing local + integer(i8) :: i8_arr_tlsum_level(0:max_level,nflds,omp_nthreads) + ! integer vector representing local ! sum (per thread, per field) - integer(i8) :: i8_arr_lsum_level((max_level+3)*nflds) - ! integer vector representing local + integer(i8) :: i8_arr_lsum_level((max_level+3)*nflds) + ! integer vector representing local ! sum integer(i8) :: i8_arr_level ! integer part of summand for current ! 
expansion level - integer(i8) :: i8_arr_gsum_level((max_level+3)*nflds) + integer(i8) :: i8_arr_gsum_level((max_level+3)*nflds) ! integer vector representing global ! sum - integer(i8) :: IX_8 ! integer representation of current - ! jlevels of X_8 ('part' of + integer(i8) :: IX_8 ! integer representation of current + ! jlevels of X_8 ('part' of ! i8_arr_gsum_level) integer(i8) :: i8_sign ! sign global sum integer(i8) :: i8_radix ! radix for i8 variables - integer :: max_error(nflds,omp_nthreads) + integer :: max_error(nflds,omp_nthreads) ! accurate upper bound on data? - integer :: not_exact(nflds,omp_nthreads) - ! max_levels sufficient to + integer :: not_exact(nflds,omp_nthreads) + ! max_levels sufficient to ! capture all digits? integer :: isum_beg(omp_nthreads), isum_end(omp_nthreads) ! range of summand indices for each @@ -846,16 +846,16 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & integer :: ifld, isum, ithread ! loop variables integer :: arr_exp ! exponent of summand - integer :: arr_shift ! exponent used to generate integer + integer :: arr_shift ! exponent used to generate integer ! for current expansion level integer :: ilevel ! current integer expansion level - integer :: offset(nflds) ! beginning location in - ! i8_arr_{g,l}sum_level for integer + integer :: offset(nflds) ! beginning location in + ! i8_arr_{g,l}sum_level for integer ! expansion of current ifld - integer :: voffset ! modification to offset used to - ! include validation metrics + integer :: voffset ! modification to offset used to + ! include validation metrics integer :: ioffset ! offset(ifld) - integer :: jlevel ! number of floating point 'pieces' + integer :: jlevel ! number of floating point 'pieces' ! extracted from a given i8 integer integer :: ierr ! MPI error return integer :: LX(max_jlevel) ! exponent of X_8 (see below) @@ -868,13 +868,13 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & ! reconstruction from integer vector real(r8) :: arr_frac ! fraction of summand - real(r8) :: arr_remainder ! part of summand remaining after + real(r8) :: arr_remainder ! part of summand remaining after ! current level of integer expansion - real(r8) :: X_8(max_jlevel) ! r8 vector representation of current + real(r8) :: X_8(max_jlevel) ! r8 vector representation of current ! i8_arr_gsum_level - real(r8) :: RX_8 ! r8 representation of difference + real(r8) :: RX_8 ! r8 representation of difference ! between current i8_arr_gsum_level - ! and current jlevels of X_8 + ! and current jlevels of X_8 ! (== IX_8). Also used in final ! scaling step @@ -941,7 +941,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & ! calculate first shift arr_shift = arr_max_shift - (arr_gmax_exp(ifld)-arr_exp) -! determine first (probably) nonzero level (assuming initial fraction is +! determine first (probably) nonzero level (assuming initial fraction is ! 'normal' - algorithm still works if this is not true) ! NOTE: this is critical; scale will set to zero if min exponent is too small. if (arr_shift < 1) then @@ -957,7 +957,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & endif if (ilevel .le. max_levels(ifld)) then -! apply first shift/truncate, add it to the relevant running +! apply first shift/truncate, add it to the relevant running ! sum, and calculate the remainder. 
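! [editorial note, sketch only] The statements below perform one level
! of the integer expansion: scale the fraction so its leading digits
! move into the integer part, truncate to i8, accumulate into this
! level's running sum, and keep the exact remainder for the next level.
! For example, arr_frac = 0.7071 with arr_shift = 4 scales to 11.31,
! truncates to 11_i8, and carries the remainder 0.31 to the next
! level, so no digits are lost.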
arr_remainder = scale(arr_frac,arr_shift) i8_arr_level = int(arr_remainder,i8) @@ -965,7 +965,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & i8_arr_tlsum_level(ilevel,ifld,ithread) + i8_arr_level arr_remainder = arr_remainder - i8_arr_level -! while the remainder is non-zero, continue to shift, truncate, +! while the remainder is non-zero, continue to shift, truncate, ! sum, and calculate new remainder do while ((arr_remainder .ne. 0.0_r8) & .and. (ilevel < max_levels(ifld))) @@ -987,9 +987,9 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & enddo ! postprocess integer vector to eliminate potential for overlap in the following -! sums over threads and processes: if value larger than or equal to -! (radix(IX_8)**arr_max_shift), add this 'overlap' to next larger integer in -! vector, resulting in nonoverlapping ranges for each component. Note that +! sums over threads and processes: if value larger than or equal to +! (radix(IX_8)**arr_max_shift), add this 'overlap' to next larger integer in +! vector, resulting in nonoverlapping ranges for each component. Note that ! "ilevel-1==0" corresponds to an extra level used to guarantee that the sums ! over threads and processes do not overflow for ilevel==1. do ilevel=max_levels(ifld),1,-1 @@ -1036,12 +1036,12 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & #if defined (NO_I8) ! Workaround for when i8 is not supported. ! if (detailed_timing) call xicex_timer_start("repro_sum_allr_i4") - call mpi_allreduce (i8_arr_lsum_level, i8_arr_gsum_level, & + call mpi_allreduce (i8_arr_lsum_level, i8_arr_gsum_level, & veclth, MPI_INTEGER, MPI_SUM, mpi_comm, ierr) ! if (detailed_timing) call xicex_timer_stop("repro_sum_allr_i4") #else ! if (detailed_timing) call xicex_timer_start("repro_sum_allr_i8") - call mpi_allreduce (i8_arr_lsum_level, i8_arr_gsum_level, & + call mpi_allreduce (i8_arr_lsum_level, i8_arr_gsum_level, & veclth, MPI_INTEGER8, MPI_SUM, mpi_comm, ierr) ! if (detailed_timing) call xicex_timer_stop("repro_sum_allr_i8") #endif @@ -1056,10 +1056,10 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & ! alternate. To avoid this, do some arithmetic with integer vectors so that all ! components have the same sign. This should keep relative difference between ! using different integer sizes (e.g. i8 and i4) to machine epsilon -! 3) assignment to X_8 will usually lose accuracy since maximum integer -! size is greater than the max number of 'digits' in r8 value (if xmax_nsummands -! correction not very large). Calculate remainder and add in first (since -! smaller). One correction is sufficient for r8 (53 digits) and i8 (63 digits). +! 3) assignment to X_8 will usually lose accuracy since maximum integer +! size is greater than the max number of 'digits' in r8 value (if xmax_nsummands +! correction not very large). Calculate remainder and add in first (since +! smaller). One correction is sufficient for r8 (53 digits) and i8 (63 digits). ! For r4 (24 digits) may need to correct twice. Code is written in a general ! fashion, to work no matter how many corrections are necessary (assuming ! max_jlevel parameter calculation is correct). @@ -1080,7 +1080,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & if (.not. recompute) then ! preprocess integer vector: -! a) if value larger than or equal to (radix(IX_8)**arr_max_shift), add this 'overlap' +! 
a) if value larger than or equal to (radix(IX_8)**arr_max_shift), add this 'overlap' ! to next larger integer in vector, resulting in nonoverlapping ranges for each ! component. Note that have "ilevel-1=0" level here as described above. do ilevel=max_levels(ifld),1,-1 @@ -1094,9 +1094,9 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & - IX_8 endif enddo -! b) subtract +/- 1 from larger and add +/- 1 to smaller when necessary -! so that all vector components have the same sign (eliminating loss -! of accuracy arising from difference of large values when +! b) subtract +/- 1 from larger and add +/- 1 to smaller when necessary +! so that all vector components have the same sign (eliminating loss +! of accuracy arising from difference of large values when ! reconstructing r8 sum from integer vector) ilevel = 0 do while ((i8_arr_gsum_level(ioffset+ilevel) .eq. 0_i8) & @@ -1118,7 +1118,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & i8_arr_gsum_level(ioffset+jlevel+1) = i8_arr_gsum_level(ioffset+jlevel+1) & + i8_sign*(i8_radix**arr_max_shift) endif - enddo + enddo endif ! start with maximum shift, and work up to larger values @@ -1131,7 +1131,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & if (i8_arr_gsum_level(ioffset+ilevel) .ne. 0_i8) then jlevel = 1 -! r8 representation of higher order bits in integer +! r8 representation of higher order bits in integer X_8(jlevel) = i8_arr_gsum_level(ioffset+ilevel) LX(jlevel) = exponent(X_8(jlevel)) @@ -1149,7 +1149,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & enddo ! add in contributions, smaller to larger, rescaling for each -! addition to guarantee that exponent of working summand is always +! addition to guarantee that exponent of working summand is always ! larger than minexponent do while (jlevel > 0) if (first) then @@ -1173,7 +1173,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & ! apply final exponent correction, scaling first if exponent is too small ! to apply directly corr_exp = curr_exp + exponent(arr_gsum(ifld)) - if (corr_exp .ge. MINEXPONENT(1._r8)) then + if (corr_exp .ge. MINEXPONENT(1._r8)) then arr_gsum(ifld) = set_exponent(arr_gsum(ifld),corr_exp) else RX_8 = set_exponent(arr_gsum(ifld), & @@ -1181,9 +1181,9 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & arr_gsum(ifld) = scale(RX_8,MINEXPONENT(1._r8)) endif -! if validate is .true. and some precision lost, test whether 'too much' +! if validate is .true. and some precision lost, test whether 'too much' ! was lost, due to too loose an upper bound, too stringent a limit on number -! of levels of expansion, cancellation, .... Calculated by comparing lower +! of levels of expansion, cancellation, .... Calculated by comparing lower ! bound on number of sigificant digits with number of digits in 1.0_r8 . if (validate) then if (i8_arr_gsum_level(ioffset-voffset+2) .ne. 0_i8) then @@ -1217,11 +1217,11 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & end subroutine ice_reprosum_int !======================================================================== -! -! Purpose: +! +! Purpose: !> Test whether distributed sum exceeds tolerance and print out a !> warning message. -! +! 
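!
! [editorial sketch, not part of this patch] The core of the test in
! isolation, assuming rel_diff(1,:) carries the relative differences
! between the fixed point and floating point sums and tol plays the
! role of the configured maximum; names here are hypothetical.
!
      logical function sketch_tol_exceeded(rel_diff, nflds, tol)
         integer, parameter :: r8 = selected_real_kind(13)  ! assumed to match the module's r8
         integer, intent(in) :: nflds
         real(r8), intent(in) :: rel_diff(2,nflds)
         real(r8), intent(in) :: tol
         ! exceeded if any field's relative difference is above tolerance
         sketch_tol_exceeded = any(rel_diff(1,1:nflds) > tol)
      end function sketch_tol_exceeded
!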
!---------------------------------------------------------------------- logical function ice_reprosum_tolExceeded (name, nflds, master, & @@ -1234,11 +1234,11 @@ logical function ice_reprosum_tolExceeded (name, nflds, master, & integer, intent(in) :: nflds !< number of fields logical, intent(in) :: master !< process that will write !< warning messages? - integer, optional, intent(in) :: logunit!< unit warning messages + integer, optional, intent(in) :: logunit!< unit warning messages !< written to real(r8), intent(in) :: rel_diff(2,nflds) !< relative and absolute - !< differences between fixed + !< differences between fixed !< and floating point sums ! Local workspace @@ -1302,12 +1302,12 @@ logical function ice_reprosum_tolExceeded (name, nflds, master, & end function ice_reprosum_tolExceeded !======================================================================== -! -! Purpose: -!> Compute the global sum of each field in "arr" using the indicated -!> communicator with a reproducible yet scalable implementation based +! +! Purpose: +!> Compute the global sum of each field in "arr" using the indicated +!> communicator with a reproducible yet scalable implementation based !> on He and Ding's implementation of the double-double algorithm. -! +! !---------------------------------------------------------------------- subroutine ice_reprosum_ddpdd (arr, arr_gsum, nsummands, dsummands, & @@ -1319,11 +1319,11 @@ subroutine ice_reprosum_ddpdd (arr, arr_gsum, nsummands, dsummands, & integer, intent(in) :: nsummands !< number of local summands integer, intent(in) :: dsummands !< declared first dimension integer, intent(in) :: nflds !< number of fields - real(r8), intent(in) :: arr(dsummands,nflds) + real(r8), intent(in) :: arr(dsummands,nflds) !< input array integer, intent(in) :: mpi_comm !< MPI subcommunicator - real(r8), intent(out):: arr_gsum(nflds) + real(r8), intent(out):: arr_gsum(nflds) !< global sums @@ -1361,8 +1361,8 @@ subroutine ice_reprosum_ddpdd (arr, arr_gsum, nsummands, dsummands, & arr_lsum_dd(ifld) = (0.0_r8,0.0_r8) do isum=1,nsummands - - ! Compute arr(isum,ifld) + arr_lsum_dd(ifld) using Knuth''s + + ! Compute arr(isum,ifld) + arr_lsum_dd(ifld) using Knuth''s ! trick. t1 = arr(isum,ifld) + real(arr_lsum_dd(ifld)) e = t1 - arr(isum,ifld) @@ -1394,11 +1394,11 @@ end subroutine ice_reprosum_ddpdd subroutine DDPDD (dda, ddb, len, itype) !---------------------------------------------------------------------- -! -! Purpose: -! Modification of original codes written by David H. Bailey +! +! Purpose: +! Modification of original codes written by David H. Bailey ! This subroutine computes ddb(i) = dda(i)+ddb(i) -! +! !---------------------------------------------------------------------- ! Arguments @@ -1434,10 +1434,10 @@ end subroutine DDPDD subroutine split_indices(total,num_pieces,ibeg,iend) !---------------------------------------------------------------------- -! -! Purpose: +! +! Purpose: ! Split range into 'num_pieces' -! +! !---------------------------------------------------------------------- ! Arguments diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 index be6e12253..bbe2fd4d1 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 @@ -116,7 +116,7 @@ module ice_timers type (timer_data), dimension(max_timers) :: & all_timers ! 
timer data for all timers - integer (int_kind) :: & + integer (int_kind) :: & cycles_max ! max clock cycles allowed by system real (dbl_kind) :: & @@ -148,8 +148,8 @@ subroutine init_ice_timers !----------------------------------------------------------------------- ! ! Call F90 intrinsic system_clock to determine clock rate -! and maximum cycles for single-processor runs. If no clock -! available, print message. +! and maximum cycles for single-processor runs. If no clock +! available, print message. ! !----------------------------------------------------------------------- @@ -231,7 +231,7 @@ end subroutine init_ice_timers subroutine get_ice_timer(timer_id, name_choice, num_blocks, num_nodes) -! This routine initializes a timer with a given name and returns a +! This routine initializes a timer with a given name and returns a ! timer id. character (*), intent(in) :: & @@ -244,7 +244,7 @@ subroutine get_ice_timer(timer_id, name_choice, num_blocks, num_nodes) ! threaded region) integer (int_kind), intent(out) :: & - timer_id ! timer number assigned to this timer + timer_id ! timer number assigned to this timer !----------------------------------------------------------------------- ! @@ -275,7 +275,7 @@ subroutine get_ice_timer(timer_id, name_choice, num_blocks, num_nodes) all_timers(n)%name = name_choice all_timers(n)%in_use = .true. all_timers(n)%num_blocks = num_blocks - all_timers(n)%num_nodes = num_nodes + all_timers(n)%num_nodes = num_nodes allocate(all_timers(n)%block_started (num_blocks), & all_timers(n)%block_cycles1 (num_blocks), & @@ -293,7 +293,7 @@ subroutine get_ice_timer(timer_id, name_choice, num_blocks, num_nodes) if (srch_error /= 0) & call abort_ice(subname//'ERROR: Exceeded maximum number of timers') - + !----------------------------------------------------------------------- @@ -334,7 +334,7 @@ subroutine ice_timer_clear(timer_id) all_timers(timer_id)%block_accum_time(:) = c0 else call abort_ice(subname//'ERROR: attempt to reset undefined timer') - + endif !----------------------------------------------------------------------- @@ -396,7 +396,7 @@ subroutine ice_timer_start(timer_id, block_id) !*** another thread. if already started, keep track !*** of number of start requests in order to match !*** start and stop requests - + !$OMP CRITICAL if (.not. all_timers(timer_id)%node_started) then @@ -431,18 +431,18 @@ subroutine ice_timer_start(timer_id, block_id) endif else call abort_ice(subname//'ERROR: attempt to start undefined timer') - + endif !----------------------------------------------------------------------- end subroutine ice_timer_start - + !*********************************************************************** subroutine ice_timer_stop(timer_id, block_id) -! This routine stops a given node timer if appropriate. If block +! This routine stops a given node timer if appropriate. If block ! information is available the appropriate block timer is also stopped. 
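!
! [editorial sketch, not part of this patch] The elapsed-time arithmetic
! these timers rely on, in isolation: system_clock returns integer
! cycles, and a count that went backwards means the counter wrapped
! past count_max, so one full period is added back. Names here are
! hypothetical.
!
      function sketch_elapsed_seconds(cycles1) result(seconds)
         use ice_kinds_mod, only: int_kind, dbl_kind
         integer (int_kind), intent(in) :: cycles1   ! cycles at timer start
         real (dbl_kind) :: seconds
         integer (int_kind) :: cycles2, rate, maxcycles
         call system_clock(count=cycles2, count_rate=rate, count_max=maxcycles)
         if (cycles2 >= cycles1) then
            seconds = real(cycles2 - cycles1, dbl_kind) / real(rate, dbl_kind)
         else   ! counter wrapped around count_max
            seconds = real((maxcycles - cycles1) + cycles2, dbl_kind) &
                    / real(rate, dbl_kind)
         endif
      end function sketch_elapsed_seconds
!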
integer (int_kind), intent(in) :: & @@ -513,7 +513,7 @@ subroutine ice_timer_stop(timer_id, block_id) !*** stop node timer if number of requested stops !*** matches the number of starts (to avoid stopping !*** a node timer started by multiple threads) - + cycles1 = all_timers(timer_id)%node_cycles1 !$OMP CRITICAL @@ -566,13 +566,13 @@ subroutine ice_timer_stop(timer_id, block_id) endif else call abort_ice(subname//'ERROR: attempt to stop undefined timer') - + endif !----------------------------------------------------------------------- end subroutine ice_timer_stop - + !*********************************************************************** subroutine ice_timer_print(timer_id,stats) @@ -648,7 +648,7 @@ subroutine ice_timer_print(timer_id,stats) local_time = c0 endif max_time = global_maxval(local_time,distrb_info) - + if (my_task == master_task) then write (nu_diag,timer_format) timer_id, & trim(all_timers(timer_id)%name),max_time @@ -713,7 +713,7 @@ subroutine ice_timer_print(timer_id,stats) if (lrestart_timer) call ice_timer_start(timer_id) else call abort_ice(subname//'ERROR: attempt to print undefined timer') - + endif !----------------------------------------------------------------------- @@ -771,7 +771,7 @@ end subroutine ice_timer_print_all subroutine ice_timer_check(timer_id,block_id) ! This routine checks a given timer by stopping and restarting the -! timer. This is primarily used to periodically accumulate time in +! timer. This is primarily used to periodically accumulate time in ! the timer to prevent timer cycles from wrapping around max_cycles. integer (int_kind), intent(in) :: & diff --git a/cicecore/cicedynB/infrastructure/ice_blocks.F90 b/cicecore/cicedynB/infrastructure/ice_blocks.F90 index 74aba9cb5..fb7483914 100644 --- a/cicecore/cicedynB/infrastructure/ice_blocks.F90 +++ b/cicecore/cicedynB/infrastructure/ice_blocks.F90 @@ -3,8 +3,8 @@ module ice_blocks ! This module contains data types and tools for decomposing a global -! horizontal domain into a set of blocks. It contains a data type -! for describing each block and contains routines for creating and +! horizontal domain into a set of blocks. It contains a data type +! for describing each block and contains routines for creating and ! querying the block decomposition for a global domain. ! ! author: Phil Jones, LANL @@ -46,7 +46,7 @@ module ice_blocks nx_block, ny_block ! x,y dir including ghost ! predefined directions for neighbor id routine - ! Note: the directions that are commented out are implemented in + ! Note: the directions that are commented out are implemented in ! POP but not in CICE. If the tripole cut were in the south ! instead of the north, these would need to be used (and also ! implemented in ice_boundary.F90). @@ -314,11 +314,12 @@ subroutine create_blocks(nx_global, ny_global, ew_boundary_type, & if (debug_blocks) then if (my_task == master_task) then - write(nu_diag,*) 'block i,j locations' + write(nu_diag,*) ' ' + write(nu_diag,'(2a)') subname,' block ID, iblock, jblock Locations:' do n = 1, nblocks_tot - write(nu_diag,*) 'block id, iblock, jblock, tripole:', & + write(nu_diag,'(2a,3i8,l4)') subname,' global block ID, iblock, jblock, tripole:', & all_blocks(n)%block_id, & - all_blocks(n)%iblock, & + all_blocks(n)%iblock, & all_blocks(n)%jblock, & all_blocks(n)%tripole enddo @@ -380,7 +381,7 @@ function ice_blocksGetNbrID(blockID, direction, iBoundary, jBoundary) & ! local variables ! 
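! [editorial note, sketch only] Across the tripole seam the neighbor
! block index is mirrored in i and sign-flagged in j: for the north
! direction the code below uses inbr = nblocks_x - iBlock + 1 and
! jnbr = -jBlock, so with nblocks_x = 4 the block at iBlock = 1 pairs
! with inbr = 4, and the negative jnbr marks the neighbor as lying
! across the fold rather than in an ordinary row to the north.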
!---------------------------------------------------------------------- - + integer (int_kind) :: & iBlock, jBlock, &! i,j block location of current block inbr, jnbr ! i,j block location of neighboring block @@ -394,6 +395,7 @@ function ice_blocksGetNbrID(blockID, direction, iBoundary, jBoundary) & !---------------------------------------------------------------------- call get_block_parameter(blockID, iblock=iBlock, jblock=jBlock) + nbrID = 0 ! initial default !---------------------------------------------------------------------- ! @@ -422,7 +424,7 @@ function ice_blocksGetNbrID(blockID, direction, iBoundary, jBoundary) & !*** other points if there has been padding or !*** if the block size does not divide the domain !*** evenly - inbr = nblocks_x - iBlock + 1 + inbr = nblocks_x - iBlock + 1 jnbr = -jBlock case default call abort_ice(subname//'ERROR: unknown north boundary') @@ -515,7 +517,7 @@ function ice_blocksGetNbrID(blockID, direction, iBoundary, jBoundary) & !*** other points if there has been padding or !*** if the block size does not divide the domain !*** evenly - inbr = nblocks_x - iBlock + inbr = nblocks_x - iBlock if (inbr == 0) inbr = nblocks_x jnbr = -jBlock case default @@ -554,7 +556,7 @@ function ice_blocksGetNbrID(blockID, direction, iBoundary, jBoundary) & !*** other points if there has been padding or !*** if the block size does not divide the domain !*** evenly - inbr = nblocks_x - iBlock + 2 + inbr = nblocks_x - iBlock + 2 if (inbr > nblocks_x) inbr = 1 jnbr = -jBlock case default @@ -691,7 +693,7 @@ function ice_blocksGetNbrID(blockID, direction, iBoundary, jBoundary) & !*** other points if there has been padding or !*** if the block size does not divide the domain !*** evenly - inbr = nblocks_x - iBlock - 1 + inbr = nblocks_x - iBlock - 1 if (inbr <= 0) inbr = inbr + nblocks_x jnbr = -jBlock case default @@ -799,7 +801,7 @@ end function get_block !********************************************************************** - subroutine get_block_parameter(block_id, local_id, & + subroutine get_block_parameter(block_id, local_id, & ilo, ihi, jlo, jhi, & iblock, jblock, tripole, & i_glob, j_glob) diff --git a/cicecore/cicedynB/infrastructure/ice_domain.F90 b/cicecore/cicedynB/infrastructure/ice_domain.F90 index 79f5bcb9a..ac56356e5 100644 --- a/cicecore/cicedynB/infrastructure/ice_domain.F90 +++ b/cicecore/cicedynB/infrastructure/ice_domain.F90 @@ -18,7 +18,7 @@ module ice_domain use ice_kinds_mod use ice_constants, only: shlat, nhlat use ice_communicate, only: my_task, master_task, get_num_procs, & - add_mpi_barriers + add_mpi_barriers, ice_barrier use ice_broadcast, only: broadcast_scalar, broadcast_array use ice_blocks, only: block, get_block, create_blocks, nghost, & nblocks_x, nblocks_y, nblocks_tot, nx_block, ny_block, debug_blocks @@ -26,7 +26,7 @@ module ice_domain use ice_boundary, only: ice_halo use ice_exit, only: abort_ice use ice_fileunits, only: nu_nml, nml_filename, nu_diag, & - get_fileunit, release_fileunit + get_fileunit, release_fileunit, flush_fileunit use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_query_parameters @@ -79,7 +79,7 @@ module ice_domain distribution_type, &! method to use for distributing blocks ! 'cartesian', 'roundrobin', 'sectrobin', 'sectcart' ! 'rake', 'spacecurve', etc - distribution_wght ! method for weighting work per block + distribution_wght ! method for weighting work per block ! 'block' = POP default configuration ! 'blockall' = no land block elimination ! 
'latitude' = no. ocean points * |lat| @@ -326,6 +326,7 @@ subroutine init_domain_distribution(KMTG,ULATG,grid_ice) ig,jg ,&! global indices igm1,igp1,jgm1,jgp1,&! global indices ninfo ,&! ice_distributionGet check + np, nlb, m ,&! debug blocks temporaries work_unit ,&! size of quantized work unit #ifdef USE_NETCDF fid ,&! file id @@ -357,7 +358,7 @@ subroutine init_domain_distribution(KMTG,ULATG,grid_ice) ! ! check that there are at least nghost+1 rows or columns of land cells ! for closed boundary conditions (otherwise grid lengths are zero in -! cells neighboring ocean points). +! cells neighboring ocean points). ! !---------------------------------------------------------------------- @@ -526,12 +527,12 @@ subroutine init_domain_distribution(KMTG,ULATG,grid_ice) KMTG(igm1,jg ) > puny .or. KMTG(igp1,jg ) > puny .or. & KMTG(ig ,jgp1) > puny .or. KMTG(ig ,jgm1) > puny) .and. & (ULATG(ig,jg) < shlat/rad_to_deg .or. & - ULATG(ig,jg) > nhlat/rad_to_deg) ) & + ULATG(ig,jg) > nhlat/rad_to_deg) ) & nocn(n) = nocn(n) + flat(ig,jg) else if (KMTG(ig,jg) > puny .and. & (ULATG(ig,jg) < shlat/rad_to_deg .or. & - ULATG(ig,jg) > nhlat/rad_to_deg) ) & + ULATG(ig,jg) > nhlat/rad_to_deg) ) & nocn(n) = nocn(n) + flat(ig,jg) endif endif @@ -543,7 +544,7 @@ subroutine init_domain_distribution(KMTG,ULATG,grid_ice) !*** points, so where the block is not completely land, !*** reset nocn to be the full size of the block - ! use processor_shape = 'square-pop' and distribution_wght = 'block' + ! use processor_shape = 'square-pop' and distribution_wght = 'block' ! to make CICE and POP decompositions/distributions identical. #ifdef CICE_IN_NEMO @@ -596,8 +597,41 @@ subroutine init_domain_distribution(KMTG,ULATG,grid_ice) call create_local_block_ids(blocks_ice, distrb_info) + ! write out block distribution ! 
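For reference, the distribution choices documented above are selected through domain_nml; a hypothetical configuration that would exercise the block-report output added below (all values illustrative only):

      &domain_nml
          nprocs            = 16
          processor_shape   = 'square-pop'
          distribution_type = 'roundrobin'
          distribution_wght = 'latitude'
          debug_blocks      = .true.
      /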
internal check of icedistributionGet as part of verification process if (debug_blocks) then + + call flush_fileunit(nu_diag) + call ice_barrier() + if (my_task == master_task) then + write(nu_diag,*) ' ' + write(nu_diag,'(2a)') subname, ' Blocks by Proc:' + endif + call ice_distributionGet(distrb_info, nprocs=np, numLocalBlocks=nlb) + do m = 1, np + if (m == my_task+1) then + do n=1,nlb + write(nu_diag,'(2a,3i8)') & + subname,' my_task, local block ID, global block ID: ', & + my_task, n, distrb_info%blockGlobalID(n) + enddo + call flush_fileunit(nu_diag) + endif + call ice_barrier() + enddo + + if (my_task == master_task) then + write(nu_diag,*) ' ' + write(nu_diag,'(2a)') subname, ' Blocks by Global Block ID:' + do m = 1, nblocks_tot + write(nu_diag,'(2a,3i8)') & + subname,' global block id, proc, local block ID: ', & + m, distrb_info%blockLocation(m), distrb_info%blockLocalID(m) + enddo + call flush_fileunit(nu_diag) + endif + call ice_barrier() + call ice_distributionGet(distrb_info, nprocs=ninfo) if (ninfo /= distrb_info%nprocs) & call abort_ice(subname//' ice_distributionGet nprocs ERROR', file=__FILE__, line=__LINE__) @@ -635,8 +669,11 @@ subroutine init_domain_distribution(KMTG,ULATG,grid_ice) deallocate(blkinfo) - if (my_task == master_task) & - write(nu_diag,*) subname,' ice_distributionGet checks pass' + if (my_task == master_task) then + write(nu_diag,*) ' ' + write(nu_diag,'(2a)') subname,' ice_distributionGet checks pass' + write(nu_diag,*) ' ' + endif endif if (associated(blocks_ice)) then diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 84f9f6547..523c7ea2c 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -14,7 +14,7 @@ ! 2006: Converted to free source form (F90) by Elizabeth Hunke ! 2007: Option to read from netcdf files (A. Keen, Met Office) ! Grid reading routines reworked by E. Hunke for boundary values -! 2021: Add N (center of north face) and E (center of east face) grids +! 2021: Add N (center of north face) and E (center of east face) grids ! to support C and CD solvers. Defining T at center of cells, U at ! NE corner, N at center of top face, E at center of right face. ! All cells are quadrilaterals with NE, E, and N associated with @@ -55,7 +55,7 @@ module ice_grid kmt_type , & ! options are file, default, boxislands bathymetry_file, & ! input bathymetry for seabed stress bathymetry_format, & ! bathymetry file format (default or pop) - grid_spacing , & ! default of 30.e3m or set by user in namelist + grid_spacing , & ! default of 30.e3m or set by user in namelist grid_ice , & ! Underlying model grid structure (A, B, C, CD) grid_ice_thrm, & ! ocean forcing grid for thermo fields (T, U, N, E) grid_ice_dynu, & ! ocean forcing grid for dyn U fields (T, U, N, E) @@ -111,12 +111,12 @@ module ice_grid G_HTN ! length of northern edge of T-cell (global ext.) real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - cyp , & ! 1.5*HTE(i,j)-0.5*HTW(i,j) = 1.5*HTE(i,j)-0.5*HTE(i-1,j) + cyp , & ! 1.5*HTE(i,j)-0.5*HTW(i,j) = 1.5*HTE(i,j)-0.5*HTE(i-1,j) cxp , & ! 1.5*HTN(i,j)-0.5*HTS(i,j) = 1.5*HTN(i,j)-0.5*HTN(i,j-1) - cym , & ! 0.5*HTE(i,j)-1.5*HTW(i,j) = 0.5*HTE(i,j)-1.5*HTE(i-1,j) - cxm , & ! 0.5*HTN(i,j)-1.5*HTS(i,j) = 0.5*HTN(i,j)-1.5*HTN(i,j-1) + cym , & ! 0.5*HTE(i,j)-1.5*HTW(i,j) = 0.5*HTE(i,j)-1.5*HTE(i-1,j) + cxm , & ! 0.5*HTN(i,j)-1.5*HTS(i,j) = 0.5*HTN(i,j)-1.5*HTN(i,j-1) dxhy , & ! 
0.5*(HTE(i,j) - HTW(i,j)) = 0.5*(HTE(i,j) - HTE(i-1,j)) - dyhx ! 0.5*(HTN(i,j) - HTS(i,j)) = 0.5*(HTN(i,j) - HTN(i,j-1)) + dyhx ! 0.5*(HTN(i,j) - HTS(i,j)) = 0.5*(HTN(i,j) - HTN(i,j-1)) real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & ratiodxN , & ! - dxN(i+1,j) / dxN(i,j) @@ -157,14 +157,14 @@ module ice_grid dimension (:,:,:,:,:), allocatable, public :: & mne, & ! matrices used for coordinate transformations in remapping mnw, & ! ne = northeast corner, nw = northwest, etc. - mse, & + mse, & msw ! masks real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & hm , & ! land/boundary mask, thickness (T-cell) bm , & ! task/block id - uvm , & ! land/boundary mask (U-cell) + uvm , & ! land/boundary mask (U-cell) npm , & ! land/boundary mask (N-cell) epm , & ! land/boundary mask (E-cell) kmt ! ocean topography mask for bathymetry (T-cell) @@ -205,7 +205,7 @@ module ice_grid !======================================================================= ! -! Allocate space for all variables +! Allocate space for all variables ! subroutine alloc_grid @@ -311,11 +311,11 @@ end subroutine alloc_grid !======================================================================= ! Distribute blocks across processors. The distribution is optimized -! based on latitude and topography, contained in the ULAT and KMT arrays. +! based on latitude and topography, contained in the ULAT and KMT arrays. ! ! authors: William Lipscomb and Phil Jones, LANL - subroutine init_grid1 + subroutine init_grid1 use ice_blocks, only: nx_block, ny_block use ice_broadcast, only: broadcast_array @@ -487,7 +487,7 @@ subroutine init_grid2 call popgrid_nc ! read POP grid lengths from nc file else call popgrid ! read POP grid lengths directly - endif + endif #ifdef CESMCOUPLED elseif (trim(grid_type) == 'latlon') then call latlongrid ! lat lon grid for sequential CESM (CAM mode) @@ -507,7 +507,7 @@ subroutine init_grid2 !$OMP PARALLEL DO ORDERED PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks if (my_task == master_task) then - !$OMP ORDERED + !$OMP ORDERED if (iblk == 1) then call omp_get_schedule(ompsk,ompcs) write(nu_diag,*) '' @@ -516,7 +516,7 @@ subroutine init_grid2 endif write(nu_diag,*) subname,' block, thread = ',iblk,OMP_GET_THREAD_NUM() call flush_fileunit(nu_diag) - !$OMP END ORDERED + !$OMP END ORDERED endif enddo !$OMP END PARALLEL DO @@ -581,8 +581,8 @@ subroutine init_grid2 cyp(i,j,iblk) = (c1p5*HTE(i,j,iblk) - p5*HTE(i-1,j,iblk)) cxp(i,j,iblk) = (c1p5*HTN(i,j,iblk) - p5*HTN(i,j-1,iblk)) ! match order of operations in cyp, cxp for tripole grids - cym(i,j,iblk) = -(c1p5*HTE(i-1,j,iblk) - p5*HTE(i,j,iblk)) - cxm(i,j,iblk) = -(c1p5*HTN(i,j-1,iblk) - p5*HTN(i,j,iblk)) + cym(i,j,iblk) = -(c1p5*HTE(i-1,j,iblk) - p5*HTE(i,j,iblk)) + cxm(i,j,iblk) = -(c1p5*HTN(i,j-1,iblk) - p5*HTN(i,j,iblk)) enddo enddo @@ -704,7 +704,7 @@ subroutine init_grid2 enddo !$OMP END PARALLEL DO endif ! regional - + call ice_timer_start(timer_bound) call ice_HaloUpdate (ANGLET, halo_info, & field_loc_center, field_type_angle, & @@ -760,7 +760,7 @@ end subroutine init_grid2 !======================================================================= -! POP displaced pole grid and land mask (or tripole). +! POP displaced pole grid and land mask (or tripole). ! Grid record number, field and units are: \\ ! (1) ULAT (radians) \\ ! (2) ULON (radians) \\ @@ -768,7 +768,7 @@ end subroutine init_grid2 ! (4) HTE (cm) \\ ! (5) HUS (cm) \\ ! (6) HUW (cm) \\ -! (7) ANGLE (radians) +! (7) ANGLE (radians) ! ! 
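The records enumerated above are read by fixed record number from the direct-access grid file; schematically, using this module's own reader ('rda8' for 8-byte real records):

      call ice_open(nu_grid, grid_file, 64)                        ! 64-bit records
      call ice_read_global(nu_grid, 1, work_g1, 'rda8', .true.)    ! ULAT (radians)
      call ice_read_global(nu_grid, 2, work_g1, 'rda8', .true.)    ! ULON (radians)
      call ice_read_global(nu_grid, 3, work_g1, 'rda8', .true.)    ! HTN (cm)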
Land mask record number and field is (1) KMT. ! @@ -809,7 +809,7 @@ subroutine popgrid !----------------------------------------------------------------- call ice_read(nu_kmt,1,work1,'ida4',diag, & - field_loc=field_loc_center, & + field_loc=field_loc_center, & field_type=field_type_scalar) hm (:,:,:) = c0 @@ -838,14 +838,14 @@ subroutine popgrid allocate(work_g1(nx_global,ny_global)) call ice_read_global(nu_grid,1,work_g1,'rda8',.true.) ! ULAT - call gridbox_verts(work_g1,latt_bounds) + call gridbox_verts(work_g1,latt_bounds) call scatter_global(ULAT, work_g1, master_task, distrb_info, & field_loc_NEcorner, field_type_scalar) call ice_HaloExtrapolate(ULAT, distrb_info, & ew_boundary_type, ns_boundary_type) call ice_read_global(nu_grid,2,work_g1,'rda8',.true.) ! ULON - call gridbox_verts(work_g1,lont_bounds) + call gridbox_verts(work_g1,lont_bounds) call scatter_global(ULON, work_g1, master_task, distrb_info, & field_loc_NEcorner, field_type_scalar) call ice_HaloExtrapolate(ULON, distrb_info, & @@ -857,7 +857,7 @@ subroutine popgrid !----------------------------------------------------------------- ! cell dimensions - ! calculate derived quantities from global arrays to preserve + ! calculate derived quantities from global arrays to preserve ! information on boundaries !----------------------------------------------------------------- @@ -926,7 +926,7 @@ subroutine popgrid_nc type (block) :: & this_block ! block information for current block - + integer(kind=int_kind) :: & varid integer (kind=int_kind) :: & @@ -952,7 +952,7 @@ subroutine popgrid_nc fieldname='kmt' call ice_read_nc(fid_kmt,1,fieldname,work1,diag, & - field_loc=field_loc_center, & + field_loc=field_loc_center, & field_type=field_type_scalar) hm (:,:,:) = c0 @@ -982,7 +982,7 @@ subroutine popgrid_nc fieldname='ulat' call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! ULAT - call gridbox_verts(work_g1,latt_bounds) + call gridbox_verts(work_g1,latt_bounds) call scatter_global(ULAT, work_g1, master_task, distrb_info, & field_loc_NEcorner, field_type_scalar) call ice_HaloExtrapolate(ULAT, distrb_info, & @@ -990,7 +990,7 @@ subroutine popgrid_nc fieldname='ulon' call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! ULON - call gridbox_verts(work_g1,lont_bounds) + call gridbox_verts(work_g1,lont_bounds) call scatter_global(ULON, work_g1, master_task, distrb_info, & field_loc_NEcorner, field_type_scalar) call ice_HaloExtrapolate(ULON, distrb_info, & @@ -1017,7 +1017,7 @@ subroutine popgrid_nc endif call broadcast_scalar(l_readCenter,master_task) if (l_readCenter) then - call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) + call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) call scatter_global(ANGLET, work_g1, master_task, distrb_info, & field_loc_center, field_type_angle) where (ANGLET > pi) ANGLET = pi @@ -1033,7 +1033,7 @@ subroutine popgrid_nc endif !----------------------------------------------------------------- ! cell dimensions - ! calculate derived quantities from global arrays to preserve + ! calculate derived quantities from global arrays to preserve ! information on boundaries !----------------------------------------------------------------- @@ -1060,7 +1060,7 @@ end subroutine popgrid_nc #ifdef CESMCOUPLED !======================================================================= -! Read in kmt file that matches CAM lat-lon grid and has single column +! Read in kmt file that matches CAM lat-lon grid and has single column ! functionality ! author: Mariana Vertenstein ! 
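Each netCDF grid read above follows one pipeline: read the global field, derive the cell-corner bounds, scatter to local blocks, then extrapolate into the halo. Condensed from the popgrid_nc code, with diag standing in for the usual diagnostics flag:

      fieldname = 'ulat'
      call ice_read_global_nc(fid_grid, 1, fieldname, work_g1, diag)
      call gridbox_verts(work_g1, latt_bounds)
      call scatter_global(ULAT, work_g1, master_task, distrb_info, &
                          field_loc_NEcorner, field_type_scalar)
      call ice_HaloExtrapolate(ULAT, distrb_info, &
                               ew_boundary_type, ns_boundary_type)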
2007: Elizabeth Hunke upgraded to netcdf90 and cice ncdf calls @@ -1077,8 +1077,8 @@ subroutine latlongrid #endif integer (kind=int_kind) :: & - i, j, iblk - + i, j, iblk + integer (kind=int_kind) :: & ni, nj, ncid, dimid, varid, ier @@ -1106,7 +1106,7 @@ subroutine latlongrid status ! status flag real (kind=dbl_kind), allocatable :: & - lats(:),lons(:),pos_lons(:), glob_grid(:,:) ! temporaries + lats(:),lons(:),pos_lons(:), glob_grid(:,:) ! temporaries real (kind=dbl_kind) :: & pos_scmlon,& ! temporary @@ -1175,12 +1175,12 @@ subroutine latlongrid status = nf90_get_var(ncid, varid, glob_grid, start3, count3) if (status /= nf90_noerr) call abort_ice (subname//' get_var yc') do j = 1,nj - lats(j) = glob_grid(1,j) + lats(j) = glob_grid(1,j) end do - + ! convert lons array and scmlon to 0,360 and find index of value closest to 0 ! and obtain single-column longitude/latitude indices to retrieve - + pos_lons(:)= mod(lons(:) + 360._dbl_kind,360._dbl_kind) pos_scmlon = mod(scmlon + 360._dbl_kind,360._dbl_kind) start(1) = (MINLOC(abs(pos_lons-pos_scmlon),dim=1)) @@ -1267,7 +1267,7 @@ subroutine latlongrid ! Calculate various geometric 2d arrays ! The U grid (velocity) is not used when run with sequential CAM ! because we only use thermodynamic sea ice. However, ULAT is used - ! in the default initialization of CICE so we calculate it here as + ! in the default initialization of CICE so we calculate it here as ! a "dummy" so that CICE will initialize with ice. If a no ice ! initialization is OK (or desired) this can be commented out and ! ULAT will remain 0 as specified above. ULAT is located at the @@ -1298,12 +1298,12 @@ subroutine latlongrid uarear(i,j,iblk) = c1/uarea(i,j,iblk) if (single_column) then - ULAT (i,j,iblk) = TLAT(i,j,iblk)+(pi/nj) + ULAT (i,j,iblk) = TLAT(i,j,iblk)+(pi/nj) else if (ny_global == 1) then ULAT (i,j,iblk) = TLAT(i,j,iblk) else - ULAT (i,j,iblk) = TLAT(i,j,iblk)+(pi/ny_global) + ULAT (i,j,iblk) = TLAT(i,j,iblk)+(pi/ny_global) endif endif ULON (i,j,iblk) = c0 @@ -1311,9 +1311,9 @@ subroutine latlongrid NLAT (i,j,iblk) = c0 ELON (i,j,iblk) = c0 ELAT (i,j,iblk) = c0 - ANGLE (i,j,iblk) = c0 + ANGLE (i,j,iblk) = c0 - ANGLET(i,j,iblk) = c0 + ANGLET(i,j,iblk) = c0 HTN (i,j,iblk) = 1.e36_dbl_kind HTE (i,j,iblk) = 1.e36_dbl_kind dxT (i,j,iblk) = 1.e36_dbl_kind @@ -1351,13 +1351,12 @@ end subroutine latlongrid subroutine rectgrid - use ice_blocks, only: nx_block, ny_block use ice_constants, only: c0, c1, c2, radius, cm_to_m, & field_loc_center, field_loc_NEcorner, field_type_scalar use ice_domain, only: close_boundaries integer (kind=int_kind) :: & - i, j, iblk, & + i, j, & imid, jmid real (kind=dbl_kind) :: & @@ -1552,8 +1551,8 @@ subroutine grid_boxislands_kmt (work) if (nxb < 1 .or. nyb < 1) & call abort_ice(subname//'ERROR: requires larger grid size') - - ! initialize work area as all ocean (c1). + + ! initialize work area as all ocean (c1). work(:,:) = c1 ! now add land points (c0) @@ -1956,7 +1955,7 @@ subroutine primary_grid_lengths_HTE(work_g) do i = 1, nx_global ! assume cyclic; noncyclic will be handled during scatter im1 = i-1 - if (i == 1) im1 = nx_global + if (i == 1) im1 = nx_global work_g2(i,j) = p25*(work_g(i,j) + work_g(im1,j) + work_g(i,j+1) + work_g(im1,j+1)) ! dyN enddo enddo @@ -1965,7 +1964,7 @@ subroutine primary_grid_lengths_HTE(work_g) do i = 1, nx_global ! 
assume cyclic; noncyclic will be handled during scatter im1 = i-1 - if (i == 1) im1 = nx_global + if (i == 1) im1 = nx_global work_g2(i,ny_global) = p5*(c2*work_g(i ,ny_global-1) - work_g(i ,ny_global-2) + & c2*work_g(im1,ny_global-1) - work_g(im1,ny_global-2)) ! dyN enddo @@ -2232,7 +2231,7 @@ subroutine Tlatlon ! the prior atan2 call ??? not sure what's going on. #if (1 == 1) enddo ! i - enddo ! j + enddo ! j enddo ! iblk !$OMP END PARALLEL DO @@ -2304,9 +2303,9 @@ subroutine Tlatlon ! ELAT in radians North ELAT(i,j,iblk) = asin(tz) - + enddo ! i - enddo ! j + enddo ! j enddo ! iblk !$OMP END PARALLEL DO @@ -2827,12 +2826,12 @@ subroutine grid_average_X2YS(dir,work1,wght1,mask1,work2) do i = ilo, ihi wtmp = (mask1(i ,j ,iblk)*wght1(i ,j ,iblk) & + mask1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & - + mask1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + mask1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + mask1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) if (wtmp /= c0) & work2(i,j,iblk) = (mask1(i ,j ,iblk)*work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + mask1(i-1,j ,iblk)*work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & - + mask1(i ,j-1,iblk)*work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + mask1(i ,j-1,iblk)*work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + mask1(i-1,j-1,iblk)*work1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) & / wtmp enddo @@ -2876,12 +2875,12 @@ subroutine grid_average_X2YS(dir,work1,wght1,mask1,work2) do j = jlo, jhi do i = ilo, ihi wtmp = (mask1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & - + mask1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + mask1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + mask1(i ,j ,iblk)*wght1(i ,j ,iblk) & + mask1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) if (wtmp /= c0) & work2(i,j,iblk) = (mask1(i ,j-1,iblk)*work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & - + mask1(i+1,j-1,iblk)*work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + mask1(i+1,j-1,iblk)*work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + mask1(i ,j ,iblk)*work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + mask1(i+1,j ,iblk)*work1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) & / wtmp @@ -3057,12 +3056,12 @@ subroutine grid_average_X2YA(dir,work1,wght1,work2) do i = ilo, ihi wtmp = (wght1(i ,j ,iblk) & + wght1(i-1,j ,iblk) & - + wght1(i ,j-1,iblk) & + + wght1(i ,j-1,iblk) & + wght1(i-1,j-1,iblk)) if (wtmp /= c0) & work2(i,j,iblk) = (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & - + work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + work1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) & / wtmp enddo @@ -3106,12 +3105,12 @@ subroutine grid_average_X2YA(dir,work1,wght1,work2) do j = jlo, jhi do i = ilo, ihi wtmp = (wght1(i ,j-1,iblk) & - + wght1(i+1,j-1,iblk) & + + wght1(i+1,j-1,iblk) & + wght1(i ,j ,iblk) & + wght1(i+1,j ,iblk)) if (wtmp /= c0) & work2(i,j,iblk) = (work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & - + work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) & / wtmp @@ -3282,7 +3281,7 @@ subroutine grid_average_X2YF(dir,work1,wght1,work2,wght2) work2(i,j,iblk) = p25 * & (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & - + work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + work1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) & / wght2(i ,j ,iblk) enddo @@ -3323,7 +3322,7 @@ subroutine grid_average_X2YF(dir,work1,wght1,work2,wght2) do i = ilo, ihi work2(i,j,iblk) = p25 * & (work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & - + work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + 
work1 (i ,j ,iblk)*wght1 (i ,j ,iblk) & + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) & / wght2(i ,j ,iblk) @@ -4177,7 +4176,7 @@ subroutine gridbox_verts(work_g,vbounds) if (my_task == master_task) then do j = 1, ny_global do i = 2, nx_global - work_g2(i,j) = work_g(i-1,j ) * rad_to_deg + work_g2(i,j) = work_g(i-1,j ) * rad_to_deg enddo enddo ! extrapolate @@ -4383,13 +4382,13 @@ end subroutine get_bathymetry_popfile !======================================================================= -! Read bathymetry data for seabed stress calculation (grounding scheme for -! landfast ice) in CICE stand-alone mode. When CICE is in coupled mode -! (e.g. CICE-NEMO), hwater should be uptated at each time level so that +! Read bathymetry data for seabed stress calculation (grounding scheme for +! landfast ice) in CICE stand-alone mode. When CICE is in coupled mode +! (e.g. CICE-NEMO), hwater should be updated at each time level so that ! it varies with ocean dynamics. ! ! author: Fred Dupont, CMC - + subroutine read_seabedstress_bathy ! use module @@ -4399,7 +4398,7 @@ subroutine read_seabedstress_bathy ! local variables integer (kind=int_kind) :: & fid_init ! file id for netCDF init file - + character (char_len_long) :: & ! input data file names fieldname @@ -4433,7 +4432,7 @@ subroutine read_seabedstress_bathy endif end subroutine read_seabedstress_bathy - + !======================================================================= end module ice_grid diff --git a/cicecore/cicedynB/infrastructure/ice_memusage.F90 b/cicecore/cicedynB/infrastructure/ice_memusage.F90 index 19e7dfb15..8dca4e621 100644 --- a/cicecore/cicedynB/infrastructure/ice_memusage.F90 +++ b/cicecore/cicedynB/infrastructure/ice_memusage.F90 @@ -11,11 +11,11 @@ MODULE ice_memusage implicit none private - + ! PUBLIC: Public interfaces public :: ice_memusage_getusage, & - ice_memusage_init, & + ice_memusage_init, & ice_memusage_print logical(log_kind), public :: memory_stats @@ -39,22 +39,20 @@ subroutine ice_memusage_init(iunit) !----- arguments ----- integer, optional :: iunit !< output unit number for optional writes - + !----- local ----- - ! --- Memory stats --- + ! --- Memory stats --- integer :: msize ! memory size (high water) - integer :: mrss ! resident size (current memory use) - integer :: msize0,msize1 ! temporary size integer :: mrss0,mrss1,mrss2 ! temporary rss integer :: mshare,mtext,mdatastack integer :: ierr - + integer :: ice_memusage_gptl real(dbl_kind),allocatable :: mem_tmp(:) character(*),parameter :: subname = '(ice_memusage_init)' - + !--------------------------------------------------- ! return if memory_stats are off @@ -121,7 +119,7 @@ subroutine ice_memusage_print(iunit,string) integer, intent(in) :: iunit !< unit number to write to character(len=*),optional, intent(in) :: string !< optional string - !----- local --- + !----- local --- real(dbl_kind) :: msize,mrss character(len=128) :: lstring character(*),parameter :: subname = '(ice_memusage_print)' diff --git a/cicecore/cicedynB/infrastructure/ice_memusage_gptl.c b/cicecore/cicedynB/infrastructure/ice_memusage_gptl.c index ec9c2c1d8..309c8824b 100644 --- a/cicecore/cicedynB/infrastructure/ice_memusage_gptl.c +++ b/cicecore/cicedynB/infrastructure/ice_memusage_gptl.c @@ -28,7 +28,7 @@ ** Author: Jim Rosinski ** Credit to Chuck Bardeen for MACOS section (__APPLE__ ifdef) ** -** get_memusage: +** get_memusage: ** ** Designed to be called from Fortran, returns information about memory ** usage in each of 5 input int* args.
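The grid_average_X2Y* hunks above all evaluate the same masked, weighted four-point mean; written out for the T-to-U case, with the block index dropped for brevity:

      wtmp = mask1(i  ,j  )*wght1(i  ,j  ) + mask1(i-1,j  )*wght1(i-1,j  ) &
           + mask1(i  ,j-1)*wght1(i  ,j-1) + mask1(i-1,j-1)*wght1(i-1,j-1)
      if (wtmp /= c0) &
         work2(i,j) = (mask1(i  ,j  )*work1(i  ,j  )*wght1(i  ,j  ) &
                     + mask1(i-1,j  )*work1(i-1,j  )*wght1(i-1,j  ) &
                     + mask1(i  ,j-1)*work1(i  ,j-1)*wght1(i  ,j-1) &
                     + mask1(i-1,j-1)*work1(i-1,j-1)*wght1(i-1,j-1)) / wtmp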
On Linux read from the /proc @@ -133,7 +133,7 @@ int ice_memusage_gptl (int *size, int *rss, int *share, int *text, int *datastac #endif long long total; int node_config; - + /* memory available */ #if defined(BGP) Kernel_GetPersonality(&pers, sizeof(pers)); @@ -195,7 +195,7 @@ int ice_memusage_gptl (int *size, int *rss, int *share, int *text, int *datastac ** arguments, close the file and return. */ - ret = fscanf (fd, "%d %d %d %d %d %d %d", + ret = fscanf (fd, "%d %d %d %d %d %d %d", size, rss, share, text, datastack, &dum, &dum); ret = fclose (fd); return 0; @@ -203,9 +203,9 @@ int ice_memusage_gptl (int *size, int *rss, int *share, int *text, int *datastac #elif (defined __APPLE__) FILE *fd; - char cmd[60]; + char cmd[60]; int pid = (int) getpid (); - + sprintf (cmd, "ps -o vsz -o rss -o tsiz -p %d | grep -v RSS", pid); fd = popen (cmd, "r"); @@ -224,7 +224,7 @@ int ice_memusage_gptl (int *size, int *rss, int *share, int *text, int *datastac if (getrusage (RUSAGE_SELF, &usage) < 0) return -1; - + *size = -1; *rss = usage.ru_maxrss; *share = -1; diff --git a/cicecore/cicedynB/infrastructure/ice_read_write.F90 b/cicecore/cicedynB/infrastructure/ice_read_write.F90 index d5cbe1768..b9074d8f6 100644 --- a/cicecore/cicedynB/infrastructure/ice_read_write.F90 +++ b/cicecore/cicedynB/infrastructure/ice_read_write.F90 @@ -25,7 +25,7 @@ module ice_read_write use ice_fileunits, only: nu_diag #ifdef USE_NETCDF - use netcdf + use netcdf #endif implicit none @@ -33,7 +33,7 @@ module ice_read_write private integer (kind=int_kind), parameter, private :: & - bits_per_byte = 8 ! number of bits per byte. + bits_per_byte = 8 ! number of bits per byte. ! used to determine RecSize in ice_open public :: ice_open, & @@ -148,7 +148,7 @@ subroutine ice_open_ext(nu, filename, nbits) integer (kind=int_kind), intent(in) :: & nu , & ! unit number nbits ! no. of bits per variable (0 for sequential access) - + integer (kind=int_kind) :: RecSize, nbytes character (*) :: filename @@ -468,9 +468,9 @@ subroutine ice_read_xyzt(nu, nrec, work, atype, diag, & if (present(field_loc)) then call scatter_global(work(:,:,k,:), work_g4(:,:,k), master_task, distrb_info, & field_loc, field_type) - + else - + call scatter_global(work(:,:,k,:), work_g4(:,:,k), master_task, distrb_info, & field_loc_noupdate, field_type_noupdate) endif @@ -791,11 +791,11 @@ subroutine ice_write_xyt(nu, nrec, work, atype, diag) elseif (atype == 'ida8') then allocate(work_gi8(nx_global,ny_global)) work_gi8 = nint(work_g1) - write(nu,rec=nrec) work_gi8 + write(nu,rec=nrec) work_gi8 deallocate(work_gi8) elseif (atype == 'rda4') then allocate(work_gr(nx_global,ny_global)) - work_gr = work_g1 + work_gr = real(work_g1,real_kind) write(nu,rec=nrec) work_gr deallocate(work_gr) elseif (atype == 'rda8') then @@ -824,7 +824,7 @@ end subroutine ice_write_xyt !======================================================================= -! Writes an unformatted file +! Writes an unformatted file ! 
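The rda4 changes in these write routines make the narrowing conversion explicit rather than leaving it to implicit assignment; the pattern, isolated:

      real (kind=real_kind), dimension(:,:), allocatable :: work_gr

      allocate(work_gr(nx_global,ny_global))
      work_gr = real(work_g1, real_kind)   ! explicit dbl_kind -> real_kind
      write(nu,rec=nrec) work_gr           ! 4-byte direct-access record
      deallocate(work_gr)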
work is a real array, atype indicates the format of the data subroutine ice_write_xyzt(nu, nrec, work, atype, diag) @@ -895,11 +895,11 @@ subroutine ice_write_xyzt(nu, nrec, work, atype, diag) elseif (atype == 'ida8') then allocate(work_gi9(nx_global,ny_global,nblyr+2)) work_gi9 = nint(work_g4) - write(nu,rec=nrec) work_gi9 + write(nu,rec=nrec) work_gi9 deallocate(work_gi9) elseif (atype == 'rda4') then allocate(work_gr3(nx_global,ny_global,nblyr+2)) - work_gr3 = work_g4 + work_gr3 = real(work_g4,real_kind) write(nu,rec=nrec) work_gr3 deallocate(work_gr3) elseif (atype == 'rda8') then @@ -1002,11 +1002,11 @@ subroutine ice_write_ext(nu, nrec, work, atype, diag) elseif (atype == 'ida8') then allocate(work_gi8(nx,ny)) work_gi8 = nint(work_g1) - write(nu,rec=nrec) work_gi8 + write(nu,rec=nrec) work_gi8 deallocate(work_gi8) elseif (atype == 'rda4') then allocate(work_gr(nx,ny)) - work_gr = work_g1 + work_gr = real(work_g1,real_kind) write(nu,rec=nrec) work_gr deallocate(work_gr) elseif (atype == 'rda8') then @@ -1040,7 +1040,7 @@ end subroutine ice_write_ext subroutine ice_open_nc(filename, fid) - character (char_len_long), intent(in) :: & + character (char_len_long), intent(in) :: & filename ! netCDF filename integer (kind=int_kind), intent(out) :: & @@ -1052,7 +1052,7 @@ subroutine ice_open_nc(filename, fid) #ifdef USE_NETCDF integer (kind=int_kind) :: & - status ! status variable from netCDF routine + status ! status variable from netCDF routine if (my_task == master_task) then @@ -1089,12 +1089,12 @@ subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & integer (kind=int_kind), intent(in) :: & fid , & ! file id - nrec ! record number + nrec ! record number logical (kind=log_kind), intent(in) :: & diag ! if true, write diagnostic output - character (len=*), intent(in) :: & + character (len=*), intent(in) :: & varname ! field name in netcdf file real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(out) :: & @@ -1113,13 +1113,13 @@ subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & varid , & ! variable id status , & ! status output from netcdf routines ndims , & ! number of dimensions dimlen ! dimension size - integer (kind=int_kind), dimension(10) :: & + integer (kind=int_kind), dimension(10) :: & dimids ! generic size dimids real (kind=dbl_kind) :: & @@ -1200,12 +1200,12 @@ subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & endif !-------------------------------------------------------------- - ! Read global array + ! Read global array !-------------------------------------------------------------- if (orca_halogrid .and. .not. present(restart_ext)) then status = nf90_get_var( fid, varid, work_g2, & - start=(/1,1,lnrec/), & + start=(/1,1,lnrec/), & count=(/nx_global+2,ny_global+1,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -1214,7 +1214,7 @@ subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & work_g1 = work_g2(2:nx_global+1,1:ny_global) else status = nf90_get_var( fid, varid, work_g1, & - start=(/1,1,lnrec/), & + start=(/1,1,lnrec/), & count=(/nx,ny,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -1230,8 +1230,8 @@ subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & !------------------------------------------------------------------- if (my_task==master_task .and. 
diag) then - write(nu_diag,'(2a,i8,a,i8,2a)') & - subname,' fid= ',fid, ', lnrec = ',lnrec, & + write(nu_diag,'(2a,i8,a,i8,2a)') & + subname,' fid= ',fid, ', lnrec = ',lnrec, & ', varname = ',trim(varname) ! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) ! write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar @@ -1294,9 +1294,9 @@ subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & integer (kind=int_kind), intent(in) :: & fid , & ! file id - nrec ! record number + nrec ! record number - character (len=*), intent(in) :: & + character (len=*), intent(in) :: & varname ! field name in netcdf file logical (kind=log_kind), intent(in) :: & @@ -1318,14 +1318,14 @@ subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & n, & ! ncat index varid , & ! variable id status , & ! status output from netcdf routines ndims , & ! number of dimensions dimlen ! dimension size - integer (kind=int_kind), dimension(10) :: & + integer (kind=int_kind), dimension(10) :: & dimids ! generic size dimids real (kind=dbl_kind) :: & @@ -1407,12 +1407,12 @@ subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & endif !-------------------------------------------------------------- - ! Read global array + ! Read global array !-------------------------------------------------------------- if (orca_halogrid .and. .not. present(restart_ext)) then status = nf90_get_var( fid, varid, work_g2, & - start=(/1,1,1,lnrec/), & + start=(/1,1,1,lnrec/), & count=(/nx_global+2,ny_global+1,ncat,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -1421,7 +1421,7 @@ subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & work_g1 = work_g2(2:nx_global+1,1:ny_global,:) else status = nf90_get_var( fid, varid, work_g1, & - start=(/1,1,1,lnrec/), & + start=(/1,1,1,lnrec/), & count=(/nx,ny,ncat,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -1437,8 +1437,8 @@ subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & !------------------------------------------------------------------- if (my_task==master_task .and. diag) then - write(nu_diag,'(2a,i8,a,i8,2a)') & - subname,' fid= ',fid, ', lnrec = ',lnrec, & + write(nu_diag,'(2a,i8,a,i8,2a)') & + subname,' fid= ',fid, ', lnrec = ',lnrec, & ', varname = ',trim(varname) ! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) ! write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar @@ -1506,14 +1506,14 @@ subroutine ice_read_nc_xyf(fid, nrec, varname, work, diag, & field_loc, field_type, restart_ext) use ice_fileunits, only: nu_diag - use ice_domain_size, only: nfsd, nfreq + use ice_domain_size, only: nfreq use ice_gather_scatter, only: scatter_global, scatter_global_ext integer (kind=int_kind), intent(in) :: & fid , & ! file id - nrec ! record number + nrec ! record number - character (len=*), intent(in) :: & + character (len=*), intent(in) :: & varname ! field name in netcdf file logical (kind=log_kind), intent(in) :: & @@ -1533,7 +1533,7 @@ subroutine ice_read_nc_xyf(fid, nrec, varname, work, diag, & ! local variables ! netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & varid, & ! variable id status, & ! status output from netcdf routines ndim, nvar, & ! sizes of netcdf file @@ -1542,7 +1542,7 @@ subroutine ice_read_nc_xyf(fid, nrec, varname, work, diag, & ndims, & ! 
number of dimensions dimlen ! dimension size - integer (kind=int_kind), dimension(10) :: & + integer (kind=int_kind), dimension(10) :: & dimids ! generic size dimids real (kind=dbl_kind) :: & @@ -1627,12 +1627,12 @@ subroutine ice_read_nc_xyf(fid, nrec, varname, work, diag, & endif !-------------------------------------------------------------- - ! Read global array + ! Read global array !-------------------------------------------------------------- if (orca_halogrid .and. .not. present(restart_ext)) then status = nf90_get_var( fid, varid, work_g2, & - start=(/1,1,1,lnrec/), & + start=(/1,1,1,lnrec/), & count=(/nx_global+2,ny_global+1,nfreq,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -1641,7 +1641,7 @@ subroutine ice_read_nc_xyf(fid, nrec, varname, work, diag, & work_g1 = work_g2(2:nx_global+1,1:ny_global,:) else status = nf90_get_var( fid, varid, work_g1, & - start=(/1,1,1,lnrec/), & + start=(/1,1,1,lnrec/), & count=(/nx,ny,nfreq,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -1725,12 +1725,12 @@ subroutine ice_read_nc_point(fid, nrec, varname, work, diag, & integer (kind=int_kind), intent(in) :: & fid , & ! file id - nrec ! record number + nrec ! record number logical (kind=log_kind), intent(in) :: & diag ! if true, write diagnostic output - character (char_len), intent(in) :: & + character (char_len), intent(in) :: & varname ! field name in netcdf file integer (kind=int_kind), optional, intent(in) :: & @@ -1746,7 +1746,7 @@ subroutine ice_read_nc_point(fid, nrec, varname, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & varid, & ! netcdf id for field status, & ! status output from netcdf routines ndim, nvar, & ! sizes of netcdf file @@ -1754,7 +1754,7 @@ subroutine ice_read_nc_point(fid, nrec, varname, work, diag, & ndims, & ! number of dimensions dimlen ! dimension size - integer (kind=int_kind), dimension(10) :: & + integer (kind=int_kind), dimension(10) :: & dimids ! generic size dimids real (kind=dbl_kind), dimension(1) :: & @@ -1805,8 +1805,8 @@ subroutine ice_read_nc_point(fid, nrec, varname, work, diag, & ! Read point variable !-------------------------------------------------------------- - status = nf90_get_var(fid, varid, workg, & - start= (/ lnrec /), & + status = nf90_get_var(fid, varid, workg, & + start= (/ lnrec /), & count=(/ 1 /)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -1819,8 +1819,8 @@ subroutine ice_read_nc_point(fid, nrec, varname, work, diag, & !------------------------------------------------------------------- if (my_task==master_task .and. diag) then - write(nu_diag,'(2a,i8,a,i8,2a)') & - subname,' fid= ',fid, ', lnrec = ',lnrec, & + write(nu_diag,'(2a,i8,a,i8,2a)') & + subname,' fid= ',fid, ', lnrec = ',lnrec, & ', varname = ',trim(varname) status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar @@ -1830,7 +1830,7 @@ subroutine ice_read_nc_point(fid, nrec, varname, work, diag, & enddo endif - work = workg(1) + work = workg(1) #else call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & @@ -1870,11 +1870,7 @@ subroutine ice_read_nc_1D(fid, varname, work, diag, & integer (kind=int_kind) :: & varid, & ! netcdf id for field status, & ! status output from netcdf routines - ndim, nvar, & ! sizes of netcdf file - dimlen ! 
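The ice_read_nc_* readers above all reduce to the same inquire-then-slab read; a stripped-down 2D version, assuming the record dimension is last:

      status = nf90_inq_varid(fid, trim(varname), varid)
      if (status /= nf90_noerr) call abort_ice(subname// &
         ' ERROR: Cannot find variable '//trim(varname))
      status = nf90_get_var(fid, varid, work_g1, &
                            start=(/1,1,lnrec/), &
                            count=(/nx_global,ny_global,1/))
      if (status /= nf90_noerr) call abort_ice(subname// &
         ' ERROR: Cannot get variable '//trim(varname))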
size of dimension - - character (char_len) :: & - dimname ! dimension name + ndim, nvar ! sizes of netcdf file real (kind=dbl_kind), dimension(xdim) :: & workg ! output array (real, 8-byte) @@ -1958,11 +1954,7 @@ subroutine ice_read_nc_2D(fid, varname, work, diag, & integer (kind=int_kind) :: & varid, & ! netcdf id for field status, & ! status output from netcdf routines - ndim, nvar, & ! sizes of netcdf file - dimlen ! size of dimension - - character (char_len) :: & - dimname ! dimension name + ndim, nvar ! sizes of netcdf file real (kind=dbl_kind), dimension(xdim,ydim) :: & workg ! output array (real, 8-byte) @@ -2049,11 +2041,7 @@ subroutine ice_read_nc_3D(fid, varname, work, diag, & integer (kind=int_kind) :: & varid, & ! netcdf id for field status, & ! status output from netcdf routines - ndim, nvar, & ! sizes of netcdf file - dimlen ! size of dimension - - character (char_len) :: & - dimname ! dimension name + ndim, nvar ! sizes of netcdf file real (kind=dbl_kind), dimension(xdim,ydim,zdim) :: & workg ! output array (real, 8-byte) @@ -2121,12 +2109,12 @@ subroutine ice_read_nc_z(fid, nrec, varname, work, diag, & integer (kind=int_kind), intent(in) :: & fid , & ! file id - nrec ! record number + nrec ! record number logical (kind=log_kind), intent(in) :: & diag ! if true, write diagnostic output - character (char_len), intent(in) :: & + character (char_len), intent(in) :: & varname ! field name in netcdf file integer (kind=int_kind), optional, intent(in) :: & @@ -2143,7 +2131,7 @@ subroutine ice_read_nc_z(fid, nrec, varname, work, diag, & work_z ! netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & varid, & ! netcdf id for field status, & ! status output from netcdf routines ndim, nvar, & ! sizes of netcdf file @@ -2151,11 +2139,11 @@ subroutine ice_read_nc_z(fid, nrec, varname, work, diag, & ndims, & ! number of dimensions dimlen ! dimension size - integer (kind=int_kind), dimension(10) :: & + integer (kind=int_kind), dimension(10) :: & dimids ! generic size dimids character (char_len) :: & - dimname ! dimension name + dimname ! dimension name integer (kind=int_kind) :: lnrec ! local value of nrec @@ -2204,11 +2192,11 @@ subroutine ice_read_nc_z(fid, nrec, varname, work, diag, & endif !-------------------------------------------------------------- - ! Read global array + ! Read global array !-------------------------------------------------------------- status = nf90_get_var( fid, varid, work_z, & - start=(/1,lnrec/), & + start=(/1,lnrec/), & count=(/nilyr,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -2221,8 +2209,8 @@ subroutine ice_read_nc_z(fid, nrec, varname, work, diag, & !------------------------------------------------------------------- if (my_task==master_task .and. diag) then - write(nu_diag,'(2a,i8,a,i8,2a)') & - subname,' fid= ',fid, ', lnrec = ',lnrec, & + write(nu_diag,'(2a,i8,a,i8,2a)') & + subname,' fid= ',fid, ', lnrec = ',lnrec, & ', varname = ',trim(varname) status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar @@ -2256,7 +2244,7 @@ subroutine ice_write_nc_xy(fid, nrec, varid, work, diag, & integer (kind=int_kind), intent(in) :: & fid , & ! file id varid , & ! variable id - nrec ! record number + nrec ! record number logical (kind=log_kind), intent(in) :: & diag ! if true, write diagnostic output @@ -2276,7 +2264,7 @@ subroutine ice_write_nc_xy(fid, nrec, varid, work, diag, & #ifdef USE_NETCDF ! 
netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & status ! status output from netcdf routines ! ndim, nvar, & ! sizes of netcdf file ! id, & ! dimension index @@ -2327,11 +2315,11 @@ subroutine ice_write_nc_xy(fid, nrec, varid, work, diag, & if (my_task == master_task) then !-------------------------------------------------------------- - ! Write global array + ! Write global array !-------------------------------------------------------------- status = nf90_put_var( fid, varid, work_g1, & - start=(/1,1,nrec/), & + start=(/1,1,nrec/), & count=(/nx,ny,1/)) endif ! my_task = master_task @@ -2341,8 +2329,8 @@ subroutine ice_write_nc_xy(fid, nrec, varid, work, diag, & !------------------------------------------------------------------- if (my_task==master_task .and. diag) then -! write(nu_diag,*) & -! subname,' fid= ',fid, ', nrec = ',nrec, & +! write(nu_diag,*) & +! subname,' fid= ',fid, ', nrec = ',nrec, & ! ', varid = ',varid ! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) ! write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar @@ -2357,7 +2345,7 @@ subroutine ice_write_nc_xy(fid, nrec, varid, work, diag, & endif deallocate(work_g1) - + #else call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & file=__FILE__, line=__LINE__) @@ -2379,7 +2367,7 @@ subroutine ice_write_nc_xyz(fid, nrec, varid, work, diag, & integer (kind=int_kind), intent(in) :: & fid , & ! file id varid , & ! variable id - nrec ! record number + nrec ! record number logical (kind=log_kind), intent(in) :: & diag ! if true, write diagnostic output @@ -2399,7 +2387,7 @@ subroutine ice_write_nc_xyz(fid, nrec, varid, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & n, & ! ncat index status ! status output from netcdf routines ! ndim, nvar, & ! sizes of netcdf file @@ -2457,11 +2445,11 @@ subroutine ice_write_nc_xyz(fid, nrec, varid, work, diag, & if (my_task == master_task) then !-------------------------------------------------------------- - ! Read global array + ! Read global array !-------------------------------------------------------------- status = nf90_put_var( fid, varid, work_g1, & - start=(/1,1,1,nrec/), & + start=(/1,1,1,nrec/), & count=(/nx,ny,ncat,1/)) endif ! my_task = master_task @@ -2471,8 +2459,8 @@ subroutine ice_write_nc_xyz(fid, nrec, varid, work, diag, & !------------------------------------------------------------------- if (my_task==master_task .and. diag) then -! write(nu_diag,*) & -! subname,' fid= ',fid, ', nrec = ',nrec, & +! write(nu_diag,*) & +! subname,' fid= ',fid, ', nrec = ',nrec, & ! ', varid = ',varid ! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) ! write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar @@ -2491,14 +2479,14 @@ subroutine ice_write_nc_xyz(fid, nrec, varid, work, diag, & endif deallocate(work_g1) - + #else call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & file=__FILE__, line=__LINE__) #endif end subroutine ice_write_nc_xyz - + !======================================================================= ! Read a netcdf file. @@ -2506,15 +2494,15 @@ end subroutine ice_write_nc_xyz ! work_g is a real array ! ! Adapted by William Lipscomb, LANL, from ice_read -! Adapted by Ann Keen, Met Office, to read from a netcdf file +! Adapted by Ann Keen, Met Office, to read from a netcdf file subroutine ice_read_global_nc (fid, nrec, varname, work_g, diag) integer (kind=int_kind), intent(in) :: & fid , & ! file id - nrec ! 
record number + nrec ! record number - character (char_len), intent(in) :: & + character (char_len), intent(in) :: & varname ! field name in netcdf file real (kind=dbl_kind), dimension(nx_global,ny_global), intent(out) :: & @@ -2529,12 +2517,12 @@ subroutine ice_read_global_nc (fid, nrec, varname, work_g, diag) #ifdef USE_NETCDF ! netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & varid, & ! netcdf id for field status ! status output from netcdf routines ! ndim, nvar, & ! sizes of netcdf file ! id, & ! dimension index -! dimlen ! size of dimension +! dimlen ! size of dimension real (kind=dbl_kind) :: & amin, amax, asum ! min, max values and sum of input array @@ -2551,7 +2539,7 @@ subroutine ice_read_global_nc (fid, nrec, varname, work_g, diag) else allocate(work_g3(1,1)) ! to save memory endif - work_g3(:,:) = c0 + work_g3(:,:) = c0 endif work_g(:,:) = c0 @@ -2569,9 +2557,9 @@ subroutine ice_read_global_nc (fid, nrec, varname, work_g, diag) endif !-------------------------------------------------------------- - ! Read global array + ! Read global array !-------------------------------------------------------------- - + if (orca_halogrid) then status = nf90_get_var( fid, varid, work_g3, & start=(/1,1,nrec/), & @@ -2583,7 +2571,7 @@ subroutine ice_read_global_nc (fid, nrec, varname, work_g, diag) work_g=work_g3(2:nx_global+1,1:ny_global) else status = nf90_get_var( fid, varid, work_g, & - start=(/1,1,nrec/), & + start=(/1,1,nrec/), & count=(/nx_global,ny_global,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -2597,8 +2585,8 @@ subroutine ice_read_global_nc (fid, nrec, varname, work_g, diag) !------------------------------------------------------------------- if (my_task == master_task .and. diag) then -! write(nu_diag,*) & -! subname,' fid= ',fid, ', nrec = ',nrec, & +! write(nu_diag,*) & +! subname,' fid= ',fid, ', nrec = ',nrec, & ! ', varname = ',trim(varname) ! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) ! write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar @@ -2638,7 +2626,7 @@ subroutine ice_close_nc(fid) #ifdef USE_NETCDF integer (kind=int_kind) :: & - status ! status variable from netCDF routine + status ! status variable from netCDF routine if (my_task == master_task) then status = nf90_close(fid) @@ -2667,13 +2655,13 @@ subroutine ice_read_nc_uv(fid, nrec, nzlev, varname, work, diag, & integer (kind=int_kind), intent(in) :: & fid , & ! file id - nrec , & ! record number + nrec , & ! record number nzlev ! z level logical (kind=log_kind), intent(in) :: & diag ! if true, write diagnostic output - character (len=*), intent(in) :: & + character (len=*), intent(in) :: & varname ! field name in netcdf file real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(out) :: & @@ -2692,7 +2680,7 @@ subroutine ice_read_nc_uv(fid, nrec, nzlev, varname, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & varid , & ! variable id status ! status output from netcdf routines ! ndim, nvar, & ! sizes of netcdf file @@ -2739,11 +2727,11 @@ subroutine ice_read_nc_uv(fid, nrec, nzlev, varname, work, diag, & endif !-------------------------------------------------------------- - ! Read global array + ! 
Read global array !-------------------------------------------------------------- status = nf90_get_var( fid, varid, work_g1, & - start=(/1,1,nzlev,nrec/), & + start=(/1,1,nzlev,nrec/), & count=(/nx,ny,1,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index 70e70621a..64b8d2101 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -9,7 +9,7 @@ ! 2004-05: Block structure added by William Lipscomb ! Restart module separated from history module ! 2006 ECH: Accepted some CESM code into mainstream CICE -! Converted to free source form (F90) +! Converted to free source form (F90) ! 2008 ECH: Rearranged order in which internal stresses are written and read ! 2010 ECH: Changed eice, esno to qice, qsno ! 2012 ECH: Added routines for reading/writing extended grid @@ -61,7 +61,7 @@ subroutine dumpfile(filename_spec) stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4, & stresspT, stressmT, stress12T, & - stresspU, stressmU, stress12U + stresspU, stressmU, stress12U use ice_flux, only: coszen use ice_grid, only: grid_ice, tmask, iceumask, iceemask, icenmask use ice_state, only: aicen, vicen, vsnon, trcrn, uvel, vvel, & @@ -85,7 +85,7 @@ subroutine dumpfile(filename_spec) character(len=*), parameter :: subname = '(dumpfile)' call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc, nt_sice_out=nt_sice, & - nt_qice_out=nt_qice, nt_qsno_out=nt_qsno) + nt_qice_out=nt_qice, nt_qsno_out=nt_qsno) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) @@ -162,7 +162,7 @@ subroutine dumpfile(filename_spec) !----------------------------------------------------------------- ! radiation fields !----------------------------------------------------------------- - + if (restart_coszen) call write_restart_field(nu_dump,0,coszen,'ruf8','coszen',1,diag) call write_restart_field(nu_dump,0,scale_factor,'ruf8','scale_factor',1,diag) @@ -214,7 +214,7 @@ subroutine dumpfile(filename_spec) !----------------------------------------------------------------- ! ice mask for dynamics !----------------------------------------------------------------- - + !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block @@ -316,7 +316,7 @@ subroutine restartfile (ice_ic) file=__FILE__, line=__LINE__) call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc, nt_sice_out=nt_sice, & - nt_qice_out=nt_qice, nt_qsno_out=nt_qsno) + nt_qice_out=nt_qice, nt_qsno_out=nt_qsno) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) @@ -440,7 +440,7 @@ subroutine restartfile (ice_ic) !----------------------------------------------------------------- if (my_task == master_task) write(nu_diag,*) & 'internal stress components' - + call read_restart_field(nu_restart,0,stressp_1,'ruf8', & 'stressp_1',1,diag,field_loc_center,field_type_scalar) ! 
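Every field written by dumpfile has a mirror read in restartfile, with the same record type and field name; schematically, for one of the stress components handled here:

      ! in dumpfile:
      call write_restart_field(nu_dump, 0, stressp_1, 'ruf8', &
                               'stressp_1', 1, diag)
      ! in restartfile:
      call read_restart_field(nu_restart, 0, stressp_1, 'ruf8', &
                              'stressp_1', 1, diag, &
                              field_loc_center, field_type_scalar)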
stressp_1 call read_restart_field(nu_restart,0,stressp_3,'ruf8', & @@ -755,7 +755,7 @@ subroutine restartfile_v4 (ice_ic) file=__FILE__, line=__LINE__) call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc, nt_sice_out=nt_sice, & - nt_qice_out=nt_qice, nt_qsno_out=nt_qsno) + nt_qice_out=nt_qice, nt_qsno_out=nt_qsno) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) @@ -885,7 +885,7 @@ subroutine restartfile_v4 (ice_ic) !----------------------------------------------------------------- if (my_task == master_task) write(nu_diag,*) & 'internal stress components' - + allocate (work_g1(nx_global,ny_global), & work_g2(nx_global,ny_global)) @@ -1055,7 +1055,7 @@ subroutine restartfile_v4 (ice_ic) ! creates new file filename = trim(restart_dir) // '/iced.converted' - call dumpfile(filename) + call dumpfile(filename) call final_restart ! stop diff --git a/cicecore/cicedynB/infrastructure/ice_restoring.F90 b/cicecore/cicedynB/infrastructure/ice_restoring.F90 index f21e50513..221d066df 100644 --- a/cicecore/cicedynB/infrastructure/ice_restoring.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restoring.F90 @@ -46,7 +46,7 @@ module ice_restoring !======================================================================= -! Allocates and initializes arrays needed for restoring the ice state +! Allocates and initializes arrays needed for restoring the ice state ! in cells surrounding the grid. @@ -115,7 +115,7 @@ subroutine ice_HaloRestore_init !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block, & !$OMP iglob,jglob,iblock,jblock) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -154,7 +154,7 @@ subroutine ice_HaloRestore_init !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block, & !$OMP i,j,n,nt,ibc,npad) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -280,7 +280,7 @@ subroutine ice_HaloRestore_init enddo if (my_task == master_task) & - write (nu_diag,*) 'ice restoring timescale = ',trestore,' days' + write (nu_diag,*) 'ice restoring timescale = ',trestore,' days' end subroutine ice_HaloRestore_init @@ -318,7 +318,7 @@ subroutine set_restore_var (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & Tair , & ! air temperature (K) - Tf ! freezing temperature (C) + Tf ! freezing temperature (C) real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr), intent(in) :: & salinz , & ! initial salinity profile @@ -395,7 +395,7 @@ subroutine set_restore_var (nx_block, ny_block, & vicen(i,j,n) = c0 vsnon(i,j,n) = c0 if (tmask(i,j)) then - trcrn(i,j,nt_Tsfc,n) = Tf(i,j) ! surface temperature + trcrn(i,j,nt_Tsfc,n) = Tf(i,j) ! surface temperature else trcrn(i,j,nt_Tsfc,n) = c0 ! on land gridcells endif @@ -526,7 +526,7 @@ subroutine set_restore_var (nx_block, ny_block, & ! surface temperature trcrn(i,j,nt_Tsfc,n) = Tsfc ! deg C - ! ice enthalpy, salinity + ! ice enthalpy, salinity do k = 1, nilyr trcrn(i,j,nt_qice+k-1,n) = qin(k) trcrn(i,j,nt_sice+k-1,n) = salinz(i,j,k) @@ -569,7 +569,7 @@ subroutine ice_HaloRestore i,j,iblk,nt,n, &! dummy loop indices ilo,ihi,jlo,jhi, &! beginning and end of physical domain ibc, &! ghost cell column or row - ntrcr, &! + ntrcr, &! npad ! 
padding column/row counter type (block) :: & @@ -611,7 +611,7 @@ subroutine ice_HaloRestore !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block, & !$OMP i,j,n,nt,ibc,npad) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo diff --git a/cicecore/cicedynB/infrastructure/io/io_binary/ice_history_write.F90 b/cicecore/cicedynB/infrastructure/io/io_binary/ice_history_write.F90 index b98e09814..2a3f042c3 100644 --- a/cicecore/cicedynB/infrastructure/io/io_binary/ice_history_write.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_binary/ice_history_write.F90 @@ -6,7 +6,7 @@ ! Elizabeth C. Hunke and William H. Lipscomb, LANL ! C. M. Bitz, UW ! -! 2004 WHL: Block structure added +! 2004 WHL: Block structure added ! 2006 ECH: Accepted some CESM code into mainstream CICE ! Added ice_present, aicen, vicen; removed aice1...10, vice1...1. ! Added histfreq_n and histfreq='h' options, removed histfreq='w' @@ -25,7 +25,7 @@ module ice_history_write implicit none private public :: ice_write_hist - + !======================================================================= contains diff --git a/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 index 503bd18ab..b2b438ebe 100644 --- a/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 @@ -17,7 +17,7 @@ module ice_restart use ice_fileunits, only: nu_dump_lvl, nu_dump_pond, nu_dump_hbrine use ice_fileunits, only: nu_dump_iso, nu_dump_snow use ice_fileunits, only: nu_dump_bgc, nu_dump_aero, nu_dump_fsd - use ice_fileunits, only: nu_restart, nu_restart_eap, nu_restart_FY, nu_restart_age + use ice_fileunits, only: nu_restart, nu_restart_eap, nu_restart_FY, nu_restart_age use ice_fileunits, only: nu_restart_lvl, nu_restart_pond, nu_restart_hbrine use ice_fileunits, only: nu_restart_bgc, nu_restart_aero, nu_restart_fsd use ice_fileunits, only: nu_restart_iso, nu_restart_snow @@ -98,7 +98,7 @@ subroutine init_restart_read(ice_ic) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (present(ice_ic)) then + if (present(ice_ic)) then filename = trim(ice_ic) else if (my_task == master_task) then @@ -131,7 +131,7 @@ subroutine init_restart_read(ice_ic) call broadcast_scalar(time_forc,master_task) call broadcast_scalar(myear,master_task) call set_date_from_timesecs(timesecs) - + istep1 = istep0 ! Supplemental restart files @@ -464,7 +464,7 @@ subroutine init_restart_write(filename_spec) restart_file(1:lenstr(restart_file)),'.', & myear,'-',mmonth,'-',mday,'-',msec end if - + ! write pointer (path/file) if (my_task == master_task) then open(nu_rst_pointer,file=pointer_file) @@ -809,7 +809,7 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3, & endif end subroutine read_restart_field - + !======================================================================= ! Writes a single restart field. diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 index 97bb72dab..019ab8ce9 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 @@ -9,7 +9,7 @@ ! Elizabeth C. Hunke and William H. Lipscomb, LANL ! C. M. Bitz, UW ! -! 
2004 WHL: Block structure added +! 2004 WHL: Block structure added ! 2006 ECH: Accepted some CESM code into mainstream CICE ! Added ice_present, aicen, vicen; removed aice1...10, vice1...1. ! Added histfreq_n and histfreq='h' options, removed histfreq='w' @@ -30,7 +30,7 @@ module ice_history_write implicit none private public :: ice_write_hist - + !======================================================================= contains @@ -48,7 +48,7 @@ subroutine ice_write_hist (ns) use ice_blocks, only: nx_block, ny_block use ice_broadcast, only: broadcast_scalar use ice_calendar, only: msec, timesecs, idate, idate0, write_ic, & - histfreq, histfreq_n, days_per_year, use_leap_years, dayyr, & + histfreq, days_per_year, use_leap_years, dayyr, & hh_init, mm_init, ss_init use ice_communicate, only: my_task, master_task use ice_domain, only: distrb_info @@ -60,7 +60,10 @@ subroutine ice_write_hist (ns) lont_bounds, latt_bounds, lonu_bounds, latu_bounds, & lonn_bounds, latn_bounds, lone_bounds, late_bounds use ice_history_shared - use ice_restart_shared, only: runid, lcdf64 + use ice_restart_shared, only: lcdf64 +#ifdef CESMCOUPLED + use ice_restart_shared, only: runid +#endif #ifdef USE_NETCDF use netcdf #endif @@ -444,7 +447,7 @@ subroutine ice_write_hist (ns) dimidex(4)=kmtidb dimidex(5)=kmtida dimidex(6)=fmtid - + do i = 1, nvar_grdz if (igrdz(i)) then status = nf90_def_var(ncid, var_grdz(i)%short_name, & @@ -779,7 +782,7 @@ subroutine ice_write_hist (ns) work1 = ELAT*rad_to_deg call gather_global(work_g1,work1,master_task,distrb_info) END SELECT - + if (my_task == master_task) then status = nf90_inq_varid(ncid, var_coord(i)%short_name, varid) if (status /= nf90_noerr) call abort_ice(subname// & @@ -900,7 +903,7 @@ subroutine ice_write_hist (ns) call broadcast_scalar(var_nverts(i)%short_name,master_task) SELECT CASE (var_nverts(i)%short_name) CASE ('lont_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts work1(:,:,:) = lont_bounds(ivertex,:,:,:) call gather_global(work_g1, work1, master_task, distrb_info) if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 index 534637bbb..f647bd96b 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 @@ -59,7 +59,7 @@ subroutine init_restart_read(ice_ic) character(len=*), parameter :: subname = '(init_restart_read)' #ifdef USE_NETCDF - if (present(ice_ic)) then + if (present(ice_ic)) then filename = trim(ice_ic) else if (my_task == master_task) then @@ -78,7 +78,7 @@ subroutine init_restart_read(ice_ic) status = nf90_open(trim(filename), nf90_nowrite, ncid) if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: reading restart ncfile '//trim(filename)) - + if (use_restart_time) then status1 = nf90_noerr status = nf90_get_att(ncid, nf90_global, 'istep1', istep0) @@ -262,12 +262,12 @@ subroutine init_restart_write(filename_spec) call define_rest_field(ncid,'uvelN',dims) call define_rest_field(ncid,'vvelN',dims) endif - + if (grid_ice == 'C') then call define_rest_field(ncid,'uvelE',dims) call define_rest_field(ncid,'vvelN',dims) endif - + if (restart_coszen) call define_rest_field(ncid,'coszen',dims) call define_rest_field(ncid,'scale_factor',dims) @@ -367,11 +367,11 @@ subroutine init_restart_write(filename_spec) enddo endif if (tr_bgc_Fe ) then - do k=1,n_fed + do k=1,n_fed write(nchar,'(i3.3)') k call 
define_rest_field(ncid,'fed'//trim(nchar),dims) enddo - do k=1,n_fep + do k=1,n_fep write(nchar,'(i3.3)') k call define_rest_field(ncid,'fep'//trim(nchar),dims) enddo @@ -482,17 +482,17 @@ subroutine init_restart_write(filename_spec) if (tr_bgc_PON) & call define_rest_field(ncid,'bgc_PON' ,dims) if (tr_bgc_DON) then - do k = 1, n_don + do k = 1, n_don write(nchar,'(i3.3)') k call define_rest_field(ncid,'bgc_DON'//trim(nchar) ,dims) enddo endif if (tr_bgc_Fe ) then - do k = 1, n_fed + do k = 1, n_fed write(nchar,'(i3.3)') k call define_rest_field(ncid,'bgc_Fed'//trim(nchar) ,dims) enddo - do k = 1, n_fep + do k = 1, n_fep write(nchar,'(i3.3)') k call define_rest_field(ncid,'bgc_Fep'//trim(nchar) ,dims) enddo @@ -557,7 +557,7 @@ subroutine init_restart_write(filename_spec) call define_rest_field(ncid,'zSalinity'//trim(nchar),dims) enddo endif - + if (z_tracers) then if (tr_zaero) then do n = 1, n_zaero @@ -657,14 +657,14 @@ subroutine init_restart_write(filename_spec) enddo endif if (tr_bgc_Fe ) then - do n = 1, n_fed + do n = 1, n_fed write(ncharb,'(i3.3)') n do k = 1, nblyr+3 write(nchar,'(i3.3)') k call define_rest_field(ncid,'bgc_Fed'//trim(ncharb)//trim(nchar),dims) enddo enddo - do n = 1, n_fep + do n = 1, n_fep write(ncharb,'(i3.3)') n do k = 1, nblyr+3 write(nchar,'(i3.3)') k @@ -776,7 +776,7 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3, & #endif end subroutine read_restart_field - + !======================================================================= ! Writes a single restart field. @@ -818,7 +818,7 @@ subroutine write_restart_field(nu,nrec,work,atype,vname,ndim3,diag) #ifdef USE_NETCDF status = nf90_inq_varid(ncid,trim(vname),varid) - if (ndim3 == ncat) then + if (ndim3 == ncat) then if (restart_ext) then call ice_write_nc(ncid, 1, varid, work, diag, restart_ext, varname=trim(vname)) else @@ -892,7 +892,7 @@ subroutine define_rest_field(ncid, vname, dims) call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & file=__FILE__, line=__LINE__) #endif - + end subroutine define_rest_field !======================================================================= diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 index 92f7663a2..6407d8c76 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 @@ -6,7 +6,7 @@ ! Elizabeth C. Hunke and William H. Lipscomb, LANL ! C. M. Bitz, UW ! -! 2004 WHL: Block structure added +! 2004 WHL: Block structure added ! 2006 ECH: Accepted some CESM code into mainstream CICE ! Added ice_present, aicen, vicen; removed aice1...10, vice1...1. ! Added histfreq_n and histfreq='h' options, removed histfreq='w' @@ -27,7 +27,7 @@ module ice_history_write implicit none private public :: ice_write_hist - + !======================================================================= contains @@ -407,7 +407,7 @@ subroutine ice_write_hist (ns) endif if (f_bounds) then status = pio_put_att(File, varid, 'bounds', trim(coord_bounds(i))) - endif + endif enddo ! 
Extra dimensions (NCAT, NZILYR, NZSLYR, NZBLYR, NZALYR, NFSD) @@ -446,14 +446,14 @@ subroutine ice_write_hist (ns) if (f_bounds) then status = pio_def_var(File, trim(var_nverts(i)%short_name), & lprecision,dimid_nverts, varid) - status = & + status = & pio_put_att(File,varid, 'long_name', trim(var_nverts(i)%long_name)) status = & pio_put_att(File, varid, 'units', trim(var_nverts(i)%units)) call ice_write_hist_fill(File,varid,var_nverts(i)%short_name,history_precision) endif enddo - + !----------------------------------------------------------------- ! define attributes for time-variant variables !----------------------------------------------------------------- @@ -507,7 +507,7 @@ subroutine ice_write_hist (ns) call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Dz - + !----------------------------------------------------------------- ! 3D (biology ice layers) !----------------------------------------------------------------- @@ -690,7 +690,7 @@ subroutine ice_write_hist (ns) bnd_start = (/1,1/) bnd_length = (/2,1/) status = pio_put_var(File,varid,ival=time_bounds, & - start=bnd_start(:),count=bnd_length(:)) + start=bnd_start(:),count=bnd_length(:)) endif !----------------------------------------------------------------- @@ -738,7 +738,7 @@ subroutine ice_write_hist (ns) status = pio_inq_varid(File, var_grdz(i)%short_name, varid) SELECT CASE (var_grdz(i)%short_name) CASE ('NCAT') - status = pio_put_var(File, varid, hin_max(1:ncat_hist)) + status = pio_put_var(File, varid, hin_max(1:ncat_hist)) CASE ('NFSD') status = pio_put_var(File, varid, floe_rad_c(1:nfsd_hist)) CASE ('VGRDi') @@ -826,35 +826,35 @@ subroutine ice_write_hist (ns) do i = 1, nvar_verts SELECT CASE (var_nverts(i)%short_name) CASE ('lont_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts workd3v(ivertex,:,:,:) = lont_bounds(ivertex,:,:,1:nblocks) enddo CASE ('latt_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts workd3v(ivertex,:,:,:) = latt_bounds(ivertex,:,:,1:nblocks) enddo CASE ('lonu_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts workd3v(ivertex,:,:,:) = lonu_bounds(ivertex,:,:,1:nblocks) enddo CASE ('latu_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts workd3v(ivertex,:,:,:) = latu_bounds(ivertex,:,:,1:nblocks) enddo CASE ('lonn_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts workd3v(ivertex,:,:,:) = lonn_bounds(ivertex,:,:,1:nblocks) enddo CASE ('latn_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts workd3v(ivertex,:,:,:) = latn_bounds(ivertex,:,:,1:nblocks) enddo CASE ('lone_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts workd3v(ivertex,:,:,:) = lone_bounds(ivertex,:,:,1:nblocks) enddo CASE ('late_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts workd3v(ivertex,:,:,:) = late_bounds(ivertex,:,:,1:nblocks) enddo END SELECT diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_pio.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_pio.F90 index d4149f7bf..b242f542b 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_pio.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_pio.F90 @@ -10,7 +10,7 @@ module ice_pio use ice_communicate use ice_domain, only : nblocks, blocks_ice use ice_domain_size - use ice_fileunits + use ice_fileunits use ice_exit, only: abort_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use pio @@ -52,7 +52,7 @@ subroutine ice_pio_init(mode, filename, File, clobber, cdf64, iotype) use perf_mod, only : t_initf #endif 
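! The ice_pio_initdecomp hunks below all build a degrees-of-freedom list
! that maps each local model point to its 1-based offset in the global
! array (halo and padding points get a zero entry, which PIO skips). A
! minimal standalone sketch of that offset formula, with toy sizes standing
! in for the ice_domain_size module variables -- not CICE code itself:
program dof_offset_sketch
   implicit none
   integer, parameter :: nx_global = 8, ny_global = 4  ! toy global grid
   integer :: lon, lat, k, dof
   lon = 3   ! global i-index of the point
   lat = 2   ! global j-index of the point
   k   = 2   ! level index within ndim3
   ! same expression as dof3d(n) in ice_pio_initdecomp_3d:
   dof = ((lat-1)*nx_global + lon) + (k-1)*nx_global*ny_global
   print *, dof   ! 43: offset 11 within a level, shifted by one 8x4 level
end program dof_offset_sketch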
#endif - + implicit none character(len=*) , intent(in), optional :: mode character(len=*) , intent(in), optional :: filename @@ -140,14 +140,14 @@ subroutine ice_pio_init(mode, filename, File, clobber, cdf64, iotype) #endif if (present(mode) .and. present(filename) .and. present(File)) then - + if (trim(mode) == 'write') then lclobber = .false. if (present(clobber)) lclobber=clobber - + lcdf64 = .false. if (present(cdf64)) lcdf64=cdf64 - + if (File%fh<0) then ! filename not open inquire(file=trim(filename),exist=exists) @@ -178,7 +178,7 @@ subroutine ice_pio_init(mode, filename, File, clobber, cdf64, iotype) ! filename is already open, just return endif end if - + if (trim(mode) == 'read') then inquire(file=trim(filename),exist=exists) if (exists) then @@ -205,7 +205,7 @@ subroutine ice_pio_initdecomp_2d(iodesc, precision) integer (kind=int_kind) :: & iblk,ilo,ihi,jlo,jhi,lon,lat,i,j,n,k - type(block) :: this_block + type(block) :: this_block integer(kind=int_kind), pointer :: dof2d(:) integer(kind=int_kind) :: lprecision @@ -218,12 +218,12 @@ subroutine ice_pio_initdecomp_2d(iodesc, precision) n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j=1,ny_block do i=1,nx_block n = n+1 @@ -249,7 +249,7 @@ subroutine ice_pio_initdecomp_2d(iodesc, precision) endif deallocate(dof2d) - + end subroutine ice_pio_initdecomp_2d !================================================================================ @@ -261,9 +261,9 @@ subroutine ice_pio_initdecomp_3d (ndim3, iodesc, remap, precision) logical, optional :: remap integer(kind=int_kind), optional, intent(in) :: precision integer (kind=int_kind) :: & - iblk,ilo,ihi,jlo,jhi,lon,lat,i,j,n,k + iblk,ilo,ihi,jlo,jhi,lon,lat,i,j,n,k - type(block) :: this_block + type(block) :: this_block logical :: lremap integer(kind=int_kind), pointer :: dof3d(:) integer(kind=int_kind) :: lprecision @@ -279,12 +279,12 @@ subroutine ice_pio_initdecomp_3d (ndim3, iodesc, remap, precision) ! Reorder the ndim3 and nblocks loops to avoid a temporary array in restart read/write n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - do k=1,ndim3 + do k=1,ndim3 do j=1,ny_block do i=1,nx_block n = n+1 @@ -295,7 +295,7 @@ subroutine ice_pio_initdecomp_3d (ndim3, iodesc, remap, precision) else lon = this_block%i_glob(i) lat = this_block%j_glob(j) - dof3d(n) = ((lat-1)*nx_global + lon) + (k-1)*nx_global*ny_global + dof3d(n) = ((lat-1)*nx_global + lon) + (k-1)*nx_global*ny_global endif enddo !i enddo !j @@ -303,9 +303,9 @@ subroutine ice_pio_initdecomp_3d (ndim3, iodesc, remap, precision) enddo ! 
iblk else n=0 - do k=1,ndim3 + do k=1,ndim3 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -320,7 +320,7 @@ subroutine ice_pio_initdecomp_3d (ndim3, iodesc, remap, precision) else lon = this_block%i_glob(i) lat = this_block%j_glob(j) - dof3d(n) = ((lat-1)*nx_global + lon) + (k-1)*nx_global*ny_global + dof3d(n) = ((lat-1)*nx_global + lon) + (k-1)*nx_global*ny_global endif enddo !i enddo !j @@ -350,9 +350,9 @@ subroutine ice_pio_initdecomp_3d_inner(ndim3, inner_dim, iodesc, precision) integer(kind=int_kind), optional, intent(in) :: precision integer (kind=int_kind) :: & - iblk,ilo,ihi,jlo,jhi,lon,lat,i,j,n,k + iblk,ilo,ihi,jlo,jhi,lon,lat,i,j,n,k - type(block) :: this_block + type(block) :: this_block integer(kind=int_kind), pointer :: dof3d(:) integer(kind=int_kind) :: lprecision @@ -365,12 +365,12 @@ subroutine ice_pio_initdecomp_3d_inner(ndim3, inner_dim, iodesc, precision) n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j=1,ny_block do i=1,nx_block do k=1,ndim3 @@ -410,9 +410,9 @@ subroutine ice_pio_initdecomp_4d (ndim3, ndim4, iodesc, precision) integer(kind=int_kind), optional, intent(in) :: precision integer (kind=int_kind) :: & - iblk,ilo,ihi,jlo,jhi,lon,lat,i,j,n,k,l + iblk,ilo,ihi,jlo,jhi,lon,lat,i,j,n,k,l - type(block) :: this_block + type(block) :: this_block integer(kind=int_kind), pointer :: dof4d(:) integer(kind=int_kind) :: lprecision @@ -427,12 +427,12 @@ subroutine ice_pio_initdecomp_4d (ndim3, ndim4, iodesc, precision) do l=1,ndim4 do k=1,ndim3 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j=1,ny_block do i=1,nx_block n = n+1 @@ -444,8 +444,8 @@ subroutine ice_pio_initdecomp_4d (ndim3, ndim4, iodesc, precision) lon = this_block%i_glob(i) lat = this_block%j_glob(j) dof4d(n) = ((lat-1)*nx_global + lon) & - + (k-1)*nx_global*ny_global & - + (l-1)*nx_global*ny_global*ndim3 + + (k-1)*nx_global*ny_global & + + (l-1)*nx_global*ny_global*ndim3 endif enddo !i enddo !j @@ -464,7 +464,7 @@ subroutine ice_pio_initdecomp_4d (ndim3, ndim4, iodesc, precision) deallocate(dof4d) end subroutine ice_pio_initdecomp_4d - + !================================================================================ end module ice_pio diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index 03a1fd07f..1124cc048 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -61,7 +61,7 @@ subroutine init_restart_read(ice_ic) character(len=*), parameter :: subname = '(init_restart_read)' - if (present(ice_ic)) then + if (present(ice_ic)) then filename = trim(ice_ic) else if (my_task == master_task) then @@ -83,7 +83,7 @@ subroutine init_restart_read(ice_ic) if (restart_format == 'pio_pnetcdf') iotype = PIO_IOTYPE_PNETCDF File%fh=-1 call ice_pio_init(mode='read', filename=trim(filename), File=File, iotype=iotype) - + call ice_pio_initdecomp(iodesc=iodesc2d, precision=8) call ice_pio_initdecomp(ndim3=ncat , iodesc=iodesc3d_ncat,remap=.true., precision=8) @@ -122,7 +122,7 @@ subroutine 
init_restart_read(ice_ic) ! call broadcast_scalar(time,master_task) ! call broadcast_scalar(time_forc,master_task) call broadcast_scalar(myear,master_task) - + istep1 = istep0 ! if runid is bering then need to correct npt for istep0 @@ -219,7 +219,7 @@ subroutine init_restart_write(filename_spec) restart_file(1:lenstr(restart_file)),'.', & myear,'-',mmonth,'-',mday,'-',msec end if - + if (restart_format(1:3) /= 'bin') filename = trim(filename) // '.nc' ! write pointer (path/file) @@ -230,7 +230,7 @@ subroutine init_restart_write(filename_spec) endif ! if (restart_format(1:3) == 'pio') then - + iotype = PIO_IOTYPE_NETCDF if (restart_format == 'pio_pnetcdf') iotype = PIO_IOTYPE_PNETCDF File%fh=-1 @@ -807,14 +807,14 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & write(nu_diag,*) ' min, max, sum =', amin, amax, asum, trim(vname) endif endif - + endif ! else ! call abort_ice(subname//"ERROR: Invalid restart_format: "//trim(restart_format)) ! endif ! restart_format end subroutine read_restart_field - + !======================================================================= ! Writes a single restart field. @@ -862,10 +862,10 @@ subroutine write_restart_field(nu,nrec,work,atype,vname,ndim3,diag) write(nu_diag,*)'Parallel restart file write: ',vname status = pio_inq_varid(File,trim(vname),vardesc) - + status = pio_inq_varndims(File, vardesc, ndims) - if (ndims==3) then + if (ndims==3) then call pio_write_darray(File, vardesc, iodesc3d_ncat,work(:,:,:,1:nblocks), & status, fillval=c0) elseif (ndims == 2) then @@ -937,7 +937,7 @@ subroutine define_rest_field(File, vname, dims) character(len=*), parameter :: subname = '(define_rest_field)' status = pio_def_var(File,trim(vname),pio_double,dims,vardesc) - + end subroutine define_rest_field !======================================================================= diff --git a/cicecore/drivers/direct/hadgem3/CICE.F90 b/cicecore/drivers/direct/hadgem3/CICE.F90 index b0176e801..fe322a04d 100644 --- a/cicecore/drivers/direct/hadgem3/CICE.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE.F90 @@ -1,21 +1,21 @@ !======================================================================= ! Copyright (c) 2022, Triad National Security, LLC ! All rights reserved. -! +! ! Copyright 2022. Triad National Security, LLC. This software was -! produced under U.S. Government contract DE-AC52-06NA25396 for Los +! produced under U.S. Government contract DE-AC52-06NA25396 for Los ! Alamos National Laboratory (LANL), which is operated by Triad -! National Security, LLC for the U.S. Department of Energy. The U.S. -! Government has rights to use, reproduce, and distribute this software. -! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY +! National Security, LLC for the U.S. Department of Energy. The U.S. +! Government has rights to use, reproduce, and distribute this software. +! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY ! WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF -! THIS SOFTWARE. If software is modified to produce derivative works, -! such modified software should be clearly marked, so as not to confuse +! THIS SOFTWARE. If software is modified to produce derivative works, +! such modified software should be clearly marked, so as not to confuse ! it with the version available from LANL. ! ! The full license and distribution policy are available from ! https://github.com/CICE-Consortium -! +! !======================================================================= ! ! 
Main driver routine for CICE. Initializes and steps through the model. @@ -48,7 +48,7 @@ program icemodel call CICE_Run !----------------------------------------------------------------- - ! Finalize CICE + ! Finalize CICE !----------------------------------------------------------------- call CICE_Finalize diff --git a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 index fb39375b4..87dc8d9a1 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 @@ -176,11 +176,11 @@ subroutine cice_init if (icepack_warnings_aborted()) call abort_ice(subname, & file=__FILE__,line= __LINE__) - if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical + if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical !property tables - ! Initialize shortwave components using swdn from previous timestep - ! if restarting. These components will be scaled to current forcing + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing ! in prep_radiation. if (trim(runtype) == 'continue' .or. restart) & call init_shortwave ! initialize radiative transfer @@ -215,7 +215,7 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler - if (write_ic) call accum_hist(dt) ! write initial conditions + if (write_ic) call accum_hist(dt) ! write initial conditions end subroutine cice_init @@ -297,7 +297,7 @@ subroutine init_restart if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (trim(runtype) == 'continue') then + if (trim(runtype) == 'continue') then ! start from core restart file call restartfile() ! given by pointer in ice_in call calendar() ! update time parameters @@ -308,17 +308,17 @@ subroutine init_restart ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file !!! uncomment if EAP restart data exists ! if (kdyn == 2) call read_restart_eap - endif + endif ! tracers - ! ice age tracer - if (tr_iage) then + ! ice age tracer + if (tr_iage) then if (trim(runtype) == 'continue') & restart_age = .true. if (restart_age) then call read_restart_age else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_age(trcrn(:,:,nt_iage,:,iblk)) enddo ! iblk endif @@ -329,7 +329,7 @@ subroutine init_restart if (restart_FY) then call read_restart_FY else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_FY(trcrn(:,:,nt_FY,:,iblk)) enddo ! iblk endif @@ -340,7 +340,7 @@ subroutine init_restart if (restart_lvl) then call read_restart_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), & trcrn(:,:,nt_vlvl,:,iblk)) enddo ! iblk @@ -354,7 +354,7 @@ subroutine init_restart if (restart_pond_cesm) then call read_restart_pond_cesm else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk)) enddo ! 
iblk @@ -368,7 +368,7 @@ subroutine init_restart if (restart_pond_lvl) then call read_restart_pond_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk), & @@ -383,7 +383,7 @@ subroutine init_restart if (restart_pond_topo) then call read_restart_pond_topo else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk)) @@ -404,7 +404,7 @@ subroutine init_restart if (restart_aero) then call read_restart_aero else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk)) enddo ! iblk endif ! .not. restart_aero diff --git a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 index c269ab382..91f7985bd 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 @@ -7,7 +7,7 @@ ! William H. Lipscomb, LANL ! ! 2006 ECH: moved exit timeLoop to prevent execution of unnecessary timestep -! 2006 ECH: Streamlined for efficiency +! 2006 ECH: Streamlined for efficiency ! 2006 ECH: Converted to free source form (F90) ! 2007 BPB: Modified Delta-Eddington shortwave interface ! 2008 ECH: moved ESMF code to its own driver @@ -46,14 +46,14 @@ subroutine CICE_Run use ice_calendar, only: stop_now, advance_timestep use ice_forcing, only: get_forcing_atmo, get_forcing_ocn, & get_wave_spec - use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, fzaero_data, & + use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, fzaero_data, & faero_default use ice_flux, only: init_flux_atm, init_flux_ocn use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_couple, timer_step logical (kind=log_kind) :: & tr_aero, tr_zaero, skl_bgc, z_tracers, wave_spec, tr_fsd - + character(len=*), parameter :: subname = '(CICE_Run)' !-------------------------------------------------------------------- @@ -118,7 +118,7 @@ subroutine CICE_Run ! end of timestep loop !-------------------------------------------------------------------- - call ice_timer_stop(timer_step) ! end timestepping loop timer + call ice_timer_stop(timer_step) ! end timestepping loop timer end subroutine CICE_Run @@ -163,7 +163,7 @@ subroutine ice_step timer_hist, timer_readwrite integer (kind=int_kind) :: & - iblk , & ! block index + iblk , & ! block index k , & ! dynamics supercycling index ktherm ! thermodynamics is off when ktherm = -1 @@ -238,7 +238,7 @@ subroutine ice_step !----------------------------------------------------------------- ! thermodynamics and biogeochemistry !----------------------------------------------------------------- - + call step_therm1 (dt, iblk) ! vertical thermodynamics call biogeochemistry (dt, iblk) ! biogeochemistry call step_therm2 (dt, iblk) ! ice thickness distribution thermo @@ -341,7 +341,7 @@ subroutine ice_step if (tr_fsd) call write_restart_fsd if (tr_aero) call write_restart_aero if (solve_zsal .or. skl_bgc .or. z_tracers) & - call write_restart_bgc + call write_restart_bgc if (tr_brine) call write_restart_hbrine if (kdyn == 2) call write_restart_eap call final_restart @@ -350,7 +350,7 @@ subroutine ice_step call ice_timer_stop(timer_readwrite) ! reading/writing end subroutine ice_step - + !======================================================================= ! ! 
Prepare for coupling @@ -380,12 +380,12 @@ subroutine coupling_prep (iblk) use ice_step_mod, only: ocean_mixed_layer use ice_timers, only: timer_couple, ice_timer_start, ice_timer_stop - integer (kind=int_kind), intent(in) :: & - iblk ! block index + integer (kind=int_kind), intent(in) :: & + iblk ! block index ! local variables - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ilo,ihi,jlo,jhi, & ! beginning and end of physical domain n , & ! thickness category index i,j , & ! horizontal indices @@ -466,7 +466,7 @@ subroutine coupling_prep (iblk) do j = jlo, jhi do i = ilo, ihi if (aicen(i,j,n,iblk) > puny) then - + alvdf(i,j,iblk) = alvdf(i,j,iblk) & + alvdfn(i,j,n,iblk)*aicen(i,j,n,iblk) alidf(i,j,iblk) = alidf(i,j,iblk) & @@ -491,7 +491,7 @@ subroutine coupling_prep (iblk) + apeffn(i,j,n,iblk)*aicen(i,j,n,iblk) snowfrac(i,j,iblk) = snowfrac(i,j,iblk) & ! for history + snowfracn(i,j,n,iblk)*aicen(i,j,n,iblk) - + endif ! aicen > puny enddo enddo @@ -521,8 +521,8 @@ subroutine coupling_prep (iblk) fsalt_ai (i,j,iblk) = fsalt (i,j,iblk) fhocn_ai (i,j,iblk) = fhocn (i,j,iblk) fswthru_ai(i,j,iblk) = fswthru(i,j,iblk) - fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) - fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) + fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) + fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) if (nbtrcr > 0) then do k = 1, nbtrcr @@ -543,7 +543,7 @@ subroutine coupling_prep (iblk) enddo !----------------------------------------------------------------- - ! Divide fluxes by ice area + ! Divide fluxes by ice area ! - the CESM coupler assumes fluxes are per unit ice area ! - also needed for global budget in diagnostics !----------------------------------------------------------------- @@ -568,21 +568,21 @@ subroutine coupling_prep (iblk) alvdf (:,:,iblk), alidf (:,:,iblk), & fzsal (:,:,iblk), fzsal_g (:,:,iblk), & flux_bio(:,:,1:nbtrcr,iblk)) - + !echmod - comment this out for efficiency, if .not. calc_Tsfc if (.not. calc_Tsfc) then !--------------------------------------------------------------- - ! If surface fluxes were provided, conserve these fluxes at ice - ! free points by passing to ocean. + ! If surface fluxes were provided, conserve these fluxes at ice + ! free points by passing to ocean. !--------------------------------------------------------------- - call sfcflux_to_ocn & + call sfcflux_to_ocn & (nx_block, ny_block, & tmask (:,:,iblk), aice_init(:,:,iblk), & fsurfn_f (:,:,:,iblk), flatn_f(:,:,:,iblk), & fresh (:,:,iblk), fhocn (:,:,iblk)) - endif + endif !echmod call ice_timer_stop(timer_couple) ! atm/ocn coupling @@ -592,10 +592,10 @@ end subroutine coupling_prep !======================================================================= ! ! If surface heat fluxes are provided to CICE instead of CICE calculating -! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can +! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can ! be provided at points which do not have ice. (This could be due to ! the heat fluxes being calculated on a lower resolution grid or the -! heat fluxes not recalculated at every CICE timestep.) At ice free points, +! heat fluxes not recalculated at every CICE timestep.) At ice free points, ! conserve energy and water by passing these fluxes to the ocean. ! ! author: A. McLaren, Met Office @@ -629,7 +629,7 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, & ! local variables integer (kind=int_kind) :: & i, j, n ! horizontal indices - + real (kind=dbl_kind) :: & puny, & ! Lsub, & ! 
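! The sfcflux_to_ocn comment block above is the rationale for the call in
! coupling_prep: when fsurfn_f/flatn_f come from the atmosphere model,
! cells that carry no ice would otherwise lose that energy and water. A
! simplified single-category sketch of the conservation step (argument
! names are stand-ins; the real routine loops over ncat and takes puny
! and Lsub from the model's parameter queries):
subroutine sfcflux_to_ocn_sketch(nx, ny, tmask, aice_init, &
                                 fsurf_f, flat_f, Lsub, fresh, fhocn)
   implicit none
   integer, parameter       :: dbl = kind(1.0d0)
   integer, intent(in)      :: nx, ny
   logical, intent(in)      :: tmask(nx,ny)      ! true on ocean points
   real(dbl), intent(in)    :: aice_init(nx,ny)  ! ice area at step start
   real(dbl), intent(in)    :: fsurf_f(nx,ny)    ! prescribed surface heat flux (W/m2)
   real(dbl), intent(in)    :: flat_f(nx,ny)     ! prescribed latent heat flux (W/m2)
   real(dbl), intent(in)    :: Lsub              ! latent heat of sublimation (J/kg)
   real(dbl), intent(inout) :: fresh(nx,ny)      ! freshwater flux to ocean (kg/m2/s)
   real(dbl), intent(inout) :: fhocn(nx,ny)      ! heat flux to ocean (W/m2)
   real(dbl), parameter :: puny = 1.0d-11
   integer :: i, j
   do j = 1, ny
      do i = 1, nx
         ! ocean cell that started the step ice-free: credit the
         ! prescribed fluxes to the ocean instead of dropping them
         if (tmask(i,j) .and. aice_init(i,j) <= puny) then
            fhocn(i,j) = fhocn(i,j) + fsurf_f(i,j) + flat_f(i,j)
            fresh(i,j) = fresh(i,j) + flat_f(i,j)/Lsub  ! sublimated water
         endif
      enddo
   enddo
end subroutine sfcflux_to_ocn_sketch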
@@ -656,7 +656,7 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, & enddo ! j enddo ! n -#endif +#endif end subroutine sfcflux_to_ocn diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 index fb39375b4..87dc8d9a1 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 @@ -176,11 +176,11 @@ subroutine cice_init if (icepack_warnings_aborted()) call abort_ice(subname, & file=__FILE__,line= __LINE__) - if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical + if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical !property tables - ! Initialize shortwave components using swdn from previous timestep - ! if restarting. These components will be scaled to current forcing + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing ! in prep_radiation. if (trim(runtype) == 'continue' .or. restart) & call init_shortwave ! initialize radiative transfer @@ -215,7 +215,7 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler - if (write_ic) call accum_hist(dt) ! write initial conditions + if (write_ic) call accum_hist(dt) ! write initial conditions end subroutine cice_init @@ -297,7 +297,7 @@ subroutine init_restart if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (trim(runtype) == 'continue') then + if (trim(runtype) == 'continue') then ! start from core restart file call restartfile() ! given by pointer in ice_in call calendar() ! update time parameters @@ -308,17 +308,17 @@ subroutine init_restart ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file !!! uncomment if EAP restart data exists ! if (kdyn == 2) call read_restart_eap - endif + endif ! tracers - ! ice age tracer - if (tr_iage) then + ! ice age tracer + if (tr_iage) then if (trim(runtype) == 'continue') & restart_age = .true. if (restart_age) then call read_restart_age else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_age(trcrn(:,:,nt_iage,:,iblk)) enddo ! iblk endif @@ -329,7 +329,7 @@ subroutine init_restart if (restart_FY) then call read_restart_FY else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_FY(trcrn(:,:,nt_FY,:,iblk)) enddo ! iblk endif @@ -340,7 +340,7 @@ subroutine init_restart if (restart_lvl) then call read_restart_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), & trcrn(:,:,nt_vlvl,:,iblk)) enddo ! iblk @@ -354,7 +354,7 @@ subroutine init_restart if (restart_pond_cesm) then call read_restart_pond_cesm else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk)) enddo ! 
iblk @@ -368,7 +368,7 @@ subroutine init_restart if (restart_pond_lvl) then call read_restart_pond_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk), & @@ -383,7 +383,7 @@ subroutine init_restart if (restart_pond_topo) then call read_restart_pond_topo else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk)) @@ -404,7 +404,7 @@ subroutine init_restart if (restart_aero) then call read_restart_aero else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk)) enddo ! iblk endif ! .not. restart_aero diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 index 272174fe7..ea6a65165 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 @@ -7,7 +7,7 @@ ! William H. Lipscomb, LANL ! ! 2006 ECH: moved exit timeLoop to prevent execution of unnecessary timestep -! 2006 ECH: Streamlined for efficiency +! 2006 ECH: Streamlined for efficiency ! 2006 ECH: Converted to free source form (F90) ! 2007 BPB: Modified Delta-Eddington shortwave interface ! 2008 ECH: moved ESMF code to its own driver @@ -46,14 +46,14 @@ subroutine CICE_Run use ice_calendar, only: stop_now, advance_timestep use ice_forcing, only: get_forcing_atmo, get_forcing_ocn, & get_wave_spec - use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, fzaero_data, & + use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, fzaero_data, & faero_default use ice_flux, only: init_flux_atm, init_flux_ocn use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_couple, timer_step logical (kind=log_kind) :: & tr_aero, tr_zaero, skl_bgc, z_tracers, wave_spec, tr_fsd - + character(len=*), parameter :: subname = '(CICE_Run)' !-------------------------------------------------------------------- @@ -118,7 +118,7 @@ subroutine CICE_Run ! end of timestep loop !-------------------------------------------------------------------- - call ice_timer_stop(timer_step) ! end timestepping loop timer + call ice_timer_stop(timer_step) ! end timestepping loop timer end subroutine CICE_Run @@ -163,7 +163,7 @@ subroutine ice_step timer_hist, timer_readwrite integer (kind=int_kind) :: & - iblk , & ! block index + iblk , & ! block index k , & ! dynamics supercycling index ktherm ! thermodynamics is off when ktherm = -1 @@ -238,7 +238,7 @@ subroutine ice_step !----------------------------------------------------------------- ! thermodynamics and biogeochemistry !----------------------------------------------------------------- - + call step_therm1 (dt, iblk) ! vertical thermodynamics call biogeochemistry (dt, iblk) ! biogeochemistry call step_therm2 (dt, iblk) ! ice thickness distribution thermo @@ -341,7 +341,7 @@ subroutine ice_step if (tr_fsd) call write_restart_fsd if (tr_aero) call write_restart_aero if (solve_zsal .or. skl_bgc .or. z_tracers) & - call write_restart_bgc + call write_restart_bgc if (tr_brine) call write_restart_hbrine if (kdyn == 2) call write_restart_eap call final_restart @@ -350,7 +350,7 @@ subroutine ice_step call ice_timer_stop(timer_readwrite) ! reading/writing end subroutine ice_step - + !======================================================================= ! ! 
Prepare for coupling @@ -380,12 +380,12 @@ subroutine coupling_prep (iblk) use ice_step_mod, only: ocean_mixed_layer use ice_timers, only: timer_couple, ice_timer_start, ice_timer_stop - integer (kind=int_kind), intent(in) :: & - iblk ! block index + integer (kind=int_kind), intent(in) :: & + iblk ! block index ! local variables - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ilo,ihi,jlo,jhi, & ! beginning and end of physical domain n , & ! thickness category index i,j , & ! horizontal indices @@ -466,7 +466,7 @@ subroutine coupling_prep (iblk) do j = jlo, jhi do i = ilo, ihi if (aicen(i,j,n,iblk) > puny) then - + alvdf(i,j,iblk) = alvdf(i,j,iblk) & + alvdfn(i,j,n,iblk)*aicen(i,j,n,iblk) alidf(i,j,iblk) = alidf(i,j,iblk) & @@ -491,7 +491,7 @@ subroutine coupling_prep (iblk) + apeffn(i,j,n,iblk)*aicen(i,j,n,iblk) snowfrac(i,j,iblk) = snowfrac(i,j,iblk) & ! for history + snowfracn(i,j,n,iblk)*aicen(i,j,n,iblk) - + endif ! aicen > puny enddo enddo @@ -521,8 +521,8 @@ subroutine coupling_prep (iblk) fsalt_ai (i,j,iblk) = fsalt (i,j,iblk) fhocn_ai (i,j,iblk) = fhocn (i,j,iblk) fswthru_ai(i,j,iblk) = fswthru(i,j,iblk) - fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) - fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) + fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) + fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) if (nbtrcr > 0) then do k = 1, nbtrcr @@ -543,7 +543,7 @@ subroutine coupling_prep (iblk) enddo !----------------------------------------------------------------- - ! Divide fluxes by ice area + ! Divide fluxes by ice area ! - the CESM coupler assumes fluxes are per unit ice area ! - also needed for global budget in diagnostics !----------------------------------------------------------------- @@ -570,21 +570,21 @@ subroutine coupling_prep (iblk) alvdf (:,:,iblk), alidf (:,:,iblk), & fzsal (:,:,iblk), fzsal_g (:,:,iblk), & flux_bio(:,:,1:nbtrcr,iblk)) - + !echmod - comment this out for efficiency, if .not. calc_Tsfc if (.not. calc_Tsfc) then !--------------------------------------------------------------- - ! If surface fluxes were provided, conserve these fluxes at ice - ! free points by passing to ocean. + ! If surface fluxes were provided, conserve these fluxes at ice + ! free points by passing to ocean. !--------------------------------------------------------------- - call sfcflux_to_ocn & + call sfcflux_to_ocn & (nx_block, ny_block, & tmask (:,:,iblk), aice_init(:,:,iblk), & fsurfn_f (:,:,:,iblk), flatn_f(:,:,:,iblk), & fresh (:,:,iblk), fhocn (:,:,iblk)) - endif + endif !echmod call ice_timer_stop(timer_couple) ! atm/ocn coupling @@ -594,10 +594,10 @@ end subroutine coupling_prep !======================================================================= ! ! If surface heat fluxes are provided to CICE instead of CICE calculating -! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can +! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can ! be provided at points which do not have ice. (This could be due to ! the heat fluxes being calculated on a lower resolution grid or the -! heat fluxes not recalculated at every CICE timestep.) At ice free points, +! heat fluxes not recalculated at every CICE timestep.) At ice free points, ! conserve energy and water by passing these fluxes to the ocean. ! ! author: A. McLaren, Met Office @@ -631,7 +631,7 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, & ! local variables integer (kind=int_kind) :: & i, j, n ! horizontal indices - + real (kind=dbl_kind) :: & puny, & ! Lsub, & ! 
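! coupling_prep's "Divide fluxes by ice area" block above rescales fluxes
! accumulated as grid-cell means into per-unit-ice-area values before the
! coupler sees them (the scale_fluxes call whose argument list appears in
! the hunk above). A hedged single-flux sketch of that rescaling, with
! illustrative names:
subroutine per_ice_area_sketch(aice, puny, flux)
   implicit none
   integer, parameter :: dbl = kind(1.0d0)
   real(dbl), intent(in)    :: aice  ! ice area fraction in the cell
   real(dbl), intent(in)    :: puny  ! small-number guard
   real(dbl), intent(inout) :: flux  ! in: cell mean; out: per unit ice area
   if (aice > puny) then
      ! e.g. a 10 W/m2 cell-mean flux over aice = 0.5 becomes
      ! 20 W/m2 over the ice-covered fraction of the cell
      flux = flux/aice
   else
      flux = 0.0_dbl                 ! no ice: nothing to report
   endif
end subroutine per_ice_area_sketch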
@@ -658,7 +658,7 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, & enddo ! j enddo ! n -#endif +#endif end subroutine sfcflux_to_ocn diff --git a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 index b33886954..cfc5bece9 100644 --- a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 @@ -193,11 +193,11 @@ subroutine cice_init(mpicom_ice) if (icepack_warnings_aborted()) call abort_ice(trim(subname), & file=__FILE__,line= __LINE__) - if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical + if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical !property tables - ! Initialize shortwave components using swdn from previous timestep - ! if restarting. These components will be scaled to current forcing + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing ! in prep_radiation. if (trim(runtype) == 'continue' .or. restart) & call init_shortwave ! initialize radiative transfer @@ -241,7 +241,7 @@ subroutine cice_init(mpicom_ice) call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler - if (write_ic) call accum_hist(dt) ! write initial conditions + if (write_ic) call accum_hist(dt) ! write initial conditions if (my_task == master_task) then call ice_memusage_print(nu_diag,subname//':end') @@ -332,7 +332,7 @@ subroutine init_restart if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (trim(runtype) == 'continue') then + if (trim(runtype) == 'continue') then ! start from core restart file call restartfile() ! given by pointer in ice_in call calendar() ! update time parameters @@ -343,17 +343,17 @@ subroutine init_restart ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file !!! uncomment if EAP restart data exists ! if (kdyn == 2) call read_restart_eap - endif + endif ! tracers - ! ice age tracer - if (tr_iage) then + ! ice age tracer + if (tr_iage) then if (trim(runtype) == 'continue') & restart_age = .true. if (restart_age) then call read_restart_age else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_age(trcrn(:,:,nt_iage,:,iblk)) enddo ! iblk endif @@ -364,7 +364,7 @@ subroutine init_restart if (restart_FY) then call read_restart_FY else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_FY(trcrn(:,:,nt_FY,:,iblk)) enddo ! iblk endif @@ -375,7 +375,7 @@ subroutine init_restart if (restart_lvl) then call read_restart_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), & trcrn(:,:,nt_vlvl,:,iblk)) enddo ! iblk @@ -389,7 +389,7 @@ subroutine init_restart if (restart_pond_cesm) then call read_restart_pond_cesm else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk)) enddo ! 
iblk @@ -403,7 +403,7 @@ subroutine init_restart if (restart_pond_lvl) then call read_restart_pond_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk), & @@ -418,7 +418,7 @@ subroutine init_restart if (restart_pond_topo) then call read_restart_pond_topo else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk)) @@ -469,7 +469,7 @@ subroutine init_restart if (restart_aero) then call read_restart_aero else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk)) enddo ! iblk endif ! .not. restart_aero diff --git a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 index 80ff3bd46..b96086c6d 100644 --- a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 @@ -7,7 +7,7 @@ ! William H. Lipscomb, LANL ! ! 2006 ECH: moved exit timeLoop to prevent execution of unnecessary timestep -! 2006 ECH: Streamlined for efficiency +! 2006 ECH: Streamlined for efficiency ! 2006 ECH: Converted to free source form (F90) ! 2007 BPB: Modified Delta-Eddington shortwave interface ! 2008 ECH: moved ESMF code to its own driver @@ -110,7 +110,7 @@ subroutine CICE_Run ! end of timestep loop !-------------------------------------------------------------------- - call ice_timer_stop(timer_step) ! end timestepping loop timer + call ice_timer_stop(timer_step) ! end timestepping loop timer end subroutine CICE_Run @@ -157,7 +157,7 @@ subroutine ice_step use ice_prescribed_mod integer (kind=int_kind) :: & - iblk , & ! block index + iblk , & ! block index k , & ! dynamics supercycling index ktherm ! thermodynamics is off when ktherm = -1 @@ -413,7 +413,7 @@ subroutine ice_step if (tr_iso) call write_restart_iso if (tr_aero) call write_restart_aero if (solve_zsal .or. skl_bgc .or. z_tracers) & - call write_restart_bgc + call write_restart_bgc if (tr_brine) call write_restart_hbrine if (kdyn == 2) call write_restart_eap call final_restart @@ -421,7 +421,7 @@ subroutine ice_step call ice_timer_stop(timer_readwrite) ! reading/writing end subroutine ice_step - + !======================================================================= ! ! Prepare for coupling @@ -456,12 +456,12 @@ subroutine coupling_prep (iblk) use ice_step_mod, only: ocean_mixed_layer use ice_timers, only: timer_couple, ice_timer_start, ice_timer_stop - integer (kind=int_kind), intent(in) :: & - iblk ! block index + integer (kind=int_kind), intent(in) :: & + iblk ! block index ! local variables - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ilo,ihi,jlo,jhi, & ! beginning and end of physical domain n , & ! thickness category index i,j , & ! horizontal indices @@ -599,8 +599,8 @@ subroutine coupling_prep (iblk) fsalt_ai (i,j,iblk) = fsalt (i,j,iblk) fhocn_ai (i,j,iblk) = fhocn (i,j,iblk) fswthru_ai(i,j,iblk) = fswthru(i,j,iblk) - fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) - fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) + fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) + fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) if (nbtrcr > 0) then do k = 1, nbtrcr @@ -621,7 +621,7 @@ subroutine coupling_prep (iblk) enddo !----------------------------------------------------------------- - ! Divide fluxes by ice area + ! Divide fluxes by ice area ! - the CESM coupler assumes fluxes are per unit ice area ! 
- also needed for global budget in diagnostics !----------------------------------------------------------------- @@ -652,7 +652,7 @@ subroutine coupling_prep (iblk) fiso_evap=fiso_evap(:,:,:,iblk), & fiso_ocn =fiso_ocn (:,:,:,iblk), & Uref=Uref(:,:,iblk), wind=wind(:,:,iblk) ) - + if (nbtrcr > 0 .or. skl_bgc) then call bgcflux_ice_to_ocn (nx_block, ny_block, & flux_bio(:,:,1:nbtrcr,iblk), & @@ -669,16 +669,16 @@ subroutine coupling_prep (iblk) if (.not. calc_Tsfc) then !--------------------------------------------------------------- - ! If surface fluxes were provided, conserve these fluxes at ice - ! free points by passing to ocean. + ! If surface fluxes were provided, conserve these fluxes at ice + ! free points by passing to ocean. !--------------------------------------------------------------- - call sfcflux_to_ocn & + call sfcflux_to_ocn & (nx_block, ny_block, & tmask (:,:,iblk), aice_init(:,:,iblk), & fsurfn_f (:,:,:,iblk), flatn_f(:,:,:,iblk), & fresh (:,:,iblk), fhocn (:,:,iblk)) - endif + endif !echmod call ice_timer_stop(timer_couple,iblk) ! atm/ocn coupling @@ -688,10 +688,10 @@ end subroutine coupling_prep !======================================================================= ! ! If surface heat fluxes are provided to CICE instead of CICE calculating -! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can +! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can ! be provided at points which do not have ice. (This could be due to ! the heat fluxes being calculated on a lower resolution grid or the -! heat fluxes not recalculated at every CICE timestep.) At ice free points, +! heat fluxes not recalculated at every CICE timestep.) At ice free points, ! conserve energy and water by passing these fluxes to the ocean. ! ! author: A. McLaren, Met Office @@ -724,7 +724,7 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, & ! local variables integer (kind=int_kind) :: & i, j, n ! horizontal indices - + real (kind=dbl_kind) :: & puny, & ! Lsub, & ! diff --git a/cicecore/drivers/mct/cesm1/CICE_copyright.txt b/cicecore/drivers/mct/cesm1/CICE_copyright.txt index 6eb3c9cca..14d97c481 100644 --- a/cicecore/drivers/mct/cesm1/CICE_copyright.txt +++ b/cicecore/drivers/mct/cesm1/CICE_copyright.txt @@ -1,17 +1,17 @@ ! Copyright (c) 2022, Triad National Security, LLC ! All rights reserved. -! +! ! Copyright 2022. Triad National Security, LLC. This software was -! produced under U.S. Government contract DE-AC52-06NA25396 for Los +! produced under U.S. Government contract DE-AC52-06NA25396 for Los ! Alamos National Laboratory (LANL), which is operated by Triad -! National Security, LLC for the U.S. Department of Energy. The U.S. -! Government has rights to use, reproduce, and distribute this software. -! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY +! National Security, LLC for the U.S. Department of Energy. The U.S. +! Government has rights to use, reproduce, and distribute this software. +! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY ! WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF -! THIS SOFTWARE. If software is modified to produce derivative works, -! such modified software should be clearly marked, so as not to confuse +! THIS SOFTWARE. If software is modified to produce derivative works, -! such modified software should be clearly marked, so as not to confuse ! it with the version available from LANL. ! ! The full license and distribution policy are available from ! 
diff --git a/cicecore/drivers/mct/cesm1/ice_comp_esmf.F90 b/cicecore/drivers/mct/cesm1/ice_comp_esmf.F90
index b0a78bfcd..454895410 100644
--- a/cicecore/drivers/mct/cesm1/ice_comp_esmf.F90
+++ b/cicecore/drivers/mct/cesm1/ice_comp_esmf.F90
@@ -56,7 +56,7 @@ module ice_comp_esmf
   use ice_timers
   use ice_kinds_mod, only : int_kind, dbl_kind, char_len_long, log_kind
-  use ice_boundary, only : ice_HaloUpdate
+  use ice_boundary, only : ice_HaloUpdate
   use ice_scam, only : scmlat, scmlon, single_column
   use ice_fileunits, only : nu_diag, inst_index, inst_name, inst_suffix, &
       release_all_fileunits
@@ -89,7 +89,7 @@ module ice_comp_esmf
   !
   ! !PRIVATE VARIABLES
-  integer (kind=int_kind) :: ICEID
+  integer (kind=int_kind) :: ICEID

   type(mct_gGrid) :: dom_i
   type(mct_gsMap) :: gsMap_i
@@ -140,7 +140,7 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc)
   !
   ! !DESCRIPTION:
   ! Initialize thermodynamic ice model and obtain relevant atmospheric model
-  ! arrays back from driver
+  ! arrays back from driver
   !
   ! !USES:
@@ -168,7 +168,7 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc)
   integer :: xoff,yoff
   integer :: nxg,nyg
   integer :: k, iblk
-
+
   character(len=256) :: drvarchdir       ! driver archive directory
   character(len=32)  :: starttype        ! infodata start type
   integer            :: start_ymd        ! Start date (YYYYMMDD)
@@ -207,7 +207,7 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc)

   call ice_cpl_indices_set()

-  ! duplicate the mpi communicator from the current VM
+  ! duplicate the mpi communicator from the current VM
   call ESMF_VMGetCurrent(vm, rc=rc)
   if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
@@ -218,7 +218,7 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc)
   if(rc /= 0) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)

   ! Initialize cice id
-
+
   call ESMF_AttributeGet(export_state, name="ID", value=ICEID, rc=rc)
   if (rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
@@ -324,14 +324,14 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc)
   ! use EClock to reset calendar information on initial start
   !---------------------------------------------------------------------------
-  ! - on restart run
+  ! - on restart run
   !   - istep0, time and time_forc are read from restart file
   !   - istep1 is set to istep0
   !   - idate is determined from time via the call to calendar (see below)
-  ! - on initial run
+  ! - on initial run
   !   - iyear, month and mday obtained from sync clock
   !   - time determined from iyear, month and mday
-  !   - istep0 and istep1 are set to 0
+  !   - istep0 and istep1 are set to 0

   call seq_timemgr_EClockGetData(EClock, &
       start_ymd=start_ymd, start_tod=start_tod, &
@@ -384,7 +384,7 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc)

   call calendar(time)               ! update calendar info
   if (write_ic) call accum_hist(dt) ! write initial conditions
-
+
   !---------------------------------------------------------------------------
   ! Initialize MCT attribute vectors and indices
   !---------------------------------------------------------------------------
@@ -413,12 +413,12 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc)
   !-----------------------------------------
   ! Set arrayspec for dom, l2x and x2l
   !-----------------------------------------
-
+
   call ESMF_ArraySpecSet(arrayspec, rank=2, typekind=ESMF_TYPEKIND_R8, rc=rc)
   if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)

   !-----------------------------------------
-  ! Create dom
+  ! Create dom
   !-----------------------------------------

   nfields = shr_string_listGetNum(trim(seq_flds_dom_fields))
@@ -430,11 +430,11 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc)
   call ESMF_AttributeSet(dom, name="mct_names", value=trim(seq_flds_dom_fields), rc=rc)
   if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)

-  ! Set values of dom
+  ! Set values of dom
   call ice_domain_esmf(dom)

-  !-----------------------------------------
-  ! Create i2x
+  !-----------------------------------------
+  ! Create i2x
   !-----------------------------------------

   ! 1d undistributed index of fields, 2d is packed data
@@ -447,9 +447,9 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc)
   call ESMF_AttributeSet(i2x, name="mct_names", value=trim(seq_flds_i2x_fields), rc=rc)
   if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
-
-  !-----------------------------------------
-  ! Create x2i
+
+  !-----------------------------------------
+  ! Create x2i
   !-----------------------------------------

   nfields = shr_string_listGetNum(trim(seq_flds_x2i_fields))
@@ -461,16 +461,16 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc)
   call ESMF_AttributeSet(x2i, name="mct_names", value=trim(seq_flds_x2i_fields), rc=rc)
   if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)

-  !-----------------------------------------
-  ! Add esmf arrays to import and export state
   !-----------------------------------------
-
+  ! Add esmf arrays to import and export state
+  !-----------------------------------------
+
   call ESMF_StateAdd(export_state, (/dom/), rc=rc)
   if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)

   call ESMF_StateAdd(export_state, (/i2x/), rc=rc)
   if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
-
+
   call ESMF_StateAdd(import_state, (/x2i/), rc=rc)
   if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
@@ -575,7 +575,7 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc)

   ! Error check
   if ((tr_aero .and. .not. atm_aero) .or. (tr_zaero .and. .not. atm_aero)) then
-     write(nu_diag,*) 'ice_import ERROR: atm_aero must be set for tr_aero or tr_zaero'
+     write(nu_diag,*) 'ice_import ERROR: atm_aero must be set for tr_aero or tr_zaero'
      call shr_sys_abort()
   end if
@@ -596,7 +596,7 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc)
!     write(shrlogunit,105) trim(subname)//' memory_write: model date = ',start_ymd,start_tod, &
!          ' memory = ',msize0,' MB (highwater) ',mrss0,' MB (usage)'
!  endif
-
+
  105  format( A, 2i8, A, f10.2, A, f10.2, A)

end subroutine ice_init_esmf
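The dom/i2x/x2i setup above repeats one pattern three times: create an ESMF array sized by the field list, tag it with the MCT field names, and add it to the matching state. A condensed sketch of one round trip — the ESMF_ArrayCreate call and its argument list are an assumption; only ESMF_ArraySpecSet, ESMF_AttributeSet, and ESMF_StateAdd are taken from the hunks:

   nfields = shr_string_listGetNum(trim(seq_flds_i2x_fields))
   i2x = ESMF_ArrayCreate(arrayspec, distgrid=distgrid, &          ! assumed signature
                          undistLBound=(/1/), undistUBound=(/nfields/), rc=rc)
   call ESMF_AttributeSet(i2x, name="mct_names", value=trim(seq_flds_i2x_fields), rc=rc)
   call ESMF_StateAdd(export_state, (/i2x/), rc=rc)

The mct_names attribute is what lets the driver translate between the packed ESMF array and the named MCT attribute vector.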
@@ -668,7 +668,7 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc)
   call shr_file_getLogUnit (shrlogunit)
   call shr_file_getLogLevel(shrloglev)
   call shr_file_setLogUnit (nu_diag)
-
+
   ! Determine time of next atmospheric shortwave calculation
   call ESMF_AttributeGet(export_state, name="nextsw_cday", value=nextsw_cday, rc=rc)
@@ -706,7 +706,7 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc)
   !-------------------------------------------------------------------
   ! get import state
   !-------------------------------------------------------------------
-
+
   call t_barrierf('cice_run_import_BARRIER',MPI_COMM_ICE)
   call t_startf ('cice_run_import')
@@ -724,7 +724,7 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc)
   endif
   call ice_timer_stop(timer_cplrecv)
   call t_stopf ('cice_run_import')
-
+
   !--------------------------------------------------------------------
   ! timestep update
   !--------------------------------------------------------------------
@@ -732,9 +732,9 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc)
   call CICE_Run()

   !-----------------------------------------------------------------
-  ! send export state to driver
+  ! send export state to driver
   !-----------------------------------------------------------------
-
+
   call t_barrierf('cice_run_export_BARRIER',MPI_COMM_ICE)
   call t_startf ('cice_run_export')
   call ice_timer_start(timer_cplsend)
@@ -752,7 +752,7 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc)
   endif
   call ice_timer_stop(timer_cplsend)
   call t_stopf ('cice_run_export')
-
+
   !--------------------------------------------------------------------
   ! check that internal clock is in sync with master clock
   !--------------------------------------------------------------------
@@ -767,7 +767,7 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc)
      call shr_sys_abort( SubName// &
         ":: Internal sea-ice clock not in sync with Sync Clock")
   end if
-
+
   ! reset shr logging to my original values

   call shr_file_setLogUnit (shrlogunit)
@@ -776,9 +776,9 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc)
   !-------------------------------------------------------------------
   ! stop timers and print timer info
   !-------------------------------------------------------------------
-  ! Need to have this logic here instead of in ice_final_esmf since
+  ! Need to have this logic here instead of in ice_final_esmf since
   ! the ice_final_esmf.F90 will still be called even in aqua-planet mode
-  ! Could put this logic in the driver - but it seems easier here
+  ! Could put this logic in the driver - but it seems easier here
   ! Need to stop this at the end of every run phase in a coupled run.

   call ice_timer_stop(timer_total)        ! stop timing
@@ -788,7 +788,7 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc)
      call ice_timer_print_all(stats=.true.) ! print timing information
      call release_all_fileunits
   end if
-
+
!  if(tod == 0) then
!     call shr_get_memusage(msize,mrss)
!     call shr_mpi_max(mrss, mrss0, MPI_COMM_ICE,trim(subname)//' mrss0')
@@ -799,7 +799,7 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc)
!  endif
!
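Stripped of error handling, the run phase above (and its MCT twin later in this patch) is a fixed import/step/export skeleton; all of the names below come from this patch:

   call t_barrierf('cice_run_import_BARRIER', MPI_COMM_ICE)
   call ice_timer_start(timer_cplrecv)
   call ice_import(x2i)              ! unpack coupler fields into CICE arrays
   call ice_timer_stop(timer_cplrecv)

   call CICE_Run()                   ! advance the model one coupling interval

   call t_barrierf('cice_run_export_BARRIER', MPI_COMM_ICE)
   call ice_timer_start(timer_cplsend)
   call ice_export(i2x)              ! pack CICE state and fluxes for the coupler
   call ice_timer_stop(timer_cplsend)

The clock-sync abort that follows the export guarantees the driver and the internal CICE calendar never silently drift apart.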
   endif
   call t_stopf ('cice_run_total')
-
+
  105  format( A, 2i8, A, f10.2, A, f10.2, A)

end subroutine ice_run_esmf
@@ -881,12 +881,12 @@ function ice_distgrid_esmf(gsize)
   n=0
   do iblk = 1, nblocks
-     this_block = get_block(blocks_ice(iblk),iblk)
+     this_block = get_block(blocks_ice(iblk),iblk)
      ilo = this_block%ilo
      ihi = this_block%ihi
      jlo = this_block%jlo
      jhi = this_block%jhi
-
+
      do j = jlo, jhi
      do i = ilo, ihi
         n = n+1
@@ -902,12 +902,12 @@ function ice_distgrid_esmf(gsize)
   allocate(gindex(lsize))
   n=0
   do iblk = 1, nblocks
-     this_block = get_block(blocks_ice(iblk),iblk)
+     this_block = get_block(blocks_ice(iblk),iblk)
      ilo = this_block%ilo
      ihi = this_block%ihi
      jlo = this_block%jlo
      jhi = this_block%jhi
-
+
      do j = jlo, jhi
      do i = ilo, ihi
         n = n+1
@@ -918,7 +918,7 @@ function ice_distgrid_esmf(gsize)
      enddo !i
      enddo !j
   enddo !iblk
-
+
   ice_distgrid_esmf = ESMF_DistGridCreate(arbSeqIndexList=gindex, rc=rc)
   if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT)
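The two passes above first count the locally owned physical cells (lsize) and then fill gindex with a global sequence index per cell. The usual CICE formula for that index is sketched below; the i_glob/j_glob lookups are an assumption based on the block type, since the assignment line itself falls outside the hunks:

   n = 0
   do iblk = 1, nblocks
      this_block = get_block(blocks_ice(iblk),iblk)
      do j = this_block%jlo, this_block%jhi
      do i = this_block%ilo, this_block%ihi
         n = n + 1
         ! global (i,j) flattened row-major over the nx_global x ny_global grid
         gindex(n) = (this_block%j_glob(j) - 1)*nx_global + this_block%i_glob(i)
      enddo
      enddo
   enddo

ESMF_DistGridCreate(arbSeqIndexList=gindex, ...) then reproduces exactly this arbitrary decomposition on the ESMF side.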
@@ -969,17 +969,17 @@ subroutine ice_domain_esmf( dom )
   fptr(:,:) = -9999.0_R8
   n=0
   do iblk = 1, nblocks
-     this_block = get_block(blocks_ice(iblk),iblk)
+     this_block = get_block(blocks_ice(iblk),iblk)
      ilo = this_block%ilo
      ihi = this_block%ihi
      jlo = this_block%jlo
      jhi = this_block%jhi
-
+
      do j = jlo, jhi
      do i = ilo, ihi
         n = n+1
-        fptr(klon, n)  = TLON(i,j,iblk)*rad_to_deg
-        fptr(klat, n)  = TLAT(i,j,iblk)*rad_to_deg
+        fptr(klon, n)  = TLON(i,j,iblk)*rad_to_deg
+        fptr(klat, n)  = TLAT(i,j,iblk)*rad_to_deg
         fptr(karea, n) = tarea(i,j,iblk)/(radius*radius)
         fptr(kmask, n) = real(nint(hm(i,j,iblk)),kind=dbl_kind)
         if (trim(grid_type) == 'latlon') then
diff --git a/cicecore/drivers/mct/cesm1/ice_comp_mct.F90 b/cicecore/drivers/mct/cesm1/ice_comp_mct.F90
index d663d0f97..a1d1a2ad1 100644
--- a/cicecore/drivers/mct/cesm1/ice_comp_mct.F90
+++ b/cicecore/drivers/mct/cesm1/ice_comp_mct.F90
@@ -53,7 +53,7 @@ module ice_comp_mct
   use ice_timers
   use ice_kinds_mod, only : int_kind, dbl_kind, char_len_long, log_kind
-  use ice_boundary, only : ice_HaloUpdate
+  use ice_boundary, only : ice_HaloUpdate
   use ice_scam, only : scmlat, scmlon, single_column
   use ice_fileunits, only : nu_diag, inst_index, inst_name, inst_suffix, &
       release_all_fileunits
@@ -90,7 +90,7 @@ module ice_comp_mct
   !
   ! !PRIVATE VARIABLES
-  integer (kind=int_kind) :: ICEID
+  integer (kind=int_kind) :: ICEID

   !--- for coupling on other grid from gridcpl_file ---
   type(mct_gsMap) :: gsMap_iloc   ! local gsmaps
@@ -115,7 +115,7 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename )
   !
   ! !DESCRIPTION:
   ! Initialize thermodynamic ice model and obtain relevant atmospheric model
-  ! arrays back from driver
+  ! arrays back from driver
   !
   ! !USES:
@@ -139,7 +139,7 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename )
   integer :: xoff,yoff
   integer :: nxg,nyg
   integer :: k, iblk
-
+
   type(mct_gsMap) :: gsmap_extend        ! local gsmaps

   character(len=256) :: drvarchdir       ! driver archive directory
@@ -240,7 +240,7 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename )
   scmlat = -999.
   scmlon = -999.

-  call seq_infodata_GetData( infodata, case_name=runid , &
+  call seq_infodata_GetData( infodata, case_name=runid , &
       single_column=single_column ,scmlat=scmlat,scmlon=scmlon)
   call seq_infodata_GetData( infodata, start_type=starttype)
@@ -296,13 +296,13 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename )
   ! use EClock to reset calendar information on initial start
   !---------------------------------------------------------------------------
-  ! - on restart run
+  ! - on restart run
   !   - istep0, time and time_forc are read from restart file
   !   - istep1 is set to istep0
   !   - date information is determined from restart
-  ! - on initial run
+  ! - on initial run
   !   - myear, mmonth, mday, msec obtained from sync clock
-  !   - istep0 and istep1 are set to 0
+  !   - istep0 and istep1 are set to 0

   call seq_timemgr_EClockGetData(EClock, &
       start_ymd=start_ymd, start_tod=start_tod, &
@@ -352,7 +352,7 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename )

   call calendar                     ! update calendar info
   if (write_ic) call accum_hist(dt) ! write initial conditions
-
+
   !---------------------------------------------------------------------------
   ! Initialize MCT attribute vectors and indices
   !---------------------------------------------------------------------------
@@ -362,22 +362,22 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename )
   ! Initialize ice gsMap
   if (trim(gridcpl_file) == 'unknown_gridcpl_file') then
-     call ice_SetGSMap_mct( MPI_COMM_ICE, ICEID, GSMap_ice )
+     call ice_SetGSMap_mct( MPI_COMM_ICE, ICEID, GSMap_ice )
      lsize = mct_gsMap_lsize(gsMap_ice, MPI_COMM_ICE)
      call ice_domain_mct( lsize, gsMap_ice, dom_i )
      other_cplgrid = .false.
      nxg = nx_global
      nyg = ny_global
   else
-     call ice_SetGSMap_mct( MPI_COMM_ICE, ICEID, GSMap_iloc )
+     call ice_SetGSMap_mct( MPI_COMM_ICE, ICEID, GSMap_iloc )
      lsize_loc = mct_gsMap_lsize(gsMap_iloc, MPI_COMM_ICE)
      call ice_domain_mct( lsize_loc, gsMap_iloc, dom_iloc )
-
+
      call ice_setcoupling_mct(MPI_COMM_ICE, ICEID, gsmap_ice, dom_i)
      call ice_coffset_mct(xoff,yoff,gsmap_iloc,dom_iloc,gsmap_ice,dom_i,MPI_COMM_ICE)
      call mct_gsmap_clean(gsmap_ice)
      call mct_gGrid_clean(dom_i)
-
+
      call ice_SetGSMap_mct( MPI_COMM_ICE, ICEID, gsmap_extend, xoff, yoff, nxcpl, nycpl)
      if (lsize_loc /= mct_gsmap_lsize(gsmap_extend,MPI_COMM_ICE)) then
         write(nu_diag,*) subname,' :: gsmap_extend extended ',lsize_loc, &
@@ -398,7 +398,7 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename )
      call mct_aVect_init(i2x_iloc, rList=seq_flds_i2x_fields, lsize=lsize_loc)
      call mct_aVect_zero(i2x_iloc)
      call mct_gsmap_clean(gsmap_extend)
-
+
      other_cplgrid = .true.
      nxg = nxcpl
      nyg = nycpl
@@ -409,7 +409,7 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename )
   call mct_aVect_init(x2i_i, rList=seq_flds_x2i_fields, lsize=lsize)
   call mct_aVect_zero(x2i_i)

-  call mct_aVect_init(i2x_i, rList=seq_flds_i2x_fields, lsize=lsize)
+  call mct_aVect_init(i2x_i, rList=seq_flds_i2x_fields, lsize=lsize)
   call mct_aVect_zero(i2x_i)

   !-----------------------------------------------------------------
@@ -448,7 +448,7 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename )

   ! Error check
   if ((tr_aero .or. tr_zaero) .and. .not. atm_aero) then
-     write(nu_diag,*) 'ice_import ERROR: atm_aero must be set for tr_aero/tr_zaero'
+     write(nu_diag,*) 'ice_import ERROR: atm_aero must be set for tr_aero/tr_zaero'
      call shr_sys_abort()
   end if
@@ -469,7 +469,7 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename )
!     write(shrlogunit,105) trim(subname)//': memory_write: model date = ',start_ymd,start_tod, &
!          ' memory = ',msize0,' MB (highwater) ',mrss0,' MB (usage)'
!  endif
-
+
  105  format( A, 2i8, A, f10.2, A, f10.2, A)

end subroutine ice_init_mct
@@ -514,7 +514,7 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i )
   integer :: lbnum
   integer :: n, myearp
   type(mct_gGrid)        , pointer :: dom_i
-  type(seq_infodata_type), pointer :: infodata
+  type(seq_infodata_type), pointer :: infodata
   type(mct_gsMap)        , pointer :: gsMap_i
   real(r8) :: eccen, obliqr, lambm0, mvelpp
   character(len=char_len_long) :: fname
@@ -542,7 +542,7 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i )
   call shr_file_getLogUnit (shrlogunit)
   call shr_file_getLogLevel(shrloglev)
   call shr_file_setLogUnit (nu_diag)
-
+
   call seq_cdata_setptrs(cdata_i, infodata=infodata, dom=dom_i, &
       gsMap=gsMap_i)
@@ -577,7 +577,7 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i )
   !-------------------------------------------------------------------
   ! get import state
   !-------------------------------------------------------------------
-
+
   call t_barrierf('cice_run_import_BARRIER',MPI_COMM_ICE)
   call t_startf ('cice_run_import')
   call ice_timer_start(timer_cplrecv)
@@ -589,7 +589,7 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i )
   endif
   call ice_timer_stop(timer_cplrecv)
   call t_stopf ('cice_run_import')
-
+
   !--------------------------------------------------------------------
   ! timestep update
   !--------------------------------------------------------------------
@@ -597,9 +597,9 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i )
   call CICE_Run()

   !-----------------------------------------------------------------
-  ! send export state to driver
+  ! send export state to driver
   !-----------------------------------------------------------------
-
+
   call t_barrierf('cice_run_export_BARRIER',MPI_COMM_ICE)
   call t_startf ('cice_run_export')
   call ice_timer_start(timer_cplsend)
@@ -612,7 +612,7 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i )
   endif
   call ice_timer_stop(timer_cplsend)
   call t_stopf ('cice_run_export')
-
+
   !--------------------------------------------------------------------
   ! check that internal clock is in sync with master clock
   !--------------------------------------------------------------------
@@ -627,7 +627,7 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i )
      call shr_sys_abort( SubName// &
         ":: Internal sea-ice clock not in sync with Sync Clock")
   end if
-
+
   ! reset shr logging to my original values

   call shr_file_setLogUnit (shrlogunit)
@@ -636,9 +636,9 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i )
   !-------------------------------------------------------------------
   ! stop timers and print timer info
   !-------------------------------------------------------------------
-  ! Need to have this logic here instead of in ice_final_mct since
+  ! Need to have this logic here instead of in ice_final_mct since
   ! the ice_final_mct.F90 will still be called even in aqua-planet mode
-  ! Could put this logic in the driver - but it seems easier here
+  ! Could put this logic in the driver - but it seems easier here
   ! Need to stop this at the end of every run phase in a coupled run.

   call ice_timer_stop(timer_total)        ! stop timing
@@ -648,7 +648,7 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i )
      call ice_timer_print_all(stats=.true.) ! print timing information
      call release_all_fileunits
   end if
-
+
!  if(tod == 0) then
!     call shr_get_memusage(msize,mrss)
!     call shr_mpi_max(mrss, mrss0, MPI_COMM_ICE,trim(subname)//' mrss0')
@@ -659,7 +659,7 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i )
!  endif
!
   endif
   call t_stopf ('cice_run_total')
-
+
  105  format( A, 2i8, A, f10.2, A, f10.2, A)

end subroutine ice_run_mct
@@ -754,12 +754,12 @@ subroutine ice_SetGSMap_mct( mpicom, ID, gsMap_ice, xoff, yoff, nxgin, nygin )
   n=0
   do iblk = 1, nblocks
-     this_block = get_block(blocks_ice(iblk),iblk)
+     this_block = get_block(blocks_ice(iblk),iblk)
      ilo = this_block%ilo
      ihi = this_block%ihi
      jlo = this_block%jlo
      jhi = this_block%jhi
-
+
      do j = jlo, jhi
      do i = ilo, ihi
         n = n+1
@@ -771,12 +771,12 @@ subroutine ice_SetGSMap_mct( mpicom, ID, gsMap_ice, xoff, yoff, nxgin, nygin )
   allocate(gindex(lsize),stat=ier)
   n=0
   do iblk = 1, nblocks
-     this_block = get_block(blocks_ice(iblk),iblk)
+     this_block = get_block(blocks_ice(iblk),iblk)
      ilo = this_block%ilo
      ihi = this_block%ihi
      jlo = this_block%jlo
      jhi = this_block%jhi
-
+
      do j = jlo, jhi
      do i = ilo, ihi
         n = n+1
@@ -787,7 +787,7 @@ subroutine ice_SetGSMap_mct( mpicom, ID, gsMap_ice, xoff, yoff, nxgin, nygin )
      enddo !i
      enddo !j
   enddo !iblk
-
+
   call mct_gsMap_init( gsMap_ice, gindex, mpicom, ID, lsize, gsize )

   deallocate(gindex)
@@ -802,7 +802,7 @@ subroutine ice_domain_mct( lsize, gsMap_i, dom_i )
   !
   integer        , intent(in)    :: lsize
   type(mct_gsMap), intent(in)    :: gsMap_i
-  type(mct_ggrid), intent(inout) :: dom_i
+  type(mct_ggrid), intent(inout) :: dom_i
   !
   ! Local Variables
   !
@@ -824,7 +824,7 @@ subroutine ice_domain_mct( lsize, gsMap_i, dom_i )
   call mct_gGrid_init( GGrid=dom_i, CoordChars=trim(seq_flds_dom_coord), &
       OtherChars=trim(seq_flds_dom_other), lsize=lsize )
   call mct_aVect_zero(dom_i%data)
-  !
+  !
   allocate(data(lsize))
   !
   ! Determine global gridpoint number attribute, GlobGridNum, which is set automatically by MCT
@@ -835,63 +835,63 @@ subroutine ice_domain_mct( lsize, gsMap_i, dom_i )
   ! Determine domain (numbering scheme is: West to East and South to North to South pole)
   ! Initialize attribute vector with special value
   !
-  data(:) = -9999.0_R8
-  call mct_gGrid_importRAttr(dom_i,"lat"  ,data,lsize)
-  call mct_gGrid_importRAttr(dom_i,"lon"  ,data,lsize)
-  call mct_gGrid_importRAttr(dom_i,"area" ,data,lsize)
-  call mct_gGrid_importRAttr(dom_i,"aream",data,lsize)
-  data(:) = 0.0_R8
-  call mct_gGrid_importRAttr(dom_i,"mask",data,lsize)
-  call mct_gGrid_importRAttr(dom_i,"frac",data,lsize)
+  data(:) = -9999.0_R8
+  call mct_gGrid_importRAttr(dom_i,"lat"  ,data,lsize)
+  call mct_gGrid_importRAttr(dom_i,"lon"  ,data,lsize)
+  call mct_gGrid_importRAttr(dom_i,"area" ,data,lsize)
+  call mct_gGrid_importRAttr(dom_i,"aream",data,lsize)
+  data(:) = 0.0_R8
+  call mct_gGrid_importRAttr(dom_i,"mask",data,lsize)
+  call mct_gGrid_importRAttr(dom_i,"frac",data,lsize)
   !
   ! Fill in correct values for domain components
   !
-  data(:) = -9999.0_R8
+  data(:) = -9999.0_R8
   n=0
   do iblk = 1, nblocks
-     this_block = get_block(blocks_ice(iblk),iblk)
+     this_block = get_block(blocks_ice(iblk),iblk)
      ilo = this_block%ilo
      ihi = this_block%ihi
      jlo = this_block%jlo
     jhi = this_block%jhi
-
+
      do j = jlo, jhi
      do i = ilo, ihi
         n = n+1
-        data(n) = TLON(i,j,iblk)*rad_to_deg
+        data(n) = TLON(i,j,iblk)*rad_to_deg
      enddo !i
      enddo !j
   enddo !iblk
-  call mct_gGrid_importRattr(dom_i,"lon",data,lsize)
+  call mct_gGrid_importRattr(dom_i,"lon",data,lsize)

-  data(:) = -9999.0_R8
+  data(:) = -9999.0_R8
   n=0
   do iblk = 1, nblocks
-     this_block = get_block(blocks_ice(iblk),iblk)
+     this_block = get_block(blocks_ice(iblk),iblk)
      ilo = this_block%ilo
      ihi = this_block%ihi
      jlo = this_block%jlo
      jhi = this_block%jhi
-
+
      do j = jlo, jhi
      do i = ilo, ihi
         n = n+1
-        data(n) = TLAT(i,j,iblk)*rad_to_deg
+        data(n) = TLAT(i,j,iblk)*rad_to_deg
      enddo !i
      enddo !j
   enddo !iblk
-  call mct_gGrid_importRattr(dom_i,"lat",data,lsize)
+  call mct_gGrid_importRattr(dom_i,"lat",data,lsize)

-  data(:) = -9999.0_R8
+  data(:) = -9999.0_R8
   n=0
   do iblk = 1, nblocks
-     this_block = get_block(blocks_ice(iblk),iblk)
+     this_block = get_block(blocks_ice(iblk),iblk)
      ilo = this_block%ilo
      ihi = this_block%ihi
      jlo = this_block%jlo
      jhi = this_block%jhi
-
+
      do j = jlo, jhi
      do i = ilo, ihi
         n = n+1
@@ -899,17 +899,17 @@ subroutine ice_domain_mct( lsize, gsMap_i, dom_i )
      enddo !i
      enddo !j
   enddo !iblk
-  call mct_gGrid_importRattr(dom_i,"area",data,lsize)
+  call mct_gGrid_importRattr(dom_i,"area",data,lsize)

   data(:) = 0.0_R8
   n=0
   do iblk = 1, nblocks
-     this_block = get_block(blocks_ice(iblk),iblk)
+     this_block = get_block(blocks_ice(iblk),iblk)
      ilo = this_block%ilo
      ihi = this_block%ihi
      jlo = this_block%jlo
      jhi = this_block%jhi
-
+
      do j = jlo, jhi
      do i = ilo, ihi
         n = n+1
@@ -917,17 +917,17 @@ subroutine ice_domain_mct( lsize, gsMap_i, dom_i )
      enddo !i
      enddo !j
   enddo !iblk
-  call mct_gGrid_importRattr(dom_i,"mask",data,lsize)
+  call mct_gGrid_importRattr(dom_i,"mask",data,lsize)

   data(:) = 0.0_R8
   n=0
   do iblk = 1, nblocks
-     this_block = get_block(blocks_ice(iblk),iblk)
+     this_block = get_block(blocks_ice(iblk),iblk)
      ilo = this_block%ilo
      ihi = this_block%ihi
      jlo = this_block%jlo
      jhi = this_block%jhi
-
+
      do j = jlo, jhi
      do i = ilo, ihi
         n = n+1
@@ -939,7 +939,7 @@ subroutine ice_domain_mct( lsize, gsMap_i, dom_i )
      enddo !i
      enddo !j
   enddo !iblk
-  call mct_gGrid_importRattr(dom_i,"frac",data,lsize)
+  call mct_gGrid_importRattr(dom_i,"frac",data,lsize)

   deallocate(data)
   deallocate(idata)
@@ -948,7 +948,7 @@ end subroutine ice_domain_mct

!=======================================================================

-  subroutine ice_setdef_mct( i2x_i )
+  subroutine ice_setdef_mct( i2x_i )

   !-----------------------------------------------------
   type(mct_aVect) , intent(inout) :: i2x_i
@@ -1196,7 +1196,7 @@ subroutine ice_setcoupling_mct(mpicom_i, ICEID, gsmap_i, dom_i, gsmap_base)
   start(1)  = 1
   pe_loc(1) = 0
-  do n = 2,npes
+  do n = 2,npes
      pe_loc(n) = n-1
      start(n)  = start(n-1) + length(n-1)
   enddo
@@ -1231,14 +1231,14 @@ subroutine ice_setcoupling_mct(mpicom_i, ICEID, gsmap_i, dom_i, gsmap_base)
   ! Initialize attribute vector with special value
   allocate(data(lsize))
-  data(:) = -9999.0_R8
-  call mct_gGrid_importRAttr(dom_i,"lat"  ,data,lsize)
-  call mct_gGrid_importRAttr(dom_i,"lon"  ,data,lsize)
-  call mct_gGrid_importRAttr(dom_i,"area" ,data,lsize)
-  call mct_gGrid_importRAttr(dom_i,"aream",data,lsize)
-  data(:) = 0.0_R8
-  call mct_gGrid_importRAttr(dom_i,"mask",data,lsize)
-  call mct_gGrid_importRAttr(dom_i,"frac",data,lsize)
+  data(:) = -9999.0_R8
+  call mct_gGrid_importRAttr(dom_i,"lat"  ,data,lsize)
+  call mct_gGrid_importRAttr(dom_i,"lon"  ,data,lsize)
+  call mct_gGrid_importRAttr(dom_i,"area" ,data,lsize)
+  call mct_gGrid_importRAttr(dom_i,"aream",data,lsize)
+  data(:) = 0.0_R8
+  call mct_gGrid_importRAttr(dom_i,"mask",data,lsize)
+  call mct_gGrid_importRAttr(dom_i,"frac",data,lsize)
   deallocate(data)

   ! Read domain arrays
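The one-segment-per-task gsmap built in ice_setcoupling_mct above derives each task's start offset as a running sum of the preceding lengths:

   start(1)  = 1
   pe_loc(1) = 0
   do n = 2,npes
      pe_loc(n) = n-1
      start(n)  = start(n-1) + length(n-1)
   enddo

so, for example, lengths (/4,4,2/) on three tasks yield starts (/1,5,9/) on PEs 0, 1, 2 — the standard cumulative layout for a contiguous decomposition.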
diff --git a/cicecore/drivers/mct/cesm1/ice_cpl_indices.F90 b/cicecore/drivers/mct/cesm1/ice_cpl_indices.F90
index 9e549a5ca..6c04271d2 100644
--- a/cicecore/drivers/mct/cesm1/ice_cpl_indices.F90
+++ b/cicecore/drivers/mct/cesm1/ice_cpl_indices.F90
@@ -1,5 +1,5 @@
 module ice_cpl_indices
-
+
   use seq_flds_mod
   use mct_mod
@@ -7,25 +7,25 @@ module ice_cpl_indices
   public   ! By default make data private

-  ! ice -> drv
+  ! ice -> drv

   integer :: index_i2x_Si_ifrac      ! fractional ice coverage wrt ocean
   integer :: index_i2x_Si_snowh      ! snow height (m)
-  integer :: index_i2x_Si_t          ! temperature
-  integer :: index_i2x_Si_tref       ! 2m reference temperature
-  integer :: index_i2x_Si_qref       ! 2m reference specific humidity
+  integer :: index_i2x_Si_t          ! temperature
+  integer :: index_i2x_Si_tref       ! 2m reference temperature
+  integer :: index_i2x_Si_qref       ! 2m reference specific humidity
   integer :: index_i2x_Si_logz0      ! surface roughness length (m)
-  integer :: index_i2x_Si_avsdr      ! albedo: visible, direct
-  integer :: index_i2x_Si_avsdf      ! albedo: near ir, direct
-  integer :: index_i2x_Si_anidr      ! albedo: visible, diffuse
-  integer :: index_i2x_Si_anidf      ! albedo: near ir, diffuse
+  integer :: index_i2x_Si_avsdr      ! albedo: visible, direct
+  integer :: index_i2x_Si_avsdf      ! albedo: near ir, direct
+  integer :: index_i2x_Si_anidr      ! albedo: visible, diffuse
+  integer :: index_i2x_Si_anidf      ! albedo: near ir, diffuse
   integer :: index_i2x_Si_u10        ! 10m wind
-  integer :: index_i2x_Faii_lwup     ! upward longwave heat flux
-  integer :: index_i2x_Faii_lat      ! latent heat flux
-  integer :: index_i2x_Faii_sen      ! sensible heat flux
-  integer :: index_i2x_Faii_evap     ! evaporation water flux
-  integer :: index_i2x_Faii_taux     ! wind stress, zonal
-  integer :: index_i2x_Faii_tauy     ! wind stress, meridional
+  integer :: index_i2x_Faii_lwup     ! upward longwave heat flux
+  integer :: index_i2x_Faii_lat      ! latent heat flux
+  integer :: index_i2x_Faii_sen      ! sensible heat flux
+  integer :: index_i2x_Faii_evap     ! evaporation water flux
+  integer :: index_i2x_Faii_taux     ! wind stress, zonal
+  integer :: index_i2x_Faii_tauy     ! wind stress, meridional
   integer :: index_i2x_Faii_swnet    ! sw: net
   integer :: index_i2x_Fioi_swpen    ! sw: net penetrating ice
   integer :: index_i2x_Fioi_melth    ! heat flux from melting ice (<0)
@@ -76,14 +76,14 @@ module ice_cpl_indices
   integer :: index_x2i_So_dhdx       ! ocn surface slope, zonal
   integer :: index_x2i_So_dhdy       ! ocn surface slope, meridional
   integer :: index_x2i_Faxa_lwdn     ! downward lw heat flux
-  integer :: index_x2i_Faxa_rain     ! prec: liquid
-  integer :: index_x2i_Faxa_snow     ! prec: frozen
+  integer :: index_x2i_Faxa_rain     ! prec: liquid
+  integer :: index_x2i_Faxa_snow     ! prec: frozen
   integer :: index_x2i_Faxa_swndr    ! sw: nir direct downward
   integer :: index_x2i_Faxa_swvdr    ! sw: vis direct downward
   integer :: index_x2i_Faxa_swndf    ! sw: nir diffuse downward
   integer :: index_x2i_Faxa_swvdf    ! sw: vis diffuse downward
   integer :: index_x2i_Faxa_swnet    ! sw: net
-  integer :: index_x2i_Fioo_q        ! ocn freeze or melt heat
+  integer :: index_x2i_Fioo_q        ! ocn freeze or melt heat
   integer :: index_x2i_Faxa_bcphidry ! flux: Black Carbon hydrophilic dry deposition
   integer :: index_x2i_Faxa_bcphodry ! flux: Black Carbon hydrophobic dry deposition
   integer :: index_x2i_Faxa_bcphiwet ! flux: Black Carbon hydrophilic wet deposition
diff --git a/cicecore/drivers/mct/cesm1/ice_import_export.F90 b/cicecore/drivers/mct/cesm1/ice_import_export.F90
index 3acf9bdaa..7ac4f0bb7 100644
--- a/cicecore/drivers/mct/cesm1/ice_import_export.F90
+++ b/cicecore/drivers/mct/cesm1/ice_import_export.F90
@@ -30,7 +30,7 @@ module ice_import_export
   use ice_domain_size , only: nx_global, ny_global, block_size_x, block_size_y, max_blocks
   use ice_grid        , only: tlon, tlat, tarea, tmask, anglet, hm
   use ice_grid        , only: grid_type, grid_average_X2Y
-  use ice_boundary    , only: ice_HaloUpdate
+  use ice_boundary    , only: ice_HaloUpdate
   use ice_communicate , only: my_task, master_task, MPI_COMM_ICE, get_num_procs
   use ice_calendar    , only: istep, istep1, diagfreq
   use ice_fileunits   , only: nu_diag
@@ -66,11 +66,11 @@ subroutine ice_import( x2i )
   integer,parameter :: nflds=17,nfldv=6,nfldb=27
   real (kind=dbl_kind),allocatable :: aflds(:,:,:,:)
   real (kind=dbl_kind) :: workx, worky
-  real (kind=dbl_kind) :: MIN_RAIN_TEMP, MAX_SNOW_TEMP
+  real (kind=dbl_kind) :: MIN_RAIN_TEMP, MAX_SNOW_TEMP
   character(len=char_len) :: tfrz_option
   logical (kind=log_kind) :: modal_aero, z_tracers, skl_bgc
   logical (kind=log_kind) :: tr_aero, tr_iage, tr_FY, tr_pond
-  logical (kind=log_kind) :: tr_lvl, tr_zaero, tr_bgc_Nit
+  logical (kind=log_kind) :: tr_lvl, tr_zaero, tr_bgc_Nit
   real (kind=dbl_kind) :: tffresh
   character(len=*), parameter :: subname = '(ice_import)'
   !-----------------------------------------------------
@@ -102,7 +102,7 @@ subroutine ice_import( x2i )
   n=0
   do iblk = 1, nblocks
-     this_block = get_block(blocks_ice(iblk),iblk)
+     this_block = get_block(blocks_ice(iblk),iblk)
      ilo = this_block%ilo
      ihi = this_block%ihi
      jlo = this_block%jlo
@@ -250,7 +250,7 @@ subroutine ice_import( x2i )
   deallocate(aflds)

   !-------------------------------------------------------
-  ! Set aerosols from coupler
+  ! Set aerosols from coupler
   !-------------------------------------------------------

   allocate(aflds(nx_block,ny_block,nfldb,nblocks))
@@ -258,7 +258,7 @@ subroutine ice_import( x2i )
   n=0
   do iblk = 1, nblocks
-     this_block = get_block(blocks_ice(iblk),iblk)
+     this_block = get_block(blocks_ice(iblk),iblk)
      ilo = this_block%ilo
      ihi = this_block%ihi
      jlo = this_block%jlo
@@ -310,7 +310,7 @@ subroutine ice_import( x2i )
            aflds(i,j,7,iblk)  = x2i(index_x2i_So_doc, n) * p5 ! split evenly for now
            aflds(i,j,8,iblk)  = x2i(index_x2i_So_doc, n) * p5 !x2i(index_x2i_So_doc2, n)
            aflds(i,j,9,iblk)  = c0
-           aflds(i,j,10,iblk) = c0 !x2i(index_x2i_So_dic, n)
+           aflds(i,j,10,iblk) = c0 !x2i(index_x2i_So_dic, n)
            aflds(i,j,11,iblk) = x2i(index_x2i_So_don, n)
            aflds(i,j,12,iblk) = x2i(index_x2i_So_no3, n)
            aflds(i,j,13,iblk) = x2i(index_x2i_So_sio3, n)
@@ -322,12 +322,12 @@ subroutine ice_import( x2i )
            aflds(i,j,19,iblk) = c0 !x2i(index_x2i_So_fep2, n)
            aflds(i,j,20,iblk) = x2i(index_x2i_So_fed, n)
            aflds(i,j,21,iblk) = c0 !x2i(index_x2i_So_fed2, n)
-           aflds(i,j,22,iblk) = c0 !x2i(index_x2i_So_zaer1, n)
-           aflds(i,j,23,iblk) = c0 !x2i(index_x2i_So_zaer2, n)
-           aflds(i,j,24,iblk) = c0 !x2i(index_x2i_So_zaer3, n)
-           aflds(i,j,25,iblk) = c0 !x2i(index_x2i_So_zaer4, n)
-           aflds(i,j,26,iblk) = c0 !x2i(index_x2i_So_zaer5, n)
-           aflds(i,j,27,iblk) = c0 !x2i(index_x2i_So_zaer6, n)
+           aflds(i,j,22,iblk) = c0 !x2i(index_x2i_So_zaer1, n)
+           aflds(i,j,23,iblk) = c0 !x2i(index_x2i_So_zaer2, n)
+           aflds(i,j,24,iblk) = c0 !x2i(index_x2i_So_zaer3, n)
+           aflds(i,j,25,iblk) = c0 !x2i(index_x2i_So_zaer4, n)
+           aflds(i,j,26,iblk) = c0 !x2i(index_x2i_So_zaer5, n)
+           aflds(i,j,27,iblk) = c0 !x2i(index_x2i_So_zaer6, n)
         endif
      enddo
   enddo
@@ -346,7 +346,7 @@ subroutine ice_import( x2i )
         do i = 1,nx_block
            faero_atm(i,j,1,iblk) = aflds(i,j,1,iblk)
            faero_atm(i,j,2,iblk) = aflds(i,j,2,iblk)
-           faero_atm(i,j,3,iblk) = aflds(i,j,3,iblk)
+           faero_atm(i,j,3,iblk) = aflds(i,j,3,iblk)
         enddo !i
      enddo !j
   enddo !iblk
@@ -357,7 +357,7 @@ subroutine ice_import( x2i )
   do iblk = 1, nblocks
      do j = 1,ny_block
        do i = 1,nx_block
-          algalN(i,j,1,iblk) = aflds(i,j,4,iblk)
+          algalN(i,j,1,iblk) = aflds(i,j,4,iblk)
           algalN(i,j,2,iblk) = aflds(i,j,5,iblk)
           algalN(i,j,3,iblk) = aflds(i,j,6,iblk)
           doc(i,j,1,iblk)    = aflds(i,j,7,iblk)
@@ -409,16 +409,16 @@ subroutine ice_import( x2i )
         do i = 1,nx_block

            ! ocean
-           workx = uocn (i,j,iblk) ! currents, m/s
+           workx = uocn (i,j,iblk) ! currents, m/s
            worky = vocn (i,j,iblk)
-           uocn(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & ! convert to POP grid
+           uocn(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & ! convert to POP grid
                           + worky*sin(ANGLET(i,j,iblk))
            vocn(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) &
                           - workx*sin(ANGLET(i,j,iblk))

            workx = ss_tltx (i,j,iblk) ! sea sfc tilt, m/m
            worky = ss_tlty (i,j,iblk)
-           ss_tltx(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & ! convert to POP grid
+           ss_tltx(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & ! convert to POP grid
                              + worky*sin(ANGLET(i,j,iblk))
            ss_tlty(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) &
                              - workx*sin(ANGLET(i,j,iblk))
@@ -430,7 +430,7 @@ subroutine ice_import( x2i )
#endif
            if (tfrz_option == 'minus1p8') then
-              Tf (i,j,iblk) = -1.8_dbl_kind
+              Tf (i,j,iblk) = -1.8_dbl_kind
            elseif (tfrz_option == 'linear_salt') then
               Tf (i,j,iblk) = -0.0544_r8*sss(i,j,iblk) ! THIS IS THE ORIGINAL POP FORMULA
            elseif (tfrz_option == 'mushy') then
@@ -463,7 +463,7 @@ subroutine ice_import( x2i )
   !$OMP END PARALLEL DO
   call t_stopf ('cice_imp_ocn')

-  ! Interpolate ocean dynamics variables from T-cell centers to
+  ! Interpolate ocean dynamics variables from T-cell centers to
   ! U-cell centers.

   if (.not.prescribed_ice) then
@@ -487,7 +487,7 @@ subroutine ice_import( x2i )

            ! atmosphere
            workx = uatm(i,j,iblk) ! wind velocity, m/s
-           worky = vatm(i,j,iblk)
+           worky = vatm(i,j,iblk)
            uatm (i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & ! convert to POP grid
                            + worky*sin(ANGLET(i,j,iblk))   ! note uatm, vatm, wind
            vatm (i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) & ! are on the T-grid here
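The freezing-temperature branches scattered through the import hunk above, gathered in one place for reference; the 'mushy' branch defers to Icepack's liquidus, so the function name below is an assumption:

   if (tfrz_option == 'minus1p8') then
      Tf(i,j,iblk) = -1.8_dbl_kind                   ! fixed value, deg C
   elseif (tfrz_option == 'linear_salt') then
      Tf(i,j,iblk) = -0.0544_r8*sss(i,j,iblk)        ! original POP formula
   elseif (tfrz_option == 'mushy') then
      Tf(i,j,iblk) = icepack_liquidus_temperature(sss(i,j,iblk))  ! assumed name
   endif

The vector conversions in the same hunk are a plain 2-D rotation through the local grid angle ANGLET: u_grid = u*cos(theta) + v*sin(theta), v_grid = v*cos(theta) - u*sin(theta).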
@@ -506,7 +506,7 @@ end subroutine ice_import

!===============================================================================

-  subroutine ice_export( i2x )
+  subroutine ice_export( i2x )

   !-----------------------------------------------------
   !
@@ -514,7 +514,7 @@ subroutine ice_export( i2x )
   real(r8), intent(inout) :: i2x(:,:)
   !
   ! Local Variables
-  integer :: i, j, iblk, n, ij
+  integer :: i, j, iblk, n, ij
   integer :: ilo, ihi, jlo, jhi !beginning and end of physical domain
   integer (kind=int_kind) :: icells ! number of ocean/ice cells
   integer (kind=int_kind), dimension (nx_block*ny_block) :: indxi ! compressed indices in i
@@ -532,7 +532,7 @@ subroutine ice_export( i2x )
      workx, worky           ! tmps for converting grid

   real (kind=dbl_kind) :: &
-     vonkar, zref, iceruf, tffresh
+     vonkar, zref, iceruf, tffresh

   type(block) :: this_block ! block information for current block
   integer :: icnt,icnt1,iblk1,icnt1sum,icnt1max ! gridcell and block counters
@@ -614,7 +614,7 @@ subroutine ice_export( i2x )
   icnt1 = 0
   iblk1 = 0
   do iblk = 1, nblocks
-     this_block = get_block(blocks_ice(iblk),iblk)
+     this_block = get_block(blocks_ice(iblk),iblk)
      ilo = this_block%ilo
      ihi = this_block%ihi
      jlo = this_block%jlo
@@ -630,12 +630,12 @@ subroutine ice_export( i2x )

         if ( tmask(i,j,iblk)) i2x(:,n) = c0

-        !-------states--------------------
-        i2x(index_i2x_Si_ifrac ,n) = ailohi(i,j,iblk)
+        !-------states--------------------
+        i2x(index_i2x_Si_ifrac ,n) = ailohi(i,j,iblk)

         if ( tmask(i,j,iblk) .and. ailohi(i,j,iblk) > c0 ) then
            icnt = icnt + 1
-           !-------states--------------------
+           !-------states--------------------
            i2x(index_i2x_Si_t     ,n) = Tsrf(i,j,iblk)
            i2x(index_i2x_Si_avsdr ,n) = alvdr(i,j,iblk)
            i2x(index_i2x_Si_anidr ,n) = alidr(i,j,iblk)
@@ -659,17 +659,17 @@ subroutine ice_export( i2x )
            endif

            !--- a/i fluxes computed by ice
-           i2x(index_i2x_Faii_taux ,n) = tauxa(i,j,iblk)
-           i2x(index_i2x_Faii_tauy ,n) = tauya(i,j,iblk)
-           i2x(index_i2x_Faii_lat  ,n) = flat(i,j,iblk)
-           i2x(index_i2x_Faii_sen  ,n) = fsens(i,j,iblk)
-           i2x(index_i2x_Faii_lwup ,n) = flwout(i,j,iblk)
-           i2x(index_i2x_Faii_evap ,n) = evap(i,j,iblk)
+           i2x(index_i2x_Faii_taux ,n) = tauxa(i,j,iblk)
+           i2x(index_i2x_Faii_tauy ,n) = tauya(i,j,iblk)
+           i2x(index_i2x_Faii_lat  ,n) = flat(i,j,iblk)
+           i2x(index_i2x_Faii_sen  ,n) = fsens(i,j,iblk)
+           i2x(index_i2x_Faii_lwup ,n) = flwout(i,j,iblk)
+           i2x(index_i2x_Faii_evap ,n) = evap(i,j,iblk)
            i2x(index_i2x_Faii_swnet,n) = fswabs(i,j,iblk)

            !--- i/o fluxes computed by ice
            i2x(index_i2x_Fioi_melth,n) = fhocn(i,j,iblk)
-           i2x(index_i2x_Fioi_swpen,n) = fswthru(i,j,iblk) ! hf from melting
+           i2x(index_i2x_Fioi_swpen,n) = fswthru(i,j,iblk) ! hf from melting
            i2x(index_i2x_Fioi_meltw,n) = fresh(i,j,iblk)   ! h2o flux from melting ???
            i2x(index_i2x_Fioi_salt ,n) = fsalt(i,j,iblk)   ! salt flux from melting ???
            i2x(index_i2x_Fioi_taux ,n) = tauxo(i,j,iblk)   ! stress : i/o zonal ???
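The export loop above applies one guard pattern to every gridcell: zero the whole column on ocean points, publish the ice fraction unconditionally, and fill the remaining state and flux entries only where ice actually exists. In schematic form (all names from the hunk):

   if ( tmask(i,j,iblk)) i2x(:,n) = c0              ! ocean point: zero the column
   i2x(index_i2x_Si_ifrac,n) = ailohi(i,j,iblk)     ! fraction reported everywhere
   if ( tmask(i,j,iblk) .and. ailohi(i,j,iblk) > c0 ) then
      icnt = icnt + 1                               ! count active ice cells
      i2x(index_i2x_Si_t,n) = Tsrf(i,j,iblk)        ! ...then states and fluxes
   endif

This keeps undefined ice fields from leaking into the coupler at ice-free points.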
@@ -680,18 +680,18 @@ subroutine ice_export( i2x )
            if (index_i2x_Fioi_diat  > 0) i2x(index_i2x_Fioi_diat  ,n) = falgalN(i,j,1,iblk) * R_C2N(1)
            if (index_i2x_Fioi_sp    > 0) i2x(index_i2x_Fioi_sp    ,n) = falgalN(i,j,2,iblk) * R_C2N(2)
            if (index_i2x_Fioi_phaeo > 0) i2x(index_i2x_Fioi_phaeo ,n) = falgalN(i,j,3,iblk) * R_C2N(3)
-           if (index_i2x_Fioi_doc   > 0) i2x(index_i2x_Fioi_doc   ,n) = fdoc(i,j,1,iblk) + fdoc(i,j,2,iblk)
-           if (index_i2x_Fioi_doc2  > 0) i2x(index_i2x_Fioi_doc2  ,n) = c0 !fdoc(i,j,2,iblk)
+           if (index_i2x_Fioi_doc   > 0) i2x(index_i2x_Fioi_doc   ,n) = fdoc(i,j,1,iblk) + fdoc(i,j,2,iblk)
+           if (index_i2x_Fioi_doc2  > 0) i2x(index_i2x_Fioi_doc2  ,n) = c0 !fdoc(i,j,2,iblk)
            if (index_i2x_Fioi_doc3  > 0) i2x(index_i2x_Fioi_doc3  ,n) = c0 !fdoc(i,j,3,iblk)
            if (index_i2x_Fioi_dic   > 0) i2x(index_i2x_Fioi_dic   ,n) = c0 !fdic(i,j,1,iblk)
-           if (index_i2x_Fioi_don   > 0) i2x(index_i2x_Fioi_don   ,n) = fdon(i,j,1,iblk)
-           if (index_i2x_Fioi_no3   > 0) i2x(index_i2x_Fioi_no3   ,n) = fnit(i,j,iblk)
-           if (index_i2x_Fioi_sio3  > 0) i2x(index_i2x_Fioi_sio3  ,n) = fsil(i,j,iblk)
-           if (index_i2x_Fioi_nh4   > 0) i2x(index_i2x_Fioi_nh4   ,n) = famm(i,j,iblk)
-           if (index_i2x_Fioi_dms   > 0) i2x(index_i2x_Fioi_dms   ,n) = fdms(i,j,iblk)
-           if (index_i2x_Fioi_dmspp > 0) i2x(index_i2x_Fioi_dmspp ,n) = c0
-           if (index_i2x_Fioi_dmsp  > 0) i2x(index_i2x_Fioi_dmsp  ,n) = fdmsp(i,j,iblk)
-           if (index_i2x_Fioi_donr  > 0) i2x(index_i2x_Fioi_donr  ,n) = fhum(i,j,iblk)
+           if (index_i2x_Fioi_don   > 0) i2x(index_i2x_Fioi_don   ,n) = fdon(i,j,1,iblk)
+           if (index_i2x_Fioi_no3   > 0) i2x(index_i2x_Fioi_no3   ,n) = fnit(i,j,iblk)
+           if (index_i2x_Fioi_sio3  > 0) i2x(index_i2x_Fioi_sio3  ,n) = fsil(i,j,iblk)
+           if (index_i2x_Fioi_nh4   > 0) i2x(index_i2x_Fioi_nh4   ,n) = famm(i,j,iblk)
+           if (index_i2x_Fioi_dms   > 0) i2x(index_i2x_Fioi_dms   ,n) = fdms(i,j,iblk)
+           if (index_i2x_Fioi_dmspp > 0) i2x(index_i2x_Fioi_dmspp ,n) = c0
+           if (index_i2x_Fioi_dmsp  > 0) i2x(index_i2x_Fioi_dmsp  ,n) = fdmsp(i,j,iblk)
+           if (index_i2x_Fioi_donr  > 0) i2x(index_i2x_Fioi_donr  ,n) = fhum(i,j,iblk)
            ! convert from umol Fe/m^3 to mmol Fe/m^3
            if (index_i2x_Fioi_fep1  > 0) i2x(index_i2x_Fioi_fep1  ,n) = c0 !ffep(i,j,1,iblk) / 1000.0_dbl_kind
            if (index_i2x_Fioi_fep2  > 0) i2x(index_i2x_Fioi_fep2  ,n) = c0 !ffep(i,j,2,iblk) / 1000.0_dbl_kind
diff --git a/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 b/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90
index 0868ef2fa..78b7d15c4 100644
--- a/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90
+++ b/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90
@@ -1,5 +1,5 @@
!===================================================================
-!BOP
+!BOP
!
! !MODULE: ice_prescribed_mod - Prescribed Ice Model
!
@@ -19,7 +19,7 @@
!    2005-Apr-19 - B. Kauffman, J. Schramm, M. Vertenstein, NCAR - design
!
! !INTERFACE: ----------------------------------------------------------
-
+
module ice_prescribed_mod

! !USES:
@@ -72,7 +72,7 @@ module ice_prescribed_mod
   integer(SHR_KIND_IN),parameter :: nFilesMaximum = 400 ! max number of files
   integer(kind=int_kind) :: stream_year_first  ! first year in stream to use
   integer(kind=int_kind) :: stream_year_last   ! last year in stream to use
-  integer(kind=int_kind) :: model_year_align   ! align stream_year_first
+  integer(kind=int_kind) :: model_year_align   ! align stream_year_first
                                                ! with this model year
   character(len=char_len_long) :: stream_fldVarName
@@ -88,7 +88,7 @@ module ice_prescribed_mod
   type(shr_strdata_type)       :: sdat         ! prescribed data stream
   character(len=char_len_long) :: fldList      ! list of fields in data stream
-  real(kind=dbl_kind), allocatable :: ice_cov(:,:,:) ! ice cover
+  real(kind=dbl_kind), allocatable :: ice_cov(:,:,:) ! ice cover

!  real (kind=dbl_kind), parameter :: &
!     cp_sno = 0.0_dbl_kind &   ! specific heat of snow (J/kg/K)
@@ -109,13 +109,13 @@ module ice_prescribed_mod
!
! !IROUTINE: ice_prescribed_init - prescribed ice initialization
!
-! !INTERFACE:
+! !INTERFACE:
   subroutine ice_prescribed_init(compid, gsmap, dom)
      use mpi   ! MPI Fortran module
      use shr_pio_mod, only : shr_pio_getiotype, shr_pio_getiosys
! !DESCRIPTION:
-! Prescribed ice initialization - needed to
-! work with new shr_strdata module derived type
+! Prescribed ice initialization - needed to
+! work with new shr_strdata module derived type
!
! !REVISION HISTORY:
!    2009-Oct-12 - M. Vertenstein
@@ -130,7 +130,7 @@ subroutine ice_prescribed_init(compid, gsmap, dom)
      !EOP
      !----- Local ------
      integer(kind=int_kind) :: nml_error ! namelist i/o error flag
-     integer(kind=int_kind) :: n, nFile, ierr
+     integer(kind=int_kind) :: n, nFile, ierr
      character(len=8)       :: fillalgo
      character(len=*), parameter :: subname = '(ice_prescribed_init)'
      character(*),parameter :: F00 = "(4a)"
@@ -227,9 +227,9 @@ subroutine ice_prescribed_init(compid, gsmap, dom)
      if (my_task == master_task) then
         write(nu_diag,*) ' '
         write(nu_diag,*) 'This is the prescribed ice coverage option.'
-        write(nu_diag,*) '  stream_year_first = ',stream_year_first
-        write(nu_diag,*) '  stream_year_last  = ',stream_year_last
-        write(nu_diag,*) '  model_year_align  = ',model_year_align
+        write(nu_diag,*) '  stream_year_first = ',stream_year_first
+        write(nu_diag,*) '  stream_year_last  = ',stream_year_last
+        write(nu_diag,*) '  model_year_align  = ',model_year_align
         write(nu_diag,*) '  stream_fldVarName = ',trim(stream_fldVarName)
         do n = 1,nFile
            write(nu_diag,*) '  stream_fldFileName = ',trim(stream_fldFileName(n)),n
@@ -280,7 +280,7 @@ subroutine ice_prescribed_init(compid, gsmap, dom)
         hin_max(1) = 999._dbl_kind
      end if
   end subroutine ice_prescribed_init
-
+
!=======================================================================
!BOP ===================================================================
!
@@ -316,7 +316,7 @@ subroutine ice_prescribed_run(mDateIn, secIn)
      logical, save :: first_time = .true.
      character(len=*), parameter :: subname = '(ice_prescribed_run)'
      character(*),parameter :: F00 = "(a,2g20.13)"
-
+
      !------------------------------------------------------------------------
      ! Interpolate to new ice coverage
      !------------------------------------------------------------------------
@@ -327,16 +327,16 @@ subroutine ice_prescribed_run(mDateIn, secIn)
         allocate(ice_cov(nx_block,ny_block,max_blocks))
      endif

-     ice_cov(:,:,:) = c0  ! This initializes ghost cells as well
+     ice_cov(:,:,:) = c0  ! This initializes ghost cells as well

      n=0
      do iblk = 1, nblocks
-        this_block = get_block(blocks_ice(iblk),iblk)
+        this_block = get_block(blocks_ice(iblk),iblk)
         ilo = this_block%ilo
         ihi = this_block%ihi
         jlo = this_block%jlo
         jhi = this_block%jhi
-
+
         do j = jlo, jhi
         do i = ilo, ihi
            n = n+1
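How model_year_align relates the stream years to model years is not spelled out in this file — the arithmetic lives in shr_strdata — so the following is a plausible reading stated as an assumption, not the library's code:

   ! model_year_align is the model year that lines up with stream_year_first;
   ! outside the stream span the mapping wraps cyclically.
   nyrs      = stream_year_last - stream_year_first + 1
   data_year = stream_year_first + modulo(model_year - model_year_align, nyrs)

For example, with stream years 1980-1989 and model_year_align=1, model year 1 reads 1980 data and model year 11 wraps back to 1980.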
@@ -384,11 +384,11 @@ end subroutine ice_prescribed_run
!    2001-May - B. P. Briegleb - Original version
!
! !INTERFACE: ------------------------------------------------------------------
-
+
subroutine ice_prescribed_phys

! !USES:
-
+
   use ice_flux
   use ice_state
   use ice_arrays_column, only : hin_max
@@ -396,9 +396,9 @@ subroutine ice_prescribed_phys
   use ice_dyn_evp

   implicit none
-
+
! !INPUT/OUTPUT PARAMETERS:
-
+
!EOP

   !----- Local ------
@@ -411,12 +411,12 @@ subroutine ice_prescribed_phys
   real(kind=dbl_kind) :: slope  ! diff in underlying ocean tmp and ice surface tmp
   real(kind=dbl_kind) :: Ti     ! ice level temperature
   real(kind=dbl_kind) :: Tmlt   ! ice level melt temperature
-  real(kind=dbl_kind) :: qin_save(nilyr)
+  real(kind=dbl_kind) :: qin_save(nilyr)
   real(kind=dbl_kind) :: qsn_save(nslyr)
   real(kind=dbl_kind) :: hi     ! ice prescribed (hemispheric) ice thickness
   real(kind=dbl_kind) :: hs     ! snow thickness
   real(kind=dbl_kind) :: zn     ! normalized ice thickness
-  real(kind=dbl_kind) :: salin(nilyr) ! salinity (ppt)
+  real(kind=dbl_kind) :: salin(nilyr) ! salinity (ppt)
   real(kind=dbl_kind) :: rad_to_deg, pi, puny
   real(kind=dbl_kind) :: rhoi, rhos, cp_ice, cp_ocn, lfresh, depressT
@@ -444,11 +444,11 @@ subroutine ice_prescribed_phys
!      aicen(:,:,:,:) = c0
!      vicen(:,:,:,:) = c0
!      eicen(:,:,:,:) = c0
-
+
!      do nc=1,ncat
!         trcrn(:,:,nt_Tsfc,nc,:) = Tf(:,:,:)
!      enddo
-
+
   !-----------------------------------------------------------------
   ! Set ice cover over land to zero; it is unclear whether this should
   ! be done earlier, before time/spatial interpolation.
   !-----------------------------------------------------------------
@@ -502,8 +502,8 @@ subroutine ice_prescribed_phys
               endif

               aicen(i,j,nc,iblk) = ice_cov(i,j,iblk)
-              vicen(i,j,nc,iblk) = hi*aicen(i,j,nc,iblk)
-              vsnon(i,j,nc,iblk) = hs*aicen(i,j,nc,iblk)
+              vicen(i,j,nc,iblk) = hi*aicen(i,j,nc,iblk)
+              vsnon(i,j,nc,iblk) = hs*aicen(i,j,nc,iblk)

               !---------------------------------------------------------
               ! make linear temp profile and compute enthalpy
               !---------------------------------------------------------
@@ -564,7 +564,7 @@ subroutine ice_prescribed_phys
                  trcr_base     = trcr_base(1:ntrcr,:), &
                  n_trcr_strata = n_trcr_strata(1:ntrcr), &
                  nt_strata     = nt_strata(1:ntrcr,:))
-
+
            enddo  ! i
         enddo     ! j
      enddo        ! iblk
diff --git a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90
index 22234d27f..c68583648 100644
--- a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90
+++ b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90
@@ -231,7 +231,7 @@ subroutine ice_step
      !-----------------------------------------------------------------
      ! thermodynamics and biogeochemistry
      !-----------------------------------------------------------------
-
+
      call step_therm1 (dt, iblk) ! vertical thermodynamics
      if (debug_model) then
         plabeld = 'post step_therm1'
@@ -401,7 +401,7 @@ subroutine ice_step
      call ice_timer_stop(timer_readwrite)  ! reading/writing

   end subroutine ice_step
-
+
!=======================================================================
!
! Prepare for coupling
@@ -634,7 +634,7 @@ subroutine coupling_prep (iblk)
              fiso_evap=fiso_evap(:,:,:,iblk), &
              fiso_ocn =fiso_ocn (:,:,:,iblk), &
              Uref=Uref(:,:,iblk), wind=wind(:,:,iblk) )
-
+
      !-----------------------------------------------------------------
      ! Define ice-ocean bgc fluxes
      !-----------------------------------------------------------------
@@ -710,7 +710,7 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, &
      ! local variables
      integer (kind=int_kind) :: &
          i, j, n    ! horizontal indices
-
+
      real (kind=dbl_kind)    :: &
          puny, &    !
          Lsub, &    !
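The "make linear temp profile and compute enthalpy" step in the ice_prescribed_phys hunks above is only named there; the actual lines fall outside the hunks. A sketch of the conventional construction follows — the enthalpy expression is the standard Bitz-Lipscomb form and is an assumption here, built from the local variables declared in the hunk:

   slope = Tf(i,j,iblk) - trcrn(i,j,nt_Tsfc,nc,iblk)   ! ocean Tf minus surface T
   do k = 1, nilyr
      zn   = (real(k,kind=dbl_kind) - 0.5_dbl_kind)/real(nilyr,kind=dbl_kind)
      Ti   = trcrn(i,j,nt_Tsfc,nc,iblk) + slope*zn     ! linear in depth
      Tmlt = -salin(k)*depressT                        ! level melt temperature
      qin_save(k) = -rhoi*(cp_ice*(Tmlt - Ti) &        ! assumed enthalpy form
                  + lfresh*(c1 - Tmlt/Ti) - cp_ocn*Tmlt)
   enddo

The prescribed thickness hi and the snow thickness hs then give the category volumes via vicen = hi*aicen and vsnon = hs*aicen, as the hunk shows.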
diff --git a/cicecore/drivers/nuopc/cmeps/CICE_copyright.txt b/cicecore/drivers/nuopc/cmeps/CICE_copyright.txt
index 6eb3c9cca..14d97c481 100644
--- a/cicecore/drivers/nuopc/cmeps/CICE_copyright.txt
+++ b/cicecore/drivers/nuopc/cmeps/CICE_copyright.txt
@@ -1,17 +1,17 @@
 ! Copyright (c) 2022, Triad National Security, LLC
 ! All rights reserved.
-!
+!
 ! Copyright 2022. Triad National Security, LLC. This software was
-! produced under U.S. Government contract DE-AC52-06NA25396 for Los
+! produced under U.S. Government contract DE-AC52-06NA25396 for Los
 ! Alamos National Laboratory (LANL), which is operated by Triad
-! National Security, LLC for the U.S. Department of Energy. The U.S.
-! Government has rights to use, reproduce, and distribute this software.
-! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY
+! National Security, LLC for the U.S. Department of Energy. The U.S.
+! Government has rights to use, reproduce, and distribute this software.
+! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY
 ! WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
-! THIS SOFTWARE. If software is modified to produce derivative works,
-! such modified software should be clearly marked, so as not to confuse
+! THIS SOFTWARE. If software is modified to produce derivative works,
+! such modified software should be clearly marked, so as not to confuse
 ! it with the version available from LANL.
 !
 ! The full license and distribution policy are available from
 ! https://github.com/CICE-Consortium
-!
+!
diff --git a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90
index 56287feb1..0b1b9349c 100644
--- a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90
+++ b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90
@@ -202,7 +202,7 @@ subroutine ice_mesh_set_distgrid(localpet, npes, distgrid, rc)
   deallocate(gindex)

end subroutine ice_mesh_set_distgrid
-
+
!=======================================================================

subroutine ice_mesh_setmask_from_maskfile(ice_maskfile, ice_mesh, rc)
@@ -429,7 +429,7 @@ subroutine ice_mesh_create_scolumn(scol_lon, scol_lat, ice_mesh, rc)
   ! Allocate module variable ocn_gridcell_frac
   allocate(ocn_gridcell_frac(nx_block,ny_block,max_blocks))
   ocn_gridcell_frac(:,:,:) = scol_frac
-
+
end subroutine ice_mesh_create_scolumn

!===============================================================================
diff --git a/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90 b/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90
index dc40177d8..84973e9dd 100644
--- a/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90
+++ b/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90
@@ -175,7 +175,7 @@ subroutine ice_prescribed_init(clock, mesh, rc)
         end do
         write(nu_diag,*) ' '
      endif
-
+
      ! initialize sdat
      call shr_strdata_init_from_inline(sdat, &
          my_task = my_task, &
diff --git a/cicecore/drivers/nuopc/dmi/CICE.F90 b/cicecore/drivers/nuopc/dmi/CICE.F90
index d6a28c3ba..79dd06fca 100644
--- a/cicecore/drivers/nuopc/dmi/CICE.F90
+++ b/cicecore/drivers/nuopc/dmi/CICE.F90
@@ -1,21 +1,21 @@
!=======================================================================
! Copyright (c) 2022, Triad National Security, LLC
! All rights reserved.
-!
+!
! Copyright 2022. Triad National Security, LLC. This software was
-! produced under U.S. Government contract DE-AC52-06NA25396 for Los
+! produced under U.S. Government contract DE-AC52-06NA25396 for Los
! Alamos National Laboratory (LANL), which is operated by Triad
-! National Security, LLC for the U.S. Department of Energy. The U.S.
-! Government has rights to use, reproduce, and distribute this software.
-! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY
+! National Security, LLC for the U.S. Department of Energy. The U.S.
+! Government has rights to use, reproduce, and distribute this software.
+! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY
! WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF
-! THIS SOFTWARE. If software is modified to produce derivative works,
-! such modified software should be clearly marked, so as not to confuse
+! THIS SOFTWARE. If software is modified to produce derivative works,
+! such modified software should be clearly marked, so as not to confuse
! it with the version available from LANL.
!
! The full license and distribution policy are available from
! https://github.com/CICE-Consortium
-!
+!
!=======================================================================
!
! Main driver routine for CICE.  Initializes and steps through the model.
@@ -49,7 +49,7 @@ program icemodel
   call CICE_Run

   !-----------------------------------------------------------------
-  ! Finalize CICE
+  ! Finalize CICE
   !-----------------------------------------------------------------

   call CICE_Finalize
diff --git a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90
index 2c90061af..78d462d4c 100644
--- a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90
+++ b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90
@@ -196,11 +196,11 @@ subroutine cice_init(mpi_comm)
   if (icepack_warnings_aborted()) call abort_ice(trim(subname), &
       file=__FILE__,line= __LINE__)

-  if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical
+  if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical
                                                !property tables

-  ! Initialize shortwave components using swdn from previous timestep
-  ! if restarting. These components will be scaled to current forcing
+  ! Initialize shortwave components using swdn from previous timestep
+  ! if restarting. These components will be scaled to current forcing
   ! in prep_radiation.
   if (trim(runtype) == 'continue' .or. restart) &
      call init_shortwave    ! initialize radiative transfer
@@ -252,7 +252,7 @@ subroutine cice_init(mpi_comm)
   call init_flux_atm     ! initialize atmosphere fluxes sent to coupler
   call init_flux_ocn     ! initialize ocean fluxes sent to coupler

-  if (write_ic) call accum_hist(dt) ! write initial conditions
+  if (write_ic) call accum_hist(dt) ! write initial conditions

end subroutine cice_init
@@ -339,7 +339,7 @@ subroutine init_restart
   if (icepack_warnings_aborted()) call abort_ice(error_message=subname, &
       file=__FILE__, line=__LINE__)

-  if (trim(runtype) == 'continue') then
+  if (trim(runtype) == 'continue') then
      ! start from core restart file
      call restartfile()           ! given by pointer in ice_in
      call calendar()              ! update time parameters
@@ -350,17 +350,17 @@ subroutine init_restart
!      call restartfile_v4 (ice_ic)  ! CICE v4.1 binary restart file
!!! uncomment if EAP restart data exists
!      if (kdyn == 2) call read_restart_eap
-  endif
+  endif

   ! tracers
-  ! ice age tracer
-  if (tr_iage) then
+  ! ice age tracer
+  if (tr_iage) then
      if (trim(runtype) == 'continue') &
          restart_age = .true.
      if (restart_age) then
         call read_restart_age
      else
-        do iblk = 1, nblocks
+        do iblk = 1, nblocks
            call init_age(trcrn(:,:,nt_iage,:,iblk))
         enddo ! iblk
      endif
@@ -371,7 +371,7 @@ subroutine init_restart
      if (restart_FY) then
         call read_restart_FY
      else
-        do iblk = 1, nblocks
+        do iblk = 1, nblocks
            call init_FY(trcrn(:,:,nt_FY,:,iblk))
         enddo ! iblk
      endif
@@ -382,7 +382,7 @@ subroutine init_restart
      if (restart_lvl) then
         call read_restart_lvl
      else
-        do iblk = 1, nblocks
+        do iblk = 1, nblocks
            call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), &
                trcrn(:,:,nt_vlvl,:,iblk))
         enddo ! iblk
@@ -396,7 +396,7 @@ subroutine init_restart
      if (restart_pond_cesm) then
         call read_restart_pond_cesm
      else
-        do iblk = 1, nblocks
+        do iblk = 1, nblocks
            call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), &
                trcrn(:,:,nt_hpnd,:,iblk))
         enddo ! iblk
@@ -410,7 +410,7 @@ subroutine init_restart
      if (restart_pond_lvl) then
         call read_restart_pond_lvl
      else
-        do iblk = 1, nblocks
+        do iblk = 1, nblocks
            call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), &
                trcrn(:,:,nt_hpnd,:,iblk), &
                trcrn(:,:,nt_ipnd,:,iblk), &
@@ -425,7 +425,7 @@ subroutine init_restart
      if (restart_pond_topo) then
         call read_restart_pond_topo
      else
-        do iblk = 1, nblocks
+        do iblk = 1, nblocks
            call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), &
                trcrn(:,:,nt_hpnd,:,iblk), &
                trcrn(:,:,nt_ipnd,:,iblk))
@@ -476,7 +476,7 @@ subroutine init_restart
      if (restart_aero) then
         call read_restart_aero
      else
-        do iblk = 1, nblocks
+        do iblk = 1, nblocks
            call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk))
         enddo ! iblk
      endif ! .not. restart_aero
diff --git a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90
index 00c527da0..6e799723e 100644
--- a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90
+++ b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90
@@ -7,7 +7,7 @@
!  William H. Lipscomb, LANL
!
! 2006 ECH: moved exit timeLoop to prevent execution of unnecessary timestep
-! 2006 ECH: Streamlined for efficiency
+! 2006 ECH: Streamlined for efficiency
! 2006 ECH: Converted to free source form (F90)
! 2007 BPB: Modified Delta-Eddington shortwave interface
! 2008 ECH: moved ESMF code to its own driver
@@ -131,7 +131,7 @@ subroutine CICE_Run(stop_now_cpl)
   ! end of timestep loop
   !--------------------------------------------------------------------

-  call ice_timer_stop(timer_step)   ! end timestepping loop timer
+  call ice_timer_stop(timer_step)   ! end timestepping loop timer

end subroutine CICE_Run
@@ -175,7 +175,7 @@ subroutine ice_step
      timer_hist, timer_readwrite

   integer (kind=int_kind) :: &
-     iblk        , & ! block index
+     iblk        , & ! block index
      k           , & ! dynamics supercycling index
      ktherm          ! thermodynamics is off when ktherm = -1
@@ -257,7 +257,7 @@ subroutine ice_step
      !-----------------------------------------------------------------
      ! thermodynamics and biogeochemistry
      !-----------------------------------------------------------------
-
+
      call step_therm1 (dt, iblk) ! vertical thermodynamics

      if (debug_model) then
@@ -422,7 +422,7 @@ subroutine ice_step
         if (tr_iso)  call write_restart_iso
         if (tr_aero) call write_restart_aero
         if (solve_zsal .or. skl_bgc .or. z_tracers) &
-            call write_restart_bgc
+            call write_restart_bgc
         if (tr_brine) call write_restart_hbrine
         if (kdyn == 2) call write_restart_eap
         call final_restart
@@ -431,7 +431,7 @@ subroutine ice_step
      call ice_timer_stop(timer_readwrite)  ! reading/writing

   end subroutine ice_step
-
+
!=======================================================================
!
! Prepare for coupling
@@ -466,12 +466,12 @@ subroutine coupling_prep (iblk)
      use ice_step_mod, only: ocean_mixed_layer
      use ice_timers, only: timer_couple, ice_timer_start, ice_timer_stop

-     integer (kind=int_kind), intent(in) :: &
-        iblk            ! block index
+     integer (kind=int_kind), intent(in) :: &
+        iblk            ! block index

      ! local variables

-     integer (kind=int_kind) :: &
+     integer (kind=int_kind) :: &
        ilo,ihi,jlo,jhi, & ! beginning and end of physical domain
        n           , & ! thickness category index
        i,j         , & ! horizontal indices
@@ -607,8 +607,8 @@ subroutine coupling_prep (iblk)
            fsalt_ai  (i,j,iblk) = fsalt  (i,j,iblk)
            fhocn_ai  (i,j,iblk) = fhocn  (i,j,iblk)
            fswthru_ai(i,j,iblk) = fswthru(i,j,iblk)
-           fzsal_ai  (i,j,iblk) = fzsal  (i,j,iblk)
-           fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk)
+           fzsal_ai  (i,j,iblk) = fzsal  (i,j,iblk)
+           fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk)

            if (nbtrcr > 0) then
               do k = 1, nbtrcr
@@ -629,7 +629,7 @@ subroutine coupling_prep (iblk)
         enddo

      !-----------------------------------------------------------------
-     ! Divide fluxes by ice area
+     ! Divide fluxes by ice area
      !  - the CESM coupler assumes fluxes are per unit ice area
      !  - also needed for global budget in diagnostics
      !-----------------------------------------------------------------
@@ -659,22 +659,22 @@ subroutine coupling_prep (iblk)
              Qref_iso =Qref_iso (:,:,:,iblk), &
              fiso_evap=fiso_evap(:,:,:,iblk), &
              fiso_ocn =fiso_ocn (:,:,:,iblk))
-
+
#ifdef CICE_IN_NEMO
!echmod - comment this out for efficiency, if .not. calc_Tsfc
      if (.not. calc_Tsfc) then

         !---------------------------------------------------------------
-        ! If surface fluxes were provided, conserve these fluxes at ice
-        ! free points by passing to ocean.
+        ! If surface fluxes were provided, conserve these fluxes at ice
+        ! free points by passing to ocean.
         !---------------------------------------------------------------

-        call sfcflux_to_ocn &
+        call sfcflux_to_ocn &
             (nx_block,  ny_block,  &
              tmask    (:,:,iblk), aice_init(:,:,iblk), &
              fsurfn_f (:,:,:,iblk), flatn_f(:,:,:,iblk), &
              fresh    (:,:,iblk), fhocn    (:,:,iblk))
-      endif
+      endif
!echmod
#endif
      call ice_timer_stop(timer_couple,iblk)   ! atm/ocn coupling
@@ -686,10 +686,10 @@ end subroutine coupling_prep
!=======================================================================
!
! If surface heat fluxes are provided to CICE instead of CICE calculating
-! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can
+! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can
! be provided at points which do not have ice. (This could be due to
! the heat fluxes being calculated on a lower resolution grid or the
-! heat fluxes not recalculated at every CICE timestep.) At ice free points,
+! heat fluxes not recalculated at every CICE timestep.) At ice free points,
! conserve energy and water by passing these fluxes to the ocean.
!
! author: A. McLaren, Met Office
@@ -722,7 +722,7 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, &
      ! local variables
      integer (kind=int_kind) :: &
          i, j, n    ! horizontal indices
-
+
      real (kind=dbl_kind)    :: &
          puny, &    !
          Lsub, &    !
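"Divide fluxes by ice area" above reflects the CESM convention that coupler fluxes are per unit ice area, while CICE accumulates gridcell means (the *_ai copies preserve the gridcell-mean versions for diagnostics first). In upstream CICE the rescaling is done by a scale_fluxes routine whose name is not visible in the hunk; conceptually, per field:

   if (aice_init(i,j,iblk) > puny) then
      ar = c1/aice_init(i,j,iblk)     ! gridcell mean -> per unit ice area
   else
      ar = c0                         ! no ice: nothing to hand the coupler
   endif
   fsens(i,j,iblk) = fsens(i,j,iblk)*ar   ! likewise for the other a/i fluxes

(a sketch of the convention, not the routine's exact code).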
end type @@ -167,7 +167,7 @@ module cice_cap call ESMF_LogWrite(info, ESMF_LOGMSG_INFO, rc=dbrc) end subroutine - + !----------------------------------------------------------------------------- subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) @@ -196,7 +196,7 @@ module cice_cap integer :: lbnd(2),ubnd(2) type(block) :: this_block type(ESMF_DELayout) :: delayout - real(ESMF_KIND_R8), pointer :: tarray(:,:) + real(ESMF_KIND_R8), pointer :: tarray(:,:) real(ESMF_KIND_R8), pointer :: coordXcenter(:,:) real(ESMF_KIND_R8), pointer :: coordYcenter(:,:) real(ESMF_KIND_R8), pointer :: coordXcorner(:,:) @@ -215,7 +215,7 @@ module cice_cap ! created can wrap on the data pointers in internal part of CICE write(tmpstr,'(a,2i8)') subname//' ice nx,ny = ',nx_global,ny_global call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=dbrc) - + ! distgrid = ESMF_DistGridCreate(minIndex=(/1,1/), maxIndex=(/nx_global,ny_global/), & ! regDecomp=(/2,2/), rc=rc) @@ -307,9 +307,9 @@ module cice_cap rc = rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_GridAddCoord(gridIn, staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) + call ESMF_GridAddCoord(gridIn, staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_GridAddCoord(gridIn, staggerLoc=ESMF_STAGGERLOC_CORNER, rc=rc) + call ESMF_GridAddCoord(gridIn, staggerLoc=ESMF_STAGGERLOC_CORNER, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return call ESMF_GridAddItem(gridIn, itemFlag=ESMF_GRIDITEM_MASK, itemTypeKind=ESMF_TYPEKIND_I4, & staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) @@ -341,7 +341,7 @@ module cice_cap if (lbnd(1) /= 1 .or. lbnd(2) /= 1 .or. ubnd(1) /= ihi-ilo+1 .or. ubnd(2) /= jhi-jlo+1) then write(tmpstr,'(a,5i8)') subname//' iblk bnds ERROR ' call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, line=__LINE__, file=__FILE__, rc=dbrc) - write(tmpstr,'(a,4i8)') subname//' iblk center bnds 2',ihi, ilo, jhi,jlo + write(tmpstr,'(a,4i8)') subname//' iblk center bnds 2',ihi, ilo, jhi,jlo call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, line=__LINE__, file=__FILE__, rc=dbrc) rc = ESMF_FAILURE return @@ -460,14 +460,14 @@ module cice_cap call ESMF_LogWrite(info, ESMF_LOGMSG_INFO, line=__LINE__, file=__FILE__, rc=dbrc) end subroutine - + !----------------------------------------------------------------------------- ! CICE model uses same clock as parent gridComp subroutine SetClock(gcomp, rc) type(ESMF_GridComp) :: gcomp integer, intent(out) :: rc - + ! local variables type(ESMF_Clock) :: clock type(ESMF_TimeInterval) :: stabilityTimeStep, timestep @@ -493,10 +493,10 @@ module cice_cap line=__LINE__, & file=__FILE__)) & return ! bail out - + ! initialize internal clock ! here: parent Clock and stability timeStep determine actual model timeStep - call ESMF_TimeIntervalSet(stabilityTimeStep, s=nint(dt), rc=rc) + call ESMF_TimeIntervalSet(stabilityTimeStep, s=nint(dt), rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & line=__LINE__, & file=__FILE__)) & @@ -506,7 +506,7 @@ module cice_cap line=__LINE__, & file=__FILE__)) & return ! bail out - + end subroutine !----------------------------------------------------------------------------- @@ -514,7 +514,7 @@ module cice_cap subroutine ModelAdvance(gcomp, rc) type(ESMF_GridComp) :: gcomp integer, intent(out) :: rc - + ! 
local variables type(ESMF_Clock) :: clock type(ESMF_State) :: importState, exportState @@ -539,7 +539,7 @@ module cice_cap call ESMF_LogWrite(info, ESMF_LOGMSG_INFO, rc=dbrc) import_slice = import_slice + 1 export_slice = export_slice + 1 - + ! query the Component for its clock, importState and exportState call NUOPC_ModelGet(gcomp, modelClock=clock, importState=importState, & exportState=exportState, rc=rc) @@ -549,27 +549,27 @@ module cice_cap return ! bail out ! HERE THE MODEL ADVANCES: currTime -> currTime + timeStep - + ! Because of the way that the internal Clock was set in SetClock(), ! its timeStep is likely smaller than the parent timeStep. As a consequence - ! the time interval covered by a single parent timeStep will result in + ! the time interval covered by a single parent timeStep will result in ! multiple calls to the ModelAdvance() routine. Every time the currTime ! will come in by one internal timeStep advanced. This goes until the ! stopTime of the internal Clock has been reached. - + call ESMF_ClockPrint(clock, options="currTime", & preString="------>Advancing CICE from: ", rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & line=__LINE__, & file=__FILE__)) & return ! bail out - + call ESMF_ClockGet(clock, currTime=currTime, timeStep=timeStep, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & line=__LINE__, & file=__FILE__)) & return ! bail out - + call ESMF_TimePrint(currTime + timeStep, & preString="--------------------------------> to: ", rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & @@ -611,14 +611,14 @@ module cice_cap write(info,*) subname,' --- run phase 4 called --- ',rc call ESMF_LogWrite(info, ESMF_LOGMSG_INFO, rc=dbrc) if(profile_memory) call ESMF_VMLogMemInfo("Leaving CICE Model_ADVANCE: ") - end subroutine + end subroutine subroutine cice_model_finalize(gcomp, rc) ! input arguments type(ESMF_GridComp) :: gcomp integer, intent(out) :: rc - + ! local variables type(ESMF_Clock) :: clock type(ESMF_Time) :: currTime @@ -707,7 +707,7 @@ module cice_cap integer :: npet, nx, ny, pet, elb(2), eub(2), clb(2), cub(2), tlb(2), tub(2) type(ESMF_VM) :: vm character(len=*),parameter :: subname='(cice_cap:CICE_RealizeFields)' - + rc = ESMF_SUCCESS do i = 1, nfields @@ -734,7 +734,7 @@ module cice_cap file=__FILE__)) & return ! bail out endif - + if (NUOPC_IsConnected(state, fieldName=field_defs(i)%shortname)) then call NUOPC_Realize(state, field=field, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & @@ -814,15 +814,15 @@ module cice_cap call fld_list_add(fldsToIce_num, fldsToIce, "freezing_melting_potential" ,"1" , "will provide") call fld_list_add(fldsToIce_num, fldsToIce, "mixed_layer_depth" ,"m" , "will provide") ! fields for export - call fld_list_add(fldsFrIce_num, fldsFrIce, "sea_ice_fraction" ,"1" , "will provide") - call fld_list_add(fldsFrIce_num, fldsFrIce, "stress_on_ocn_ice_zonal" ,"1" , "will provide") - call fld_list_add(fldsFrIce_num, fldsFrIce, "stress_on_ocn_ice_merid" ,"1" , "will provide") - call fld_list_add(fldsFrIce_num, fldsFrIce, "sea_ice_temperature" ,"1" , "will provide") + call fld_list_add(fldsFrIce_num, fldsFrIce, "sea_ice_fraction" ,"1" , "will provide") + call fld_list_add(fldsFrIce_num, fldsFrIce, "stress_on_ocn_ice_zonal" ,"1" , "will provide") + call fld_list_add(fldsFrIce_num, fldsFrIce, "stress_on_ocn_ice_merid" ,"1" , "will provide") + call fld_list_add(fldsFrIce_num, fldsFrIce, "sea_ice_temperature" ,"1" , "will provide") ! 
call fld_list_add(fldsFrIce_num, fldsFrIce, "ice_mask" ,"1" , "will provide") - call fld_list_add(fldsFrIce_num, fldsFrIce, "mean_sw_pen_to_ocn" ,"1" , "will provide") + call fld_list_add(fldsFrIce_num, fldsFrIce, "mean_sw_pen_to_ocn" ,"1" , "will provide") call fld_list_add(fldsFrIce_num, fldsFrIce, "mean_fresh_water_to_ocean_rate" ,"1" , "will provide") - call fld_list_add(fldsFrIce_num, fldsFrIce, "mean_salt_rate" ,"1" , "will provide") - call fld_list_add(fldsFrIce_num, fldsFrIce, "net_heat_flx_to_ocn" ,"1" , "will provide") + call fld_list_add(fldsFrIce_num, fldsFrIce, "mean_salt_rate" ,"1" , "will provide") + call fld_list_add(fldsFrIce_num, fldsFrIce, "net_heat_flx_to_ocn" ,"1" , "will provide") call fld_list_add(fldsFrIce_num, fldsFrIce, "mean_ice_volume" ,"1" , "will provide") call fld_list_add(fldsFrIce_num, fldsFrIce, "mean_snow_volume" ,"1" , "will provide") @@ -885,9 +885,9 @@ module cice_cap real(kind=ESMF_KIND_R8), pointer :: dataPtr_ocncm(:,:,:) real(kind=ESMF_KIND_R8), pointer :: dataPtr_fmpot(:,:,:) real(kind=ESMF_KIND_R8), pointer :: dataPtr_mld(:,:,:) - integer :: ilo,ihi,jlo,jhi + integer :: ilo,ihi,jlo,jhi integer :: i,j,iblk,n,i1,i2,j1,j2 - real(kind=ESMF_KIND_R8) :: ue, vn, AngT_s + real(kind=ESMF_KIND_R8) :: ue, vn, AngT_s type(block) :: this_block character(len=*),parameter :: subname='(cice_cap:CICE_Import)' @@ -921,7 +921,7 @@ module cice_cap j1 = j - jlo + 1 sss (i,j,iblk) = dataPtr_sss (i1,j1,iblk) ! sea surface salinity (maybe for mushy layer) sst (i,j,iblk) = dataPtr_sst (i1,j1,iblk) - Tffresh ! sea surface temp (may not be needed?) - + frzmlt (i,j,iblk) = dataPtr_fmpot (i1,j1,iblk) ue = dataPtr_ocncz (i1,j1,iblk) vn = dataPtr_ocncm (i1,j1,iblk) @@ -969,7 +969,7 @@ module cice_cap integer :: ilo,ihi,jlo,jhi integer :: i,j,iblk,n,i1,i2,j1,j2 real(kind=ESMF_KIND_R8) :: ui, vj, angT - + type(block) :: this_block character(len=*),parameter :: subname='(cice_cap:CICE_Export)' !TODO clean up fields @@ -1035,7 +1035,7 @@ module cice_cap ! call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=dbrc) - + end subroutine end module cice_cap diff --git a/cicecore/drivers/standalone/cice/CICE.F90 b/cicecore/drivers/standalone/cice/CICE.F90 index d6a28c3ba..79dd06fca 100644 --- a/cicecore/drivers/standalone/cice/CICE.F90 +++ b/cicecore/drivers/standalone/cice/CICE.F90 @@ -1,21 +1,21 @@ !======================================================================= ! Copyright (c) 2022, Triad National Security, LLC ! All rights reserved. -! +! ! Copyright 2022. Triad National Security, LLC. This software was -! produced under U.S. Government contract DE-AC52-06NA25396 for Los +! produced under U.S. Government contract DE-AC52-06NA25396 for Los ! Alamos National Laboratory (LANL), which is operated by Triad -! National Security, LLC for the U.S. Department of Energy. The U.S. -! Government has rights to use, reproduce, and distribute this software. -! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY +! National Security, LLC for the U.S. Department of Energy. The U.S. +! Government has rights to use, reproduce, and distribute this software. +! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY ! WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF -! THIS SOFTWARE. If software is modified to produce derivative works, -! such modified software should be clearly marked, so as not to confuse +! THIS SOFTWARE. If software is modified to produce derivative works, +! such modified software should be clearly marked, so as not to confuse ! 
it with the version available from LANL. ! ! The full license and distribution policy are available from ! https://github.com/CICE-Consortium -! +! !======================================================================= ! ! Main driver routine for CICE. Initializes and steps through the model. @@ -49,7 +49,7 @@ program icemodel call CICE_Run !----------------------------------------------------------------- - ! Finalize CICE + ! Finalize CICE !----------------------------------------------------------------- call CICE_Finalize diff --git a/cicecore/drivers/standalone/cice/CICE_FinalMod.F90 b/cicecore/drivers/standalone/cice/CICE_FinalMod.F90 index 28811c3cd..a8b074883 100644 --- a/cicecore/drivers/standalone/cice/CICE_FinalMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_FinalMod.F90 @@ -30,7 +30,6 @@ module CICE_FinalMod subroutine CICE_Finalize - use ice_restart_shared, only: runid use ice_timers, only: ice_timer_stop, ice_timer_print_all, timer_total, & timer_stats diff --git a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 index 7208da481..07a151a01 100644 --- a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 @@ -66,7 +66,7 @@ subroutine cice_init floe_binwidth, c_fsd_range use ice_state, only: alloc_state use ice_flux_bgc, only: alloc_flux_bgc - use ice_calendar, only: dt, dt_dyn, istep, istep1, write_ic, & + use ice_calendar, only: dt, dt_dyn, write_ic, & init_calendar, advance_timestep, calc_timesteps use ice_communicate, only: init_communicate, my_task, master_task use ice_diagnostics, only: init_diags @@ -190,11 +190,11 @@ subroutine cice_init if (icepack_warnings_aborted()) call abort_ice(trim(subname), & file=__FILE__,line= __LINE__) - if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical + if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical !property tables - ! Initialize shortwave components using swdn from previous timestep - ! if restarting. These components will be scaled to current forcing + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing ! in prep_radiation. if (trim(runtype) == 'continue' .or. restart) & call init_shortwave ! initialize radiative transfer @@ -243,7 +243,7 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler - if (write_ic) call accum_hist(dt) ! write initial conditions + if (write_ic) call accum_hist(dt) ! write initial conditions if (my_task == master_task) then call ice_memusage_print(nu_diag,subname//':end') @@ -334,7 +334,7 @@ subroutine init_restart if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (trim(runtype) == 'continue') then + if (trim(runtype) == 'continue') then ! start from core restart file call restartfile() ! given by pointer in ice_in call calendar() ! update time parameters @@ -345,17 +345,17 @@ subroutine init_restart ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file !!! uncomment if EAP restart data exists ! if (kdyn == 2) call read_restart_eap - endif + endif ! tracers - ! ice age tracer - if (tr_iage) then + ! ice age tracer + if (tr_iage) then if (trim(runtype) == 'continue') & restart_age = .true. 
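! The age-tracer logic being set up here -- like the first-year, level-ice,
! pond and aerosol blocks that follow -- is one instance of a single
! restart-or-initialize pattern: force a restart read on 'continue' runs,
! otherwise cold-start every local block. A condensed, self-contained
! sketch of that control flow; restart_or_init_sketch and the commented-out
! calls are illustrative stand-ins, not CICE routines:
      subroutine restart_or_init_sketch(restart_flag, runtype, nblocks)
        implicit none
        logical,          intent(inout) :: restart_flag   ! e.g. restart_age
        character(len=*), intent(in)    :: runtype        ! 'initial' or 'continue'
        integer,          intent(in)    :: nblocks        ! number of local blocks
        integer :: iblk
        if (trim(runtype) == 'continue') restart_flag = .true.
        if (restart_flag) then
           ! call read_restart_tracer()       ! restart file supplies the field
        else
           do iblk = 1, nblocks               ! otherwise cold-start each block
              ! call init_tracer(iblk)
           enddo
        endif
      end subroutine restart_or_init_sketch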
if (restart_age) then call read_restart_age else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_age(trcrn(:,:,nt_iage,:,iblk)) enddo ! iblk endif @@ -366,7 +366,7 @@ subroutine init_restart if (restart_FY) then call read_restart_FY else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_FY(trcrn(:,:,nt_FY,:,iblk)) enddo ! iblk endif @@ -377,7 +377,7 @@ subroutine init_restart if (restart_lvl) then call read_restart_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), & trcrn(:,:,nt_vlvl,:,iblk)) enddo ! iblk @@ -391,7 +391,7 @@ subroutine init_restart if (restart_pond_cesm) then call read_restart_pond_cesm else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk)) enddo ! iblk @@ -405,7 +405,7 @@ subroutine init_restart if (restart_pond_lvl) then call read_restart_pond_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk), & @@ -420,7 +420,7 @@ subroutine init_restart if (restart_pond_topo) then call read_restart_pond_topo else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk)) @@ -471,7 +471,7 @@ subroutine init_restart if (restart_aero) then call read_restart_aero else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk)) enddo ! iblk endif ! .not. restart_aero diff --git a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 index 5547ba765..00c7921d1 100644 --- a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 @@ -7,7 +7,7 @@ ! William H. Lipscomb, LANL ! ! 2006 ECH: moved exit timeLoop to prevent execution of unnecessary timestep -! 2006 ECH: Streamlined for efficiency +! 2006 ECH: Streamlined for efficiency ! 2006 ECH: Converted to free source form (F90) ! 2007 BPB: Modified Delta-Eddington shortwave interface ! 2008 ECH: moved ESMF code to its own driver @@ -45,7 +45,7 @@ module CICE_RunMod subroutine CICE_Run - use ice_calendar, only: istep, istep1, dt, stop_now, advance_timestep + use ice_calendar, only: dt, stop_now, advance_timestep use ice_forcing, only: get_forcing_atmo, get_forcing_ocn, & get_wave_spec use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, & @@ -125,7 +125,7 @@ subroutine CICE_Run ! end of timestep loop !-------------------------------------------------------------------- - call ice_timer_stop(timer_step) ! end timestepping loop timer + call ice_timer_stop(timer_step) ! end timestepping loop timer end subroutine CICE_Run @@ -169,7 +169,7 @@ subroutine ice_step timer_hist, timer_readwrite integer (kind=int_kind) :: & - iblk , & ! block index + iblk , & ! block index k , & ! dynamics supercycling index ktherm ! thermodynamics is off when ktherm = -1 @@ -418,7 +418,7 @@ subroutine ice_step if (tr_iso) call write_restart_iso if (tr_aero) call write_restart_aero if (solve_zsal .or. skl_bgc .or. z_tracers) & - call write_restart_bgc + call write_restart_bgc if (tr_brine) call write_restart_hbrine if (kdyn == 2) call write_restart_eap call final_restart @@ -426,7 +426,7 @@ subroutine ice_step call ice_timer_stop(timer_readwrite) ! reading/writing end subroutine ice_step - + !======================================================================= ! ! 
Prepare for coupling @@ -461,12 +461,12 @@ subroutine coupling_prep (iblk) use ice_step_mod, only: ocean_mixed_layer use ice_timers, only: timer_couple, ice_timer_start, ice_timer_stop - integer (kind=int_kind), intent(in) :: & - iblk ! block index + integer (kind=int_kind), intent(in) :: & + iblk ! block index ! local variables - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ilo,ihi,jlo,jhi, & ! beginning and end of physical domain n , & ! thickness category index i,j , & ! horizontal indices @@ -602,8 +602,8 @@ subroutine coupling_prep (iblk) fsalt_ai (i,j,iblk) = fsalt (i,j,iblk) fhocn_ai (i,j,iblk) = fhocn (i,j,iblk) fswthru_ai(i,j,iblk) = fswthru(i,j,iblk) - fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) - fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) + fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) + fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) if (nbtrcr > 0) then do k = 1, nbtrcr @@ -624,7 +624,7 @@ subroutine coupling_prep (iblk) enddo !----------------------------------------------------------------- - ! Divide fluxes by ice area + ! Divide fluxes by ice area ! - the CESM coupler assumes fluxes are per unit ice area ! - also needed for global budget in diagnostics !----------------------------------------------------------------- @@ -654,22 +654,22 @@ subroutine coupling_prep (iblk) Qref_iso =Qref_iso (:,:,:,iblk), & fiso_evap=fiso_evap(:,:,:,iblk), & fiso_ocn =fiso_ocn (:,:,:,iblk)) - + #ifdef CICE_IN_NEMO !echmod - comment this out for efficiency, if .not. calc_Tsfc if (.not. calc_Tsfc) then !--------------------------------------------------------------- - ! If surface fluxes were provided, conserve these fluxes at ice - ! free points by passing to ocean. + ! If surface fluxes were provided, conserve these fluxes at ice + ! free points by passing to ocean. !--------------------------------------------------------------- - call sfcflux_to_ocn & + call sfcflux_to_ocn & (nx_block, ny_block, & tmask (:,:,iblk), aice_init(:,:,iblk), & fsurfn_f (:,:,:,iblk), flatn_f(:,:,:,iblk), & fresh (:,:,iblk), fhocn (:,:,iblk)) - endif + endif !echmod #endif call ice_timer_stop(timer_couple,iblk) ! atm/ocn coupling @@ -681,10 +681,10 @@ end subroutine coupling_prep !======================================================================= ! ! If surface heat fluxes are provided to CICE instead of CICE calculating -! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can +! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can ! be provided at points which do not have ice. (This could be due to ! the heat fluxes being calculated on a lower resolution grid or the -! heat fluxes not recalculated at every CICE timestep.) At ice free points, +! heat fluxes not recalculated at every CICE timestep.) At ice free points, ! conserve energy and water by passing these fluxes to the ocean. ! ! author: A. McLaren, Met Office @@ -717,7 +717,7 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, & ! local variables integer (kind=int_kind) :: & i, j, n ! horizontal indices - + real (kind=dbl_kind) :: & puny, & ! Lsub, & ! diff --git a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 index a252bc1b7..ad355d783 100644 --- a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 @@ -181,11 +181,11 @@ subroutine cice_init if (icepack_warnings_aborted()) call abort_ice(trim(subname), & file=__FILE__,line= __LINE__) - if (tr_aero .or. 
tr_zaero) call faero_optics !initialize aerosol optical + if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical !property tables - ! Initialize shortwave components using swdn from previous timestep - ! if restarting. These components will be scaled to current forcing + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing ! in prep_radiation. if (trim(runtype) == 'continue' .or. restart) & call init_shortwave ! initialize radiative transfer @@ -222,7 +222,7 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler - if (write_ic) call accum_hist(dt) ! write initial conditions + if (write_ic) call accum_hist(dt) ! write initial conditions end subroutine cice_init @@ -305,7 +305,7 @@ subroutine init_restart if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (trim(runtype) == 'continue') then + if (trim(runtype) == 'continue') then ! start from core restart file call restartfile() ! given by pointer in ice_in call calendar() ! update time parameters @@ -316,17 +316,17 @@ subroutine init_restart ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file !!! uncomment if EAP restart data exists ! if (kdyn == 2) call read_restart_eap - endif + endif ! tracers - ! ice age tracer - if (tr_iage) then + ! ice age tracer + if (tr_iage) then if (trim(runtype) == 'continue') & restart_age = .true. if (restart_age) then call read_restart_age else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_age(trcrn(:,:,nt_iage,:,iblk)) enddo ! iblk endif @@ -337,7 +337,7 @@ subroutine init_restart if (restart_FY) then call read_restart_FY else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_FY(trcrn(:,:,nt_FY,:,iblk)) enddo ! iblk endif @@ -348,7 +348,7 @@ subroutine init_restart if (restart_lvl) then call read_restart_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), & trcrn(:,:,nt_vlvl,:,iblk)) enddo ! iblk @@ -362,7 +362,7 @@ subroutine init_restart if (restart_pond_cesm) then call read_restart_pond_cesm else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk)) enddo ! iblk @@ -376,7 +376,7 @@ subroutine init_restart if (restart_pond_lvl) then call read_restart_pond_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk), & @@ -391,7 +391,7 @@ subroutine init_restart if (restart_pond_topo) then call read_restart_pond_topo else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk)) @@ -414,7 +414,7 @@ subroutine init_restart if (restart_iso) then call read_restart_iso else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_isotope(trcrn(:,:,nt_isosno:nt_isosno+n_iso-1,:,iblk), & trcrn(:,:,nt_isoice:nt_isoice+n_iso-1,:,iblk)) enddo ! iblk @@ -426,7 +426,7 @@ subroutine init_restart if (restart_aero) then call read_restart_aero else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk)) enddo ! iblk endif ! .not. 
restart_aero diff --git a/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 b/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 index 5a4b3d54e..bd7ed3165 100644 --- a/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 +++ b/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 @@ -8,7 +8,7 @@ program gridavgchk ! There are lots of issues here ! areas (T, U, N, E) are not locally conservative, affect X2YF ! X2YF is unmasked which can create havoc in U2T type directions - ! X2YS is masked but there can be no active cells to average (for instance, + ! X2YS is masked but there can be no active cells to average (for instance, ! single gridcell wide channels U2T where result is zero) ! land block elimination can lead to missing data on halo ! This test tries to deal with all these things.... @@ -36,7 +36,7 @@ program gridavgchk integer(int_kind) :: i, j, n, ib, ie, jb, je, iblock integer(int_kind) :: iglob, jglob integer(int_kind) :: blockID, numBlocks - type (block) :: this_block + type (block) :: this_block real(dbl_kind) ,allocatable :: array1x(:,:,:), array1y(:,:,:) ! input real(dbl_kind) ,allocatable :: arraysx(:,:,:), arraysy(:,:,:) ! extra input for NE2T, NE2U diff --git a/cicecore/drivers/unittest/optargs/optargs.F90 b/cicecore/drivers/unittest/optargs/optargs.F90 index 14c738d47..4acf7ac9f 100644 --- a/cicecore/drivers/unittest/optargs/optargs.F90 +++ b/cicecore/drivers/unittest/optargs/optargs.F90 @@ -193,7 +193,7 @@ program optargs computeD = .true. ierrV = oa_A + oa_B + oa_C + oa_D Ai1 = 7. - B = 9. + B = 9. Ci1 = 7. Di1 = 12; Di2=3. resultV = 49. @@ -205,7 +205,7 @@ program optargs computeD = .true. ierrV = oa_A + oa_B + oa_D Ai1 = 10. - B = 11. + B = 11. Di1 = 12; Di2=3. resultV = 40. call oa_layer1(Ao=Ao,B=B,Co=Co,Do=Do,Ai1=Ai1,Ci1=Ci1,Di1=Di1,Di2=Di2,ierr=ierr) diff --git a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 index a252bc1b7..ad355d783 100644 --- a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 @@ -181,11 +181,11 @@ subroutine cice_init if (icepack_warnings_aborted()) call abort_ice(trim(subname), & file=__FILE__,line= __LINE__) - if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical + if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical !property tables - ! Initialize shortwave components using swdn from previous timestep - ! if restarting. These components will be scaled to current forcing + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing ! in prep_radiation. if (trim(runtype) == 'continue' .or. restart) & call init_shortwave ! initialize radiative transfer @@ -222,7 +222,7 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler - if (write_ic) call accum_hist(dt) ! write initial conditions + if (write_ic) call accum_hist(dt) ! write initial conditions end subroutine cice_init @@ -305,7 +305,7 @@ subroutine init_restart if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (trim(runtype) == 'continue') then + if (trim(runtype) == 'continue') then ! start from core restart file call restartfile() ! given by pointer in ice_in call calendar() ! update time parameters @@ -316,17 +316,17 @@ subroutine init_restart ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file !!! 
uncomment if EAP restart data exists ! if (kdyn == 2) call read_restart_eap - endif + endif ! tracers - ! ice age tracer - if (tr_iage) then + ! ice age tracer + if (tr_iage) then if (trim(runtype) == 'continue') & restart_age = .true. if (restart_age) then call read_restart_age else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_age(trcrn(:,:,nt_iage,:,iblk)) enddo ! iblk endif @@ -337,7 +337,7 @@ subroutine init_restart if (restart_FY) then call read_restart_FY else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_FY(trcrn(:,:,nt_FY,:,iblk)) enddo ! iblk endif @@ -348,7 +348,7 @@ subroutine init_restart if (restart_lvl) then call read_restart_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), & trcrn(:,:,nt_vlvl,:,iblk)) enddo ! iblk @@ -362,7 +362,7 @@ subroutine init_restart if (restart_pond_cesm) then call read_restart_pond_cesm else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk)) enddo ! iblk @@ -376,7 +376,7 @@ subroutine init_restart if (restart_pond_lvl) then call read_restart_pond_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk), & @@ -391,7 +391,7 @@ subroutine init_restart if (restart_pond_topo) then call read_restart_pond_topo else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk)) @@ -414,7 +414,7 @@ subroutine init_restart if (restart_iso) then call read_restart_iso else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_isotope(trcrn(:,:,nt_isosno:nt_isosno+n_iso-1,:,iblk), & trcrn(:,:,nt_isoice:nt_isoice+n_iso-1,:,iblk)) enddo ! iblk @@ -426,7 +426,7 @@ subroutine init_restart if (restart_aero) then call read_restart_aero else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk)) enddo ! iblk endif ! .not. restart_aero diff --git a/cicecore/drivers/unittest/sumchk/sumchk.F90 b/cicecore/drivers/unittest/sumchk/sumchk.F90 index f314959cb..aba435b0e 100644 --- a/cicecore/drivers/unittest/sumchk/sumchk.F90 +++ b/cicecore/drivers/unittest/sumchk/sumchk.F90 @@ -1,7 +1,7 @@ program sumchk - ! This tests the CICE ice_global_reductions infrastructure by + ! This tests the CICE ice_global_reductions infrastructure by ! using CICE_InitMod (from the standalone model) to read/initialize ! a CICE grid/configuration. Then methods in ice_global_reductions ! are verified using hardwired inputs with known outputs. @@ -28,7 +28,7 @@ program sumchk integer(int_kind) :: i, j, k, l, m, n, iblock, ib, ie, jb, je integer(int_kind) :: blockID, numBlocks - type (block) :: this_block + type (block) :: this_block real(dbl_kind) ,allocatable :: arrayA(:,:,:),arrayB(:,:,:),arrayC(:,:,:) integer(int_kind),allocatable :: arrayiA(:,:,:),arrayiB(:,:,:) diff --git a/cicecore/shared/ice_arrays_column.F90 b/cicecore/shared/ice_arrays_column.F90 index dbad4292c..c9e8be8db 100644 --- a/cicecore/shared/ice_arrays_column.F90 +++ b/cicecore/shared/ice_arrays_column.F90 @@ -1,7 +1,7 @@ !======================================================================= ! Grid-dependent arrays needed for column package -! These were originally module variables in modules that became part of +! These were originally module variables in modules that became part of ! the column package ! author: Elizabeth C. 
Hunke, LANL @@ -94,9 +94,9 @@ module ice_arrays_column ! albedo components for history real (kind=dbl_kind), & dimension (:,:,:,:), allocatable, public :: & - albicen, & ! bare ice - albsnon, & ! snow - albpndn, & ! pond + albicen, & ! bare ice + albsnon, & ! snow + albpndn, & ! pond apeffn ! effective pond area used for radiation calculation real (kind=dbl_kind), dimension (:,:,:,:), allocatable, public :: & @@ -128,12 +128,12 @@ module ice_arrays_column ! aerosol optical properties -> band | ! v aerosol ! for combined dust category, use category 4 properties - real (kind=dbl_kind), dimension(:,:), allocatable, public :: & + real (kind=dbl_kind), dimension(:,:), allocatable, public :: & kaer_tab, & ! aerosol mass extinction cross section (m2/kg) waer_tab, & ! aerosol single scatter albedo (fraction) gaer_tab ! aerosol asymmetry parameter (cos(theta)) - real (kind=dbl_kind), dimension(:,:), allocatable, public :: & + real (kind=dbl_kind), dimension(:,:), allocatable, public :: & kaer_bc_tab, & ! BC mass extinction cross section (m2/kg) waer_bc_tab, & ! BC single scatter albedo (fraction) gaer_bc_tab ! BC aerosol asymmetry parameter (cos(theta)) @@ -146,7 +146,7 @@ module ice_arrays_column real (kind=dbl_kind), dimension (:), allocatable, public :: & bgrid , & ! biology nondimensional vertical grid points igrid , & ! biology vertical interface points - cgrid , & ! CICE vertical coordinate + cgrid , & ! CICE vertical coordinate icgrid , & ! interface grid for CICE (shortwave variable) swgrid ! grid for ice tracers used in dEdd scheme @@ -187,21 +187,21 @@ module ice_arrays_column ! 2*icepack_max_algae + icepack_max_doc + 7 + icepack_max_dic + icepack_max_don + icepack_max_fe ! Fep(1:icepack_max_fe) = 2*icepack_max_algae + icepack_max_doc + 8 + icepack_max_dic + icepack_max_don + icepack_max_fe: ! 2*icepack_max_algae + icepack_max_doc + 7 + icepack_max_dic + icepack_max_don + 2*icepack_max_fe - ! zaero(1:icepack_max_aero) = 2*icepack_max_algae + icepack_max_doc + 8 + icepack_max_dic + icepack_max_don + 2*icepack_max_fe: + ! zaero(1:icepack_max_aero) = 2*icepack_max_algae + icepack_max_doc + 8 + icepack_max_dic + icepack_max_don + 2*icepack_max_fe: ! 2*icepack_max_algae + icepack_max_doc + 7 + icepack_max_dic + icepack_max_don + 2*icepack_max_fe ! + icepack_max_aero ! humic == 2*icepack_max_algae + icepack_max_doc + 8 + icepack_max_dic + icepack_max_don + 2*icepack_max_fe - ! + icepack_max_aero + ! + icepack_max_aero integer (kind=int_kind), dimension(:,:,:,:), allocatable, public :: & - algal_peak ! vertical location of algal maximum, 0 if no maximum + algal_peak ! vertical location of algal maximum, 0 if no maximum - real (kind=dbl_kind), & + real (kind=dbl_kind), & dimension (:,:,:,:,:), allocatable, public :: & Zoo ! N losses accumulated in timestep (ie. zooplankton/bacteria) ! mmol/m^3 - real (kind=dbl_kind), & + real (kind=dbl_kind), & dimension (:,:,:,:), allocatable, public :: & dhbr_top , & ! brine top change dhbr_bot ! brine bottom change @@ -214,7 +214,7 @@ module ice_arrays_column real (kind=dbl_kind), & dimension (:,:,:,:,:), allocatable, public :: & - bphi , & ! porosity of layers + bphi , & ! porosity of layers bTiz ! layer temperatures interpolated on bio grid (C) real (kind=dbl_kind), & @@ -222,23 +222,23 @@ module ice_arrays_column darcy_V ! darcy velocity positive up (m/s) real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - zsal_tot , & ! Total ice salinity in per grid cell (g/m^2) - chl_net , & ! Total chla (mg chla/m^2) per grid cell - NO_net ! 
Total nitrate per grid cell + zsal_tot , & ! Total ice salinity in per grid cell (g/m^2) + chl_net , & ! Total chla (mg chla/m^2) per grid cell + NO_net ! Total nitrate per grid cell logical (kind=log_kind), dimension (:,:,:), allocatable, public :: & - Rayleigh_criteria ! .true. means Ra_c was reached + Rayleigh_criteria ! .true. means Ra_c was reached real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & Rayleigh_real ! .true. = c1, .false. = c0 - real (kind=dbl_kind), & + real (kind=dbl_kind), & dimension (:,:,:,:), allocatable, public :: & sice_rho ! avg sea ice density (kg/m^3) ! ech: diagnostic only? - real (kind=dbl_kind), & + real (kind=dbl_kind), & dimension (:,:,:,:), allocatable, public :: & - fzsaln, & ! category fzsal(kg/m^2/s) + fzsaln, & ! category fzsal(kg/m^2/s) fzsaln_g ! salt flux from gravity drainage only real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & @@ -249,26 +249,26 @@ module ice_arrays_column zfswin ! Shortwave flux into layers interpolated on bio grid (W/m^2) real (kind=dbl_kind), dimension (:,:,:,:,:), allocatable, public :: & - iDi , & ! igrid Diffusivity (m^2/s) - iki ! Ice permeability (m^2) + iDi , & ! igrid Diffusivity (m^2/s) + iki ! Ice permeability (m^2) real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & upNO , & ! nitrate uptake rate (mmol/m^2/d) times aice upNH ! ammonium uptake rate (mmol/m^2/d) times aice - + real (kind=dbl_kind), & dimension(:,:,:,:,:), allocatable, public :: & - trcrn_sw ! bgc tracers active in the delta-Eddington shortwave + trcrn_sw ! bgc tracers active in the delta-Eddington shortwave ! calculation on the shortwave grid (swgrid) real (kind=dbl_kind), & dimension (:,:,:,:), allocatable, public :: & - ice_bio_net , & ! depth integrated tracer (mmol/m^2) + ice_bio_net , & ! depth integrated tracer (mmol/m^2) snow_bio_net ! depth integrated snow tracer (mmol/m^2) logical (kind=log_kind), public :: & oceanmixed_ice, & ! if true, use internal ocean mixed layer - restore_bgc ! + restore_bgc ! character(char_len), public :: & fe_data_type ! 'default', 'clim' @@ -280,7 +280,7 @@ module ice_arrays_column optics_file, & ! modal aero optics file optics_file_fieldname ! modal aero optics file fieldname - real (kind=dbl_kind), dimension(:), allocatable, public :: & + real (kind=dbl_kind), dimension(:), allocatable, public :: & R_C2N_DON ! carbon to nitrogen mole ratio of DON pool real (kind=dbl_kind), dimension(:), allocatable, public :: & @@ -353,11 +353,11 @@ subroutine alloc_arrays_column grow_net (nx_block,ny_block,max_blocks), & ! Specific growth rate (/s) per grid cell PP_net (nx_block,ny_block,max_blocks), & ! Total production (mg C/m^2/s) per grid cell hbri (nx_block,ny_block,max_blocks), & ! brine height, area-averaged for comparison with hi (m) - zsal_tot (nx_block,ny_block,max_blocks), & ! Total ice salinity in per grid cell (g/m^2) - chl_net (nx_block,ny_block,max_blocks), & ! Total chla (mg chla/m^2) per grid cell - NO_net (nx_block,ny_block,max_blocks), & ! Total nitrate per grid cell + zsal_tot (nx_block,ny_block,max_blocks), & ! Total ice salinity in per grid cell (g/m^2) + chl_net (nx_block,ny_block,max_blocks), & ! Total chla (mg chla/m^2) per grid cell + NO_net (nx_block,ny_block,max_blocks), & ! Total nitrate per grid cell Rayleigh_criteria & - (nx_block,ny_block,max_blocks), & ! .true. means Ra_c was reached + (nx_block,ny_block,max_blocks), & ! .true. means Ra_c was reached Rayleigh_real(nx_block,ny_block,max_blocks), & ! .true. = c1, .false. 
= c0 fzsal (nx_block,ny_block,max_blocks), & ! Total flux of salt to ocean at time step for conservation fzsal_g (nx_block,ny_block,max_blocks), & ! Total gravity drainage flux @@ -371,9 +371,9 @@ subroutine alloc_arrays_column alidrn (nx_block,ny_block,ncat,max_blocks), & ! near-ir direct albedo (fraction) alvdfn (nx_block,ny_block,ncat,max_blocks), & ! visible diffuse albedo (fraction) alidfn (nx_block,ny_block,ncat,max_blocks), & ! near-ir diffuse albedo (fraction) - albicen (nx_block,ny_block,ncat,max_blocks), & ! bare ice - albsnon (nx_block,ny_block,ncat,max_blocks), & ! snow - albpndn (nx_block,ny_block,ncat,max_blocks), & ! pond + albicen (nx_block,ny_block,ncat,max_blocks), & ! bare ice + albsnon (nx_block,ny_block,ncat,max_blocks), & ! snow + albpndn (nx_block,ny_block,ncat,max_blocks), & ! pond apeffn (nx_block,ny_block,ncat,max_blocks), & ! effective pond area used for radiation calculation snowfracn (nx_block,ny_block,ncat,max_blocks), & ! Category snow fraction used in radiation fswsfcn (nx_block,ny_block,ncat,max_blocks), & ! SW absorbed at ice/snow surface (W m-2) @@ -385,21 +385,21 @@ subroutine alloc_arrays_column fswintn (nx_block,ny_block,ncat,max_blocks), & ! SW absorbed in ice interior, below surface (W m-2) first_ice_real & (nx_block,ny_block,ncat,max_blocks), & ! .true. = c1, .false. = c0 - first_ice (nx_block,ny_block,ncat,max_blocks), & ! distinguishes ice that disappears (e.g. melts) and reappears (e.g. transport) + first_ice (nx_block,ny_block,ncat,max_blocks), & ! distinguishes ice that disappears (melts) and reappears (transport) dhbr_top (nx_block,ny_block,ncat,max_blocks), & ! brine top change dhbr_bot (nx_block,ny_block,ncat,max_blocks), & ! brine bottom change darcy_V (nx_block,ny_block,ncat,max_blocks), & ! darcy velocity positive up (m/s) sice_rho (nx_block,ny_block,ncat,max_blocks), & ! avg sea ice density (kg/m^3) ! ech: diagnostic only? - fzsaln (nx_block,ny_block,ncat,max_blocks), & ! category fzsal(kg/m^2/s) + fzsaln (nx_block,ny_block,ncat,max_blocks), & ! category fzsal(kg/m^2/s) fzsaln_g (nx_block,ny_block,ncat,max_blocks), & ! salt flux from gravity drainage only Iswabsn (nx_block,ny_block,nilyr,ncat,max_blocks), & ! SW radiation absorbed in ice layers (W m-2) Sswabsn (nx_block,ny_block,nslyr,ncat,max_blocks), & ! SW radiation absorbed in snow layers (W m-2) fswpenln (nx_block,ny_block,nilyr+1,ncat,max_blocks), & ! visible SW entering ice layers (W m-2) Zoo (nx_block,ny_block,nblyr+1,ncat,max_blocks), & ! N losses accumulated in timestep (ie. zooplankton/bacteria) zfswin (nx_block,ny_block,nblyr+1,ncat,max_blocks), & ! Shortwave flux into layers interpolated on bio grid (W/m^2) - iDi (nx_block,ny_block,nblyr+1,ncat,max_blocks), & ! igrid Diffusivity (m^2/s) - iki (nx_block,ny_block,nblyr+1,ncat,max_blocks), & ! Ice permeability (m^2) - bphi (nx_block,ny_block,nblyr+2,ncat,max_blocks), & ! porosity of layers + iDi (nx_block,ny_block,nblyr+1,ncat,max_blocks), & ! igrid Diffusivity (m^2/s) + iki (nx_block,ny_block,nblyr+1,ncat,max_blocks), & ! Ice permeability (m^2) + bphi (nx_block,ny_block,nblyr+2,ncat,max_blocks), & ! porosity of layers bTiz (nx_block,ny_block,nblyr+2,ncat,max_blocks), & ! layer temperatures interpolated on bio grid (C) stat=ierr) if (ierr/=0) call abort_ice(subname//': Out of Memory1') @@ -409,9 +409,9 @@ subroutine alloc_arrays_column fbio_snoice (nx_block,ny_block,max_nbtrcr,max_blocks), & ! fluxes from snow to ice fbio_atmice (nx_block,ny_block,max_nbtrcr,max_blocks), & ! 
fluxes from atm to ice ocean_bio_all(nx_block,ny_block,max_nbtrcr,max_blocks), & ! fixed order, all values even for tracers false - ice_bio_net (nx_block,ny_block,max_nbtrcr,max_blocks), & ! depth integrated tracer (mmol/m^2) + ice_bio_net (nx_block,ny_block,max_nbtrcr,max_blocks), & ! depth integrated tracer (mmol/m^2) snow_bio_net (nx_block,ny_block,max_nbtrcr,max_blocks), & ! depth integrated snow tracer (mmol/m^2) - algal_peak (nx_block,ny_block,max_algae ,max_blocks), & ! vertical location of algal maximum, 0 if no maximum + algal_peak (nx_block,ny_block,max_algae ,max_blocks), & ! vertical location of algal maximum, 0 if no maximum stat=ierr) if (ierr/=0) call abort_ice(subname//': Out of Memory2') @@ -420,7 +420,7 @@ subroutine alloc_arrays_column c_hi_range(ncat) , & ! bgrid(nblyr+2) , & ! biology nondimensional vertical grid points igrid(nblyr+1) , & ! biology vertical interface points - cgrid(nilyr+1) , & ! CICE vertical coordinate + cgrid(nilyr+1) , & ! CICE vertical coordinate icgrid(nilyr+1) , & ! interface grid for CICE (shortwave variable) swgrid(nilyr+1) , & ! grid for ice tracers used in dEdd scheme stat=ierr) diff --git a/cicecore/shared/ice_calendar.F90 b/cicecore/shared/ice_calendar.F90 index 7684fef67..ad1a87b4c 100644 --- a/cicecore/shared/ice_calendar.F90 +++ b/cicecore/shared/ice_calendar.F90 @@ -4,7 +4,7 @@ ! ! Authors: Elizabeth C. Hunke, LANL ! Tony Craig, NCAR -! Craig MacLachlan, UK Met Office +! Craig MacLachlan, UK Met Office ! ! 2006 ECH: Removed 'w' option for history; added 'h' and histfreq_n. ! Converted to free form source (F90). @@ -199,7 +199,7 @@ subroutine init_calendar hour=0 ! computed in calendar, but needs some reasonable initial value istep1 = istep0 ! number of steps at current timestep ! real (dumped) or imagined (use to set calendar) - idate0 = (myear)*10000 + mmonth*100 + mday ! date (yyyymmdd) + idate0 = (myear)*10000 + mmonth*100 + mday ! date (yyyymmdd) stop_now = 0 ! end program execution if stop_now=1 dt_dyn = dt/real(ndtd,kind=dbl_kind) ! dynamics et al timestep force_restart_now = .false. @@ -385,7 +385,7 @@ subroutine calendar() call abort_ice(subname//'ERROR: model year too large') endif - idate = (myear)*10000 + mmonth*100 + mday ! date (yyyymmdd) + idate = (myear)*10000 + mmonth*100 + mday ! date (yyyymmdd) yday = daycal(mmonth) + mday ! day of the year hour = int(msec/seconds_per_hour) @@ -638,7 +638,6 @@ integer function compute_days_between(year0,month0,day0,year1,month1,day1) integer (kind=int_kind), intent(in) :: day1 ! end day ! Internal variable - logical (kind=log_kind) :: isleap ! Leap year logical integer (kind=int_kind) :: nday0, nday1 character(len=*),parameter :: subname='(compute_days_between)' @@ -911,7 +910,7 @@ subroutine calendar_time2date(atimesecs,ayear,amon,aday,asec,year_ref,mon_ref,da tday = 1 tsec = 0 - ! add initial seconds to timesecs and treat lsec_ref as zero + ! add initial seconds to timesecs and treat lsec_ref as zero ltimesecs = atimesecs + real(lsec_ref,kind=dbl_kind) ! first estimate of tyear diff --git a/cicecore/shared/ice_constants.F90 b/cicecore/shared/ice_constants.F90 index c49732e35..f2da2ef9d 100644 --- a/cicecore/shared/ice_constants.F90 +++ b/cicecore/shared/ice_constants.F90 @@ -1,7 +1,7 @@ !======================================================================= ! ! This module defines a variety of physical and numerical constants -! used throughout the ice model +! used throughout the ice model ! ! author Elizabeth C. 
Hunke, LANL @@ -33,7 +33,7 @@ module ice_constants real (kind=dbl_kind), public :: & shlat = 30.0_dbl_kind ,&! artificial masking edge (deg) nhlat = -30.0_dbl_kind ! artificial masking edge (deg) - + !----------------------------------------------------------------- ! numbers used outside the column package !----------------------------------------------------------------- @@ -91,12 +91,12 @@ module ice_constants ! location of fields for staggered grids !----------------------------------------------------------------- - integer (int_kind), parameter, public :: & - field_loc_unknown = 0, & - field_loc_noupdate = -1, & - field_loc_center = 1, & - field_loc_NEcorner = 2, & - field_loc_Nface = 3, & + integer (int_kind), parameter, public :: & + field_loc_unknown = 0, & + field_loc_noupdate = -1, & + field_loc_center = 1, & + field_loc_NEcorner = 2, & + field_loc_Nface = 3, & field_loc_Eface = 4, & field_loc_Wface = 5 @@ -105,11 +105,11 @@ module ice_constants ! changes of direction across tripole boundary !----------------------------------------------------------------- - integer (int_kind), parameter, public :: & - field_type_unknown = 0, & - field_type_noupdate = -1, & - field_type_scalar = 1, & - field_type_vector = 2, & + integer (int_kind), parameter, public :: & + field_type_unknown = 0, & + field_type_noupdate = -1, & + field_type_scalar = 1, & + field_type_vector = 2, & field_type_angle = 3 !----------------------------------------------------------------- @@ -138,9 +138,10 @@ subroutine ice_init_constants( & omega_in , & ! angular velocity of earth (rad/sec) radius_in , & ! earth radius (m) spval_dbl_in , & ! special value (double precision) - spval_in , & ! special value for netCDF output shlat_in , & ! artificial masking edge (deg) nhlat_in ! artificial masking edge (deg) + real (kind=real_kind), intent(in), optional :: & + spval_in ! special value for netCDF output character(len=*),parameter :: subname='(ice_init_constants)' diff --git a/cicecore/shared/ice_distribution.F90 b/cicecore/shared/ice_distribution.F90 index 1a23b63be..0f3f6c198 100644 --- a/cicecore/shared/ice_distribution.F90 +++ b/cicecore/shared/ice_distribution.F90 @@ -61,7 +61,7 @@ function create_distribution(dist_type, nprocs, work_per_block) ! by call the appropriate subroutine based on distribution type ! requested. Currently three distributions are supported: ! 2-d Cartesian distribution (cartesian), a load-balanced -! distribution using a rake algorithm based on an input amount of work +! distribution using a rake algorithm based on an input amount of work ! per block, and a space-filling-curve algorithm. character (*), intent(in) :: & @@ -180,14 +180,6 @@ subroutine create_local_block_ids(block_ids, distribution) do n=1,size(distribution%blockLocation) if (distribution%blockLocation(n) == my_task+1) then block_ids(distribution%blockLocalID(n)) = n - - if (debug_blocks .and. my_task == master_task) then - write(nu_diag,'(2a,3i8)') & - subname,' block id, proc, local_block: ', & - block_ids(distribution%blockLocalID(n)), & - distribution%blockLocation(n), & - distribution%blockLocalID(n) - endif endif end do endif @@ -597,7 +589,7 @@ function create_distrb_cart(nprocs, workPerBlock) result(newDistrb) newDistrb%nprocs = nprocs call proc_decomposition(nprocs, nprocsX, nprocsY) - + !---------------------------------------------------------------------- ! @@ -639,7 +631,7 @@ function create_distrb_cart(nprocs, workPerBlock) result(newDistrb) do j=1,nprocsY do i=1,nprocsX - processor = (j-1)*nprocsX + i ! 
number the processors + processor = (j-1)*nprocsX + i ! number the processors ! left to right, bot to top is = (i-1)*numBlocksXPerProc + 1 ! starting block in i @@ -783,7 +775,7 @@ function create_distrb_rake(nprocs, workPerBlock) result(newDistrb) !---------------------------------------------------------------------- dist = create_distrb_cart(nprocs, workPerBlock) - + !---------------------------------------------------------------------- ! ! if the number of blocks is close to the number of processors, @@ -909,7 +901,7 @@ function create_distrb_rake(nprocs, workPerBlock) result(newDistrb) call ice_distributionRake (workTmp, procTmp, workPerBlock, & priority, dist) end do - + deallocate(workTmp, procTmp, stat=istat) if (istat > 0) then call abort_ice( & @@ -1092,7 +1084,7 @@ function create_distrb_roundrobin(nprocs, workPerBlock) result(newDistrb) integer (int_kind), dimension(:), allocatable :: & proc_tmp ! temp processor id - + character(len=*),parameter :: subname='(create_distrb_roundrobin)' !---------------------------------------------------------------------- @@ -1143,7 +1135,7 @@ function create_distrb_roundrobin(nprocs, workPerBlock) result(newDistrb) do j=1,nblocks_y do i=1,nblocks_x - + globalID = globalID + 1 if (workPerBlock(globalID) /= 0) then @@ -1199,7 +1191,7 @@ function create_distrb_roundrobin(nprocs, workPerBlock) result(newDistrb) !---------------------------------------------------------------------- end function create_distrb_roundrobin - + !*********************************************************************** function create_distrb_spiralcenter(nprocs, workPerBlock) result(newDistrb) @@ -1237,7 +1229,7 @@ function create_distrb_spiralcenter(nprocs, workPerBlock) result(newDistrb) blocklist ! temp block ordered list integer (int_kind), dimension(:,:), allocatable :: & blockchk ! temp block check array - + character(len=*),parameter :: subname='(create_distrb_spiralcenter)' !---------------------------------------------------------------------- @@ -1424,7 +1416,7 @@ function create_distrb_spiralcenter(nprocs, workPerBlock) result(newDistrb) !---------------------------------------------------------------------- end function create_distrb_spiralcenter - + !*********************************************************************** function create_distrb_wghtfile(nprocs, workPerBlock) result(newDistrb) @@ -1461,7 +1453,7 @@ function create_distrb_wghtfile(nprocs, workPerBlock) result(newDistrb) proc_tmp ! temp processor id logical (log_kind) :: up ! direction of pe counting - + character(len=*),parameter :: subname='(create_distrb_wghtfile)' !---------------------------------------------------------------------- @@ -1590,7 +1582,7 @@ function create_distrb_wghtfile(nprocs, workPerBlock) result(newDistrb) !---------------------------------------------------------------------- end function create_distrb_wghtfile - + !*********************************************************************** function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) @@ -1628,7 +1620,7 @@ function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) logical (log_kind), dimension(:), allocatable :: & bfree ! map of assigned blocks - + integer (int_kind) :: cnt, blktogether, i2 integer (int_kind) :: totblocks, nchunks logical (log_kind) :: keepgoing @@ -1704,7 +1696,7 @@ function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) blktogether = max(1,nint(float(totblocks)/float(6*nprocs))) ! 
write(nu_diag,*) 'ice_distrb_sectrobin totblocks = ',totblocks,nblocks_y*nblocks_x - + !------------------------------ ! southern group of blocks ! weave back and forth in i vs j @@ -1897,7 +1889,7 @@ function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) !---------------------------------------------------------------------- end function create_distrb_sectrobin - + !*********************************************************************** function create_distrb_sectcart(nprocs, workPerBlock) result(newDistrb) @@ -1933,7 +1925,7 @@ function create_distrb_sectcart(nprocs, workPerBlock) result(newDistrb) integer (int_kind), dimension(:), allocatable :: & proc_tmp ! temp processor id - + integer (int_kind) :: n character(len=*),parameter :: subname='(create_distrb_sectcart)' @@ -1997,7 +1989,7 @@ function create_distrb_sectcart(nprocs, workPerBlock) result(newDistrb) cnt = 0 do j2=1,nblocks_y do i2=1,nblocks_x/2 - + if (n == 1) then i = i2 j = j2 @@ -2066,7 +2058,7 @@ function create_distrb_sectcart(nprocs, workPerBlock) result(newDistrb) !---------------------------------------------------------------------- end function create_distrb_sectcart - + !********************************************************************** function create_distrb_spacecurve(nprocs,work_per_block) @@ -2400,7 +2392,7 @@ subroutine ice_distributionRake (procWork, procID, blockWork, & integer (int_kind) :: & i, n, &! dummy loop indices np1, &! n+1 corrected for cyclical wrap - iproc, inext, &! processor ids for current and next + iproc, inext, &! processor ids for current and next nprocs, numBlocks, &! number of blocks, processors lastPriority, &! priority for most recent block minPriority, &! minimum priority diff --git a/cicecore/shared/ice_domain_size.F90 b/cicecore/shared/ice_domain_size.F90 index 56381b986..999a35f48 100644 --- a/cicecore/shared/ice_domain_size.F90 +++ b/cicecore/shared/ice_domain_size.F90 @@ -51,11 +51,11 @@ module ice_domain_size !*** values for the parameter below. A value higher than !*** necessary will not cause the code to fail, but will !*** allocate more memory than is necessary. A value that - !*** is too low will cause the code to exit. + !*** is too low will cause the code to exit. !*** A good initial guess is found using !*** max_blocks = (nx_global/block_size_x)*(ny_global/block_size_y)/ !*** num_procs - + !======================================================================= end module ice_domain_size diff --git a/cicecore/shared/ice_fileunits.F90 b/cicecore/shared/ice_fileunits.F90 index ccb518807..1854dda64 100644 --- a/cicecore/shared/ice_fileunits.F90 +++ b/cicecore/shared/ice_fileunits.F90 @@ -84,7 +84,7 @@ module ice_fileunits nu_diag_set = .false. ! flag to indicate whether nu_diag is already set integer (kind=int_kind), public :: & - ice_IOUnitsMinUnit = 11, & ! do not use unit numbers below + ice_IOUnitsMinUnit = 11, & ! do not use unit numbers below ice_IOUnitsMaxUnit = 99 ! or above, set by setup_nml logical (kind=log_kind), dimension(:), allocatable :: & @@ -101,8 +101,8 @@ module ice_fileunits !======================================================================= -! This routine grabs needed unit numbers. -! nu_diag is set to 6 (stdout) but may be reset later by the namelist. +! This routine grabs needed unit numbers. +! nu_diag is set to 6 (stdout) but may be reset later by the namelist. ! nu_nml is obtained separately. 
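! A self-contained sketch of the unit bookkeeping described above: scan the
! allowed window (ice_IOUnitsMinUnit..ice_IOUnitsMaxUnit, 11 to 99 unless
! reset by setup_nml) for a unit that is not currently open. This is a
! simplified stand-in for get_fileunit, without the module's reserved-unit
! list:
      function next_free_unit() result(iunit)
        implicit none
        integer :: iunit
        integer, parameter :: minunit = 11, maxunit = 99  ! default window
        logical :: in_use
        do iunit = minunit, maxunit
           inquire(unit=iunit, opened=in_use)
           if (.not. in_use) return                ! first unopened unit wins
        enddo
        iunit = -1    ! nothing free; a real implementation would abort here
      end function next_free_unit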
subroutine init_fileunits @@ -203,7 +203,7 @@ end subroutine get_fileunit !======================================================================= -! This routine releases unit numbers at the end of a run. +! This routine releases unit numbers at the end of a run. subroutine release_all_fileunits diff --git a/cicecore/shared/ice_init_column.F90 b/cicecore/shared/ice_init_column.F90 index 89a378948..5339aa6ec 100644 --- a/cicecore/shared/ice_init_column.F90 +++ b/cicecore/shared/ice_init_column.F90 @@ -10,7 +10,7 @@ module ice_init_column use ice_blocks, only: nx_block, ny_block use ice_constants use ice_communicate, only: my_task, master_task, ice_barrier - use ice_domain_size, only: ncat, max_blocks + use ice_domain_size, only: ncat, max_blocks use ice_domain_size, only: nblyr, nilyr, nslyr use ice_domain_size, only: n_aero, n_zaero, n_algae use ice_domain_size, only: n_doc, n_dic, n_don @@ -270,12 +270,12 @@ subroutine init_shortwave Iswabsn(:,:,:,:,iblk) = c0 Sswabsn(:,:,:,:,iblk) = c0 - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j = 1, ny_block ! can be jlo, jhi do i = 1, nx_block ! can be ilo, ihi @@ -397,7 +397,7 @@ subroutine init_shortwave l_print_point=l_print_point, & initonly = .true.) endif - + !----------------------------------------------------------------- ! Define aerosol tracer on shortwave grid !----------------------------------------------------------------- @@ -414,7 +414,7 @@ subroutine init_shortwave enddo ! j !----------------------------------------------------------------- - ! Aggregate albedos + ! Aggregate albedos ! Match loop order in coupling_prep for same order of operations !----------------------------------------------------------------- @@ -528,7 +528,7 @@ end subroutine init_FY ! Initialize ice lvl tracers (call prior to reading restart data) - subroutine init_lvl(iblk, alvl, vlvl) + subroutine init_lvl(iblk, alvl, vlvl) use ice_constants, only: c0, c1 use ice_arrays_column, only: ffracn, dhsn @@ -599,7 +599,7 @@ subroutine init_meltponds_topo(apnd, hpnd, ipnd) apnd(:,:,:) = c0 hpnd(:,:,:) = c0 ipnd(:,:,:) = c0 - + end subroutine init_meltponds_topo !======================================================================= @@ -751,7 +751,7 @@ end subroutine init_aerosol ! Initialize vertical profile for biogeochemistry - subroutine init_bgc() + subroutine init_bgc() use ice_arrays_column, only: zfswin, trcrn_sw, & ocean_bio_all, ice_bio_net, snow_bio_net, & @@ -781,15 +781,15 @@ subroutine init_bgc() logical (kind=log_kind) :: & RayleighC , & solve_zsal - + type (block) :: & this_block ! block information for current block real(kind=dbl_kind), allocatable :: & trcrn_bgc(:,:) - + real(kind=dbl_kind), dimension(nilyr,ncat) :: & - sicen + sicen real(kind=dbl_kind) :: & RayleighR @@ -814,13 +814,13 @@ subroutine init_bgc() allocate(trcrn_bgc(ntrcr,ncat)) - bphi(:,:,:,:,:) = c0 ! initial porosity for no ice + bphi(:,:,:,:,:) = c0 ! initial porosity for no ice iDi (:,:,:,:,:) = c0 ! interface diffusivity bTiz(:,:,:,:,:) = c0 ! initial bio grid ice temperature iki (:,:,:,:,:) = c0 ! permeability ocean_bio_all(:,:,:,:) = c0 - ice_bio_net (:,:,:,:) = c0 ! integrated ice tracer conc (mmol/m^2 or mg/m^2) + ice_bio_net (:,:,:,:) = c0 ! integrated ice tracer conc (mmol/m^2 or mg/m^2) snow_bio_net (:,:,:,:) = c0 ! integrated snow tracer conc (mmol/m^2 or mg/m^2) zfswin (:,:,:,:,:) = c0 ! 
shortwave flux on bio grid trcrn_sw (:,:,:,:,:) = c0 ! tracers active in the shortwave calculation @@ -831,20 +831,20 @@ subroutine init_bgc() !----------------------------------------------------------------- ! zsalinity initialization !----------------------------------------------------------------- - - if (solve_zsal) then ! default values + + if (solve_zsal) then ! default values !$OMP PARALLEL DO PRIVATE(iblk,i,j,k,n,ilo,ihi,jlo,jhi,this_block,trcrn_bgc) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi do j = jlo, jhi - do i = ilo, ihi + do i = ilo, ihi call icepack_init_zsalinity(nblyr=nblyr, ntrcr_o=ntrcr_o, & Rayleigh_criteria = RayleighC, & Rayleigh_real = RayleighR, & @@ -863,7 +863,7 @@ subroutine init_bgc() enddo endif enddo ! i - enddo ! j + enddo ! j enddo ! iblk !$OMP END PARALLEL DO call icepack_warnings_flush(nu_diag) @@ -877,15 +877,15 @@ subroutine init_bgc() ! biogeochemistry initialization !----------------------------------------------------------------- - if (.not. restart_bgc) then - + if (.not. restart_bgc) then + !----------------------------------------------------------------- ! Initial Ocean Values if not coupled to the ocean bgc !----------------------------------------------------------------- !$OMP PARALLEL DO PRIVATE(iblk,i,j,n,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -919,14 +919,14 @@ subroutine init_bgc() !$OMP PARALLEL DO PRIVATE(iblk,i,j,n,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi do j = jlo, jhi - do i = ilo, ihi + do i = ilo, ihi call icepack_load_ocean_bio_array(max_nbtrcr=icepack_max_nbtrcr, & max_algae=icepack_max_algae, max_don=icepack_max_don, & @@ -934,7 +934,7 @@ subroutine init_bgc() max_dic=icepack_max_dic, max_aero=icepack_max_aero, & nit =nit (i,j, iblk), amm=amm(i,j, iblk), sil =sil (i,j, iblk), & dmsp=dmsp(i,j, iblk), dms=dms(i,j, iblk), algalN=algalN(i,j,:,iblk), & - doc =doc (i,j,:,iblk), don=don(i,j,:,iblk), dic =dic (i,j,:,iblk), & + doc =doc (i,j,:,iblk), don=don(i,j,:,iblk), dic =dic (i,j,:,iblk), & fed =fed (i,j,:,iblk), fep=fep(i,j,:,iblk), zaeros=zaeros(i,j,:,iblk), & hum=hum (i,j, iblk), ocean_bio_all=ocean_bio_all(i,j,:,iblk)) @@ -948,18 +948,18 @@ subroutine init_bgc() if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (.not. restart_bgc) then + if (.not. restart_bgc) then !$OMP PARALLEL DO PRIVATE(iblk,i,j,k,n,ilo,ihi,jlo,jhi,this_block,sicen,trcrn_bgc) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi do j = jlo, jhi - do i = ilo, ihi + do i = ilo, ihi do n = 1, ncat do k = 1, nilyr sicen(k,n) = trcrn(i,j,nt_sice+k-1,n,iblk) @@ -987,7 +987,7 @@ subroutine init_bgc() ! read restart to complete BGC initialization !----------------------------------------------------------------- - if (restart_zsal .or. restart_bgc) call read_restart_bgc + if (restart_zsal .or. 
restart_bgc) call read_restart_bgc deallocate(trcrn_bgc) @@ -1030,7 +1030,7 @@ subroutine init_hbrine() if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__,line= __LINE__) - first_ice(:,:,:,:) = .true. + first_ice(:,:,:,:) = .true. if (tr_brine) trcrn(:,:,nt_fbri,:,:) = c1 end subroutine init_hbrine @@ -1038,7 +1038,7 @@ end subroutine init_hbrine !======================================================================= ! Namelist variables, set to default values; may be altered at run time -! +! ! author Elizabeth C. Hunke, LANL ! Nicole Jeffery, LANL @@ -1060,7 +1060,7 @@ subroutine input_zbgc tr_bgc_N, tr_bgc_C, tr_bgc_chl, & tr_bgc_DON, tr_bgc_Fe, tr_zaero, & tr_bgc_hum, tr_aero - + integer (kind=int_kind) :: & ktherm @@ -1087,7 +1087,7 @@ subroutine input_zbgc restore_bgc, restart_bgc, scale_bgc, solve_zsal, restart_zsal, & tr_bgc_Nit, tr_bgc_C, tr_bgc_chl, tr_bgc_Am, tr_bgc_Sil, & tr_bgc_DMS, tr_bgc_PON, tr_bgc_hum, tr_bgc_DON, tr_bgc_Fe, & - grid_o, grid_o_t, l_sk, grid_oS, optics_file, optics_file_fieldname, & + grid_o, grid_o_t, l_sk, grid_oS, optics_file, optics_file_fieldname, & l_skS, phi_snow, initbio_frac, frazil_scav, & ratio_Si2N_diatoms , ratio_Si2N_sp , ratio_Si2N_phaeo , & ratio_S2N_diatoms , ratio_S2N_sp , ratio_S2N_phaeo , & @@ -1124,9 +1124,9 @@ subroutine input_zbgc fedtype_1 , feptype_1 , zaerotype_bc1 , & zaerotype_bc2 , zaerotype_dust1 , zaerotype_dust2 , & zaerotype_dust3 , zaerotype_dust4 , ratio_C2N_diatoms , & - ratio_C2N_sp , ratio_C2N_phaeo , ratio_chl2N_diatoms, & + ratio_C2N_sp , ratio_C2N_phaeo , ratio_chl2N_diatoms, & ratio_chl2N_sp , ratio_chl2N_phaeo , F_abs_chl_diatoms , & - F_abs_chl_sp , F_abs_chl_phaeo , ratio_C2N_proteins + F_abs_chl_sp , F_abs_chl_phaeo , ratio_C2N_proteins !----------------------------------------------------------------- @@ -1151,22 +1151,22 @@ subroutine input_zbgc restart_bgc = .false. ! biogeochemistry restart restart_zsal = .false. ! salinity restart restart_hbrine = .false. ! hbrine restart - scale_bgc = .false. ! initial bgc tracers proportional to S - skl_bgc = .false. ! solve skeletal biochemistry + scale_bgc = .false. ! initial bgc tracers proportional to S + skl_bgc = .false. ! solve skeletal biochemistry z_tracers = .false. ! solve vertically resolved tracers dEdd_algae = .false. ! dynamic algae contributes to shortwave absorption ! in delta-Eddington calculation - solve_zbgc = .false. ! turn on z layer biochemistry - tr_bgc_PON = .false. !--------------------------------------------- + solve_zbgc = .false. ! turn on z layer biochemistry + tr_bgc_PON = .false. !--------------------------------------------- tr_bgc_Nit = .false. ! biogeochemistry (skl or zbgc) tr_bgc_C = .false. ! if skl_bgc = .true. then skl tr_bgc_chl = .false. ! if z_tracers = .true. then vertically resolved tr_bgc_Sil = .false. ! if z_tracers + solve_zbgc = .true. then - tr_bgc_Am = .false. ! vertically resolved with reactions + tr_bgc_Am = .false. ! vertically resolved with reactions tr_bgc_DMS = .false. !------------------------------------------------ - tr_bgc_DON = .false. ! + tr_bgc_DON = .false. ! tr_bgc_hum = .false. ! - tr_bgc_Fe = .false. ! + tr_bgc_Fe = .false. ! tr_bgc_N = .true. ! ! brine height parameter @@ -1175,17 +1175,17 @@ subroutine input_zbgc ! skl biology parameters bgc_flux_type = 'Jin2006'! type of ocean-ice poston velocity ('constant') - ! z biology parameters - grid_o = c5 ! for bottom flux - grid_o_t = c5 ! for top flux - l_sk = 7.0_dbl_kind ! characteristic diffusive scale (m) + ! 
z biology parameters + grid_o = c5 ! for bottom flux + grid_o_t = c5 ! for top flux + l_sk = 7.0_dbl_kind ! characteristic diffusive scale (m) initbio_frac = c1 ! fraction of ocean trcr concentration in bio trcrs - frazil_scav = c1 ! increase in initial bio tracer from ocean scavenging - ratio_Si2N_diatoms = 1.8_dbl_kind ! algal Si to N (mol/mol) + frazil_scav = c1 ! increase in initial bio tracer from ocean scavenging + ratio_Si2N_diatoms = 1.8_dbl_kind ! algal Si to N (mol/mol) ratio_Si2N_sp = c0 ! diatoms, small plankton, phaeocystis ratio_Si2N_phaeo = c0 ratio_S2N_diatoms = 0.03_dbl_kind ! algal S to N (mol/mol) - ratio_S2N_sp = 0.03_dbl_kind + ratio_S2N_sp = 0.03_dbl_kind ratio_S2N_phaeo = 0.03_dbl_kind ratio_Fe2C_diatoms = 0.0033_dbl_kind ! algal Fe to C (umol/mol) ratio_Fe2C_sp = 0.0033_dbl_kind @@ -1196,7 +1196,7 @@ subroutine input_zbgc ratio_Fe2DON = 0.023_dbl_kind ! Fe to N of DON (nmol/umol) ratio_Fe2DOC_s = p1 ! Fe to C of DOC (nmol/umol) saccharids ratio_Fe2DOC_l = 0.033_dbl_kind ! Fe to C of DOC (nmol/umol) lipids - fr_resp = 0.05_dbl_kind ! frac of algal growth lost due to respiration + fr_resp = 0.05_dbl_kind ! frac of algal growth lost due to respiration tau_min = 5200.0_dbl_kind ! rapid mobile to stationary exchanges (s) tau_max = 1.73e5_dbl_kind ! long time mobile to stationary exchanges (s) algal_vel = 1.11e-8_dbl_kind! 0.5 cm/d(m/s) Lavoie 2005 1.5 cm/day @@ -1205,13 +1205,13 @@ subroutine input_zbgc chlabs_diatoms = 0.03_dbl_kind ! chl absorption (1/m/(mg/m^3)) chlabs_sp = 0.01_dbl_kind chlabs_phaeo = 0.05_dbl_kind - alpha2max_low_diatoms = 0.8_dbl_kind ! light limitation (1/(W/m^2)) + alpha2max_low_diatoms = 0.8_dbl_kind ! light limitation (1/(W/m^2)) alpha2max_low_sp = 0.67_dbl_kind alpha2max_low_phaeo = 0.67_dbl_kind - beta2max_diatoms = 0.018_dbl_kind ! light inhibition (1/(W/m^2)) + beta2max_diatoms = 0.018_dbl_kind ! light inhibition (1/(W/m^2)) beta2max_sp = 0.0025_dbl_kind beta2max_phaeo = 0.01_dbl_kind - mu_max_diatoms = 1.2_dbl_kind ! maximum growth rate (1/day) + mu_max_diatoms = 1.2_dbl_kind ! maximum growth rate (1/day) mu_max_sp = 0.851_dbl_kind mu_max_phaeo = 0.851_dbl_kind grow_Tdep_diatoms = 0.06_dbl_kind ! Temperature dependence of growth (1/C) @@ -1241,10 +1241,10 @@ subroutine input_zbgc K_Fe_diatoms = c1 ! iron half saturation (nM) K_Fe_sp = 0.2_dbl_kind K_Fe_phaeo = p1 - f_don_protein = 0.6_dbl_kind ! fraction of spilled grazing to proteins - kn_bac_protein = 0.03_dbl_kind ! Bacterial degredation of DON (1/d) - f_don_Am_protein = 0.25_dbl_kind ! fraction of remineralized DON to ammonium - f_doc_s = 0.4_dbl_kind ! fraction of mortality to DOC + f_don_protein = 0.6_dbl_kind ! fraction of spilled grazing to proteins + kn_bac_protein = 0.03_dbl_kind ! Bacterial degradation of DON (1/d) + f_don_Am_protein = 0.25_dbl_kind ! fraction of remineralized DON to ammonium + f_doc_s = 0.4_dbl_kind ! fraction of mortality to DOC f_doc_l = 0.4_dbl_kind f_exude_s = c1 ! fraction of exudation to DOC f_exude_l = c1 @@ -1254,15 +1254,15 @@ subroutine input_zbgc fsal = c1 ! Salinity limitation (ppt) op_dep_min = p1 ! Light attenuates for optical depths exceeding min fr_graze_s = p5 ! fraction of grazing spilled or slopped - fr_graze_e = p5 ! fraction of assimilation excreted + fr_graze_e = p5 ! fraction of assimilation excreted fr_mort2min = p5 ! fractionation of mortality to Am fr_dFe = 0.3_dbl_kind ! fraction of remineralized nitrogen ! (in units of algal iron) - k_nitrif = c0 ! 
nitrification rate (1/day) t_iron_conv = 3065.0_dbl_kind ! desorption loss pFe to dFe (day) - max_loss = 0.9_dbl_kind ! restrict uptake to % of remaining value - max_dfe_doc1 = 0.2_dbl_kind ! max ratio of dFe to saccharides in the ice - !(nM Fe/muM C) + max_loss = 0.9_dbl_kind ! restrict uptake to % of remaining value + max_dfe_doc1 = 0.2_dbl_kind ! max ratio of dFe to saccharides in the ice + !(nM Fe/muM C) fr_resp_s = 0.75_dbl_kind ! DMSPd fraction of respiration loss as DMSPd y_sk_DMS = p5 ! fraction conversion given high yield t_sk_conv = 3.0_dbl_kind ! Stefels conversion time (d) @@ -1296,11 +1296,11 @@ subroutine input_zbgc F_abs_chl_diatoms = 2.0_dbl_kind ! scales absorbed radiation for dEdd F_abs_chl_sp = 4.0_dbl_kind F_abs_chl_phaeo = 5.0 - ratio_C2N_proteins = 7.0_dbl_kind ! ratio of C to N in proteins (mol/mol) + ratio_C2N_proteins = 7.0_dbl_kind ! ratio of C to N in proteins (mol/mol) ! z salinity parameters - grid_oS = c5 ! for bottom flux - l_skS = 7.0_dbl_kind ! characteristic diffusive scale (m) + grid_oS = c5 ! for bottom flux + l_skS = 7.0_dbl_kind ! characteristic diffusive scale (m) !----------------------------------------------------------------- ! read from input file @@ -1333,10 +1333,10 @@ subroutine input_zbgc ! broadcast !----------------------------------------------------------------- - call broadcast_scalar(solve_zsal, master_task) - call broadcast_scalar(restart_zsal, master_task) + call broadcast_scalar(solve_zsal, master_task) + call broadcast_scalar(restart_zsal, master_task) call broadcast_scalar(tr_brine, master_task) - call broadcast_scalar(restart_hbrine, master_task) + call broadcast_scalar(restart_hbrine, master_task) call broadcast_scalar(phi_snow, master_task) call broadcast_scalar(grid_oS, master_task) @@ -1354,14 +1354,14 @@ subroutine input_zbgc call broadcast_scalar(tr_bgc_Am, master_task) call broadcast_scalar(tr_bgc_Sil, master_task) call broadcast_scalar(tr_bgc_hum, master_task) - call broadcast_scalar(tr_bgc_DMS, master_task) - call broadcast_scalar(tr_bgc_PON, master_task) - call broadcast_scalar(tr_bgc_DON, master_task) - call broadcast_scalar(tr_bgc_Fe, master_task) + call broadcast_scalar(tr_bgc_DMS, master_task) + call broadcast_scalar(tr_bgc_PON, master_task) + call broadcast_scalar(tr_bgc_DON, master_task) + call broadcast_scalar(tr_bgc_Fe, master_task) call broadcast_scalar(z_tracers, master_task) call broadcast_scalar(tr_zaero, master_task) - call broadcast_scalar(dEdd_algae, master_task) + call broadcast_scalar(dEdd_algae, master_task) call broadcast_scalar(modal_aero, master_task) call broadcast_scalar(optics_file, master_task) call broadcast_scalar(optics_file_fieldname, master_task) @@ -1395,31 +1395,31 @@ subroutine input_zbgc call broadcast_scalar(chlabs_diatoms , master_task) call broadcast_scalar(chlabs_sp , master_task) call broadcast_scalar(chlabs_phaeo , master_task) - call broadcast_scalar(alpha2max_low_diatoms , master_task) + call broadcast_scalar(alpha2max_low_diatoms , master_task) call broadcast_scalar(alpha2max_low_sp , master_task) call broadcast_scalar(alpha2max_low_phaeo , master_task) - call broadcast_scalar(beta2max_diatoms , master_task) - call broadcast_scalar(beta2max_sp , master_task) - call broadcast_scalar(beta2max_phaeo , master_task) - call broadcast_scalar(mu_max_diatoms , master_task) - call broadcast_scalar(mu_max_sp , master_task) - call broadcast_scalar(mu_max_phaeo , master_task) - call broadcast_scalar(grow_Tdep_diatoms, master_task) - call broadcast_scalar(grow_Tdep_sp , master_task) - call 
broadcast_scalar(grow_Tdep_phaeo , master_task) - call broadcast_scalar(fr_graze_diatoms , master_task) - call broadcast_scalar(fr_graze_sp , master_task) - call broadcast_scalar(fr_graze_phaeo , master_task) - call broadcast_scalar(mort_pre_diatoms , master_task) - call broadcast_scalar(mort_pre_sp , master_task) - call broadcast_scalar(mort_pre_phaeo , master_task) - call broadcast_scalar(mort_Tdep_diatoms, master_task) - call broadcast_scalar(mort_Tdep_sp , master_task) - call broadcast_scalar(mort_Tdep_phaeo , master_task) - call broadcast_scalar(k_exude_diatoms , master_task) - call broadcast_scalar(k_exude_sp , master_task) - call broadcast_scalar(k_exude_phaeo , master_task) - call broadcast_scalar(K_Nit_diatoms , master_task) + call broadcast_scalar(beta2max_diatoms , master_task) + call broadcast_scalar(beta2max_sp , master_task) + call broadcast_scalar(beta2max_phaeo , master_task) + call broadcast_scalar(mu_max_diatoms , master_task) + call broadcast_scalar(mu_max_sp , master_task) + call broadcast_scalar(mu_max_phaeo , master_task) + call broadcast_scalar(grow_Tdep_diatoms, master_task) + call broadcast_scalar(grow_Tdep_sp , master_task) + call broadcast_scalar(grow_Tdep_phaeo , master_task) + call broadcast_scalar(fr_graze_diatoms , master_task) + call broadcast_scalar(fr_graze_sp , master_task) + call broadcast_scalar(fr_graze_phaeo , master_task) + call broadcast_scalar(mort_pre_diatoms , master_task) + call broadcast_scalar(mort_pre_sp , master_task) + call broadcast_scalar(mort_pre_phaeo , master_task) + call broadcast_scalar(mort_Tdep_diatoms, master_task) + call broadcast_scalar(mort_Tdep_sp , master_task) + call broadcast_scalar(mort_Tdep_phaeo , master_task) + call broadcast_scalar(k_exude_diatoms , master_task) + call broadcast_scalar(k_exude_sp , master_task) + call broadcast_scalar(k_exude_phaeo , master_task) + call broadcast_scalar(K_Nit_diatoms , master_task) call broadcast_scalar(K_Nit_sp , master_task) call broadcast_scalar(K_Nit_phaeo , master_task) call broadcast_scalar(K_Am_diatoms , master_task) @@ -1435,17 +1435,17 @@ subroutine input_zbgc call broadcast_scalar(kn_bac_protein , master_task) call broadcast_scalar(f_don_Am_protein , master_task) call broadcast_scalar(f_doc_s , master_task) - call broadcast_scalar(f_doc_l , master_task) + call broadcast_scalar(f_doc_l , master_task) call broadcast_scalar(f_exude_s , master_task) call broadcast_scalar(f_exude_l , master_task) - call broadcast_scalar(k_bac_s , master_task) + call broadcast_scalar(k_bac_s , master_task) call broadcast_scalar(k_bac_l , master_task) call broadcast_scalar(T_max , master_task) call broadcast_scalar(fsal , master_task) call broadcast_scalar(op_dep_min , master_task) - call broadcast_scalar(fr_graze_s , master_task) - call broadcast_scalar(fr_graze_e , master_task) - call broadcast_scalar(fr_mort2min , master_task) + call broadcast_scalar(fr_graze_s , master_task) + call broadcast_scalar(fr_graze_e , master_task) + call broadcast_scalar(fr_mort2min , master_task) call broadcast_scalar(fr_dFe , master_task) call broadcast_scalar(k_nitrif , master_task) call broadcast_scalar(t_iron_conv , master_task) @@ -1453,18 +1453,18 @@ subroutine input_zbgc call broadcast_scalar(max_dfe_doc1 , master_task) call broadcast_scalar(fr_resp_s , master_task) call broadcast_scalar(y_sk_DMS , master_task) - call broadcast_scalar(t_sk_conv , master_task) + call broadcast_scalar(t_sk_conv , master_task) call broadcast_scalar(t_sk_ox , master_task) call broadcast_scalar(algaltype_diatoms, master_task) - call 
broadcast_scalar(algaltype_sp , master_task) - call broadcast_scalar(algaltype_phaeo , master_task) + call broadcast_scalar(algaltype_sp , master_task) + call broadcast_scalar(algaltype_phaeo , master_task) call broadcast_scalar(nitratetype , master_task) call broadcast_scalar(ammoniumtype , master_task) call broadcast_scalar(silicatetype , master_task) - call broadcast_scalar(dmspptype , master_task) - call broadcast_scalar(dmspdtype , master_task) + call broadcast_scalar(dmspptype , master_task) + call broadcast_scalar(dmspdtype , master_task) call broadcast_scalar(humtype , master_task) - call broadcast_scalar(doctype_s , master_task) + call broadcast_scalar(doctype_s , master_task) call broadcast_scalar(doctype_l , master_task) call broadcast_scalar(dontype_protein , master_task) call broadcast_scalar(fedtype_1 , master_task) @@ -1484,7 +1484,7 @@ subroutine input_zbgc call broadcast_scalar(F_abs_chl_diatoms , master_task) call broadcast_scalar(F_abs_chl_sp , master_task) call broadcast_scalar(F_abs_chl_phaeo , master_task) - call broadcast_scalar(ratio_C2N_proteins , master_task) + call broadcast_scalar(ratio_C2N_proteins , master_task) !----------------------------------------------------------------- ! zsalinity and brine @@ -1503,7 +1503,7 @@ subroutine input_zbgc write(nu_diag,*) subname,' ERROR: solve_zsal=T but 0 zsalinity tracers' endif abort_flag = 101 - endif + endif if (solve_zsal .and. ((.not. tr_brine) .or. (ktherm /= 1))) then if (my_task == master_task) then @@ -1517,7 +1517,7 @@ subroutine input_zbgc write(nu_diag,*) subname,' ERROR: tr_brine=T but no biology layers compiled' endif abort_flag = 103 - endif + endif !----------------------------------------------------------------- ! biogeochemistry @@ -1552,14 +1552,14 @@ subroutine input_zbgc abort_flag = 107 endif - if (dEdd_algae .AND. trim(shortwave) /= 'dEdd') then + if (dEdd_algae .AND. trim(shortwave) /= 'dEdd') then if (my_task == master_task) then write(nu_diag,*) subname,' ERROR: dEdd_algae = T but shortwave /= dEdd' endif abort_flag = 108 endif - if (dEdd_algae .AND. (.NOT. tr_bgc_N) .AND. (.NOT. tr_zaero)) then + if (dEdd_algae .AND. (.NOT. tr_bgc_N) .AND. (.NOT. tr_zaero)) then if (my_task == master_task) then write(nu_diag,*) subname,' ERROR: need tr_bgc_N or tr_zaero for dEdd_algae' endif @@ -1572,8 +1572,8 @@ subroutine input_zbgc endif abort_flag = 110 endif - - if (modal_aero .AND. trim(shortwave) /= 'dEdd') then + + if (modal_aero .AND. trim(shortwave) /= 'dEdd') then if (my_task == master_task) then write(nu_diag,*) subname,' ERROR: modal_aero = T but shortwave /= dEdd' endif @@ -1889,7 +1889,7 @@ subroutine count_tracers tr_bgc_N, tr_bgc_C, tr_bgc_chl, & tr_bgc_DON, tr_bgc_Fe, tr_zaero, & tr_bgc_hum - + logical (kind=log_kind) :: & solve_zsal, skl_bgc, z_tracers @@ -1971,7 +1971,7 @@ subroutine count_tracers nt_ipnd = ntrcr ! on level-ice ponds (if frzpnd='hlid') endif if (tr_pond_topo) then - ntrcr = ntrcr + 1 ! + ntrcr = ntrcr + 1 ! nt_ipnd = ntrcr ! refrozen pond ice lid thickness endif endif @@ -2014,7 +2014,7 @@ subroutine count_tracers !tcx, modify code so we don't have to reset n_aero here n_aero = 0 !echmod - this is not getting set correctly (overwritten later?) endif - + !----------------------------------------------------------------- ! initialize zbgc tracer indices !----------------------------------------------------------------- @@ -2755,7 +2755,7 @@ subroutine init_zbgc if (skl_bgc .or. 
z_tracers) then if (tr_bgc_N) then - do mm = 1, n_algae + do mm = 1, n_algae call init_bgc_trcr(nk, nt_fbri, & nt_bgc_N(mm), nlt_bgc_N(mm), & algaltype(mm), nt_depend, & @@ -2775,14 +2775,14 @@ subroutine init_zbgc nt_strata, bio_index) bio_index_o(nlt_bgc_Nit) = icepack_max_algae + 1 endif ! tr_bgc_Nit - + if (tr_bgc_C) then ! ! Algal C is not yet distinct from algal N ! * Reqires exudation and/or changing C:N ratios ! for implementation ! - ! do mm = 1,n_algae + ! do mm = 1,n_algae ! call init_bgc_trcr(nk, nt_fbri, & ! nt_bgc_C(mm), nlt_bgc_C(mm), & ! algaltype(mm), nt_depend, & @@ -2832,7 +2832,7 @@ subroutine init_zbgc trcr_base, n_trcr_strata, & nt_strata, bio_index) bio_index_o(nlt_bgc_Am) = 2*icepack_max_algae + icepack_max_doc + icepack_max_dic + 2 - endif + endif if (tr_bgc_Sil) then call init_bgc_trcr(nk, nt_fbri, & nt_bgc_Sil, nlt_bgc_Sil, & @@ -2841,7 +2841,7 @@ subroutine init_zbgc trcr_base, n_trcr_strata, & nt_strata, bio_index) bio_index_o(nlt_bgc_Sil) = 2*icepack_max_algae + icepack_max_doc + icepack_max_dic + 3 - endif + endif if (tr_bgc_DMS) then ! all together call init_bgc_trcr(nk, nt_fbri, & nt_bgc_DMSPp, nlt_bgc_DMSPp, & @@ -2866,7 +2866,7 @@ subroutine init_zbgc trcr_base, n_trcr_strata, & nt_strata, bio_index) bio_index_o(nlt_bgc_DMS) = 2*icepack_max_algae + icepack_max_doc + icepack_max_dic + 6 - endif + endif if (tr_bgc_PON) then call init_bgc_trcr(nk, nt_fbri, & nt_bgc_PON, nlt_bgc_PON, & @@ -2908,8 +2908,8 @@ subroutine init_zbgc bio_index_o(nlt_bgc_Fep(mm)) = 2*icepack_max_algae + icepack_max_doc + icepack_max_dic & + icepack_max_don + icepack_max_fe + 7 + mm enddo ! mm - endif ! tr_bgc_Fe - + endif ! tr_bgc_Fe + if (tr_bgc_hum) then call init_bgc_trcr(nk, nt_fbri, & nt_bgc_hum, nlt_bgc_hum, & @@ -2918,7 +2918,7 @@ subroutine init_zbgc trcr_base, n_trcr_strata, & nt_strata, bio_index) bio_index_o(nlt_bgc_hum) = 2*icepack_max_algae + icepack_max_doc + 8 + icepack_max_dic & - + icepack_max_don + 2*icepack_max_fe + icepack_max_aero + + icepack_max_don + 2*icepack_max_fe + icepack_max_aero endif endif ! skl_bgc or z_tracers @@ -2942,7 +2942,7 @@ subroutine init_zbgc ! and 2 snow layers (snow surface + interior) nk = nblyr + 1 - nt_depend = 2 + nt_fbri + ntd + nt_depend = 2 + nt_fbri + ntd ! z layer aerosols if (tr_zaero) then @@ -2963,15 +2963,15 @@ subroutine init_zbgc endif ! tr_zaero if (nbtrcr > 0) then - do k = 1,nbtrcr - zbgc_frac_init(k) = c1 - trcr_depend(nt_zbgc_frac+k-1) = 2+nt_fbri + do k = 1,nbtrcr + zbgc_frac_init(k) = c1 + trcr_depend(nt_zbgc_frac+k-1) = 2+nt_fbri trcr_base(nt_zbgc_frac+ k - 1,1) = c0 trcr_base(nt_zbgc_frac+ k - 1,2) = c1 trcr_base(nt_zbgc_frac+ k - 1,3) = c0 - n_trcr_strata(nt_zbgc_frac+ k - 1)= 1 + n_trcr_strata(nt_zbgc_frac+ k - 1)= 1 nt_strata(nt_zbgc_frac+ k - 1,1) = nt_fbri - nt_strata(nt_zbgc_frac+ k - 1,2) = 0 + nt_strata(nt_zbgc_frac+ k - 1,2) = 0 tau_ret(k) = c1 tau_rel(k) = c1 if (bgc_tracer_type(k) >= c0 .and. bgc_tracer_type(k) < p5) then @@ -2999,7 +2999,7 @@ subroutine init_zbgc do k = 1, nbtrcr zbgc_init_frac(k) = frazil_scav if (bgc_tracer_type(k) < c0) zbgc_init_frac(k) = initbio_frac - enddo + enddo !----------------------------------------------------------------- ! set values in icepack @@ -3016,7 +3016,7 @@ subroutine init_zbgc !----------------------------------------------------------------- ! 
final consistency checks - !----------------------------------------------------------------- + !----------------------------------------------------------------- if (nbtrcr > icepack_max_nbtrcr) then write (nu_diag,*) subname,' ' write (nu_diag,*) subname,'nbtrcr > icepack_max_nbtrcr' @@ -3037,13 +3037,13 @@ subroutine init_zbgc write(nu_diag,1020) ' number of bio tracers = ', nbtrcr write(nu_diag,1020) ' number of Isw tracers = ', nbtrcr_sw - + elseif (z_tracers) then - + write(nu_diag,1020) ' number of ztracers = ', nbtrcr write(nu_diag,1020) ' number of Isw tracers = ', nbtrcr_sw write(nu_diag,1000) ' initbio_frac = ', initbio_frac - write(nu_diag,1000) ' frazil_scav = ', frazil_scav + write(nu_diag,1000) ' frazil_scav = ', frazil_scav endif ! skl_bgc or solve_bgc endif ! master_task @@ -3092,7 +3092,7 @@ subroutine init_bgc_trcr(nk, nt_fbri, & integer (kind=int_kind) :: & k , & ! loop index n_strata , & ! temporary values - nt_strata1, & ! + nt_strata1, & ! nt_strata2 real (kind=dbl_kind) :: & @@ -3105,7 +3105,7 @@ subroutine init_bgc_trcr(nk, nt_fbri, & !-------- bgc_tracer_type(nlt_bgc) = bgctype - + if (nk > 1) then ! include vertical bgc in snow do k = nk, nk+1 trcr_depend (nt_bgc + k ) = 2 ! snow volume @@ -3117,10 +3117,10 @@ subroutine init_bgc_trcr(nk, nt_fbri, & nt_strata (nt_bgc + k,2) = 0 enddo - trcr_base1 = c0 - trcr_base2 = c1 + trcr_base1 = c0 + trcr_base2 = c1 trcr_base3 = c0 - n_strata = 1 + n_strata = 1 nt_strata1 = nt_fbri nt_strata2 = 0 else ! nk = 1 diff --git a/cicecore/shared/ice_restart_column.F90 b/cicecore/shared/ice_restart_column.F90 index b28ae2f60..a15f9d2c1 100644 --- a/cicecore/shared/ice_restart_column.F90 +++ b/cicecore/shared/ice_restart_column.F90 @@ -41,7 +41,7 @@ module ice_restart_column write_restart_bgc, read_restart_bgc, & write_restart_hbrine, read_restart_hbrine - logical (kind=log_kind), public :: & + logical (kind=log_kind), public :: & restart_age , & ! if .true., read age tracer restart file restart_FY , & ! if .true., read FY tracer restart file restart_lvl , & ! if .true., read lvl tracer restart file @@ -54,7 +54,7 @@ module ice_restart_column restart_fsd , & ! if .true., read floe size restart file restart_iso , & ! if .true., read isotope tracer restart file restart_aero , & ! if .true., read aerosol tracer restart file - restart_zsal , & ! if .true., read Salinity from restart file + restart_zsal , & ! if .true., read Salinity from restart file restart_hbrine , & ! if .true., read hbrine from restart file restart_bgc ! if .true., read bgc restart file @@ -302,7 +302,7 @@ end subroutine write_restart_pond_cesm subroutine read_restart_pond_cesm() - use ice_fileunits, only: nu_restart_pond + use ice_fileunits, only: nu_restart_pond use ice_state, only: trcrn ! local variables @@ -379,7 +379,7 @@ end subroutine write_restart_pond_lvl subroutine read_restart_pond_lvl() use ice_arrays_column, only: dhsn, ffracn - use ice_fileunits, only: nu_restart_pond + use ice_fileunits, only: nu_restart_pond use ice_flux, only: fsnow use ice_state, only: trcrn @@ -459,7 +459,7 @@ end subroutine write_restart_pond_topo subroutine read_restart_pond_topo() - use ice_fileunits, only: nu_restart_pond + use ice_fileunits, only: nu_restart_pond use ice_state, only: trcrn ! 
local variables @@ -502,7 +502,7 @@ subroutine write_restart_snow() logical (kind=log_kind) :: diag integer (kind=int_kind) :: nt_smice, nt_smliq, nt_rhos, nt_rsnw, k - character*3 ck + character(len=3) :: ck character(len=*),parameter :: subname='(write_restart_snow)' call icepack_query_tracer_indices(nt_smice_out=nt_smice, & @@ -544,7 +544,7 @@ subroutine read_restart_snow() logical (kind=log_kind) :: & diag integer (kind=int_kind) :: nt_smice, nt_smliq, nt_rhos, nt_rsnw, k - character*3 ck + character(len=3) :: ck character(len=*),parameter :: subname='(read_restart_snow)' call icepack_query_tracer_indices(nt_smice_out=nt_smice, & @@ -589,7 +589,7 @@ subroutine write_restart_fsd() logical (kind=log_kind) :: diag integer (kind=int_kind) :: nt_fsd, k - character*3 ck + character(len=3) :: ck character(len=*),parameter :: subname='(write_restart_fsd)' call icepack_query_tracer_indices(nt_fsd_out=nt_fsd) @@ -624,7 +624,7 @@ subroutine read_restart_fsd() logical (kind=log_kind) :: & diag integer (kind=int_kind) :: nt_fsd, k - character*3 ck + character(len=3) :: ck character(len=*),parameter :: subname='(read_restart_fsd)' call icepack_query_tracer_indices(nt_fsd_out=nt_fsd) @@ -660,7 +660,7 @@ subroutine write_restart_iso() logical (kind=log_kind) :: diag integer (kind=int_kind) :: nt_isosno, nt_isoice, k - character*3 ck + character(len=3) :: ck character(len=*),parameter :: subname='(write_restart_iso)' call icepack_query_tracer_indices(nt_isosno_out=nt_isosno, nt_isoice_out=nt_isoice) @@ -702,7 +702,7 @@ subroutine read_restart_iso() logical (kind=log_kind) :: & diag integer (kind=int_kind) :: nt_isosno, nt_isoice, k - character*3 ck + character(len=3) :: ck character(len=*),parameter :: subname='(read_restart_iso)' call icepack_query_tracer_indices(nt_isosno_out=nt_isosno, nt_isoice_out=nt_isoice) @@ -885,14 +885,14 @@ subroutine read_restart_hbrine() !$OMP PARALLEL DO PRIVATE(iblk,i,j,n,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi do j = jlo, jhi - do i = ilo, ihi + do i = ilo, ihi do n = 1, ncat if (first_ice_real(i,j,n,iblk) >= p5) then first_ice (i,j,n,iblk) = .true. @@ -900,7 +900,7 @@ subroutine read_restart_hbrine() first_ice (i,j,n,iblk) = .false. endif enddo ! ncat - enddo ! i + enddo ! i enddo ! j enddo ! iblk !$OMP END PARALLEL DO @@ -946,14 +946,14 @@ subroutine write_restart_hbrine() !$OMP PARALLEL DO PRIVATE(iblk,i,j,n,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi do j = jlo, jhi - do i = ilo, ihi + do i = ilo, ihi do n = 1, ncat ! zero out first_ice over land if (tmask(i,j,iblk) .and. first_ice (i,j,n,iblk)) then @@ -992,7 +992,7 @@ subroutine write_restart_bgc() doc, don, dic, fed, fep, zaeros, hum use ice_grid, only: tmask use ice_state, only: trcrn - use ice_flux, only: sss + use ice_flux, only: sss use ice_restart, only: write_restart_field ! local variables @@ -1011,27 +1011,27 @@ subroutine write_restart_bgc() nt_bgc_DMSPp, nt_bgc_Nit, nt_bgc_Sil, & nt_bgc_PON, nt_zbgc_frac, nt_bgc_hum, nbtrcr - integer (kind=int_kind), dimension(icepack_max_algae) :: & - nt_bgc_N , & ! diatoms, phaeocystis, pico/small - nt_bgc_C , & ! diatoms, phaeocystis, pico/small - nt_bgc_chl ! 
diatoms, phaeocystis, pico/small + integer (kind=int_kind), dimension(icepack_max_algae) :: & + nt_bgc_N , & ! diatoms, phaeocystis, pico/small + nt_bgc_C , & ! diatoms, phaeocystis, pico/small + nt_bgc_chl ! diatoms, phaeocystis, pico/small - integer (kind=int_kind), dimension(icepack_max_doc) :: & + integer (kind=int_kind), dimension(icepack_max_doc) :: & nt_bgc_DOC ! dissolved organic carbon - integer (kind=int_kind), dimension(icepack_max_don) :: & + integer (kind=int_kind), dimension(icepack_max_don) :: & nt_bgc_DON ! dissolved organic nitrogen - integer (kind=int_kind), dimension(icepack_max_dic) :: & + integer (kind=int_kind), dimension(icepack_max_dic) :: & nt_bgc_DIC ! dissolved inorganic carbon - integer (kind=int_kind), dimension(icepack_max_fe) :: & + integer (kind=int_kind), dimension(icepack_max_fe) :: & nt_bgc_Fed, & ! dissolved iron nt_bgc_Fep ! particulate iron - integer (kind=int_kind), dimension(icepack_max_aero) :: & + integer (kind=int_kind), dimension(icepack_max_aero) :: & nt_zaero ! black carbon and other aerosols - + logical (kind=log_kind) :: tr_bgc_Nit, tr_bgc_Am, tr_bgc_Sil,& tr_bgc_DMS, tr_bgc_PON, tr_bgc_N, tr_bgc_C, & tr_bgc_DON, tr_bgc_Fe, tr_zaero , tr_bgc_chl, & @@ -1102,19 +1102,19 @@ subroutine write_restart_bgc() !----------------------------------------------------------------- ! Salinity and extras !----------------------------------------------------------------- - if (solve_zsal) then + if (solve_zsal) then do k = 1,nblyr write(nchar,'(i3.3)') k call write_restart_field(nu_dump_bgc,0,trcrn(:,:,nt_bgc_S+k-1,:,:),'ruf8', & 'zSalinity'//trim(nchar),ncat,diag) enddo - + call write_restart_field(nu_dump_bgc,0,sss,'ruf8','sss',1,diag) !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -1188,7 +1188,7 @@ subroutine write_restart_bgc() if (tr_bgc_PON) & call write_restart_field(nu_dump_bgc,0,trcrn(:,:,nt_bgc_PON,:,:), & 'ruf8','bgc_PON',ncat,diag) - + if (tr_bgc_DON) then do k = 1, n_don write(nchar,'(i3.3)') k @@ -1197,19 +1197,19 @@ subroutine write_restart_bgc() enddo endif if (tr_bgc_Fe ) then - do k = 1, n_fed + do k = 1, n_fed write(nchar,'(i3.3)') k call write_restart_field(nu_dump_bgc,0,trcrn(:,:,nt_bgc_Fed (k),:,:), & 'ruf8','bgc_Fed'//trim(nchar),ncat,diag) enddo - do k = 1, n_fep + do k = 1, n_fep write(nchar,'(i3.3)') k call write_restart_field(nu_dump_bgc,0,trcrn(:,:,nt_bgc_Fep (k),:,:), & 'ruf8','bgc_Fep'//trim(nchar),ncat,diag) enddo endif - else + else !----------------------------------------------------------------- ! 
Z layer BGC @@ -1380,7 +1380,7 @@ subroutine write_restart_bgc() write(nchar,'(i3.3)') k call write_restart_field(nu_dump_bgc,0,dic(:,:,k,:),'ruf8','dic'//trim(nchar),1,diag) enddo !k - endif + endif if (tr_bgc_Nit) & call write_restart_field(nu_dump_bgc,0,nit, 'ruf8','nit', 1,diag) if (tr_bgc_Am) & @@ -1433,7 +1433,7 @@ subroutine read_restart_bgc() use ice_domain_size, only: ncat, n_algae, n_doc, n_dic,& n_don, n_zaero, n_fed, n_fep use ice_fileunits, only: nu_restart_bgc - use ice_flux, only: sss + use ice_flux, only: sss use ice_flux_bgc, only: nit, amm, sil, dmsp, dms, algalN, & doc, don, dic, fed, fep, zaeros, hum use ice_state, only: trcrn @@ -1456,27 +1456,27 @@ subroutine read_restart_bgc() nt_bgc_DMSPp, nt_bgc_Nit, nt_bgc_Sil, & nt_bgc_PON, nt_zbgc_frac, nt_bgc_hum, nbtrcr - integer (kind=int_kind), dimension(icepack_max_algae) :: & - nt_bgc_N , & ! diatoms, phaeocystis, pico/small - nt_bgc_C , & ! diatoms, phaeocystis, pico/small - nt_bgc_chl ! diatoms, phaeocystis, pico/small + integer (kind=int_kind), dimension(icepack_max_algae) :: & + nt_bgc_N , & ! diatoms, phaeocystis, pico/small + nt_bgc_C , & ! diatoms, phaeocystis, pico/small + nt_bgc_chl ! diatoms, phaeocystis, pico/small - integer (kind=int_kind), dimension(icepack_max_doc) :: & + integer (kind=int_kind), dimension(icepack_max_doc) :: & nt_bgc_DOC ! dissolved organic carbon - integer (kind=int_kind), dimension(icepack_max_don) :: & + integer (kind=int_kind), dimension(icepack_max_don) :: & nt_bgc_DON ! dissolved organic nitrogen - integer (kind=int_kind), dimension(icepack_max_dic) :: & + integer (kind=int_kind), dimension(icepack_max_dic) :: & nt_bgc_DIC ! dissolved inorganic carbon - integer (kind=int_kind), dimension(icepack_max_fe) :: & + integer (kind=int_kind), dimension(icepack_max_fe) :: & nt_bgc_Fed, & ! dissolved iron nt_bgc_Fep ! particulate iron - integer (kind=int_kind), dimension(icepack_max_aero) :: & + integer (kind=int_kind), dimension(icepack_max_aero) :: & nt_zaero ! black carbon and other aerosols - + logical (kind=log_kind) :: tr_bgc_Nit, tr_bgc_Am, tr_bgc_Sil,& tr_bgc_DMS, tr_bgc_PON, tr_bgc_N, tr_bgc_C, & tr_bgc_DON, tr_bgc_Fe, tr_zaero , tr_bgc_chl, & @@ -1514,7 +1514,7 @@ subroutine read_restart_bgc() ! Salinity and extras !----------------------------------------------------------------- - if (restart_zsal) then + if (restart_zsal) then if (my_task == master_task) write(nu_diag,*) subname,'zSalinity restart' do k = 1,nblyr @@ -1522,21 +1522,21 @@ subroutine read_restart_bgc() call read_restart_field(nu_restart_bgc,0,trcrn(:,:,nt_bgc_S+k-1,:,:),'ruf8', & 'zSalinity'//trim(nchar),ncat,diag,field_loc_center,field_type_scalar) enddo - + if (my_task == master_task) write(nu_diag,*) subname,'sea surface salinity' call read_restart_field(nu_restart_bgc,0,sss,'ruf8','sss',1,diag) call read_restart_field(nu_restart_bgc,0,Rayleigh_real,'ruf8','Rayleigh',1,diag) !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi do j = jlo, jhi - do i = ilo, ihi + do i = ilo, ihi if (Rayleigh_real (i,j,iblk) .GE. c1) then Rayleigh_criteria (i,j,iblk) = .true. 
elseif (Rayleigh_real (i,j,iblk) < c1) then @@ -1618,13 +1618,13 @@ subroutine read_restart_bgc() enddo endif if (tr_bgc_Fe) then - do k = 1, n_fed + do k = 1, n_fed write(nchar,'(i3.3)') k call read_restart_field(nu_restart_bgc,0, & trcrn(:,:,nt_bgc_Fed (k),:,:), & 'ruf8','bgc_Fed'//trim(nchar),ncat,diag) enddo - do k = 1, n_fep + do k = 1, n_fep write(nchar,'(i3.3)') k call read_restart_field(nu_restart_bgc,0, & trcrn(:,:,nt_bgc_Fep (k),:,:), & @@ -1862,7 +1862,7 @@ subroutine read_restart_bgc() enddo !k endif endif ! restart_bgc - + end subroutine read_restart_bgc !======================================================================= diff --git a/cicecore/shared/ice_restart_shared.F90 b/cicecore/shared/ice_restart_shared.F90 index 6578ef3ad..7c178fec0 100644 --- a/cicecore/shared/ice_restart_shared.F90 +++ b/cicecore/shared/ice_restart_shared.F90 @@ -41,7 +41,7 @@ module ice_restart_shared integer function lenstr(label) - character*(*) label + character(len=*) :: label character(len=*),parameter :: subname='(lenstr)' diff --git a/cicecore/shared/ice_spacecurve.F90 b/cicecore/shared/ice_spacecurve.F90 index 931b2312b..205c50e77 100644 --- a/cicecore/shared/ice_spacecurve.F90 +++ b/cicecore/shared/ice_spacecurve.F90 @@ -4,8 +4,8 @@ module ice_spacecurve ! !DESCRIPTION: -! This module contains routines necessary to -! create space-filling curves. +! This module contains routines necessary to +! create space-filling curves. ! ! !REVISION HISTORY: ! @@ -22,7 +22,7 @@ module ice_spacecurve implicit none private -! !PUBLIC TYPES: +! !PUBLIC TYPES: type, public :: factor_t integer(int_kind) :: numfact ! The # of factors for a value @@ -30,7 +30,7 @@ module ice_spacecurve integer(int_kind), dimension(:), pointer :: used end type -! !PUBLIC MEMBER FUNCTIONS: +! !PUBLIC MEMBER FUNCTIONS: public :: GenSpaceCurve @@ -53,11 +53,10 @@ module ice_spacecurve FindandMark integer(int_kind), dimension(:,:), allocatable :: & - dir, &! direction to move along each level - ordered ! the ordering + ordered ! the ordering integer(int_kind), dimension(:), allocatable :: & pos ! position along each of the axes - + integer(int_kind) :: & maxdim, &! dimensionality of entire space vcnt ! visitation count @@ -68,7 +67,7 @@ module ice_spacecurve !EOC !*********************************************************************** -contains +contains !*********************************************************************** !BOP @@ -79,19 +78,19 @@ recursive function Cinco(l,type,ma,md,ja,jd) result(ierr) ! !DESCRIPTION: ! This subroutine implements a Cinco space-filling curve. -! Cinco curves connect a Nb x Nb block of points where +! Cinco curves connect a Nb x Nb block of points where ! -! Nb = 5^p +! Nb = 5^p ! ! !REVISION HISTORY: ! same as module ! -! !INPUT PARAMETERS +! !INPUT PARAMETERS integer(int_kind), intent(in) :: & - l, & ! level of the space-filling curve + l, & ! level of the space-filling curve type, & ! type of SFC curve ma, & ! Major axis [0,1] md, & ! direction of major axis [-1,1] @@ -115,8 +114,8 @@ recursive function Cinco(l,type,ma,md,ja,jd) result(ierr) lmd, &! local major direction (next level) lja, &! local joiner axis (next level) ljd, &! local joiner direction (next level) - ltype, &! type of SFC on next level - ll ! next level down + ltype, &! type of SFC on next level + ll ! next level down character(len=*),parameter :: subname='(Cinco)' @@ -589,8 +588,8 @@ end function Cinco recursive function PeanoM(l,type,ma,md,ja,jd) result(ierr) ! !DESCRIPTION: -! 
This function implements a meandering Peano -! space-filling curve. A meandering Peano curve +! This function implements a meandering Peano +! space-filling curve. A meandering Peano curve ! connects a Nb x Nb block of points where ! ! Nb = 3^p @@ -947,8 +946,8 @@ end function hilbert function IncrementCurve(ja,jd) result(ierr) ! !DESCRIPTION: -! This function creates the curve which is stored in the -! the ordered array. The curve is implemented by +! This function creates the curve which is stored in the +! the ordered array. The curve is implemented by ! incrementing the curve in the direction [jd] of axis [ja]. ! ! !REVISION HISTORY: @@ -990,7 +989,7 @@ end function IncrementCurve function log2( n) ! !DESCRIPTION: -! This function calculates the log2 of its integer +! This function calculates the log2 of its integer ! input. ! ! !REVISION HISTORY: @@ -999,8 +998,8 @@ function log2( n) ! !INPUT PARAMETERS: integer(int_kind), intent(in) :: n ! integer value to find the log2 - -! !OUTPUT PARAMETERS: + +! !OUTPUT PARAMETERS: integer(int_kind) :: log2 @@ -1030,10 +1029,10 @@ function log2( n) else ! n > 1 log2 = 1 tmp =n - do while (tmp > 1 .and. tmp/2 .ne. 1) + do while (tmp > 1 .and. tmp/2 .ne. 1) tmp=tmp/2 log2=log2+1 - enddo + enddo endif !EOP @@ -1048,9 +1047,9 @@ end function log2 ! !INTERFACE: function IsLoadBalanced(nelem,npart) - + ! !DESCRIPTION: -! This function determines if we can create +! This function determines if we can create ! a perfectly load-balanced partitioning. ! ! !REVISION HISTORY: @@ -1063,7 +1062,7 @@ function IsLoadBalanced(nelem,npart) npart ! size of partition ! !OUTPUT PARAMETERS: - logical :: IsLoadBalanced ! .TRUE. if a perfectly load balanced + logical :: IsLoadBalanced ! .TRUE. if a perfectly load balanced ! partition is possible !EOP !BOC @@ -1080,7 +1079,7 @@ function IsLoadBalanced(nelem,npart) !----------------------------------------------------------------------- tmp1 = nelem/npart - if (npart*tmp1 == nelem ) then + if (npart*tmp1 == nelem ) then IsLoadBalanced=.TRUE. else IsLoadBalanced=.FALSE. @@ -1129,7 +1128,7 @@ function GenCurve(l,type,ma,md,ja,jd) result(ierr) !----------------------------------------------------------------------- !------------------------------------------------- - ! create the space-filling curve on the next level + ! create the space-filling curve on the next level !------------------------------------------------- if(type == 2) then @@ -1140,7 +1139,7 @@ function GenCurve(l,type,ma,md,ja,jd) result(ierr) if (f3 .and. my_task == master_task) write(nu_diag,*) subname,' calling PeanoM (3)' ierr = PeanoM(l,type,ma,md,ja,jd) f3 = .false. - elseif ( type == 5) then + elseif ( type == 5) then if (f5 .and. my_task == master_task) write(nu_diag,*) subname,' calling Cinco (5)' ierr = Cinco(l,type,ma,md,ja,jd) f5 = .false. @@ -1263,7 +1262,7 @@ end subroutine PrintFactor function Factor(num) result(res) ! !DESCRIPTION: -! This function factors the input value num into a +! This function factors the input value num into a ! product of 2,3, and 5. ! ! !REVISION HISTORY: @@ -1350,8 +1349,8 @@ function Factor(num) result(res) enddo !------------------------------------ - ! make sure that the input value - ! only contains factors of 2,3,and 5 + ! make sure that the input value + ! only contains factors of 2,3,and 5 !------------------------------------ tmp=1 do i=1,n @@ -1373,10 +1372,10 @@ end function Factor ! !INTERFACE: function IsFactorable(n) - + ! !DESCRIPTION: ! This function determines if we can factor -! n into 2,3,and 5. +! 
n into 2,3,and 5. ! ! !REVISION HISTORY: ! same as module @@ -1420,7 +1419,7 @@ end function IsFactorable subroutine map(l) ! !DESCRIPTION: -! Interface routine between internal subroutines and public +! Interface routine between internal subroutines and public ! subroutines. ! ! !REVISION HISTORY: @@ -1471,7 +1470,7 @@ subroutine PrintCurve(Mesh) ! !DESCRIPTION: -! This subroutine prints the several low order +! This subroutine prints the several low order ! space-filling curves in an easy to read format ! ! !REVISION HISTORY: @@ -1693,7 +1692,7 @@ end subroutine PrintCurve subroutine GenSpaceCurve(Mesh) ! !DESCRIPTION: -! This subroutine is the public interface into the +! This subroutine is the public interface into the ! space-filling curve functionality ! ! !REVISION HISTORY: @@ -1717,7 +1716,7 @@ subroutine GenSpaceCurve(Mesh) dim ! dimension of SFC... currently limited to 2D integer(int_kind) :: gridsize ! number of points on a side - + character(len=*),parameter :: subname='(GenSpaceCurve)' !----------------------------------------------------------------------- @@ -1743,19 +1742,19 @@ subroutine GenSpaceCurve(Mesh) ! Setup the working arrays for the traversal !-------------------------------------------- allocate(pos(0:dim-1)) - + !----------------------------------------------------- ! The array ordered will contain the visitation order !----------------------------------------------------- ordered(:,:) = 0 - call map(level) + call map(level) Mesh(:,:) = ordered(:,:) deallocate(pos,ordered) - end subroutine GenSpaceCurve + end subroutine GenSpaceCurve !EOC !----------------------------------------------------------------------- diff --git a/configuration/scripts/cice.batch.csh b/configuration/scripts/cice.batch.csh index 43ce00010..fbe172f51 100755 --- a/configuration/scripts/cice.batch.csh +++ b/configuration/scripts/cice.batch.csh @@ -11,43 +11,7 @@ endif set jobfile = $1 -set ntasks = ${ICE_NTASKS} -set nthrds = ${ICE_NTHRDS} -set maxtpn = ${ICE_MACHINE_TPNODE} -set acct = ${ICE_ACCOUNT} - -@ ncores = ${ntasks} * ${nthrds} -@ taskpernode = ${maxtpn} / $nthrds -if (${taskpernode} == 0) set taskpernode = 1 -@ nnodes = ${ntasks} / ${taskpernode} -if (${nnodes} * ${taskpernode} < ${ntasks}) @ nnodes = $nnodes + 1 -set taskpernodelimit = ${taskpernode} -if (${taskpernodelimit} > ${ntasks}) set taskpernodelimit = ${ntasks} -@ corespernode = ${taskpernodelimit} * ${nthrds} - -set ptile = $taskpernode -if ($ptile > ${maxtpn} / 2) @ ptile = ${maxtpn} / 2 - -set runlength = ${ICE_RUNLENGTH} -if ($?ICE_MACHINE_MAXRUNLENGTH) then - if (${runlength} > ${ICE_MACHINE_MAXRUNLENGTH}) then - set runlength = ${ICE_MACHINE_MAXRUNLENGTH} - endif -endif - -set queue = "${ICE_QUEUE}" -set batchtime = "00:15:00" -if (${runlength} == 0) set batchtime = "00:29:00" -if (${runlength} == 1) set batchtime = "00:59:00" -if (${runlength} == 2) set batchtime = "2:00:00" -if (${runlength} == 3) set batchtime = "3:00:00" -if (${runlength} == 4) set batchtime = "4:00:00" -if (${runlength} == 5) set batchtime = "5:00:00" -if (${runlength} == 6) set batchtime = "6:00:00" -if (${runlength} == 7) set batchtime = "7:00:00" -if (${runlength} >= 8) set batchtime = "8:00:00" - -set shortcase = `echo ${ICE_CASENAME} | cut -c1-15` +source ${ICE_SCRIPTS}/setup_machparams.csh #========================================== @@ -284,7 +248,7 @@ cat >> ${jobfile} << EOFB #SBATCH --nodes ${nnodes} #SBATCH --ntasks ${ntasks} #SBATCH --cpus-per-task ${nthrds} -#SBATCH --mem-per-cpu=5G +#SBATCH --mem-per-cpu=${batchmem}G #SBATCH 
--comment="image=eccc/eccc_all_default_ubuntu-18.04-amd64_latest" EOFB diff --git a/configuration/scripts/cice.launch.csh b/configuration/scripts/cice.launch.csh index a4b6ca37d..bc9ff2b99 100755 --- a/configuration/scripts/cice.launch.csh +++ b/configuration/scripts/cice.launch.csh @@ -8,18 +8,7 @@ echo "running cice.launch.csh" set jobfile = $1 -set ntasks = ${ICE_NTASKS} -set nthrds = ${ICE_NTHRDS} -set maxtpn = ${ICE_MACHINE_TPNODE} - -@ ncores = ${ntasks} * ${nthrds} -@ taskpernode = ${maxtpn} / $nthrds -if (${taskpernode} == 0) set taskpernode = 1 -@ nnodes = ${ntasks} / ${taskpernode} -if (${nnodes} * ${taskpernode} < ${ntasks}) @ nnodes = $nnodes + 1 -set taskpernodelimit = ${taskpernode} -if (${taskpernodelimit} > ${ntasks}) set taskpernodelimit = ${ntasks} -@ corespernode = ${taskpernodelimit} * ${nthrds} +source ${ICE_SCRIPTS}/setup_machparams.csh #========================================== if (${ICE_MACHINE} =~ cheyenne*) then diff --git a/configuration/scripts/cice.settings b/configuration/scripts/cice.settings index 9b57aab3f..76ae6ad9e 100644 --- a/configuration/scripts/cice.settings +++ b/configuration/scripts/cice.settings @@ -32,6 +32,7 @@ setenv ICE_BFBCOMP undefined setenv ICE_BFBTYPE restart setenv ICE_SPVAL undefined setenv ICE_RUNLENGTH -1 +setenv ICE_MEMUSE -1 setenv ICE_ACCOUNT undefined setenv ICE_QUEUE undefined diff --git a/configuration/scripts/machines/Macros.cheyenne_gnu b/configuration/scripts/machines/Macros.cheyenne_gnu index 082130f77..5d3859ec8 100644 --- a/configuration/scripts/machines/Macros.cheyenne_gnu +++ b/configuration/scripts/machines/Macros.cheyenne_gnu @@ -12,7 +12,7 @@ FFLAGS := -fconvert=big-endian -fbacktrace -ffree-line-length-none FFLAGS_NOOPT:= -O0 ifeq ($(ICE_BLDDEBUG), true) - FFLAGS += -O0 -g -fcheck=bounds -finit-real=nan -fimplicit-none -ffpe-trap=invalid,zero,overflow + FFLAGS += -O0 -g -fcheck=bounds -finit-real=nan -fimplicit-none -ffpe-trap=invalid,zero,overflow --std f2008 # FFLAGS += -O0 -g -fcheck=all -finit-real=snan -fimplicit-none -ffpe-trap=invalid,zero,overflow CFLAGS += -O0 endif diff --git a/configuration/scripts/machines/Macros.cheyenne_intel b/configuration/scripts/machines/Macros.cheyenne_intel index 52fc07ebb..6fb3a002a 100644 --- a/configuration/scripts/machines/Macros.cheyenne_intel +++ b/configuration/scripts/machines/Macros.cheyenne_intel @@ -13,7 +13,7 @@ FFLAGS_NOOPT:= -O0 ifeq ($(ICE_BLDDEBUG), true) # FFLAGS += -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created -link_mpi=dbg - FFLAGS += -O0 -g -check all -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -link_mpi=dbg + FFLAGS += -O0 -g -check all -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -link_mpi=dbg -stand f08 # FFLAGS += -O0 -g -check all -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -init=snan,arrays -link_mpi=dbg else FFLAGS += -O2 diff --git a/configuration/scripts/machines/env.cheyenne_gnu b/configuration/scripts/machines/env.cheyenne_gnu index c962c35f3..fb29543f8 100755 --- a/configuration/scripts/machines/env.cheyenne_gnu +++ b/configuration/scripts/machines/env.cheyenne_gnu @@ -31,8 +31,9 @@ endif if ($?ICE_BFBTYPE) then if ($ICE_BFBTYPE =~ qcchk*) then - module load python - source /glade/u/apps/opt/ncar_pylib/ncar_pylib.csh default + module load conda +# conda env create -f ../../configuration/scripts/tests/qctest.yml + conda activate qctest endif endif diff --git a/configuration/scripts/machines/env.cheyenne_intel b/configuration/scripts/machines/env.cheyenne_intel index 
89a8920b6..2c6eedec6 100755 --- a/configuration/scripts/machines/env.cheyenne_intel +++ b/configuration/scripts/machines/env.cheyenne_intel @@ -31,8 +31,9 @@ endif if ($?ICE_BFBTYPE) then if ($ICE_BFBTYPE =~ qcchk*) then - module load python - source /glade/u/apps/opt/ncar_pylib/ncar_pylib.csh default + module load conda +# conda env create -f ../../configuration/scripts/tests/qctest.yml + conda activate qctest endif endif diff --git a/configuration/scripts/machines/env.cheyenne_pgi b/configuration/scripts/machines/env.cheyenne_pgi index 5caa9d992..e6e339f08 100755 --- a/configuration/scripts/machines/env.cheyenne_pgi +++ b/configuration/scripts/machines/env.cheyenne_pgi @@ -31,8 +31,9 @@ endif if ($?ICE_BFBTYPE) then if ($ICE_BFBTYPE =~ qcchk*) then - module load python - source /glade/u/apps/opt/ncar_pylib/ncar_pylib.csh default + module load conda +# conda env create -f ../../configuration/scripts/tests/qctest.yml + conda activate qctest endif endif diff --git a/configuration/scripts/options/set_env.memlarge new file mode 100644 index 000000000..2572e3ae7 --- /dev/null +++ b/configuration/scripts/options/set_env.memlarge @@ -0,0 +1,2 @@ +setenv ICE_MEMUSE 8 + diff --git a/configuration/scripts/options/set_env.memmed new file mode 100644 index 000000000..5d7169268 --- /dev/null +++ b/configuration/scripts/options/set_env.memmed @@ -0,0 +1,2 @@ +setenv ICE_MEMUSE 4 + diff --git a/configuration/scripts/options/set_env.memsmall new file mode 100644 index 000000000..dc9e3c1ee --- /dev/null +++ b/configuration/scripts/options/set_env.memsmall @@ -0,0 +1,2 @@ +setenv ICE_MEMUSE 1 + diff --git a/configuration/scripts/setup_machparams.csh new file mode 100755 index 000000000..db9f00244 --- /dev/null +++ b/configuration/scripts/setup_machparams.csh @@ -0,0 +1,64 @@ +#!/bin/csh -f + +# inputs +# mpi tasks +set ntasks = ${ICE_NTASKS} +# threads +set nthrds = ${ICE_NTHRDS} +# max tasks per node +set maxtpn = ${ICE_MACHINE_TPNODE} +# batch charge account +set acct = ${ICE_ACCOUNT} + +# compute total cores needed and distribution of cores on nodes +# ncores = total cores needed (tasks * threads) +# taskpernode = number of MPI tasks per node based on size of node and threads +# nnodes = number of total nodes needed based on tasks/threads +# taskpernodelimit = min(taskpernode, ntasks), when using less than 1 node +# corespernode = number of cores per node used +@ ncores = ${ntasks} * ${nthrds} +@ taskpernode = ${maxtpn} / $nthrds +if (${taskpernode} == 0) set taskpernode = 1 +@ nnodes = ${ntasks} / ${taskpernode} +if (${nnodes} * ${taskpernode} < ${ntasks}) @ nnodes = $nnodes + 1 +set taskpernodelimit = ${taskpernode} +if (${taskpernodelimit} > ${ntasks}) set taskpernodelimit = ${ntasks} +@ corespernode = ${taskpernodelimit} * ${nthrds} + +set runlength = ${ICE_RUNLENGTH} +if ($?ICE_MACHINE_MAXRUNLENGTH) then + if (${runlength} > ${ICE_MACHINE_MAXRUNLENGTH}) then + set runlength = ${ICE_MACHINE_MAXRUNLENGTH} + endif +endif + +set memuse = ${ICE_MEMUSE} +if ($?ICE_MACHINE_MAXMEMUSE) then + if (${memuse} > ${ICE_MACHINE_MAXMEMUSE}) then + set memuse = ${ICE_MACHINE_MAXMEMUSE} + endif +endif + +set queue = "${ICE_QUEUE}" +set batchtime = "00:15:00" +if (${runlength} == 0) set batchtime = "00:29:00" +if (${runlength} == 1) set batchtime = "00:59:00" +if (${runlength} == 2) set batchtime = "2:00:00" +if (${runlength} == 3) 
set batchtime = "3:00:00" +if (${runlength} == 4) set batchtime = "4:00:00" +if (${runlength} == 5) set batchtime = "5:00:00" +if (${runlength} == 6) set batchtime = "6:00:00" +if (${runlength} == 7) set batchtime = "7:00:00" +if (${runlength} >= 8) set batchtime = "8:00:00" +set batchmem = "5" +if (${memuse} == 1) set batchmem = "5" +if (${memuse} == 2) set batchmem = "10" +if (${memuse} == 3) set batchmem = "15" +if (${memuse} == 4) set batchmem = "20" +if (${memuse} == 5) set batchmem = "50" +if (${memuse} == 6) set batchmem = "100" +if (${memuse} == 7) set batchmem = "150" +if (${memuse} >= 8) set batchmem = "200" + +set shortcase = `echo ${ICE_CASENAME} | cut -c1-15` + diff --git a/configuration/scripts/tests/qctest.yml b/configuration/scripts/tests/qctest.yml new file mode 100644 index 000000000..72479a563 --- /dev/null +++ b/configuration/scripts/tests/qctest.yml @@ -0,0 +1,11 @@ +name: qctest +channels: + - conda-forge + - nodefaults +dependencies: +# Python dependencies for plotting scripts + - numpy + - matplotlib-base + - cartopy + - netcdf4 + diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index 500209326..a3f7d11bc 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -5,7 +5,9 @@ Implementation ======================== CICE is written in FORTRAN90 and runs on platforms using UNIX, LINUX, -and other operating systems. The code is based on a two-dimensional +and other operating systems. The current coding standard is Fortran2003 +with use of Fortran2008 feature CONTIGUOUS in the 1d evp solver. +The code is based on a two-dimensional horizontal orthogonal grid that is broken into two-dimensional horizontal blocks and parallelized over blocks with MPI and OpenMP threads. The code also includes some optimizations diff --git a/doc/source/user_guide/ug_running.rst b/doc/source/user_guide/ug_running.rst index aca7d4933..3f3cd3495 100644 --- a/doc/source/user_guide/ug_running.rst +++ b/doc/source/user_guide/ug_running.rst @@ -535,10 +535,10 @@ On macOS: .. code-block:: bash - # Download the Miniconda installer to ~/Downloads/miniconda.sh - curl -L https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -o ~/Downloads/miniconda.sh + # Download the Miniconda installer to ~/miniconda.sh + curl -L https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -o ~/miniconda.sh # Install Miniconda - bash ~/Downloads/miniconda.sh + bash ~/miniconda.sh # Follow the prompts diff --git a/doc/source/user_guide/ug_testing.rst b/doc/source/user_guide/ug_testing.rst index 05a16a6fb..284de72f1 100644 --- a/doc/source/user_guide/ug_testing.rst +++ b/doc/source/user_guide/ug_testing.rst @@ -1052,6 +1052,13 @@ To install the necessary Python packages, the ``pip`` Python utility can be used pip install --user matplotlib pip install --user cartopy +You can also setup a conda env with the same utitities + +.. code-block:: bash + + conda env create -f configuration/scripts/tests/qctest.yml + conda activate qctest + To run the validation test, setup a baseline run with the original baseline model and then a perturbation run based on recent model changes. Use ``--set qc`` in both runs in addition to other settings needed. 
Then use the QC script to compare history output, diff --git a/icepack b/icepack index 4fea17c15..3a039e598 160000 --- a/icepack +++ b/icepack @@ -1 +1 @@ -Subproject commit 4fea17c15fb63e1424cd71c0ef4365e2135d32db +Subproject commit 3a039e598e6395333a278bb1822f03e9bc954ac6
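The new setup_machparams.csh introduced above centralizes the node and task arithmetic that cice.batch.csh and cice.launch.csh previously duplicated, and adds the ICE_MEMUSE to batchmem mapping consumed by the --mem-per-cpu batch directive. Its core is ceiling division written in csh idiom; a minimal Fortran sketch of the same logic follows, with made-up inputs standing in for the ICE_* environment variables rather than values read from any real case:

    program machparams_sketch
       implicit none
       integer :: ntasks, nthrds, maxtpn
       integer :: ncores, taskpernode, taskpernodelimit, nnodes
       ntasks = 80   ! stands in for ICE_NTASKS: MPI tasks
       nthrds = 2    ! stands in for ICE_NTHRDS: OpenMP threads per task
       maxtpn = 36   ! stands in for ICE_MACHINE_TPNODE: cores per node
       ncores = ntasks*nthrds                 ! 160 cores requested in total
       taskpernode = max(maxtpn/nthrds, 1)    ! 18 tasks fit on each node
       nnodes = ntasks/taskpernode            ! truncates to 4 ...
       if (nnodes*taskpernode < ntasks) nnodes = nnodes + 1  ! ... rounded up to 5
       taskpernodelimit = min(taskpernode, ntasks)  ! capped when under one node
       print *, 'nnodes =', nnodes, ' ncores =', ncores
    end program machparams_sketch

The min on the taskpernodelimit line is the behavior the script expresses with its if test; it only matters when a case requests fewer tasks than fit on a single node.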