From 0809cde2250858b5f33d09db7cb764c33ff398fa Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Thu, 16 Dec 2021 14:01:36 -0800 Subject: [PATCH] Add grid_ocn, grid_atm feature (#47) * - Refactor strair and strocn implementation a bit - Note strocnxT, strocnyT are per ice area and different units from other strocn[x,y] variables. Added task to C-grid clean issue. - Remove strocnxT_f, strocnyT_f from dyn_finish interface and compute separately - Remove strairxU/T, strairyU/T arguments from dyn_prep1, compute strairxU, strairyU more cleanly - Update variable documentation in code - Remove straxE, strayE, straxN, strayN, probably won't need - Update grid_average_X2Y - Add grid_average_X2Y for 'NE2T' and 'NE2U' as an overloaded interface - Add grid_average_X2Y "A" implementation which is unmasked normalized weighted average. This is like S but ignores masks - Add grid_average_X2Y explicit implementation (dir,work1,wght1,mask1,work2) for 'S' and 'A' averaging options - Eliminate "in-place" operation, not really needed, can only cause confusion, require all averaging from one variable to a different variable. - Update gridavgchk unit test to test new grid_average_X2Y options - Identify bug related to location of uocn,vocn variables. Used on U grid but variables on T grid. Added task to C-grid clean issue. 
* - Add grid_atm and grid_ocn to namelist as well as grid_*_[thrm,dynu,dynv] to support flexible grid definitions - Refactor grid_average_X2Y interfaces to better support flexibility wrt grids - Update gridavgchk unit test * - Update [u,v]ocn, ss_tlt[x,y] implementation to improve flexibility and use grid_atm and grid_ocn info - Migrate averaging and memory allocation of U, N, E fields to dynamics - Update history capability to support grid_atm and grid_ocn values for some history variables * - Update u/vocn and ss_tltx/y usage to support the grid_ocn value - Fix bug in init/get_forcing_ocn, affects only cases with atm_data_type=box2001 and ocn_data_type/=box2001 - Update set_nml files for box cases to more clearly specify atm/ocn/ice_data_type and define grid_ocn where needed * update documentation * recover lost grid_file initialization line * update uatm, vatm handling * - Update strax, stray implementation to support flexible grid interpolation - Update grid_average_X2Y internal interface names - Update documentation * - Rename grid_system to grid_ice - Update documentation * rename grid_system to grid_ice * rename grid_system to grid_ice * update comment for uocn, vocn --- .../cicedynB/analysis/ice_diagnostics.F90 | 14 +- cicecore/cicedynB/analysis/ice_history.F90 | 127 ++- cicecore/cicedynB/dynamics/ice_dyn_eap.F90 | 67 +- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 205 +++-- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 60 +- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 69 +- .../dynamics/ice_transport_driver.F90 | 12 +- .../cicedynB/dynamics/ice_transport_remap.F90 | 6 +- cicecore/cicedynB/general/ice_flux.F90 | 99 +-- cicecore/cicedynB/general/ice_forcing.F90 | 24 +- cicecore/cicedynB/general/ice_init.F90 | 109 ++- cicecore/cicedynB/general/ice_step_mod.F90 | 37 +- cicecore/cicedynB/infrastructure/ice_grid.F90 | 838 +++++++++++++++--- .../infrastructure/ice_restart_driver.F90 | 8 +- .../io/io_netcdf/ice_restart.F90 | 4 +- 
.../infrastructure/io/io_pio2/ice_restart.F90 | 4 +- .../drivers/direct/hadgem3/CICE_RunMod.F90 | 4 +- .../direct/nemo_concepts/CICE_RunMod.F90 | 4 +- cicecore/drivers/mct/cesm1/CICE_RunMod.F90 | 4 +- .../drivers/mct/cesm1/ice_import_export.F90 | 16 +- cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 | 4 +- .../drivers/nuopc/cmeps/ice_import_export.F90 | 16 +- cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 | 4 +- cicecore/drivers/nuopc/dmi/cice_cap.info | 13 +- .../drivers/standalone/cice/CICE_RunMod.F90 | 4 +- .../unittest/gridavgchk/gridavgchk.F90 | 436 +++++++-- configuration/scripts/ice_in | 4 +- configuration/scripts/options/set_nml.box2001 | 2 + configuration/scripts/options/set_nml.boxadv | 4 + .../scripts/options/set_nml.boxislandse | 1 + .../scripts/options/set_nml.boxislandsn | 1 + .../scripts/options/set_nml.boxislandsne | 1 + .../scripts/options/set_nml.boxnodyn | 1 + .../scripts/options/set_nml.boxrestore | 4 + .../scripts/options/set_nml.boxslotcyl | 4 + configuration/scripts/options/set_nml.gbox128 | 2 + configuration/scripts/options/set_nml.gridb | 2 +- configuration/scripts/options/set_nml.gridcd | 2 +- doc/source/cice_index.rst | 13 +- doc/source/master_list.bib | 24 +- doc/source/user_guide/ug_case_settings.rst | 13 +- doc/source/user_guide/ug_implementation.rst | 116 +++ 42 files changed, 1800 insertions(+), 582 deletions(-) diff --git a/cicecore/cicedynB/analysis/ice_diagnostics.F90 b/cicecore/cicedynB/analysis/ice_diagnostics.F90 index 1b9f70044..d07cc0a93 100644 --- a/cicecore/cicedynB/analysis/ice_diagnostics.F90 +++ b/cicecore/cicedynB/analysis/ice_diagnostics.F90 @@ -127,7 +127,7 @@ subroutine runtime_diags (dt) alvdr_init, alvdf_init, alidr_init, alidf_init use ice_flux_bgc, only: faero_atm, faero_ocn, fiso_atm, fiso_ocn use ice_global_reductions, only: global_sum, global_sum_prod, global_maxval - use ice_grid, only: lmask_n, lmask_s, tarean, tareas, grid_system + use ice_grid, only: lmask_n, lmask_s, tarean, tareas, grid_ice use ice_state ! 
everything ! tcraig, this is likely to cause circular dependency because ice_prescribed_mod is high level routine #ifdef CESMCOUPLED @@ -297,7 +297,7 @@ subroutine runtime_diags (dt) enddo enddo ! Eventually do energy diagnostic on T points. -! if (grid_system == 'CD') then +! if (grid_ice == 'CD') then ! !$OMP PARALLEL DO PRIVATE(iblk,i,j) ! do iblk = 1, nblocks ! do j = 1, ny_block @@ -403,7 +403,7 @@ subroutine runtime_diags (dt) enddo enddo !$OMP END PARALLEL DO - if (grid_system == 'CD') then + if (grid_ice == 'CD') then !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block @@ -1663,7 +1663,7 @@ end subroutine debug_ice subroutine print_state(plabel,i,j,iblk) - use ice_grid, only: grid_system + use ice_grid, only: grid_ice use ice_blocks, only: block, get_block use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, nfsd @@ -1790,7 +1790,7 @@ subroutine print_state(plabel,i,j,iblk) write(nu_diag,*) 'uvel(i,j)',uvel(i,j,iblk) write(nu_diag,*) 'vvel(i,j)',vvel(i,j,iblk) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then write(nu_diag,*) 'uvelE(i,j)',uvelE(i,j,iblk) write(nu_diag,*) 'vvelE(i,j)',vvelE(i,j,iblk) write(nu_diag,*) 'uvelN(i,j)',uvelN(i,j,iblk) @@ -1843,7 +1843,7 @@ end subroutine print_state subroutine print_points_state(plabel,ilabel) - use ice_grid, only: grid_system + use ice_grid, only: grid_ice use ice_blocks, only: block, get_block use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr @@ -1940,7 +1940,7 @@ subroutine print_points_state(plabel,ilabel) write(nu_diag,*) trim(llabel),'uvel=',uvel(i,j,iblk) write(nu_diag,*) trim(llabel),'vvel=',vvel(i,j,iblk) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then write(nu_diag,*) trim(llabel),'uvelE=',uvelE(i,j,iblk) write(nu_diag,*) trim(llabel),'vvelE=',vvelE(i,j,iblk) write(nu_diag,*) trim(llabel),'uvelN=',uvelN(i,j,iblk) diff --git a/cicecore/cicedynB/analysis/ice_history.F90 
b/cicecore/cicedynB/analysis/ice_history.F90 index e0855aa1c..a9cf22529 100644 --- a/cicecore/cicedynB/analysis/ice_history.F90 +++ b/cicecore/cicedynB/analysis/ice_history.F90 @@ -68,7 +68,9 @@ subroutine init_hist (dt) use ice_domain_size, only: max_blocks, max_nstrm, nilyr, nslyr, nblyr, ncat, nfsd use ice_dyn_shared, only: kdyn use ice_flux, only: mlt_onset, frz_onset, albcnt, snwcnt - use ice_grid, only: grid_system + use ice_grid, only: grid_ice, & + grid_atm_thrm, grid_atm_dynu, grid_atm_dynv, & + grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv use ice_history_shared ! everything use ice_history_mechred, only: init_hist_mechred_2D, init_hist_mechred_3Dc use ice_history_pond, only: init_hist_pond_2D, init_hist_pond_3Dc @@ -94,9 +96,108 @@ subroutine init_hist (dt) integer (kind=int_kind), dimension(max_nstrm) :: & ntmp integer (kind=int_kind) :: nml_error ! namelist i/o error flag + character (len=25) :: & + str2D_gat, str2d_gau, str2d_gav, & ! dimensions for t, u, v atm grid (ga) + str2D_got, str2d_gou, str2d_gov ! dimensions for t, u, v ocn grid (go) + character (len=25) :: & + cstr_gat, cstr_gau, cstr_gav, & ! mask area name for t, u, v atm grid (ga) + cstr_got, cstr_gou, cstr_gov ! mask area name for t, u, v ocn grid (go) character(len=char_len) :: description character(len=*), parameter :: subname = '(init_hist)' + !----------------------------------------------------------------- + ! 
set atm/ocn forcing grids + !----------------------------------------------------------------- + + !--- ATM --- + + if (grid_atm_thrm == 'T') then + str2D_gat = tstr2D + cstr_gat = tcstr + elseif (grid_atm_thrm == 'U') then + str2D_gat = ustr2D + cstr_gat = ucstr + elseif (grid_atm_thrm == 'N') then + str2D_gat = nstr2D + cstr_gat = ncstr + elseif (grid_atm_thrm == 'E') then + str2D_gat = estr2D + cstr_gat = ecstr + endif + + if (grid_atm_dynu == 'T') then + str2D_gau = tstr2D + cstr_gau = tcstr + elseif (grid_atm_dynu == 'U') then + str2D_gau = ustr2D + cstr_gau = ucstr + elseif (grid_atm_dynu == 'N') then + str2D_gau = nstr2D + cstr_gau = ncstr + elseif (grid_atm_dynu == 'E') then + str2D_gau = estr2D + cstr_gau = ecstr + endif + + if (grid_atm_dynv == 'T') then + str2D_gav = tstr2D + cstr_gav = tcstr + elseif (grid_atm_dynv == 'U') then + str2D_gav = ustr2D + cstr_gav = ucstr + elseif (grid_atm_dynv == 'N') then + str2D_gav = nstr2D + cstr_gav = ncstr + elseif (grid_atm_dynv == 'E') then + str2D_gav = estr2D + cstr_gav = ecstr + endif + + !--- OCN --- + + if (grid_ocn_thrm == 'T') then + str2D_got = tstr2D + cstr_got = tcstr + elseif (grid_ocn_thrm == 'U') then + str2D_got = ustr2D + cstr_got = ucstr + elseif (grid_ocn_thrm == 'N') then + str2D_got = nstr2D + cstr_got = ncstr + elseif (grid_ocn_thrm == 'E') then + str2D_got = estr2D + cstr_got = ecstr + endif + + if (grid_ocn_dynu == 'T') then + str2D_gou = tstr2D + cstr_gou = tcstr + elseif (grid_ocn_dynu == 'U') then + str2D_gou = ustr2D + cstr_gou = ucstr + elseif (grid_ocn_dynu == 'N') then + str2D_gou = nstr2D + cstr_gou = ncstr + elseif (grid_ocn_dynu == 'E') then + str2D_gou = estr2D + cstr_gou = ecstr + endif + + if (grid_ocn_dynv == 'T') then + str2D_gov = tstr2D + cstr_gov = tcstr + elseif (grid_ocn_dynv == 'U') then + str2D_gov = ustr2D + cstr_gov = ucstr + elseif (grid_ocn_dynv == 'N') then + str2D_gov = nstr2D + cstr_gov = ncstr + elseif (grid_ocn_dynv == 'E') then + str2D_gov = estr2D + cstr_gov = 
ecstr + endif + + !----------------------------------------------------------------- ! set history dimensions !----------------------------------------------------------------- @@ -279,7 +380,7 @@ subroutine init_hist (dt) f_sispeed = f_CMIP endif - if (grid_system == 'CD') then + if (grid_ice == 'CD') then f_uvelE = f_uvel f_vvelE = f_vvel f_icespdE = f_icespd @@ -684,22 +785,22 @@ subroutine init_hist (dt) "vector direction - coming from", c1, c0, & ns1, f_icedir) - call define_hist_field(n_uatm,"uatm","m/s",ustr2D, ucstr, & + call define_hist_field(n_uatm,"uatm","m/s",str2D_gau, cstr_gau, & "atm velocity (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_uatm) - call define_hist_field(n_vatm,"vatm","m/s",ustr2D, ucstr, & + call define_hist_field(n_vatm,"vatm","m/s",str2D_gav, cstr_gav, & "atm velocity (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_vatm) - call define_hist_field(n_atmspd,"atmspd","m/s",ustr2D, ucstr, & + call define_hist_field(n_atmspd,"atmspd","m/s",str2D_gau, cstr_gau, & "atmosphere wind speed", & "vector magnitude", c1, c0, & ns1, f_atmspd) - call define_hist_field(n_atmdir,"atmdir","deg",ustr2D, ucstr, & + call define_hist_field(n_atmdir,"atmdir","deg",str2D_gau, cstr_gau, & "atmosphere wind direction", & "vector direction - coming from", c1, c0, & ns1, f_atmdir) @@ -754,22 +855,22 @@ subroutine init_hist (dt) "none", c1, c0, & ns1, f_sss) - call define_hist_field(n_uocn,"uocn","m/s",ustr2D, ucstr, & + call define_hist_field(n_uocn,"uocn","m/s",str2D_gou, cstr_gou, & "ocean current (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_uocn) - call define_hist_field(n_vocn,"vocn","m/s",ustr2D, ucstr, & + call define_hist_field(n_vocn,"vocn","m/s",str2D_gov, cstr_gov, & "ocean current (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_vocn) - call define_hist_field(n_ocnspd,"ocnspd","m/s",ustr2D, ucstr, & + call define_hist_field(n_ocnspd,"ocnspd","m/s",str2D_gou, cstr_gou, & "ocean current speed", & 
"vector magnitude", c1, c0, & ns1, f_ocnspd) - call define_hist_field(n_ocndir,"ocndir","deg",ustr2D, ucstr, & + call define_hist_field(n_ocndir,"ocndir","deg",str2D_gou, cstr_gou, & "ocean current direction", & "vector direction - going to", c1, c0, & ns1, f_ocndir) @@ -1200,7 +1301,7 @@ subroutine init_hist (dt) "none", secday*c100, c0, & ns1, f_shear) - select case (grid_system) + select case (grid_ice) case('B') description = ", on U grid (NE corner values)" case ('CD') @@ -1987,7 +2088,7 @@ subroutine accum_hist (dt) use ice_blocks, only: block, get_block, nx_block, ny_block use ice_domain, only: blocks_ice, nblocks use ice_domain_size, only: nfsd - use ice_grid, only: tmask, lmask_n, lmask_s, dxu, dyu, grid_system + use ice_grid, only: tmask, lmask_n, lmask_s, dxu, dyu, grid_ice use ice_calendar, only: new_year, write_history, & write_ic, timesecs, histfreq, nstreams, mmonth, & new_month @@ -4297,7 +4398,7 @@ subroutine accum_hist (dt) !--------------------------------------------------------------- ! 
compute sig1 and sig2 - select case (grid_system) + select case (grid_ice) case('B') call principal_stress (nx_block, ny_block, & stressp_1 (:,:,iblk), & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index 74e08bdcd..6ede5e667 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -134,9 +134,10 @@ subroutine eap (dt) stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_grid, only: tmask, umask, dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, & - tarear, uarear, grid_average_X2Y!, grid_system commented out until implementation of cd-grid + tarear, uarear, grid_average_X2Y, & + grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & - aice_init, aice0, aicen, vicen, strength !, uvelE, vvelN grid_system commented out until implementation of cd-grid + aice_init, aice0, aicen, vicen, strength ! use ice_timers, only: timer_dynamics, timer_bound, & ! ice_timer_start, ice_timer_stop, & ! timer_tmp1, timer_tmp2, timer_tmp3 @@ -165,6 +166,8 @@ subroutine eap (dt) indxuj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + uocnU , & ! i ocean current (m/s) + vocnU , & ! j ocean current (m/s) tmass , & ! total mass of ice and snow (kg/m^2) waterx , & ! for ocean stress calculation, x (m/s) watery , & ! for ocean stress calculation, y (m/s) @@ -191,6 +194,10 @@ subroutine eap (dt) type (block) :: & this_block ! block information for current block + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1, & ! temporary + work2 ! temporary + character(len=*), parameter :: subname = '(eap)' call ice_timer_start(timer_dynamics) ! 
dynamics @@ -238,8 +245,6 @@ subroutine eap (dt) ilo, ihi, jlo, jhi, & aice (:,:,iblk), vice (:,:,iblk), & vsno (:,:,iblk), tmask (:,:,iblk), & - strairxT(:,:,iblk), strairyT(:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & tmass (:,:,iblk), icetmask(:,:,iblk)) enddo ! iblk @@ -254,8 +259,10 @@ subroutine eap (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('T2UF',tmass,umass) - call grid_average_X2Y('T2UF',aice_init, aiu) + call grid_average_X2Y('F',tmass,'T',umass,'U') + call grid_average_X2Y('F',aice_init,'T',aiu,'U') + call grid_average_X2Y('S',uocn,grid_ocn_dynu,uocnU,'U') + call grid_average_X2Y('S',vocn,grid_ocn_dynv,vocnU,'U') !---------------------------------------------------------------- ! Set wind stress to values supplied via NEMO or other forcing @@ -266,16 +273,16 @@ subroutine eap (dt) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (.not. calc_strair) then - strairx(:,:,:) = strax(:,:,:) - strairy(:,:,:) = stray(:,:,:) + if (.not. calc_strair) then + call grid_average_X2Y('F', strax, grid_atm_dynu, strairx, 'U') + call grid_average_X2Y('F', stray, grid_atm_dynv, strairy, 'U') else - call ice_HaloUpdate (strairx, halo_info, & + call ice_HaloUpdate (strairxT, halo_info, & field_loc_center, field_type_vector) - call ice_HaloUpdate (strairy, halo_info, & + call ice_HaloUpdate (strairyT, halo_info, & field_loc_center, field_type_vector) - call grid_average_X2Y('T2UF',strairx) - call grid_average_X2Y('T2UF',strairy) + call grid_average_X2Y('F',strairxT,'T',strairx,'U') + call grid_average_X2Y('F',strairyT,'T',strairy,'U') endif ! 
tcraig, tcx, turned off this threaded region, in evp, this block and @@ -302,7 +309,7 @@ subroutine eap (dt) aiu (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & strairx (:,:,iblk), strairy (:,:,iblk), & ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), & icetmask (:,:,iblk), iceumask (:,:,iblk), & @@ -478,7 +485,7 @@ subroutine eap (dt) indxui (:,iblk), indxuj (:,iblk), & ksub, & aiu (:,:,iblk), strtmp (:,:,:), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & waterx (:,:,iblk), watery (:,:,iblk), & forcex (:,:,iblk), forcey (:,:,iblk), & umassdti (:,:,iblk), fm (:,:,iblk), & @@ -543,26 +550,40 @@ subroutine eap (dt) icellu (iblk), Cdn_ocn (:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & aiu (:,:,iblk), fm (:,:,iblk), & strintx (:,:,iblk), strinty (:,:,iblk), & strairx (:,:,iblk), strairy (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk), & - strocnxT(:,:,iblk), strocnyT(:,:,iblk)) + strocnx (:,:,iblk), strocny (:,:,iblk)) enddo !$OMP END PARALLEL DO - call ice_HaloUpdate (strocnxT, halo_info, & + ! strocn computed on U, N, E as needed. Map strocn U divided by aiu to T + ! TODO: This should be done elsewhere as part of generalization? + ! 
conservation requires aiu be divided before averaging + work1 = c0 + work2 = c0 + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) + do iblk = 1, nblocks + do ij = 1, icellu(iblk) + i = indxui(ij,iblk) + j = indxuj(ij,iblk) + work1(i,j,iblk) = strocnx(i,j,iblk)/aiu(i,j,iblk) + work2(i,j,iblk) = strocny(i,j,iblk)/aiu(i,j,iblk) + enddo + enddo + call ice_HaloUpdate (work1, halo_info, & field_loc_NEcorner, field_type_vector) - call ice_HaloUpdate (strocnyT, halo_info, & + call ice_HaloUpdate (work2, halo_info, & field_loc_NEcorner, field_type_vector) - call grid_average_X2Y('U2TF',strocnxT) ! shift - call grid_average_X2Y('U2TF',strocnyT) + call grid_average_X2Y('F',work1,'U',strocnxT,'T') ! shift + call grid_average_X2Y('F',work2,'U',strocnyT,'T') + ! shift velocity components from CD grid locations (N, E) to B grid location (U) for transport ! commented out in order to focus on EVP for now within the cdgrid ! should be used when routine is ready -! if (grid_system == 'CD') then +! if (grid_ice == 'CD') then ! call grid_average_X2Y('E2US',uvelE,uvel) ! call grid_average_X2Y('N2US',vvelN,vvel) ! 
endif diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index d06ea1bc9..377f1205d 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -86,13 +86,13 @@ subroutine evp (dt) strairx, strairy, uocn, vocn, ss_tltx, ss_tlty, iceumask, fm, & strtltx, strtlty, strocnx, strocny, strintx, strinty, taubx, tauby, & strocnxT, strocnyT, strax, stray, & - strairxN, strairyN, uocnN, vocnN, ss_tltxN, ss_tltyN, icenmask, fmN, & + Tbu, hwater, & + strairxN, strairyN, icenmask, fmN, & strtltxN, strtltyN, strocnxN, strocnyN, strintxN, strintyN, taubxN, taubyN, & - straxN, strayN, TbN, & - strairxE, strairyE, uocnE, vocnE, ss_tltxE, ss_tltyE, iceemask, fmE, & + TbN, & + strairxE, strairyE, iceemask, fmE, & strtltxE, strtltyE, strocnxE, strocnyE, strintxE, strintyE, taubxE, taubyE, & - straxE, strayE, TbE, & - Tbu, hwater, & + TbE, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4, & @@ -103,7 +103,8 @@ subroutine evp (dt) ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & dxhy, dyhx, cxp, cyp, cxm, cym, & tarear, uarear, earear, narear, tinyarea, grid_average_X2Y, tarea, & - grid_type, grid_system + grid_type, grid_ice, & + grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, vice, vsno, uvel, vvel, uvelN, vvelN, & uvelE, vvelE, divu, shear, & aice_init, aice0, aicen, vicen, strength @@ -141,6 +142,10 @@ subroutine evp (dt) indxuj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + uocnU , & ! i ocean current (m/s) + vocnU , & ! j ocean current (m/s) + ss_tltxU , & ! sea surface slope, x-direction (m/m) + ss_tltyU , & ! sea surface slope, y-direction (m/m) tmass , & ! total mass of ice and snow (kg/m^2) waterx , & ! for ocean stress calculation, x (m/s) watery , & ! 
for ocean stress calculation, y (m/s) @@ -151,6 +156,10 @@ subroutine evp (dt) umassdti ! mass of U-cell/dte (kg/m^2 s) real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + uocnN , & ! i ocean current (m/s) + vocnN , & ! j ocean current (m/s) + ss_tltxN , & ! sea surface slope, x-direction (m/m) + ss_tltyN , & ! sea surface slope, y-direction (m/m) waterxN , & ! for ocean stress calculation, x (m/s) wateryN , & ! for ocean stress calculation, y (m/s) forcexN , & ! work array: combined atm stress and ocn tilt, x @@ -160,6 +169,10 @@ subroutine evp (dt) nmassdti ! mass of N-cell/dte (kg/m^2 s) real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + uocnE , & ! i ocean current (m/s) + vocnE , & ! j ocean current (m/s) + ss_tltxE , & ! sea surface slope, x-direction (m/m) + ss_tltyE , & ! sea surface slope, y-direction (m/m) waterxE , & ! for ocean stress calculation, x (m/s) wateryE , & ! for ocean stress calculation, y (m/s) forcexE , & ! work array: combined atm stress and ocn tilt, x @@ -189,6 +202,10 @@ subroutine evp (dt) type (block) :: & this_block ! block information for current block + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1, & ! temporary + work2 ! temporary + logical (kind=log_kind), save :: first_time = .true. 
character(len=*), parameter :: subname = '(evp)' @@ -201,7 +218,7 @@ subroutine evp (dt) allocate(fld2(nx_block,ny_block,2,max_blocks)) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then allocate(zetax2T(nx_block,ny_block,max_blocks)) allocate(etax2T(nx_block,ny_block,max_blocks)) @@ -254,16 +271,8 @@ subroutine evp (dt) ilo, ihi, jlo, jhi, & aice (:,:,iblk), vice (:,:,iblk), & vsno (:,:,iblk), tmask (:,:,iblk), & - strairxT(:,:,iblk), strairyT(:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & tmass (:,:,iblk), icetmask(:,:,iblk)) - if (grid_system == 'CD') then - strairxN(:,:,iblk) = strairxT(:,:,iblk) - strairyN(:,:,iblk) = strairyT(:,:,iblk) - strairxE(:,:,iblk) = strairxT(:,:,iblk) - strairyE(:,:,iblk) = strairyT(:,:,iblk) - endif enddo ! iblk !$OMP END PARALLEL DO @@ -276,59 +285,59 @@ subroutine evp (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('T2UF',tmass,umass) - call grid_average_X2Y('T2UF',aice_init, aiu) - - if (grid_system == 'CD') then - call grid_average_X2Y('T2EF',tmass,emass) - call grid_average_X2Y('T2EF',aice_init, aie) - call grid_average_X2Y('T2NF',tmass,nmass) - call grid_average_X2Y('T2NF',aice_init, ain) + call grid_average_X2Y('F',tmass,'T',umass,'U') + call grid_average_X2Y('F',aice_init,'T',aiu,'U') + call grid_average_X2Y('S',uocn,grid_ocn_dynu,uocnU,'U') + call grid_average_X2Y('S',vocn,grid_ocn_dynv,vocnU,'U') + call grid_average_X2Y('S',ss_tltx,grid_ocn_dynu,ss_tltxU,'U') + call grid_average_X2Y('S',ss_tlty,grid_ocn_dynv,ss_tltyU,'U') + + if (grid_ice == 'CD') then + call grid_average_X2Y('F',tmass,'T',emass,'E') + call grid_average_X2Y('F',aice_init,'T', aie,'E') + call grid_average_X2Y('F',tmass,'T',nmass,'N') + call grid_average_X2Y('F',aice_init,'T', ain,'N') + call grid_average_X2Y('S',uocn,grid_ocn_dynu,uocnN,'N') + call grid_average_X2Y('S',vocn,grid_ocn_dynv,vocnN,'N') + call grid_average_X2Y('S',uocn,grid_ocn_dynu,uocnE,'E') + 
call grid_average_X2Y('S',vocn,grid_ocn_dynv,vocnE,'E') endif !---------------------------------------------------------------- ! Set wind stress to values supplied via NEMO or other forcing - ! This wind stress is rotated on u grid and multiplied by aice + ! Map T to U, N, E as needed + ! This wind stress is rotated on u grid and multiplied by aice in coupler !---------------------------------------------------------------- call icepack_query_parameters(calc_strair_out=calc_strair) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (.not. calc_strair) then - strairx(:,:,:) = strax(:,:,:) - strairy(:,:,:) = stray(:,:,:) + if (.not. calc_strair) then + call grid_average_X2Y('F', strax, grid_atm_dynu, strairx, 'U') + call grid_average_X2Y('F', stray, grid_atm_dynv, strairy, 'U') else - call ice_HaloUpdate (strairx, halo_info, & + call ice_HaloUpdate (strairxT, halo_info, & field_loc_center, field_type_vector) - call ice_HaloUpdate (strairy, halo_info, & + call ice_HaloUpdate (strairyT, halo_info, & field_loc_center, field_type_vector) - call grid_average_X2Y('T2UF',strairx) - call grid_average_X2Y('T2UF',strairy) - endif - - if (grid_system == 'CD') then + call grid_average_X2Y('F',strairxT,'T',strairx,'U') + call grid_average_X2Y('F',strairyT,'T',strairy,'U') + endif - if (.not. calc_strair) then - strairxN(:,:,:) = strax(:,:,:) - strairyN(:,:,:) = stray(:,:,:) - strairxE(:,:,:) = strax(:,:,:) - strairyE(:,:,:) = stray(:,:,:) + if (grid_ice == 'CD') then + if (.not. 
calc_strair) then + call grid_average_X2Y('F', strax, grid_atm_dynu, strairxN, 'N') + call grid_average_X2Y('F', stray, grid_atm_dynv, strairyN, 'N') + call grid_average_X2Y('F', strax, grid_atm_dynu, strairxE, 'E') + call grid_average_X2Y('F', stray, grid_atm_dynv, strairyE, 'E') else - call ice_HaloUpdate (strairxN, halo_info, & - field_loc_center, field_type_vector) - call ice_HaloUpdate (strairyN, halo_info, & - field_loc_center, field_type_vector) - call ice_HaloUpdate (strairxE, halo_info, & - field_loc_center, field_type_vector) - call ice_HaloUpdate (strairyE, halo_info, & - field_loc_center, field_type_vector) - call grid_average_X2Y('T2NF',strairxN) - call grid_average_X2Y('T2NF',strairyN) - call grid_average_X2Y('T2EF',strairxE) - call grid_average_X2Y('T2EF',strairyE) + call grid_average_X2Y('F',strairxT,'T',strairxN,'N') + call grid_average_X2Y('F',strairyT,'T',strairyN,'N') + call grid_average_X2Y('F',strairxT,'T',strairxE,'E') + call grid_average_X2Y('F',strairyT,'T',strairyE,'E') endif - endif + ! tcraig, tcx, threading here leads to some non-reproducbile results and failures in icepack_ice_strength ! 
need to do more debugging !$TCXOMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) @@ -344,7 +353,7 @@ subroutine evp (dt) jlo = this_block%jlo jhi = this_block%jhi - if (trim(grid_system) == 'B') then + if (trim(grid_ice) == 'B') then call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & icellt(iblk), icellu(iblk), & @@ -353,9 +362,9 @@ subroutine evp (dt) aiu (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & strairx (:,:,iblk), strairy (:,:,iblk), & - ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), & + ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & icetmask (:,:,iblk), iceumask (:,:,iblk), & fm (:,:,iblk), dt, & strtltx (:,:,iblk), strtlty (:,:,iblk), & @@ -374,7 +383,7 @@ subroutine evp (dt) uvel (:,:,iblk), vvel (:,:,iblk), & Tbu (:,:,iblk)) - elseif (trim(grid_system) == 'CD') then + elseif (trim(grid_ice) == 'CD') then call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & icellt(iblk), icellu(iblk), & @@ -383,9 +392,9 @@ subroutine evp (dt) aiu (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umaskCD (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & strairx (:,:,iblk), strairy (:,:,iblk), & - ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), & + ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & icetmask (:,:,iblk), iceumask (:,:,iblk), & fm (:,:,iblk), dt, & strtltx (:,:,iblk), strtlty (:,:,iblk), & @@ -425,7 +434,7 @@ subroutine evp (dt) enddo ! iblk !$TCXOMP END PARALLEL DO - if (grid_system == 'CD') then + if (grid_ice == 'CD') then !$TCXOMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks @@ -505,7 +514,7 @@ subroutine evp (dt) enddo ! iblk !$TCXOMP END PARALLEL DO - endif ! grid_system + endif ! 
grid_ice call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & @@ -521,7 +530,7 @@ subroutine evp (dt) call unstack_velocity_field(fld2, uvel, vvel) call ice_timer_stop(timer_bound) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then call ice_timer_start(timer_bound) ! velocities may have changed in dyn_prep2 @@ -558,7 +567,7 @@ subroutine evp (dt) !$TCXOMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks - select case (trim(grid_system)) + select case (trim(grid_ice)) case('B') if ( seabed_stress_method == 'LKD' ) then @@ -627,7 +636,7 @@ subroutine evp (dt) call ice_dyn_evp_1d_copyin( & nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost, & icetmask, iceumask, & - cdn_ocn,aiu,uocn,vocn,forcex,forcey,Tbu, & + cdn_ocn,aiu,uocnU,vocnU,forcex,forcey,Tbu, & umassdti,fm,uarear,tarear,strintx,strinty,uvel_init,vvel_init,& strength,uvel,vvel,dxt,dyt, & stressp_1 ,stressp_2, stressp_3, stressp_4, & @@ -651,9 +660,9 @@ subroutine evp (dt) ! 
shift velocity components from CD grid locations (N, E) to B grid location (U) for stress_U - if (grid_system == 'CD') then - call grid_average_X2Y('E2US',uvelE,uvel) - call grid_average_X2Y('N2US',vvelN,vvel) + if (grid_ice == 'CD') then + call grid_average_X2Y('S',uvelE,'E',uvel,'U') + call grid_average_X2Y('S',vvelN,'N',vvel,'U') endif !----------------------------------------------------------------- @@ -663,7 +672,7 @@ subroutine evp (dt) !$TCXOMP PARALLEL DO PRIVATE(iblk,strtmp) do iblk = 1, nblocks - select case (grid_system) + select case (grid_ice) case('B') call stress (nx_block, ny_block, & ksub, icellt(iblk), & @@ -693,7 +702,7 @@ subroutine evp (dt) indxui (:,iblk), indxuj (:,iblk), & ksub, & aiu (:,:,iblk), strtmp (:,:,:), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & waterx (:,:,iblk), watery (:,:,iblk), & forcex (:,:,iblk), forcey (:,:,iblk), & umassdti (:,:,iblk), fm (:,:,iblk), & @@ -782,7 +791,7 @@ subroutine evp (dt) icelln (iblk), Cdn_ocn (:,:,iblk), & indxni (:,iblk), indxnj (:,iblk), & ksub, aiN (:,:,iblk), & - uocnN (:,:,iblk), vocnE (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & waterxN (:,:,iblk), wateryN (:,:,iblk), & forcexN (:,:,iblk), forceyN (:,:,iblk), & nmassdti (:,:,iblk), fmN (:,:,iblk), & @@ -809,7 +818,7 @@ subroutine evp (dt) call ice_timer_stop(timer_bound) call unstack_velocity_field(fld2, uvel, vvel) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then call ice_timer_start(timer_bound) ! 
velocities may have changed in dyn_prep2 @@ -832,7 +841,7 @@ subroutine evp (dt) call ice_timer_stop(timer_evp_2d) deallocate(fld2) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then deallocate(zetax2T, etax2T) endif @@ -919,17 +928,16 @@ subroutine evp (dt) icellu (iblk), Cdn_ocn (:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & aiu (:,:,iblk), fm (:,:,iblk), & strintx (:,:,iblk), strinty (:,:,iblk), & strairx (:,:,iblk), strairy (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk), & - strocnxT(:,:,iblk), strocnyT(:,:,iblk)) + strocnx (:,:,iblk), strocny (:,:,iblk)) enddo !$OMP END PARALLEL DO - if (grid_system == 'CD') then + if (grid_ice == 'CD') then !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -956,37 +964,40 @@ subroutine evp (dt) strairxE(:,:,iblk), strairyE(:,:,iblk), & strocnxE(:,:,iblk), strocnyE(:,:,iblk)) - ! If we are coupling to a C grid ocean model -! do ij =1, icelle -! i = indxei(ij,iblk) -! j = indxej(ij,iblk) - -! strocnxT(i,j,iblk) = strocnxE(i,j,iblk) / aiE(i,j,iblk) -! enddo - -! do ij =1, icelln -! i = indxni(ij,iblk) -! j = indxnj(ij,iblk) - -! strocnyT(i,j,iblk) = strocnyN(i,j,iblk) / aiN(i,j,iblk) -! enddo - enddo !$OMP END PARALLEL DO endif - call ice_HaloUpdate (strocnxT, halo_info, & + ! strocn computed on U, N, E as needed. Map strocn U divided by aiu to T + ! TODO: This should be done elsewhere as part of generalization? + ! TODO: Rename strocn[x,y]T since it's different than strocn[x,y][U,N,E] + ! 
conservation requires aiu be divided before averaging + work1 = c0 + work2 = c0 + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) + do iblk = 1, nblocks + do ij = 1, icellu(iblk) + i = indxui(ij,iblk) + j = indxuj(ij,iblk) + work1(i,j,iblk) = strocnx(i,j,iblk)/aiu(i,j,iblk) + work2(i,j,iblk) = strocny(i,j,iblk)/aiu(i,j,iblk) + enddo + enddo + !$OMP END PARALLEL DO + call ice_HaloUpdate (work1, halo_info, & field_loc_NEcorner, field_type_vector) - call ice_HaloUpdate (strocnyT, halo_info, & + call ice_HaloUpdate (work2, halo_info, & field_loc_NEcorner, field_type_vector) - call grid_average_X2Y('U2TF',strocnxT) ! shift - call grid_average_X2Y('U2TF',strocnyT) + call grid_average_X2Y('F',work1,'U',strocnxT,'T') ! shift + call grid_average_X2Y('F',work2,'U',strocnyT,'T') + ! shift velocity components from CD grid locations (N, E) to B grid location (U) for transport - if (grid_system == 'CD') then - call grid_average_X2Y('E2US',uvelE,uvel) - call grid_average_X2Y('N2US',vvelN,vvel) + if (grid_ice == 'CD') then + call grid_average_X2Y('S',uvelE,'E',uvel,'U') + call grid_average_X2Y('S',vvelN,'N',vvel,'U') endif + call ice_timer_stop(timer_dynamics) ! 
dynamics end subroutine evp diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 6a18d7454..bc0c25f75 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -17,7 +17,7 @@ module ice_dyn_shared use ice_domain_size, only: max_blocks use ice_fileunits, only: nu_diag use ice_exit, only: abort_ice - use ice_grid, only: grid_system + use ice_grid, only: grid_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_query_parameters @@ -134,7 +134,7 @@ subroutine alloc_dyn_shared stat=ierr) if (ierr/=0) call abort_ice('(alloc_dyn_shared): Out of memory') - if (grid_system == 'CD') then + if (grid_ice == 'CD') then allocate( & uvelE_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep vvelE_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep @@ -186,7 +186,7 @@ subroutine init_dyn (dt) allocate(fcor_blk(nx_block,ny_block,max_blocks)) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then allocate(fcorE_blk(nx_block,ny_block,max_blocks)) allocate(fcorN_blk(nx_block,ny_block,max_blocks)) endif @@ -199,7 +199,7 @@ subroutine init_dyn (dt) ! velocity uvel(i,j,iblk) = c0 ! m/s vvel(i,j,iblk) = c0 ! m/s - if (grid_system == 'CD') then ! extra velocity variables + if (grid_ice == 'CD') then ! extra velocity variables uvelE(i,j,iblk) = c0 vvelE(i,j,iblk) = c0 uvelN(i,j,iblk) = c0 @@ -221,7 +221,7 @@ subroutine init_dyn (dt) fcor_blk(i,j,iblk) = c2*omega*sin(ULAT(i,j,iblk)) ! 1/s endif - if (grid_system == 'CD') then + if (grid_ice == 'CD') then if (trim(coriolis) == 'constant') then fcorE_blk(i,j,iblk) = 1.46e-4_dbl_kind ! Hibler 1979, N. 
Hem; 1/s @@ -250,7 +250,7 @@ subroutine init_dyn (dt) stress12_3(i,j,iblk) = c0 stress12_4(i,j,iblk) = c0 - if (grid_system == 'CD') then + if (grid_ice == 'CD') then stresspT (i,j,iblk) = c0 stressmT (i,j,iblk) = c0 stress12T (i,j,iblk) = c0 @@ -339,8 +339,6 @@ subroutine dyn_prep1 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & aice, vice, & vsno, tmask, & - strairxT, strairyT, & - strairx, strairy, & tmass, icetmask) integer (kind=int_kind), intent(in) :: & @@ -350,16 +348,12 @@ subroutine dyn_prep1 (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & aice , & ! concentration of ice vice , & ! volume per unit area of ice (m) - vsno , & ! volume per unit area of snow (m) - strairxT, & ! stress on ice by air, x-direction - strairyT ! stress on ice by air, y-direction + vsno ! volume per unit area of snow (m) logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: & tmask ! land/boundary mask, thickness (T-cell) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - strairx , & ! stress on ice by air, x-direction - strairy , & ! stress on ice by air, y-direction tmass ! total mass of ice and snow (kg/m^2) integer (kind=int_kind), dimension (nx_block,ny_block), intent(out) :: & @@ -403,14 +397,6 @@ subroutine dyn_prep1 (nx_block, ny_block, & tmphm(i,j) = tmask(i,j) .and. (aice (i,j) > a_min) & .and. (tmass(i,j) > m_min) - !----------------------------------------------------------------- - ! prep to convert to U grid - !----------------------------------------------------------------- - ! these quantities include the factor of aice needed for - ! correct treatment of free drift - strairx(i,j) = strairxT(i,j) - strairy(i,j) = strairyT(i,j) - !----------------------------------------------------------------- ! 
augmented mask (land + open ocean) !----------------------------------------------------------------- @@ -610,7 +596,7 @@ subroutine dyn_prep2 (nx_block, ny_block, & do j = jlo, jhi do i = ilo, ihi iceumask_old(i,j) = iceumask(i,j) ! save -! if (grid_system == 'B') then ! include ice mask. +! if (grid_ice == 'B') then ! include ice mask. ! ice extent mask (U-cells) iceumask(i,j) = (umask(i,j)) .and. (aiu (i,j) > a_min) & .and. (umass(i,j) > m_min) @@ -954,8 +940,7 @@ subroutine dyn_finish (nx_block, ny_block, & aiu, fm, & strintx, strinty, & strairx, strairy, & - strocnx, strocny, & - strocnxT, strocnyT) + strocnx, strocny) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -981,10 +966,6 @@ subroutine dyn_finish (nx_block, ny_block, & strocnx , & ! ice-ocean stress, x-direction strocny ! ice-ocean stress, y-direction - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out), optional :: & - strocnxT, & ! ice-ocean stress, x-direction - strocnyT ! ice-ocean stress, y-direction - ! local variables integer (kind=int_kind) :: & @@ -1001,17 +982,6 @@ subroutine dyn_finish (nx_block, ny_block, & if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (present(strocnxT) .and. present(strocnyT)) then - - do j = 1, ny_block - do i = 1, nx_block - strocnxT(i,j) = c0 - strocnyT(i,j) = c0 - enddo - enddo - - endif - ! ocean-ice stress for coupling do ij =1, icellu i = indxui(ij) @@ -1037,14 +1007,6 @@ subroutine dyn_finish (nx_block, ny_block, & ! strocnx(i,j) = -(strairx(i,j) + strintx(i,j)) ! strocny(i,j) = -(strairy(i,j) + strinty(i,j)) - if (present(strocnxT) .and. present(strocnyT)) then - - ! Prepare to convert to T grid - ! 
divide by aice for coupling - strocnxT(i,j) = strocnx(i,j) / aiu(i,j) - strocnyT(i,j) = strocny(i,j) / aiu(i,j) - - endif enddo end subroutine dyn_finish @@ -1325,7 +1287,7 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & endif enddo - select case (trim(grid_system)) + select case (trim(grid_ice)) case('B') do ij = 1, icellu i = indxui(ij) @@ -1353,7 +1315,7 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & enddo else - call abort_ice(subname // ' insufficient number of arguments for grid_system:' // grid_system) + call abort_ice(subname // ' insufficient number of arguments for grid_ice:' // grid_ice) endif end select diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 66499037a..9d4d220fe 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -200,9 +200,10 @@ subroutine implicit_solver (dt) stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_grid, only: tmask, umask, dxt, dyt, cxp, cyp, cxm, cym, & - tarear, grid_type, grid_average_X2Y !, grid_system commented out until implementation of c grid + tarear, grid_type, grid_average_X2Y, & + grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & - aice_init, aice0, aicen, vicen, strength!, uvelE, vvelN ommented out until implementation of c grid + aice_init, aice0, aicen, vicen, strength use ice_timers, only: timer_dynamics, timer_bound, & ice_timer_start, ice_timer_stop @@ -218,6 +219,8 @@ subroutine implicit_solver (dt) i, j, ij real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + uocnU , & ! i ocean current (m/s) + vocnU , & ! j ocean current (m/s) tmass , & ! total mass of ice and snow (kg/m^2) waterx , & ! for ocean stress calculation, x (m/s) watery , & ! 
for ocean stress calculation, y (m/s) @@ -252,6 +255,10 @@ subroutine implicit_solver (dt) real (kind=dbl_kind), allocatable :: & sol(:) ! solution vector + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1, & ! temporary + work2 ! temporary + character(len=*), parameter :: subname = '(implicit_solver)' call ice_timer_start(timer_dynamics) ! dynamics @@ -304,8 +311,6 @@ subroutine implicit_solver (dt) ilo, ihi, jlo, jhi, & aice (:,:,iblk), vice (:,:,iblk), & vsno (:,:,iblk), tmask (:,:,iblk), & - strairxT(:,:,iblk), strairyT(:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & tmass (:,:,iblk), icetmask(:,:,iblk)) enddo ! iblk @@ -320,8 +325,10 @@ subroutine implicit_solver (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('T2UF',tmass,umass) - call grid_average_X2Y('T2UF',aice_init, aiu) + call grid_average_X2Y('F',tmass,'T',umass,'U') + call grid_average_X2Y('F',aice_init,'T', aiu,'U') + call grid_average_X2Y('S',uocn,grid_ocn_dynu,uocnU,'U') + call grid_average_X2Y('S',vocn,grid_ocn_dynv,vocnU,'U') !---------------------------------------------------------------- ! Set wind stress to values supplied via NEMO or other forcing @@ -333,15 +340,15 @@ subroutine implicit_solver (dt) file=__FILE__, line=__LINE__) if (.not. 
calc_strair) then - strairx(:,:,:) = strax(:,:,:) - strairy(:,:,:) = stray(:,:,:) + call grid_average_X2Y('F', strax, grid_atm_dynu, strairx, 'U') + call grid_average_X2Y('F', stray, grid_atm_dynv, strairy, 'U') else - call ice_HaloUpdate (strairx, halo_info, & + call ice_HaloUpdate (strairxT, halo_info, & field_loc_center, field_type_vector) - call ice_HaloUpdate (strairy, halo_info, & + call ice_HaloUpdate (strairyT, halo_info, & field_loc_center, field_type_vector) - call grid_average_X2Y('T2UF',strairx) - call grid_average_X2Y('T2UF',strairy) + call grid_average_X2Y('F',strairxT,'T',strairx,'U') + call grid_average_X2Y('F',strairyT,'T',strairy,'U') endif ! tcraig, tcx, threading here leads to some non-reproducbile results and failures in icepack_ice_strength @@ -367,7 +374,7 @@ subroutine implicit_solver (dt) aiu (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & strairx (:,:,iblk), strairy (:,:,iblk), & ss_tltx (:,:,iblk), ss_tlty (:,:,iblk), & icetmask (:,:,iblk), iceumask (:,:,iblk), & @@ -490,6 +497,7 @@ subroutine implicit_solver (dt) indxti , indxtj, & indxui , indxuj, & aiu , ntot , & + uocnU , vocnU , & waterx , watery, & bxfix , byfix , & umassdti, sol , & @@ -642,26 +650,40 @@ subroutine implicit_solver (dt) icellu (iblk), Cdn_ocn (:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & + uocnU (:,:,iblk), vocnU (:,:,iblk), & aiu (:,:,iblk), fm (:,:,iblk), & strintx (:,:,iblk), strinty (:,:,iblk), & strairx (:,:,iblk), strairy (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk), & - strocnxT(:,:,iblk), strocnyT(:,:,iblk)) + strocnx (:,:,iblk), strocny (:,:,iblk)) enddo !$OMP END PARALLEL DO - call ice_HaloUpdate (strocnxT, halo_info, & + ! strocn computed on U, N, E as needed. Map strocn U divided by aiu to T + ! 
TODO: This should be done elsewhere as part of generalization? + ! conservation requires aiu be divided before averaging + work1 = c0 + work2 = c0 + !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) + do iblk = 1, nblocks + do ij = 1, icellu(iblk) + i = indxui(ij,iblk) + j = indxuj(ij,iblk) + work1(i,j,iblk) = strocnx(i,j,iblk)/aiu(i,j,iblk) + work2(i,j,iblk) = strocny(i,j,iblk)/aiu(i,j,iblk) + enddo + enddo + call ice_HaloUpdate (work1, halo_info, & field_loc_NEcorner, field_type_vector) - call ice_HaloUpdate (strocnyT, halo_info, & + call ice_HaloUpdate (work2, halo_info, & field_loc_NEcorner, field_type_vector) - call grid_average_X2Y('U2TF',strocnxT) ! shift - call grid_average_X2Y('U2TF',strocnyT) + call grid_average_X2Y('F',work1,'U',strocnxT,'T') ! shift + call grid_average_X2Y('F',work2,'U',strocnyT,'T') + ! shift velocity components from CD grid locations (N, E) to B grid location (U) for transport ! commented out in order to focus on EVP for now within the cdgrid ! should be used when routine is ready -! if (grid_system == 'CD') then +! if (grid_ice == 'CD') then ! call grid_average_X2Y('E2US',uvelE,uvel) ! call grid_average_X2Y('N2US',vvelN,vvel) ! endif @@ -686,6 +708,7 @@ subroutine anderson_solver (icellt , icellu, & indxti , indxtj, & indxui , indxuj, & aiu , ntot , & + uocn , vocn , & waterx , watery, & bxfix , byfix , & umassdti, sol , & @@ -700,7 +723,7 @@ subroutine anderson_solver (icellt , icellu, & use ice_constants, only: c1 use ice_domain, only: maskhalo_dyn, halo_info use ice_domain_size, only: max_blocks - use ice_flux, only: uocn, vocn, fm, Tbu + use ice_flux, only: fm, Tbu use ice_grid, only: dxt, dyt, dxhy, dyhx, cxp, cyp, cxm, cym, & uarear, tinyarea use ice_state, only: uvel, vvel, strength @@ -721,6 +744,8 @@ subroutine anderson_solver (icellt , icellu, & real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & aiu , & ! ice fraction on u-grid + uocn , & ! i ocean current (m/s) + vocn , & ! 
j ocean current (m/s) waterx , & ! for ocean stress calculation, x (m/s) watery , & ! for ocean stress calculation, y (m/s) bxfix , & ! part of bx that is constant during Picard diff --git a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 index 5d34b60fc..01eb6b989 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 @@ -261,7 +261,7 @@ subroutine transport_remap (dt) use ice_blocks, only: nx_block, ny_block, block, get_block, nghost use ice_state, only: aice0, aicen, vicen, vsnon, trcrn, & uvel, vvel, bound_state, uvelE, vvelN - use ice_grid, only: tarea, grid_system + use ice_grid, only: tarea, grid_ice use ice_calendar, only: istep1 use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_advect, timer_bound @@ -538,14 +538,14 @@ subroutine transport_remap (dt) !------------------------------------------------------------------- ! Main remapping routine: Step ice area and tracers forward in time. 
!------------------------------------------------------------------- - if (grid_system == 'CD') then + if (grid_ice == 'CD') then call horizontal_remap (dt, ntrace, & uvel (:,:,:), vvel (:,:,:), & aim (:,:,:,:), trm (:,:,:,:,:), & l_fixed_area, & tracer_type, depend, & has_dependents, integral_order, & - l_dp_midpt, grid_system, & + l_dp_midpt, grid_ice, & uvelE(:,:,:),vvelN(:,:,:)) else call horizontal_remap (dt, ntrace, & @@ -554,7 +554,7 @@ subroutine transport_remap (dt) l_fixed_area, & tracer_type, depend, & has_dependents, integral_order, & - l_dp_midpt, grid_system) + l_dp_midpt, grid_ice) endif !------------------------------------------------------------------- @@ -720,7 +720,7 @@ subroutine transport_upwind (dt) use ice_state, only: aice0, aicen, vicen, vsnon, trcrn, & uvel, vvel, trcr_depend, bound_state, trcr_base, & n_trcr_strata, nt_strata, uvelE, vvelN - use ice_grid, only: HTE, HTN, tarea, grid_system + use ice_grid, only: HTE, HTN, tarea, grid_ice use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_bound, timer_advect @@ -771,7 +771,7 @@ subroutine transport_upwind (dt) !------------------------------------------------------------------- ! Average corner velocities to edges. 
!------------------------------------------------------------------- - if (grid_system == 'CD') then + if (grid_ice == 'CD') then uee(:,:,:)=uvelE(:,:,:) vnn(:,:,:)=vvelN(:,:,:) else diff --git a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 index aae19378a..662fa7e60 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 @@ -319,7 +319,7 @@ subroutine horizontal_remap (dt, ntrace, & tracer_type, depend, & has_dependents, & integral_order, & - l_dp_midpt, grid_system, & + l_dp_midpt, grid_ice, & uvelE, vvelN) use ice_boundary, only: ice_halo, ice_HaloMask, ice_HaloUpdate, & @@ -353,7 +353,7 @@ subroutine horizontal_remap (dt, ntrace, & real (kind=dbl_kind), intent(inout), dimension (nx_block,ny_block,ntrace,ncat,max_blocks) :: & tm ! mean tracer values in each grid cell - character (len=char_len_long), intent(in) :: grid_system + character (len=char_len_long), intent(in) :: grid_ice !------------------------------------------------------------------- ! If l_fixed_area is true, the area of each departure region is @@ -670,7 +670,7 @@ subroutine horizontal_remap (dt, ntrace, & enddo if (l_fixed_area) then - if (grid_system == 'CD') then ! velocities are already on the center + if (grid_ice == 'CD') then ! velocities are already on the center do j = jlo, jhi do i = ilo-1, ihi edgearea_e(i,j) = uvelE(i,j,iblk) * HTE(i,j,iblk) * dt diff --git a/cicecore/cicedynB/general/ice_flux.F90 b/cicecore/cicedynB/general/ice_flux.F90 index 00d9aac97..50f383568 100644 --- a/cicecore/cicedynB/general/ice_flux.F90 +++ b/cicecore/cicedynB/general/ice_flux.F90 @@ -33,41 +33,31 @@ module ice_flux !----------------------------------------------------------------- ! Dynamics component + ! All variables are assumed to be on the atm or ocn thermodynamic + ! 
grid except as noted !----------------------------------------------------------------- real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & ! in from atmos (if .not.calc_strair) - strax , & ! wind stress components (N/m^2) - stray , & ! - straxE , & ! wind stress components (N/m^2) - strayE , & ! - straxN , & ! wind stress components (N/m^2) - strayN , & ! + strax , & ! wind stress components (N/m^2), on grid_atm_dynu + stray , & ! on grid_atm_dynv ! in from ocean - uocn , & ! ocean current, x-direction (m/s) - vocn , & ! ocean current, y-direction (m/s) - uocnE , & ! ocean current, x-direction (m/s) - vocnE , & ! ocean current, y-direction (m/s) - uocnN , & ! ocean current, x-direction (m/s) - vocnN , & ! ocean current, y-direction (m/s) - ss_tltx , & ! sea surface slope, x-direction (m/m) - ss_tlty , & ! sea surface slope, y-direction - ss_tltxE, & ! sea surface slope, x-direction (m/m) - ss_tltyE, & ! sea surface slope, y-direction - ss_tltxN, & ! sea surface slope, x-direction (m/m) - ss_tltyN, & ! sea surface slope, y-direction + uocn , & ! ocean current, x-direction (m/s), on grid_ocn_dynu + vocn , & ! ocean current, y-direction (m/s), on grid_ocn_dynv + ss_tltx , & ! sea surface slope, x-direction (m/m), on grid_ocn_dynu + ss_tlty , & ! sea surface slope, y-direction, on grid_ocn_dynv hwater , & ! water depth for seabed stress calc (landfast ice) ! out to atmosphere - strairxT, & ! stress on ice by air, x-direction - strairyT, & ! stress on ice by air, y-direction + strairxT, & ! stress on ice by air, x-direction at T points, computed in icepack + strairyT, & ! stress on ice by air, y-direction at T points, computed in icepack ! out to ocean T-cell (kg/m s^2) ! Note, CICE_IN_NEMO uses strocnx and strocny for coupling - strocnxT, & ! ice-ocean stress, x-direction - strocnyT ! ice-ocean stress, y-direction + strocnxT, & ! ice-ocean stress at T points, x-direction at T points, mapped from strocnx, per ice fraction + strocnyT ! 
ice-ocean stress at T points, y-direction at T points, mapped from strocny, per ice fraction ! diagnostic @@ -77,30 +67,30 @@ module ice_flux sigP , & ! internal ice pressure (N/m) taubx , & ! seabed stress (x) (N/m^2) tauby , & ! seabed stress (y) (N/m^2) - strairx , & ! stress on ice by air, x-direction - strairy , & ! stress on ice by air, y-direction - strocnx , & ! ice-ocean stress, x-direction - strocny , & ! ice-ocean stress, y-direction + strairx , & ! stress on ice by air, x-direction at U points, mapped from strairxT + strairy , & ! stress on ice by air, y-direction at U points, mapped from strairyT + strocnx , & ! ice-ocean stress, x-direction at U points, computed in dyn_finish + strocny , & ! ice-ocean stress, y-direction at U points, computed in dyn_finish strtltx , & ! stress due to sea surface slope, x-direction strtlty , & ! stress due to sea surface slope, y-direction strintx , & ! divergence of internal ice stress, x (N/m^2) strinty , & ! divergence of internal ice stress, y (N/m^2) taubxN , & ! seabed stress (x) at N points (N/m^2) taubyN , & ! seabed stress (y) at N points (N/m^2) - strairxN, & ! stress on ice by air, x-direction at N points - strairyN, & ! stress on ice by air, y-direction at N points - strocnxN, & ! ice-ocean stress, x-direction at N points - strocnyN, & ! ice-ocean stress, y-direction at N points + strairxN, & ! stress on ice by air, x-direction at N points, mapped from strairxT + strairyN, & ! stress on ice by air, y-direction at N points, mapped from strairyT + strocnxN, & ! ice-ocean stress, x-direction at N points, computed in dyn_finish + strocnyN, & ! ice-ocean stress, y-direction at N points, computed in dyn_finish strtltxN, & ! stress due to sea surface slope, x-direction at N points strtltyN, & ! stress due to sea surface slope, y-direction at N points strintxN, & ! divergence of internal ice stress, x at N points (N/m^2) strintyN, & ! divergence of internal ice stress, y at N points (N/m^2) taubxE , & ! 
seabed stress (x) at E points (N/m^2) taubyE , & ! seabed stress (y) at E points (N/m^2) - strairxE, & ! stress on ice by air, x-direction at E points - strairyE, & ! stress on ice by air, y-direction at E points - strocnxE, & ! ice-ocean stress, x-direction at E points - strocnyE, & ! ice-ocean stress, y-direction at E points + strairxE, & ! stress on ice by air, x-direction at E points, mapped from strairxT + strairyE, & ! stress on ice by air, y-direction at E points, mapped from strairyT + strocnxE, & ! ice-ocean stress, x-direction at E points, computed in dyn_finish + strocnyE, & ! ice-ocean stress, y-direction at E points, computed in dyn_finish strtltxE, & ! stress due to sea surface slope, x-direction at E points strtltyE, & ! stress due to sea surface slope, y-direction at E points strintxE, & ! divergence of internal ice stress, x at E points (N/m^2) @@ -135,7 +125,7 @@ module ice_flux stressp_1, stressp_2, stressp_3, stressp_4 , & ! sigma11+sigma22 stressm_1, stressm_2, stressm_3, stressm_4 , & ! sigma11-sigma22 stress12_1,stress12_2,stress12_3,stress12_4, & ! sigma12 - ! ice stress tensor at U and T locations (grid_system = 'CD') (kg/s^2) + ! ice stress tensor at U and T locations (grid_ice = 'CD') (kg/s^2) stresspT, stressmT, stress12T, & ! sigma11+sigma22, sigma11-sigma22, sigma12 stresspU, stressmU, stress12U ! " @@ -170,9 +160,9 @@ module ice_flux real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & zlvl , & ! atm level height (momentum) (m) zlvs , & ! atm level height (scalar quantities) (m) - uatm , & ! wind velocity components (m/s) - vatm , & - wind , & ! wind speed (m/s) + uatm , & ! wind velocity components (m/s), on grid_atm_dynu + vatm , & ! on grid_atm_dynv + wind , & ! wind speed (m/s) , on grid_atm_dynu potT , & ! air potential temperature (K) Tair , & ! air temperature (K) Qa , & ! 
specific humidity (kg/kg) @@ -374,6 +364,8 @@ module ice_flux !----------------------------------------------------------------- real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & + uatmT , & ! uatm mapped to T grid (m/s) + vatmT , & ! vatm mapped to T grid (m/s) rside , & ! fraction of ice that melts laterally fside , & ! lateral heat flux (W/m^2) fsw , & ! incoming shortwave radiation (W/m^2) @@ -395,7 +387,7 @@ module ice_flux ! subroutine alloc_flux - use ice_grid, only : grid_system + use ice_grid, only : grid_ice integer (int_kind) :: ierr @@ -547,6 +539,8 @@ subroutine alloc_flux fswthru_ai (nx_block,ny_block,max_blocks), & ! shortwave penetrating to ocean (W/m^2) fresh_da (nx_block,ny_block,max_blocks), & ! fresh water flux to ocean due to data assim (kg/m^2/s) fsalt_da (nx_block,ny_block,max_blocks), & ! salt flux to ocean due to data assimilation(kg/m^2/s) + uatmT (nx_block,ny_block,max_blocks), & ! uatm mapped to T grid + vatmT (nx_block,ny_block,max_blocks), & ! vatm mapped to T grid rside (nx_block,ny_block,max_blocks), & ! fraction of ice that melts laterally fside (nx_block,ny_block,max_blocks), & ! lateral melt rate (W/m^2) fsw (nx_block,ny_block,max_blocks), & ! incoming shortwave radiation (W/m^2) @@ -586,14 +580,8 @@ subroutine alloc_flux stat=ierr) if (ierr/=0) call abort_ice('(alloc_flux): Out of memory') - if (grid_system == "CD") & + if (grid_ice == "CD") & allocate( & - straxN (nx_block,ny_block,max_blocks), & ! wind stress components (N/m^2) - strayN (nx_block,ny_block,max_blocks), & ! - uocnN (nx_block,ny_block,max_blocks), & ! ocean current, x-direction (m/s) - vocnN (nx_block,ny_block,max_blocks), & ! ocean current, y-direction (m/s) - ss_tltxN (nx_block,ny_block,max_blocks), & ! sea surface slope, x-direction (m/m) - ss_tltyN (nx_block,ny_block,max_blocks), & ! sea surface slope, y-direction taubxN (nx_block,ny_block,max_blocks), & ! seabed stress (x) at N points (N/m^2) taubyN (nx_block,ny_block,max_blocks), & ! 
seabed stress (y) at N points (N/m^2) strairxN (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction at N points @@ -607,12 +595,6 @@ subroutine alloc_flux icenmask (nx_block,ny_block,max_blocks), & ! ice extent mask (N-cell) fmN (nx_block,ny_block,max_blocks), & ! Coriolis param. * mass in N-cell (kg/s) TbN (nx_block,ny_block,max_blocks), & ! factor for seabed stress (landfast ice) - straxE (nx_block,ny_block,max_blocks), & ! wind stress components (N/m^2) - strayE (nx_block,ny_block,max_blocks), & ! - uocnE (nx_block,ny_block,max_blocks), & ! ocean current, x-direction (m/s) - vocnE (nx_block,ny_block,max_blocks), & ! ocean current, y-direction (m/s) - ss_tltxE (nx_block,ny_block,max_blocks), & ! sea surface slope, x-direction (m/m) - ss_tltyE (nx_block,ny_block,max_blocks), & ! sea surface slope, y-direction taubxE (nx_block,ny_block,max_blocks), & ! seabed stress (x) at E points (N/m^2) taubyE (nx_block,ny_block,max_blocks), & ! seabed stress (y) at E points (N/m^2) strairxE (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction at E points @@ -650,7 +632,7 @@ subroutine init_coupler_flux use ice_flux_bgc, only: flux_bio_atm, flux_bio, faero_atm, fiso_atm, & fnit, famm, fsil, fdmsp, fdms, fhum, fdust, falgalN, & fdoc, fdon, fdic, ffed, ffep - use ice_grid, only: bathymetry, grid_system + use ice_grid, only: bathymetry, grid_ice integer (kind=int_kind) :: n @@ -755,17 +737,6 @@ subroutine init_coupler_flux frzmlt_init(:,:,:) = c0 ! freezing/melting potential (W/m^2) sss (:,:,:) = 34.0_dbl_kind ! sea surface salinity (ppt) - if (grid_system == 'CD') then - ss_tltxN(:,:,:) = c0 ! sea surface tilt (m/m) - ss_tltyN(:,:,:) = c0 - ss_tltxE(:,:,:) = c0 ! sea surface tilt (m/m) - ss_tltyE(:,:,:) = c0 - uocnN (:,:,:) = c0 ! surface ocean currents (m/s) - vocnN (:,:,:) = c0 - uocnE (:,:,:) = c0 ! 
surface ocean currents (m/s) - vocnE (:,:,:) = c0 - endif - do iblk = 1, size(Tf,3) do j = 1, size(Tf,2) do i = 1, size(Tf,1) diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90 index f3b749f77..013915683 100755 --- a/cicecore/cicedynB/general/ice_forcing.F90 +++ b/cicecore/cicedynB/general/ice_forcing.F90 @@ -516,7 +516,7 @@ subroutine init_forcing_ocn(dt) elseif (trim(ocn_data_type) == 'hycom') then call ocn_data_hycom_init - elseif (trim(atm_data_type) == 'box2001') then + elseif (trim(ocn_data_type) == 'box2001') then call box2001_data_ocn ! uniform forcing options @@ -751,7 +751,7 @@ subroutine get_forcing_ocn (dt) elseif (trim(ocn_data_type) == 'hycom') then ! call ocn_data_hycom(dt) !MHRI: NOT IMPLEMENTED YET - elseif (trim(atm_data_type) == 'box2001') then + elseif (trim(ocn_data_type) == 'box2001') then call box2001_data_ocn ! uniform forcing options elseif (trim(ocn_data_type) == 'uniform_northeast') then @@ -4110,8 +4110,8 @@ subroutine ocn_data_ncar_init_3D work1(:,:,:) = ocn_frc_m(:,:,:,n ,m) work2(:,:,:) = ocn_frc_m(:,:,:,n+1,m) - call grid_average_X2Y('T2UF',work1,ocn_frc_m(:,:,:,n ,m)) - call grid_average_X2Y('T2UF',work2,ocn_frc_m(:,:,:,n+1,m)) + call grid_average_X2Y('F',work1,'T',ocn_frc_m(:,:,:,n ,m),'U') + call grid_average_X2Y('F',work2,'T',ocn_frc_m(:,:,:,n+1,m),'U') enddo ! month loop enddo ! field loop @@ -4368,6 +4368,9 @@ subroutine ocn_data_hadgem(dt) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: & sstdat ! data value toward which SST is restored + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1 ! temporary array + real (kind=dbl_kind) :: workx, worky logical (kind=log_kind) :: readm @@ -4512,8 +4515,11 @@ subroutine ocn_data_hadgem(dt) ! Interpolate to U grid !----------------------------------------------------------------- - call grid_average_X2Y('T2UF',uocn) - call grid_average_X2Y('T2UF',vocn) + ! 
tcraig, this is now computed in dynamics for consistency + !work1 = uocn + !call grid_average_X2Y('F',work1,'T',uocn,'U') + !work1 = vocn + !call grid_average_X2Y('F',work1,'T',vocn,'U') endif ! ocn_data_type = hadgem_sst_uvocn @@ -5316,7 +5322,7 @@ subroutine box2001_data_atm call icepack_query_parameters(pi_out=pi, pi2_out=pi2, puny_out=puny) call icepack_query_parameters(secday_out=secday) - call grid_average_X2Y('T2UF',aice, aiu) + call grid_average_X2Y('F',aice,'T',aiu,'U') period = c4*secday @@ -5443,7 +5449,7 @@ subroutine uniform_data_atm(dir,spd) use ice_flux, only: uatm, vatm, wind, rhoa, strax, stray character(len=*), intent(in) :: dir - real(kind=dbl_kind), intent(in), optional :: spd ! speed for test + real(kind=dbl_kind), intent(in), optional :: spd ! velocity ! local parameters @@ -5509,7 +5515,7 @@ subroutine uniform_data_ocn(dir,spd) character(len=*), intent(in) :: dir - real(kind=dbl_kind), intent(in), optional :: spd ! speed for test + real(kind=dbl_kind), intent(in), optional :: spd ! velocity ! 
local parameters diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 95223d7ae..3292f6274 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -99,7 +99,10 @@ subroutine input_data use ice_grid, only: grid_file, gridcpl_file, kmt_file, & bathymetry_file, use_bathymetry, & bathymetry_format, kmt_type, & - grid_type, grid_format, grid_system, & + grid_type, grid_format, & + grid_ice, grid_ice_thrm, grid_ice_dynu, grid_ice_dynv, & + grid_ocn, grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv, & + grid_atm, grid_atm_thrm, grid_atm_dynu, grid_atm_dynv, & dxrect, dyrect, & pgl_global_ext use ice_dyn_shared, only: ndte, kdyn, revised_evp, yield_curve, & @@ -185,7 +188,8 @@ subroutine input_data bathymetry_file, use_bathymetry, nfsd, bathymetry_format, & ncat, nilyr, nslyr, nblyr, & kcatbound, gridcpl_file, dxrect, dyrect, & - close_boundaries, orca_halogrid, grid_system, kmt_type + close_boundaries, orca_halogrid, grid_ice, kmt_type, & + grid_atm, grid_ocn namelist /tracer_nml/ & tr_iage, restart_age, & @@ -325,8 +329,10 @@ subroutine input_data ice_ic = 'default' ! latitude and sst-dependent grid_format = 'bin' ! file format ('bin'=binary or 'nc'=netcdf) grid_type = 'rectangular' ! define rectangular grid internally - grid_system = 'B' ! underlying grid system grid_file = 'unknown_grid_file' + grid_ice = 'B' ! underlying grid system + grid_atm = 'A' ! underlying atm forcing/coupling grid + grid_ocn = 'A' ! underlying ocn forcing/coupling grid gridcpl_file = 'unknown_gridcpl_file' orca_halogrid = .false. ! 
orca haloed grid bathymetry_file = 'unknown_bathymetry_file' @@ -700,7 +706,9 @@ subroutine input_data call broadcast_scalar(dyrect, master_task) call broadcast_scalar(close_boundaries, master_task) call broadcast_scalar(grid_type, master_task) - call broadcast_scalar(grid_system, master_task) + call broadcast_scalar(grid_ice, master_task) + call broadcast_scalar(grid_ocn, master_task) + call broadcast_scalar(grid_atm, master_task) call broadcast_scalar(grid_file, master_task) call broadcast_scalar(gridcpl_file, master_task) call broadcast_scalar(orca_halogrid, master_task) @@ -1336,6 +1344,68 @@ subroutine input_data wave_spec = .false. if (tr_fsd .and. (trim(wave_spec_type) /= 'none')) wave_spec = .true. + ! compute grid locations for thermo, u and v fields + + grid_ice_thrm = 'T' + if (grid_ice == 'A') then + grid_ice_dynu = 'T' + grid_ice_dynv = 'T' + elseif (grid_ice == 'B') then + grid_ice_dynu = 'U' + grid_ice_dynv = 'U' + elseif (grid_ice == 'C') then + grid_ice_dynu = 'E' + grid_ice_dynv = 'N' + elseif (grid_ice == 'CD') then + grid_ice_dynu = 'NE' + grid_ice_dynv = 'NE' + else + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: unknown grid_ice: '//trim(grid_ice) + endif + abort_list = trim(abort_list)//":64" + endif + + grid_atm_thrm = 'T' + if (grid_atm == 'A') then + grid_atm_dynu = 'T' + grid_atm_dynv = 'T' + elseif (grid_atm == 'B') then + grid_atm_dynu = 'U' + grid_atm_dynv = 'U' + elseif (grid_atm == 'C') then + grid_atm_dynu = 'E' + grid_atm_dynv = 'N' + elseif (grid_atm == 'CD') then + grid_atm_dynu = 'NE' + grid_atm_dynv = 'NE' + else + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: unknown grid_atm: '//trim(grid_atm) + endif + abort_list = trim(abort_list)//":65" + endif + + grid_ocn_thrm = 'T' + if (grid_ocn == 'A') then + grid_ocn_dynu = 'T' + grid_ocn_dynv = 'T' + elseif (grid_ocn == 'B') then + grid_ocn_dynu = 'U' + grid_ocn_dynv = 'U' + elseif (grid_ocn == 'C') then + grid_ocn_dynu = 'E' + 
grid_ocn_dynv = 'N' + elseif (grid_ocn == 'CD') then + grid_ocn_dynu = 'NE' + grid_ocn_dynv = 'NE' + else + if (my_task == master_task) then + write(nu_diag,*) subname//' ERROR: unknown grid_ocn: '//trim(grid_ocn) + endif + abort_list = trim(abort_list)//":66" + endif + !----------------------------------------------------------------- ! spew !----------------------------------------------------------------- @@ -1367,7 +1437,18 @@ subroutine input_data if (trim(grid_type) == 'displaced_pole') tmpstr2 = ' : user-defined grid with rotated north pole' if (trim(grid_type) == 'tripole') tmpstr2 = ' : user-defined grid with northern hemisphere zipper' write(nu_diag,1030) ' grid_type = ',trim(grid_type),trim(tmpstr2) - write(nu_diag,1030) ' grid_system = ',trim(grid_system) + write(nu_diag,1030) ' grid_ice = ',trim(grid_ice) + write(nu_diag,1030) ' grid_ice_thrm = ',trim(grid_ice_thrm) + write(nu_diag,1030) ' grid_ice_dynu = ',trim(grid_ice_dynu) + write(nu_diag,1030) ' grid_ice_dynv = ',trim(grid_ice_dynv) + write(nu_diag,1030) ' grid_atm = ',trim(grid_atm) + write(nu_diag,1030) ' grid_atm_thrm = ',trim(grid_atm_thrm) + write(nu_diag,1030) ' grid_atm_dynu = ',trim(grid_atm_dynu) + write(nu_diag,1030) ' grid_atm_dynv = ',trim(grid_atm_dynv) + write(nu_diag,1030) ' grid_ocn = ',trim(grid_ocn) + write(nu_diag,1030) ' grid_ocn_thrm = ',trim(grid_ocn_thrm) + write(nu_diag,1030) ' grid_ocn_dynu = ',trim(grid_ocn_dynu) + write(nu_diag,1030) ' grid_ocn_dynv = ',trim(grid_ocn_dynv) write(nu_diag,1030) ' kmt_type = ',trim(kmt_type) if (trim(grid_type) /= 'rectangular') then if (use_bathymetry) then @@ -2036,9 +2117,9 @@ subroutine input_data abort_list = trim(abort_list)//":20" endif - if (grid_system /= 'B' .and. & - grid_system /= 'CD' ) then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown grid_system=',trim(grid_system) + if (grid_ice /= 'B' .and. 
& + grid_ice /= 'CD' ) then + if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown grid_ice=',trim(grid_ice) abort_list = trim(abort_list)//":26" endif @@ -2137,7 +2218,7 @@ subroutine init_state use ice_domain, only: nblocks, blocks_ice, halo_info use ice_domain_size, only: ncat, nilyr, nslyr, n_iso, n_aero, nfsd use ice_flux, only: sst, Tf, Tair, salinz, Tmltz - use ice_grid, only: tmask, ULON, TLAT, grid_system, grid_average_X2Y + use ice_grid, only: tmask, ULON, TLAT, grid_ice, grid_average_X2Y use ice_boundary, only: ice_HaloUpdate use ice_forcing, only: ice_data_type use ice_constants, only: field_loc_Nface, field_loc_Eface, field_type_scalar @@ -2376,14 +2457,14 @@ subroutine init_state vicen, vsnon, & ntrcr, trcrn) - if (trim(grid_system) == 'CD') then + if (trim(grid_ice) == 'CD') then ! move from B-grid to CD-grid for boxslotcyl test if (trim(ice_data_type) == 'boxslotcyl') then - call grid_average_X2Y('U2NS',uvel,uvelN) - call grid_average_X2Y('U2NS',vvel,vvelN) - call grid_average_X2Y('U2ES',uvel,uvelE) - call grid_average_X2Y('U2ES',vvel,vvelE) + call grid_average_X2Y('S',uvel,'U',uvelN,'N') + call grid_average_X2Y('S',vvel,'U',vvelN,'N') + call grid_average_X2Y('S',uvel,'U',uvelE,'E') + call grid_average_X2Y('S',vvel,'U',vvelE,'E') endif ! 
Halo update on North, East faces diff --git a/cicecore/cicedynB/general/ice_step_mod.F90 b/cicecore/cicedynB/general/ice_step_mod.F90 index 976e95361..46a9c9389 100644 --- a/cicecore/cicedynB/general/ice_step_mod.F90 +++ b/cicecore/cicedynB/general/ice_step_mod.F90 @@ -37,7 +37,7 @@ module ice_step_mod public :: step_therm1, step_therm2, step_dyn_horiz, step_dyn_ridge, & step_snow, prep_radiation, step_radiation, ocean_mixed_layer, & - update_state, biogeochemistry, save_init, step_dyn_wave + update_state, biogeochemistry, step_dyn_wave, step_prep !======================================================================= @@ -51,6 +51,8 @@ subroutine save_init use ice_state, only: aice, aicen, aice_init, aicen_init, & vicen, vicen_init, vsnon, vsnon_init + character(len=*), parameter :: subname = '(save_init)' + !----------------------------------------------------------------- ! Save the ice area passed to the coupler (so that history fields ! can be made consistent with coupler fields). @@ -64,6 +66,27 @@ subroutine save_init end subroutine save_init +!======================================================================= + + subroutine step_prep +! prep for step, called outside nblock loop + + use ice_flux, only: uatm, vatm, uatmT, vatmT + use ice_grid, only: grid_atm_dynu, grid_atm_dynv, grid_average_X2Y + + character(len=*), parameter :: subname = '(step_prep)' + + ! Save initial state + + call save_init + + ! Compute uatmT, vatmT + + call grid_average_X2Y('S',uatm,grid_atm_dynu,uatmT,'T') + call grid_average_X2Y('S',vatm,grid_atm_dynv,vatmT,'T') + + end subroutine step_prep + !======================================================================= ! ! Scales radiation fields computed on the previous time step. 
@@ -170,7 +193,7 @@ subroutine step_therm1 (dt, iblk) use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, n_iso, n_aero use ice_flux, only: frzmlt, sst, Tf, strocnxT, strocnyT, rside, fbot, Tbot, Tsnice, & - meltsn, melttn, meltbn, congeln, snoicen, uatm, vatm, fside, & + meltsn, melttn, meltbn, congeln, snoicen, uatmT, vatmT, fside, & wind, rhoa, potT, Qa, zlvl, zlvs, strax, stray, flatn, fsensn, fsurfn, fcondtopn, & flw, fsnow, fpond, sss, mlt_onset, frz_onset, fcondbotn, fcondbot, fsloss, & frain, Tair, strairxT, strairyT, fsurf, fcondtop, fsens, & @@ -374,8 +397,8 @@ subroutine step_therm1 (dt, iblk) aeroice = aeroice (:,:,:), & isosno = isosno (:,:), & isoice = isoice (:,:), & - uatm = uatm (i,j, iblk), & - vatm = vatm (i,j, iblk), & + uatm = uatmT (i,j, iblk), & + vatm = vatmT (i,j, iblk), & wind = wind (i,j, iblk), & zlvl = zlvl (i,j, iblk), & zlvs = zlvs (i,j, iblk), & @@ -1381,7 +1404,7 @@ subroutine ocean_mixed_layer (dt, iblk) use ice_arrays_column, only: Cdn_atm, Cdn_atm_ratio use ice_blocks, only: nx_block, ny_block - use ice_flux, only: sst, Tf, Qa, uatm, vatm, wind, potT, rhoa, zlvl, & + use ice_flux, only: sst, Tf, Qa, uatmT, vatmT, wind, potT, rhoa, zlvl, & frzmlt, fhocn, fswthru, flw, flwout_ocn, fsens_ocn, flat_ocn, evap_ocn, & alvdr_ocn, alidr_ocn, alvdf_ocn, alidf_ocn, swidf, swvdf, swidr, swvdr, & qdp, hmix, strairx_ocn, strairy_ocn, Tref_ocn, Qref_ocn @@ -1464,8 +1487,8 @@ subroutine ocean_mixed_layer (dt, iblk) call icepack_atm_boundary(sfctype = 'ocn', & Tsf = sst (i,j,iblk), & potT = potT (i,j,iblk), & - uatm = uatm (i,j,iblk), & - vatm = vatm (i,j,iblk), & + uatm = uatmT (i,j,iblk), & + vatm = vatmT (i,j,iblk), & wind = wind (i,j,iblk), & zlvl = zlvl (i,j,iblk), & Qa = Qa (i,j,iblk), & diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 9dfc7b27b..ecca13bd9 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ 
b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -57,7 +57,18 @@ module ice_grid bathymetry_file, & ! input bathymetry for seabed stress bathymetry_format, & ! bathymetry file format (default or pop) grid_spacing , & ! default of 30.e3m or set by user in namelist - grid_system , & ! Underlying grid structure (i.e. B, C, CD, etc) + grid_ice , & ! Underlying model grid structure (A, B, C, CD) + grid_ice_thrm, & ! ice model grid for thermo fields (T, U, N, E) + grid_ice_dynu, & ! ice model grid for dyn U fields (T, U, N, E) + grid_ice_dynv, & ! ice model grid for dyn V fields (T, U, N, E) + grid_atm , & ! atmos forcing grid structure (A, B, C, CD) + grid_atm_thrm, & ! atmos forcing grid for thermo fields (T, U, N, E) + grid_atm_dynu, & ! atmos forcing grid for dyn U fields (T, U, N, E) + grid_atm_dynv, & ! atmos forcing grid for dyn V fields (T, U, N, E) + grid_ocn , & ! ocean forcing grid structure (A, B, C, CD) + grid_ocn_thrm, & ! ocean forcing grid for thermo fields (T, U, N, E) + grid_ocn_dynu, & ! ocean forcing grid for dyn U fields (T, U, N, E) + grid_ocn_dynv, & ! ocean forcing grid for dyn V fields (T, U, N, E) grid_type ! current options are rectangular (default), ! displaced_pole, tripole, regional @@ -180,6 +191,12 @@ module ice_grid logical (kind=log_kind), private :: & l_readCenter ! 
If anglet exist in grid file read it otherwise calculate it + interface grid_average_X2Y + module procedure grid_average_X2Y_base , & + grid_average_X2Y_userwghts, & + grid_average_X2Y_NEversion + end interface + !======================================================================= contains @@ -267,7 +284,7 @@ subroutine alloc_grid stat=ierr) if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory') - if (grid_system == 'CD') then + if (grid_ice == 'CD') then allocate( & ratiodxN (nx_block,ny_block,max_blocks), & ratiodyE (nx_block,ny_block,max_blocks), & @@ -534,7 +551,7 @@ subroutine init_grid2 enddo enddo - if (grid_system == 'CD') then + if (grid_ice == 'CD') then do j = jlo, jhi do i = ilo, ihi ratiodxN (i,j,iblk) = - dxn(i+1,j ,iblk) / dxn(i,j,iblk) @@ -2349,94 +2366,339 @@ end subroutine Tlatlon !======================================================================= ! Shifts quantities from one grid to another +! Constructs the shift based on the grid ! NOTE: Input array includes ghost cells that must be updated before ! calling this routine. ! ! author: T. Craig - subroutine grid_average_X2Y(X2Y,work1,work2) + subroutine grid_average_X2Y_base(type,work1,grid1,work2,grid2) character(len=*) , intent(in) :: & - X2Y + type, grid1, grid2 - real (kind=dbl_kind), intent(inout) :: & + real (kind=dbl_kind), intent(in) :: & work1(:,:,:) - real (kind=dbl_kind), intent(out), optional :: & + real (kind=dbl_kind), intent(out) :: & work2(:,:,:) ! local variables - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work2tmp + character(len=16) :: X2Y + + character(len=*), parameter :: subname = '(grid_average_X2Y_base)' + + if (trim(grid1) == trim(grid2)) then + work2 = work1 + else + X2Y = trim(grid1)//'2'//trim(grid2)//trim(type) + call grid_average_X2Y_1(X2Y,work1,work2) + endif + + end subroutine grid_average_X2Y_base + +!======================================================================= + +! Shifts quantities from one grid to another +! 
NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2Y_userwghts(type,work1,grid1,wght1,mask1,work2,grid2) + + character(len=*) , intent(in) :: & + type, grid1, grid2 + + real (kind=dbl_kind), intent(in) :: & + work1(:,:,:), & + wght1(:,:,:), & + mask1(:,:,:) + + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) + + ! local variables + + character(len=16) :: X2Y + + character(len=*), parameter :: subname = '(grid_average_X2Y_userwghts)' + + if (trim(grid1) == trim(grid2)) then + work2 = work1 + else + X2Y = trim(grid1)//'2'//trim(grid2)//trim(type) + call grid_average_X2Y_1f(X2Y,work1,wght1,mask1,work2) + endif + + end subroutine grid_average_X2Y_userwghts + +!======================================================================= + +! Shifts quantities from one grid to another +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2Y_NEversion(type,work1a,grid1a,work1b,grid1b,work2,grid2) + + character(len=*) , intent(in) :: & + type, grid1a, grid1b, grid2 + + real (kind=dbl_kind), intent(in) :: & + work1a(:,:,:), work1b(:,:,:) - character(len=*), parameter :: subname = '(grid_average_X2Y)' + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) + + ! local variables + + character(len=16) :: X2Y + + character(len=*), parameter :: subname = '(grid_average_X2Y_NEversion)' + + X2Y = trim(grid1a)//trim(grid1b)//'2'//trim(grid2)//trim(type) + + select case (trim(X2Y)) + + ! 
state masked + case('NE2US') + call grid_average_X2YS_2('NE2US',work1a,narea,npm,work1b,earea,epm,work2) + case('EN2US') + call grid_average_X2YS_2('NE2US',work1b,narea,npm,work1a,earea,epm,work2) + case('NE2TS') + call grid_average_X2YS_2('NE2TS',work1a,narea,npm,work1b,earea,epm,work2) + case('EN2TS') + call grid_average_X2YS_2('NE2TS',work1b,narea,npm,work1a,earea,epm,work2) + + case default + call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) + end select + + end subroutine grid_average_X2Y_NEversion + +!======================================================================= + +! Shifts quantities from one grid to another +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2Y_1(X2Y,work1,work2) + + character(len=*) , intent(in) :: & + X2Y + + real (kind=dbl_kind), intent(in) :: & + work1(:,:,:) + + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) + + ! local variables + + character(len=*), parameter :: subname = '(grid_average_X2Y_1)' select case (trim(X2Y)) ! 
flux unmasked case('T2UF') - call grid_average_X2YF('NE',work1,tarea,work2tmp,uarea) + call grid_average_X2YF('NE',work1,tarea,work2,uarea) case('T2EF') - call grid_average_X2YF('E' ,work1,tarea,work2tmp,earea) + call grid_average_X2YF('E' ,work1,tarea,work2,earea) case('T2NF') - call grid_average_X2YF('N' ,work1,tarea,work2tmp,narea) + call grid_average_X2YF('N' ,work1,tarea,work2,narea) case('U2TF') - call grid_average_X2YF('SW',work1,uarea,work2tmp,tarea) + call grid_average_X2YF('SW',work1,uarea,work2,tarea) case('U2EF') - call grid_average_X2YF('S' ,work1,uarea,work2tmp,earea) + call grid_average_X2YF('S' ,work1,uarea,work2,earea) case('U2NF') - call grid_average_X2YF('W' ,work1,uarea,work2tmp,narea) + call grid_average_X2YF('W' ,work1,uarea,work2,narea) case('E2TF') - call grid_average_X2YF('W' ,work1,earea,work2tmp,tarea) + call grid_average_X2YF('W' ,work1,earea,work2,tarea) case('E2UF') - call grid_average_X2YF('N' ,work1,earea,work2tmp,uarea) + call grid_average_X2YF('N' ,work1,earea,work2,uarea) case('E2NF') - call grid_average_X2YF('NW',work1,earea,work2tmp,narea) + call grid_average_X2YF('NW',work1,earea,work2,narea) case('N2TF') - call grid_average_X2YF('S' ,work1,narea,work2tmp,tarea) + call grid_average_X2YF('S' ,work1,narea,work2,tarea) case('N2UF') - call grid_average_X2YF('E' ,work1,narea,work2tmp,uarea) + call grid_average_X2YF('E' ,work1,narea,work2,uarea) case('N2EF') - call grid_average_X2YF('SE',work1,narea,work2tmp,earea) + call grid_average_X2YF('SE',work1,narea,work2,earea) ! 
state masked case('T2US') - call grid_average_X2YS('NE',work1,tarea,hm ,work2tmp) + call grid_average_X2YS('NE',work1,tarea,hm ,work2) case('T2ES') - call grid_average_X2YS('E' ,work1,tarea,hm ,work2tmp) + call grid_average_X2YS('E' ,work1,tarea,hm ,work2) case('T2NS') - call grid_average_X2YS('N' ,work1,tarea,hm ,work2tmp) + call grid_average_X2YS('N' ,work1,tarea,hm ,work2) case('U2TS') - call grid_average_X2YS('SW',work1,uarea,uvm,work2tmp) + call grid_average_X2YS('SW',work1,uarea,uvm,work2) case('U2ES') - call grid_average_X2YS('S' ,work1,uarea,uvm,work2tmp) + call grid_average_X2YS('S' ,work1,uarea,uvm,work2) case('U2NS') - call grid_average_X2YS('W' ,work1,uarea,uvm,work2tmp) + call grid_average_X2YS('W' ,work1,uarea,uvm,work2) case('E2TS') - call grid_average_X2YS('W' ,work1,earea,epm,work2tmp) + call grid_average_X2YS('W' ,work1,earea,epm,work2) case('E2US') - call grid_average_X2YS('N' ,work1,earea,epm,work2tmp) + call grid_average_X2YS('N' ,work1,earea,epm,work2) case('E2NS') - call grid_average_X2YS('NW',work1,earea,epm,work2tmp) + call grid_average_X2YS('NW',work1,earea,epm,work2) case('N2TS') - call grid_average_X2YS('S' ,work1,narea,npm,work2tmp) + call grid_average_X2YS('S' ,work1,narea,npm,work2) case('N2US') - call grid_average_X2YS('E' ,work1,narea,npm,work2tmp) + call grid_average_X2YS('E' ,work1,narea,npm,work2) case('N2ES') - call grid_average_X2YS('SE',work1,narea,npm,work2tmp) + call grid_average_X2YS('SE',work1,narea,npm,work2) + + ! 
state unmasked + case('T2UA') + call grid_average_X2YA('NE',work1,tarea,work2) + case('T2EA') + call grid_average_X2YA('E' ,work1,tarea,work2) + case('T2NA') + call grid_average_X2YA('N' ,work1,tarea,work2) + case('U2TA') + call grid_average_X2YA('SW',work1,uarea,work2) + case('U2EA') + call grid_average_X2YA('S' ,work1,uarea,work2) + case('U2NA') + call grid_average_X2YA('W' ,work1,uarea,work2) + case('E2TA') + call grid_average_X2YA('W' ,work1,earea,work2) + case('E2UA') + call grid_average_X2YA('N' ,work1,earea,work2) + case('E2NA') + call grid_average_X2YA('NW',work1,earea,work2) + case('N2TA') + call grid_average_X2YA('S' ,work1,narea,work2) + case('N2UA') + call grid_average_X2YA('E' ,work1,narea,work2) + case('N2EA') + call grid_average_X2YA('SE',work1,narea,work2) case default call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) end select - if (present(work2)) then - work2 = work2tmp - else - work1 = work2tmp - endif + end subroutine grid_average_X2Y_1 - end subroutine grid_average_X2Y +!======================================================================= + +! Shifts quantities from one grid to another +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2Y_1f(X2Y,work1,wght1,mask1,work2) + + character(len=*) , intent(in) :: & + X2Y + + real (kind=dbl_kind), intent(in) :: & + work1(:,:,:), & + wght1(:,:,:), & + mask1(:,:,:) + + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) + + ! local variables + + character(len=*), parameter :: subname = '(grid_average_X2Y_1f)' + + select case (trim(X2Y)) + +! don't support these for now, requires extra destination wght +! ! flux unmasked +! case('T2UF') +! call grid_average_X2YF('NE',work1,tarea,work2,uarea) +! case('T2EF') +! call grid_average_X2YF('E' ,work1,tarea,work2,earea) +! case('T2NF') +! call grid_average_X2YF('N' ,work1,tarea,work2,narea) +! case('U2TF') +! 
call grid_average_X2YF('SW',work1,uarea,work2,tarea) +! case('U2EF') +! call grid_average_X2YF('S' ,work1,uarea,work2,earea) +! case('U2NF') +! call grid_average_X2YF('W' ,work1,uarea,work2,narea) +! case('E2TF') +! call grid_average_X2YF('W' ,work1,earea,work2,tarea) +! case('E2UF') +! call grid_average_X2YF('N' ,work1,earea,work2,uarea) +! case('E2NF') +! call grid_average_X2YF('NW',work1,earea,work2,narea) +! case('N2TF') +! call grid_average_X2YF('S' ,work1,narea,work2,tarea) +! case('N2UF') +! call grid_average_X2YF('E' ,work1,narea,work2,uarea) +! case('N2EF') +! call grid_average_X2YF('SE',work1,narea,work2,earea) + + ! state masked + case('T2US') + call grid_average_X2YS('NE',work1,wght1,mask1,work2) + case('T2ES') + call grid_average_X2YS('E' ,work1,wght1,mask1,work2) + case('T2NS') + call grid_average_X2YS('N' ,work1,wght1,mask1,work2) + case('U2TS') + call grid_average_X2YS('SW',work1,wght1,mask1,work2) + case('U2ES') + call grid_average_X2YS('S' ,work1,wght1,mask1,work2) + case('U2NS') + call grid_average_X2YS('W' ,work1,wght1,mask1,work2) + case('E2TS') + call grid_average_X2YS('W' ,work1,wght1,mask1,work2) + case('E2US') + call grid_average_X2YS('N' ,work1,wght1,mask1,work2) + case('E2NS') + call grid_average_X2YS('NW',work1,wght1,mask1,work2) + case('N2TS') + call grid_average_X2YS('S' ,work1,wght1,mask1,work2) + case('N2US') + call grid_average_X2YS('E' ,work1,wght1,mask1,work2) + case('N2ES') + call grid_average_X2YS('SE',work1,wght1,mask1,work2) + + ! 
state unmasked + case('T2UA') + call grid_average_X2YA('NE',work1,wght1,work2) + case('T2EA') + call grid_average_X2YA('E' ,work1,wght1,work2) + case('T2NA') + call grid_average_X2YA('N' ,work1,wght1,work2) + case('U2TA') + call grid_average_X2YA('SW',work1,wght1,work2) + case('U2EA') + call grid_average_X2YA('S' ,work1,wght1,work2) + case('U2NA') + call grid_average_X2YA('W' ,work1,wght1,work2) + case('E2TA') + call grid_average_X2YA('W' ,work1,wght1,work2) + case('E2UA') + call grid_average_X2YA('N' ,work1,wght1,work2) + case('E2NA') + call grid_average_X2YA('NW',work1,wght1,work2) + case('N2TA') + call grid_average_X2YA('S' ,work1,wght1,work2) + case('N2UA') + call grid_average_X2YA('E' ,work1,wght1,work2) + case('N2EA') + call grid_average_X2YA('SE',work1,wght1,work2) + + case default + call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) + end select + + end subroutine grid_average_X2Y_1f !======================================================================= ! Shifts quantities from one grid to another @@ -2446,7 +2708,7 @@ end subroutine grid_average_X2Y ! ! author: T. 
Craig - subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) + subroutine grid_average_X2YS(dir,work1,wght1,mask1,work2) use ice_constants, only: c0 @@ -2455,7 +2717,7 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) real (kind=dbl_kind), intent(in) :: & work1(:,:,:), & - area1(:,:,:), & + wght1(:,:,:), & mask1(:,:,:) real (kind=dbl_kind), intent(out) :: & @@ -2489,15 +2751,15 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) jhi = this_block%jhi do j = jlo, jhi do i = ilo, ihi - wtmp = (mask1(i, j, iblk)*area1(i, j, iblk) & - + mask1(i+1,j, iblk)*area1(i+1,j, iblk) & - + mask1(i, j+1,iblk)*area1(i, j+1,iblk) & - + mask1(i+1,j+1,iblk)*area1(i+1,j+1,iblk)) + wtmp = (mask1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i+1,j ,iblk)*wght1(i+1,j ,iblk) & + + mask1(i ,j+1,iblk)*wght1(i ,j+1,iblk) & + + mask1(i+1,j+1,iblk)*wght1(i+1,j+1,iblk)) if (wtmp /= c0) & - work2(i,j,iblk) = (mask1(i, j, iblk)*work1(i, j, iblk)*area1(i, j, iblk) & - + mask1(i+1,j, iblk)*work1(i+1,j, iblk)*area1(i+1,j, iblk) & - + mask1(i, j+1,iblk)*work1(i, j+1,iblk)*area1(i, j+1,iblk) & - + mask1(i+1,j+1,iblk)*work1(i+1,j+1,iblk)*area1(i+1,j+1,iblk)) & + work2(i,j,iblk) = (mask1(i ,j ,iblk)*work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i+1,j ,iblk)*work1(i+1,j ,iblk)*wght1(i+1,j ,iblk) & + + mask1(i ,j+1,iblk)*work1(i ,j+1,iblk)*wght1(i ,j+1,iblk) & + + mask1(i+1,j+1,iblk)*work1(i+1,j+1,iblk)*wght1(i+1,j+1,iblk)) & / wtmp enddo enddo @@ -2514,15 +2776,15 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) jhi = this_block%jhi do j = jlo, jhi do i = ilo, ihi - wtmp = (mask1(i, j, iblk)*area1(i, j, iblk) & - + mask1(i-1,j, iblk)*area1(i-1,j, iblk) & - + mask1(i, j-1,iblk)*area1(i, j-1,iblk) & - + mask1(i-1,j-1,iblk)*area1(i-1,j-1,iblk)) + wtmp = (mask1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + mask1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + mask1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) if (wtmp /= c0) & - work2(i,j,iblk) = (mask1(i, j, 
iblk)*work1(i, j, iblk)*area1(i, j, iblk) & - + mask1(i-1,j, iblk)*work1(i-1,j, iblk)*area1(i-1,j, iblk) & - + mask1(i, j-1,iblk)*work1(i, j-1,iblk)*area1(i, j-1,iblk) & - + mask1(i-1,j-1,iblk)*work1(i-1,j-1,iblk)*area1(i-1,j-1,iblk)) & + work2(i,j,iblk) = (mask1(i ,j ,iblk)*work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i-1,j ,iblk)*work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + mask1(i ,j-1,iblk)*work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + mask1(i-1,j-1,iblk)*work1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) & / wtmp enddo enddo @@ -2539,15 +2801,15 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) jhi = this_block%jhi do j = jlo, jhi do i = ilo, ihi - wtmp = (mask1(i-1,j, iblk)*area1(i-1,j, iblk) & - + mask1(i, j, iblk)*area1(i, j, iblk) & - + mask1(i-1,j+1,iblk)*area1(i-1,j+1,iblk) & - + mask1(i, j+1,iblk)*area1(i ,j+1,iblk)) + wtmp = (mask1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + mask1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i-1,j+1,iblk)*wght1(i-1,j+1,iblk) & + + mask1(i ,j+1,iblk)*wght1(i ,j+1,iblk)) if (wtmp /= c0) & - work2(i,j,iblk) = (mask1(i-1,j, iblk)*work1(i-1,j, iblk)*area1(i-1,j, iblk) & - + mask1(i, j, iblk)*work1(i, j, iblk)*area1(i, j, iblk) & - + mask1(i-1,j+1,iblk)*work1(i-1,j+1,iblk)*area1(i-1,j+1,iblk) & - + mask1(i, j+1,iblk)*work1(i, j+1,iblk)*area1(i ,j+1,iblk)) & + work2(i,j,iblk) = (mask1(i-1,j ,iblk)*work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + mask1(i ,j ,iblk)*work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i-1,j+1,iblk)*work1(i-1,j+1,iblk)*wght1(i-1,j+1,iblk) & + + mask1(i ,j+1,iblk)*work1(i ,j+1,iblk)*wght1(i ,j+1,iblk)) & / wtmp enddo enddo @@ -2564,15 +2826,15 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) jhi = this_block%jhi do j = jlo, jhi do i = ilo, ihi - wtmp = (mask1(i ,j-1,iblk)*area1(i ,j-1,iblk) & - + mask1(i+1,j-1,iblk)*area1(i+1,j-1,iblk) & - + mask1(i ,j ,iblk)*area1(i ,j, iblk) & - + mask1(i+1,j ,iblk)*area1(i+1,j, iblk)) + wtmp = (mask1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + 
mask1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + mask1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) if (wtmp /= c0) & - work2(i,j,iblk) = (mask1(i ,j-1,iblk)*work1(i ,j-1,iblk)*area1(i ,j-1,iblk) & - + mask1(i+1,j-1,iblk)*work1(i+1,j-1,iblk)*area1(i+1,j-1,iblk) & - + mask1(i ,j ,iblk)*work1(i ,j ,iblk)*area1(i ,j, iblk) & - + mask1(i+1,j ,iblk)*work1(i+1,j ,iblk)*area1(i+1,j, iblk)) & + work2(i,j,iblk) = (mask1(i ,j-1,iblk)*work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + mask1(i+1,j-1,iblk)*work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + mask1(i ,j ,iblk)*work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + mask1(i+1,j ,iblk)*work1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) & / wtmp enddo enddo @@ -2589,11 +2851,11 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) jhi = this_block%jhi do j = jlo, jhi do i = ilo, ihi - wtmp = (mask1(i, j,iblk)*area1(i, j,iblk) & - + mask1(i+1,j,iblk)*area1(i+1,j,iblk)) + wtmp = (mask1(i ,j,iblk)*wght1(i ,j,iblk) & + + mask1(i+1,j,iblk)*wght1(i+1,j,iblk)) if (wtmp /= c0) & - work2(i,j,iblk) = (mask1(i, j,iblk)*work1(i, j,iblk)*area1(i, j,iblk) & - + mask1(i+1,j,iblk)*work1(i+1,j,iblk)*area1(i+1,j,iblk)) & + work2(i,j,iblk) = (mask1(i ,j,iblk)*work1(i ,j,iblk)*wght1(i ,j,iblk) & + + mask1(i+1,j,iblk)*work1(i+1,j,iblk)*wght1(i+1,j,iblk)) & / wtmp enddo enddo @@ -2610,11 +2872,11 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) jhi = this_block%jhi do j = jlo, jhi do i = ilo, ihi - wtmp = (mask1(i-1,j,iblk)*area1(i-1,j,iblk) & - + mask1(i, j,iblk)*area1(i, j,iblk)) + wtmp = (mask1(i-1,j,iblk)*wght1(i-1,j,iblk) & + + mask1(i ,j,iblk)*wght1(i ,j,iblk)) if (wtmp /= c0) & - work2(i,j,iblk) = (mask1(i-1,j,iblk)*work1(i-1,j,iblk)*area1(i-1,j,iblk) & - + mask1(i, j,iblk)*work1(i, j,iblk)*area1(i, j,iblk)) & + work2(i,j,iblk) = (mask1(i-1,j,iblk)*work1(i-1,j,iblk)*wght1(i-1,j,iblk) & + + mask1(i ,j,iblk)*work1(i ,j,iblk)*wght1(i ,j,iblk)) & / wtmp enddo enddo @@ -2631,11 +2893,11 @@ subroutine 
grid_average_X2YS(dir,work1,area1,mask1,work2) jhi = this_block%jhi do j = jlo, jhi do i = ilo, ihi - wtmp = (mask1(i,j, iblk)*area1(i,j, iblk) & - + mask1(i,j+1,iblk)*area1(i,j+1,iblk)) + wtmp = (mask1(i,j ,iblk)*wght1(i,j ,iblk) & + + mask1(i,j+1,iblk)*wght1(i,j+1,iblk)) if (wtmp /= c0) & - work2(i,j,iblk) = (mask1(i,j, iblk)*work1(i,j, iblk)*area1(i,j, iblk) & - + mask1(i,j+1,iblk)*work1(i,j+1,iblk)*area1(i,j+1,iblk)) & + work2(i,j,iblk) = (mask1(i,j ,iblk)*work1(i,j ,iblk)*wght1(i,j ,iblk) & + + mask1(i,j+1,iblk)*work1(i,j+1,iblk)*wght1(i,j+1,iblk)) & / wtmp enddo enddo @@ -2652,11 +2914,11 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) jhi = this_block%jhi do j = jlo, jhi do i = ilo, ihi - wtmp = (mask1(i,j-1,iblk)*area1(i,j-1,iblk) & - + mask1(i,j, iblk)*area1(i,j, iblk)) + wtmp = (mask1(i,j-1,iblk)*wght1(i,j-1,iblk) & + + mask1(i,j ,iblk)*wght1(i,j ,iblk)) if (wtmp /= c0) & - work2(i,j,iblk) = (mask1(i,j-1,iblk)*work1(i,j-1,iblk)*area1(i,j-1,iblk) & - + mask1(i,j, iblk)*work1(i,j, iblk)*area1(i,j, iblk)) & + work2(i,j,iblk) = (mask1(i,j-1,iblk)*work1(i,j-1,iblk)*wght1(i,j-1,iblk) & + + mask1(i,j ,iblk)*work1(i,j ,iblk)*wght1(i,j ,iblk)) & / wtmp enddo enddo @@ -2669,6 +2931,236 @@ subroutine grid_average_X2YS(dir,work1,area1,mask1,work2) end subroutine grid_average_X2YS +!======================================================================= +! Shifts quantities from one grid to another +! State unmasked version, simple weighted averager +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2YA(dir,work1,wght1,work2) + + use ice_constants, only: c0 + + character(len=*) , intent(in) :: & + dir + + real (kind=dbl_kind), intent(in) :: & + work1(:,:,:), & + wght1(:,:,:) + + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) + + ! local variables + + integer (kind=int_kind) :: & + i, j, iblk, & + ilo,ihi,jlo,jhi ! 
beginning and end of physical domain + + real (kind=dbl_kind) :: & + wtmp + + type (block) :: & + this_block ! block information for current block + + character(len=*), parameter :: subname = '(grid_average_X2YA)' + + work2(:,:,:) = c0 + + select case (trim(dir)) + + case('NE') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i ,j ,iblk) & + + wght1(i+1,j ,iblk) & + + wght1(i ,j+1,iblk) & + + wght1(i+1,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk) & + + work1(i ,j+1,iblk)*wght1(i ,j+1,iblk) & + + work1(i+1,j+1,iblk)*wght1(i+1,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('SW') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i ,j ,iblk) & + + wght1(i-1,j ,iblk) & + + wght1(i ,j-1,iblk) & + + wght1(i-1,j-1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + work1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('NW') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i-1,j ,iblk) & + + wght1(i ,j ,iblk) & + + wght1(i-1,j+1,iblk) & + + wght1(i ,j+1,iblk)) + if (wtmp /= c0) & + 
work2(i,j,iblk) = (work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i-1,j+1,iblk)*wght1(i-1,j+1,iblk) & + + work1(i ,j+1,iblk)*wght1(i ,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('SE') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i ,j-1,iblk) & + + wght1(i+1,j-1,iblk) & + + wght1(i ,j ,iblk) & + + wght1(i+1,j ,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('E') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i ,j,iblk) & + + wght1(i+1,j,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i ,j,iblk)*wght1(i ,j,iblk) & + + work1(i+1,j,iblk)*wght1(i+1,j,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('W') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i-1,j,iblk) & + + wght1(i ,j,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i-1,j,iblk)*wght1(i-1,j,iblk) & + + work1(i ,j,iblk)*wght1(i ,j,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('N') + !$OMP PARALLEL DO 
PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i,j ,iblk) & + + wght1(i,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i,j ,iblk)*wght1(i,j ,iblk) & + + work1(i,j+1,iblk)*wght1(i,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('S') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1(i,j-1,iblk) & + + wght1(i,j ,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1(i,j-1,iblk)*wght1(i,j-1,iblk) & + + work1(i,j ,iblk)*wght1(i,j ,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case default + call abort_ice(subname//'ERROR: unknown option '//trim(dir)) + end select + + end subroutine grid_average_X2YA + !======================================================================= ! Shifts quantities from one grid to another ! Flux masked, original implementation based on earlier t2u and u2t versions @@ -2677,7 +3169,7 @@ end subroutine grid_average_X2YS ! ! author: T. 
Craig - subroutine grid_average_X2YF(dir,work1,area1,work2,area2) + subroutine grid_average_X2YF(dir,work1,wght1,work2,wght2) use ice_constants, only: c0, p25, p5 @@ -2686,8 +3178,8 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) real (kind=dbl_kind), intent(in) :: & work1(:,:,:), & - area1(:,:,:), & - area2(:,:,:) + wght1(:,:,:), & + wght2(:,:,:) real (kind=dbl_kind), intent(out) :: & work2(:,:,:) @@ -2718,11 +3210,11 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p25 * & - (work1(i, j, iblk)*area1(i, j, iblk) & - + work1(i+1,j, iblk)*area1(i+1,j, iblk) & - + work1(i, j+1,iblk)*area1(i, j+1,iblk) & - + work1(i+1,j+1,iblk)*area1(i+1,j+1,iblk)) & - / area2(i, j, iblk) + (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk) & + + work1(i ,j+1,iblk)*wght1(i ,j+1,iblk) & + + work1(i+1,j+1,iblk)*wght1(i+1,j+1,iblk)) & + / wght2(i ,j ,iblk) enddo enddo enddo @@ -2739,11 +3231,11 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p25 * & - (work1(i, j, iblk)*area1(i, j, iblk) & - + work1(i-1,j, iblk)*area1(i-1,j, iblk) & - + work1(i, j-1,iblk)*area1(i, j-1,iblk) & - + work1(i-1,j-1,iblk)*area1(i-1,j-1,iblk)) & - / area2(i, j, iblk) + (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + work1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) & + / wght2(i ,j ,iblk) enddo enddo enddo @@ -2760,11 +3252,11 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p25 * & - (work1(i-1,j, iblk)*area1(i-1,j, iblk) & - + work1(i, j, iblk)*area1(i, j, iblk) & - + work1(i-1,j+1,iblk)*area1(i-1,j+1,iblk) & - + work1(i, j+1,iblk)*area1(i ,j+1,iblk)) & - / area2(i, j, iblk) + (work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & + + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i-1,j+1,iblk)*wght1(i-1,j+1,iblk) & + + 
work1(i ,j+1,iblk)*wght1(i ,j+1,iblk)) & + / wght2(i ,j ,iblk) enddo enddo enddo @@ -2781,11 +3273,11 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p25 * & - (work1(i ,j-1,iblk)*area1(i ,j-1,iblk) & - + work1(i+1,j-1,iblk)*area1(i+1,j-1,iblk) & - + work1(i ,j ,iblk)*area1(i ,j, iblk) & - + work1(i+1,j ,iblk)*area1(i+1,j, iblk)) & - / area2(i, j, iblk) + (work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) & + / wght2(i ,j ,iblk) enddo enddo enddo @@ -2802,9 +3294,9 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p5 * & - (work1(i, j,iblk)*area1(i, j,iblk) & - + work1(i+1,j,iblk)*area1(i+1,j,iblk)) & - / area2(i, j,iblk) + (work1(i ,j,iblk)*wght1(i ,j,iblk) & + + work1(i+1,j,iblk)*wght1(i+1,j,iblk)) & + / wght2(i ,j,iblk) enddo enddo enddo @@ -2821,9 +3313,9 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p5 * & - (work1(i-1,j,iblk)*area1(i-1,j,iblk) & - + work1(i, j,iblk)*area1(i, j,iblk)) & - / area2(i, j,iblk) + (work1(i-1,j,iblk)*wght1(i-1,j,iblk) & + + work1(i ,j,iblk)*wght1(i ,j,iblk)) & + / wght2(i ,j,iblk) enddo enddo enddo @@ -2840,9 +3332,9 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p5 * & - (work1(i,j, iblk)*area1(i,j, iblk) & - + work1(i,j+1,iblk)*area1(i,j+1,iblk)) & - / area2(i, j,iblk) + (work1(i,j ,iblk)*wght1(i,j ,iblk) & + + work1(i,j+1,iblk)*wght1(i,j+1,iblk)) & + / wght2(i ,j,iblk) enddo enddo enddo @@ -2859,9 +3351,9 @@ subroutine grid_average_X2YF(dir,work1,area1,work2,area2) do j = jlo, jhi do i = ilo, ihi work2(i,j,iblk) = p5 * & - (work1(i,j-1,iblk)*area1(i,j-1,iblk) & - + work1(i,j, iblk)*area1(i,j, iblk)) & - / area2(i, j,iblk) + (work1(i,j-1,iblk)*wght1(i,j-1,iblk) & 
+ + work1(i,j ,iblk)*wght1(i,j ,iblk)) & + / wght2(i ,j,iblk) enddo enddo enddo @@ -2903,6 +3395,102 @@ real(kind=dbl_kind) function grid_neighbor_min(field, i, j, grid_location) resul end function grid_neighbor_min +!======================================================================= +! Shifts quantities from one grid to another +! State masked version, simple weighted averager +! NOTE: Input array includes ghost cells that must be updated before +! calling this routine. +! +! author: T. Craig + + subroutine grid_average_X2YS_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,work2) + + use ice_constants, only: c0 + + character(len=*) , intent(in) :: & + dir + + real (kind=dbl_kind), intent(in) :: & + work1a(:,:,:), work1b(:,:,:), & + wght1a(:,:,:), wght1b(:,:,:), & + mask1a(:,:,:), mask1b(:,:,:) + + real (kind=dbl_kind), intent(out) :: & + work2(:,:,:) + + ! local variables + + integer (kind=int_kind) :: & + i, j, iblk, & + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + real (kind=dbl_kind) :: & + wtmp + + type (block) :: & + this_block ! 
block information for current block + + character(len=*), parameter :: subname = '(grid_average_X2YS_2)' + + work2(:,:,:) = c0 + + select case (trim(dir)) + + case('NE2US') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + mask1a(i+1,j ,iblk)*wght1a(i+1,j ,iblk) & + + mask1b(i ,j ,iblk)*wght1b(i ,j ,iblk) & + + mask1b(i ,j+1,iblk)*wght1b(i ,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1a(i ,j ,iblk)*work1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + mask1a(i+1,j ,iblk)*work1a(i+1,j ,iblk)*wght1a(i+1,j ,iblk) & + + mask1b(i ,j ,iblk)*work1b(i ,j ,iblk)*wght1b(i ,j ,iblk) & + + mask1b(i ,j+1,iblk)*work1b(i ,j+1,iblk)*wght1b(i ,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('NE2TS') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (mask1a(i ,j-1,iblk)*wght1a(i ,j-1,iblk) & + + mask1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + mask1b(i-1,j ,iblk)*wght1b(i-1,j ,iblk) & + + mask1b(i ,j ,iblk)*wght1b(i ,j ,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (mask1a(i ,j-1,iblk)*work1a(i ,j-1,iblk)*wght1a(i ,j-1,iblk) & + + mask1a(i ,j ,iblk)*work1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + mask1b(i-1,j ,iblk)*work1b(i-1,j ,iblk)*wght1b(i-1,j ,iblk) & + + mask1b(i ,j ,iblk)*work1b(i ,j ,iblk)*wght1b(i ,j ,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case default + call abort_ice(subname//'ERROR: unknown option '//trim(dir)) + end select + + end subroutine grid_average_X2YS_2 
!======================================================================= ! Compute the maximum of adjacent values of a field at specific indices, diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index 11836c073..e4f5a89e9 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -61,7 +61,7 @@ subroutine dumpfile(filename_spec) stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_flux, only: coszen - use ice_grid, only: grid_system + use ice_grid, only: grid_ice use ice_state, only: aicen, vicen, vsnon, trcrn, uvel, vvel character(len=char_len_long), intent(in), optional :: filename_spec @@ -167,7 +167,7 @@ subroutine dumpfile(filename_spec) call write_restart_field(nu_dump,0,stress12_2,'ruf8','stress12_2',1,diag) call write_restart_field(nu_dump,0,stress12_4,'ruf8','stress12_4',1,diag) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then call write_restart_field(nu_dump,0,stresspT ,'ruf8','stresspT' ,1,diag) call write_restart_field(nu_dump,0,stressmT ,'ruf8','stressmT' ,1,diag) call write_restart_field(nu_dump,0,stress12T,'ruf8','stress12T',1,diag) @@ -222,7 +222,7 @@ subroutine restartfile (ice_ic) stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_flux, only: coszen - use ice_grid, only: tmask, grid_type, grid_system + use ice_grid, only: tmask, grid_type, grid_ice use ice_state, only: trcr_depend, aice, vice, vsno, trcr, & aice0, aicen, vicen, vsnon, trcrn, aice_init, uvel, vvel, & trcr_base, nt_strata, n_trcr_strata @@ -384,7 +384,7 @@ subroutine restartfile (ice_ic) ! tcraig, comment these out now to allow restarts from B grid file ! 
this will affect exact restart when we get to that point #if (1 == 0) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then call read_restart_field(nu_restart,0,stresspT,'ruf8', & 'stresspT' ,1,diag,field_loc_center,field_type_scalar) ! stresspT call read_restart_field(nu_restart,0,stressmT,'ruf8', & diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 index 0bba6e36e..e62a1f67f 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 @@ -137,7 +137,7 @@ subroutine init_restart_write(filename_spec) n_dic, n_don, n_fed, n_fep, nfsd use ice_arrays_column, only: oceanmixed_ice use ice_dyn_shared, only: kdyn - use ice_grid, only: grid_system + use ice_grid, only: grid_ice character(len=char_len_long), intent(in), optional :: filename_spec @@ -274,7 +274,7 @@ subroutine init_restart_write(filename_spec) call define_rest_field(ncid,'stress12_3',dims) call define_rest_field(ncid,'stress12_4',dims) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then call define_rest_field(ncid,'stresspT' ,dims) call define_rest_field(ncid,'stressmT' ,dims) call define_rest_field(ncid,'stress12T',dims) diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index 2e5338fc0..12e4365e9 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -145,7 +145,7 @@ subroutine init_restart_write(filename_spec) n_dic, n_don, n_fed, n_fep, nfsd use ice_dyn_shared, only: kdyn use ice_arrays_column, only: oceanmixed_ice - use ice_grid, only: grid_system + use ice_grid, only: grid_ice logical (kind=log_kind) :: & solve_zsal, skl_bgc, z_tracers @@ -277,7 +277,7 @@ subroutine init_restart_write(filename_spec) call define_rest_field(File,'stress12_3',dims) call 
define_rest_field(File,'stress12_4',dims) - if (grid_system == 'CD') then + if (grid_ice == 'CD') then call define_rest_field(File,'stresspT' ,dims) call define_rest_field(File,'stressmT' ,dims) call define_rest_field(File,'stress12T',dims) diff --git a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 index cd81de879..61f261bb2 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 @@ -153,7 +153,7 @@ subroutine ice_step use ice_state, only: trcrn use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, save_init, step_dyn_wave + biogeochemistry, step_prep, step_dyn_wave use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -210,7 +210,7 @@ subroutine ice_step call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! thermodynamics - call save_init + call step_prep !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 index ecd95e3c3..eb2bdcbf1 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 @@ -153,7 +153,7 @@ subroutine ice_step use ice_state, only: trcrn use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, save_init, step_dyn_wave + biogeochemistry, step_prep, step_dyn_wave use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -210,7 +210,7 @@ subroutine ice_step call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! 
thermodynamics - call save_init + call step_prep !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks diff --git a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 index e9ab0d7e4..365322dde 100644 --- a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 @@ -157,7 +157,7 @@ subroutine ice_step use ice_restoring, only: restore_ice, ice_HaloRestore use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, save_init, step_dyn_wave + biogeochemistry, step_prep, step_dyn_wave use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -213,7 +213,7 @@ subroutine ice_step call t_stopf ('cice_run_presc') endif - call save_init + call step_prep call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! thermodynamics diff --git a/cicecore/drivers/mct/cesm1/ice_import_export.F90 b/cicecore/drivers/mct/cesm1/ice_import_export.F90 index 7aa60dbdf..f88cc2b2d 100644 --- a/cicecore/drivers/mct/cesm1/ice_import_export.F90 +++ b/cicecore/drivers/mct/cesm1/ice_import_export.F90 @@ -9,7 +9,7 @@ module ice_import_export use ice_constants , only: field_type_vector, c100 use ice_constants , only: p001, p5 use ice_blocks , only: block, get_block, nx_block, ny_block - use ice_flux , only: strairxt, strairyt, strocnxt, strocnyt + use ice_flux , only: strairxT, strairyT, strocnxT, strocnyT use ice_flux , only: alvdr, alidr, alvdf, alidf, Tref, Qref, Uref use ice_flux , only: flat, fsens, flwout, evap, fswabs, fhocn, fswthru use ice_flux , only: fresh, fsalt, zlvl, uatm, vatm, potT, Tair, Qa @@ -65,6 +65,7 @@ subroutine ice_import( x2i ) type(block) :: this_block ! 
block information for current block integer,parameter :: nflds=17,nfldv=6,nfldb=27 real (kind=dbl_kind),allocatable :: aflds(:,:,:,:) + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: work real (kind=dbl_kind) :: workx, worky real (kind=dbl_kind) :: MIN_RAIN_TEMP, MAX_SNOW_TEMP character(len=char_len) :: tfrz_option @@ -472,10 +473,15 @@ subroutine ice_import( x2i ) call ice_HaloUpdate(vocn, halo_info, field_loc_center, field_type_scalar) call ice_HaloUpdate(ss_tltx, halo_info, field_loc_center, field_type_scalar) call ice_HaloUpdate(ss_tlty, halo_info, field_loc_center, field_type_scalar) - call grid_average_X2Y('T2UF',uocn) - call grid_average_X2Y('T2UF',vocn) - call grid_average_X2Y('T2UF',ss_tltx) - call grid_average_X2Y('T2UF',ss_tlty) + ! tcraig, moved to dynamics for consistency + !work = uocn + !call grid_average_X2Y('F',work,'T',uocn,'U') + !work = vocn + !call grid_average_X2Y('F',work,'T',vocn,'U') + !work = ss_tltx + !call grid_average_X2Y('F',work,'T',ss_tltx,'U') + !work = ss_tlty + !call grid_average_X2Y('F',work,'T',ss_tlty,'U') call t_stopf ('cice_imp_t2u') end if diff --git a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 index 81fa367c1..d4b100518 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 @@ -128,7 +128,7 @@ subroutine ice_step use ice_restoring, only: restore_ice, ice_HaloRestore use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, save_init, step_dyn_wave + biogeochemistry, step_prep, step_dyn_wave use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -186,7 +186,7 @@ subroutine ice_step endif #endif - call save_init + call step_prep call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! 
thermodynamics diff --git a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 index 10d42137f..0f7f1ebd4 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 @@ -10,7 +10,7 @@ module ice_import_export use ice_domain , only : nblocks, blocks_ice, halo_info, distrb_info use ice_domain_size , only : nx_global, ny_global, block_size_x, block_size_y, max_blocks, ncat use ice_exit , only : abort_ice - use ice_flux , only : strairxt, strairyt, strocnxt, strocnyt + use ice_flux , only : strairxT, strairyT, strocnxT, strocnyT use ice_flux , only : alvdr, alidr, alvdf, alidf, Tref, Qref, Uref use ice_flux , only : flat, fsens, flwout, evap, fswabs, fhocn, fswthru use ice_flux , only : fswthru_vdr, fswthru_vdf, fswthru_idr, fswthru_idf @@ -405,6 +405,7 @@ subroutine ice_import( importState, rc ) integer :: ilo, ihi, jlo, jhi !beginning and end of physical domain type(block) :: this_block ! block information for current block real (kind=dbl_kind),allocatable :: aflds(:,:,:,:) + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: work real (kind=dbl_kind) :: workx, worky real (kind=dbl_kind) :: MIN_RAIN_TEMP, MAX_SNOW_TEMP real (kind=dbl_kind) :: Tffresh @@ -801,10 +802,15 @@ subroutine ice_import( importState, rc ) call ice_HaloUpdate(vocn, halo_info, field_loc_center, field_type_scalar) call ice_HaloUpdate(ss_tltx, halo_info, field_loc_center, field_type_scalar) call ice_HaloUpdate(ss_tlty, halo_info, field_loc_center, field_type_scalar) - call grid_average_X2Y('T2UF',uocn) - call grid_average_X2Y('T2UF',vocn) - call grid_average_X2Y('T2UF',ss_tltx) - call grid_average_X2Y('T2UF',ss_tlty) + ! 
tcraig, moved to dynamics for consistency + !work = uocn + !call grid_average_X2Y('F',work,'T',uocn,'U') + !work = vocn + !call grid_average_X2Y('F',work,'T',vocn,'U') + !work = ss_tltx + !call grid_average_X2Y('F',work,'T',ss_tltx,'U') + !work = ss_tlty + !call grid_average_X2Y('F',work,'T',ss_tlty,'U') call t_stopf ('cice_imp_t2u') end if diff --git a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 index 2d3e22973..7da73db1d 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 @@ -165,7 +165,7 @@ subroutine ice_step use ice_restoring, only: restore_ice, ice_HaloRestore use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, save_init, step_dyn_wave, step_snow + biogeochemistry, step_prep, step_dyn_wave, step_snow use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -224,7 +224,7 @@ subroutine ice_step call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! thermodynamics - call save_init + call step_prep !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks diff --git a/cicecore/drivers/nuopc/dmi/cice_cap.info b/cicecore/drivers/nuopc/dmi/cice_cap.info index 4b2d6d65f..2faa623ec 100644 --- a/cicecore/drivers/nuopc/dmi/cice_cap.info +++ b/cicecore/drivers/nuopc/dmi/cice_cap.info @@ -940,10 +940,15 @@ module cice_cap ! call ice_HaloUpdate(vocn, halo_info, field_loc_center, field_type_scalar) ! call ice_HaloUpdate(ss_tltx, halo_info, field_loc_center, field_type_scalar) ! call ice_HaloUpdate(ss_tlty, halo_info, field_loc_center, field_type_scalar) - call grid_average_X2Y('T2UF',uocn) - call grid_average_X2Y('T2UF',vocn) - call grid_average_X2Y('T2UF',ss_tltx) - call grid_average_X2Y('T2UF',ss_tlty) + ! 
tcraig, moved to dynamics for consistency + !work = uocn + !call grid_average_X2Y('F',work,'T',uocn,'U') + !work = vocn + !call grid_average_X2Y('F',work,'T',vocn,'U') + !work = ss_tltx + !call grid_average_X2Y('F',work,'T',ss_tltx,'U') + !work = ss_tlty + !call grid_average_X2Y('F',work,'T',ss_tlty,'U') end subroutine subroutine CICE_Export(st,rc) diff --git a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 index 0fde18e04..27d61db84 100644 --- a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 @@ -157,7 +157,7 @@ subroutine ice_step use ice_restoring, only: restore_ice, ice_HaloRestore use ice_step_mod, only: prep_radiation, step_therm1, step_therm2, & update_state, step_dyn_horiz, step_dyn_ridge, step_radiation, & - biogeochemistry, save_init, step_dyn_wave, step_snow + biogeochemistry, step_prep, step_dyn_wave, step_snow use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_diags, timer_column, timer_thermo, timer_bound, & timer_hist, timer_readwrite @@ -216,7 +216,7 @@ subroutine ice_step call ice_timer_start(timer_column) ! column physics call ice_timer_start(timer_thermo) ! 
thermodynamics - call save_init + call step_prep !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks diff --git a/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 b/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 index 73ff10cad..5a4b3d54e 100644 --- a/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 +++ b/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 @@ -29,7 +29,7 @@ program gridavgchk use ice_exit, only: abort_ice, end_run use ice_global_reductions, only: global_minval, global_maxval use ice_grid, only: grid_average_X2Y,tarea,uarea,narea,earea,tmask,umask,nmask,emask, & - hm,uvm + hm,uvm,epm,npm implicit none @@ -38,14 +38,22 @@ program gridavgchk integer(int_kind) :: blockID, numBlocks type (block) :: this_block - real(dbl_kind) ,allocatable :: array1x(:,:,:), array1y(:,:,:) - real(dbl_kind) ,allocatable :: array2x(:,:,:), array2y(:,:,:) - real(dbl_kind) ,allocatable :: array3x(:,:,:), array3y(:,:,:) - real(dbl_kind) :: amin, amax, errtol, errx, erry + real(dbl_kind) ,allocatable :: array1x(:,:,:), array1y(:,:,:) ! input + real(dbl_kind) ,allocatable :: arraysx(:,:,:), arraysy(:,:,:) ! extra input for NE2T, NE2U + real(dbl_kind) ,allocatable :: array2x(:,:,:), array2y(:,:,:) ! output + real(dbl_kind) ,allocatable :: array3x(:,:,:), array3y(:,:,:) ! error + real(dbl_kind) ,allocatable :: wght1(:,:,:), mask1(:,:,:), array2z(:,:,:) ! 
extra for explicit + real(dbl_kind) :: amin, amax, fmax, errtol, errx, erry real(dbl_kind) :: deltax0, deltay0, deltax, deltay - integer(int_kind) :: npes, ierr, ntask, ntest, maxtest, navg - integer(int_kind), parameter :: maxgroup = 3 + integer(int_kind), parameter :: maxtests = 3 + integer(int_kind), parameter :: maxgroups = 4 + integer(int_kind) :: numtests_cnt, numgroups_cnt + character(len=16) :: numtests_name(maxtests) + integer(int_kind) :: nbase(maxgroups) + character(len=16) :: numgroups_name(maxgroups) + real(dbl_kind) :: errmax(maxgroups,maxtests) + integer(int_kind) :: npes, ierr, ntask, testcnt, tottest, mtests, cnt, ng integer(int_kind) :: errorflag0,gflag integer(int_kind), allocatable :: errorflag(:) character(len=32), allocatable :: stringflag(:) @@ -67,55 +75,122 @@ program gridavgchk call CICE_Initialize npes = get_num_procs() - navg = 12 - if (.not. landblockelim) navg=24 ! no land block elimination, can test F mappings - allocate(avgname(navg)) - allocate(errtolconst(navg)) - allocate(errtolijind(navg)) - allocate(errtolarea(navg)) - maxtest = maxgroup * navg - allocate(errorflag(maxtest)) - allocate(stringflag(maxtest)) - allocate(dmask(nx_block,ny_block,max_blocks,navg)) - - errtolconst(1:12) = 0.0001_dbl_kind - errtolijind(1:12) = 0.51_dbl_kind - errtolarea (1:12) = 0.75_dbl_kind + numtests_name(1) = 'constant' + numtests_name(2) = 'ijindex' + numtests_name(3) = 'area' + numgroups_name(1) = 'X2YA' + numgroups_name(2) = 'X2YS' + numgroups_name(3) = 'X2YF' + numgroups_name(4) = 'NE2YS' + nbase(1) = 16 + nbase(2) = 16 + nbase(3) = 0 + nbase(4) = 4 + errmax = c0 + + if (.not. landblockelim) nbase(3) = nbase(2) ! 
no land block elimination, can test F mappings + mtests = nbase(1) + nbase(2) + nbase(3) + nbase(4) + + allocate(avgname(mtests)) + allocate(errtolconst(mtests)) + allocate(errtolijind(mtests)) + allocate(errtolarea(mtests)) + errtolconst = c0 + errtolijind = c0 + errtolarea = c0 + tottest = maxtests * mtests + allocate(errorflag(tottest)) + allocate(stringflag(tottest)) + allocate(dmask(nx_block,ny_block,max_blocks,mtests)) + + n = 0 + errtolconst(n+1:n+nbase(1)) = 0.00001_dbl_kind + errtolijind(n+1:n+nbase(1)) = 0.10_dbl_kind + errtolarea (n+1:n+nbase(1)) = 0.04_dbl_kind if (nx_global > 200 .and. ny_global > 200) then - errtolarea (1:12) = 0.20_dbl_kind + errtolijind(n+1:n+nbase(1)) = 0.03_dbl_kind + errtolarea (n+1:n+nbase(1)) = 0.003_dbl_kind endif - avgname(1) = 'T2US'; dmask(:,:,:,1) = umask(:,:,:) - avgname(2) = 'T2NS'; dmask(:,:,:,2) = nmask(:,:,:) - avgname(3) = 'T2ES'; dmask(:,:,:,3) = emask(:,:,:) - avgname(4) = 'U2TS'; dmask(:,:,:,4) = tmask(:,:,:) - avgname(5) = 'U2NS'; dmask(:,:,:,5) = nmask(:,:,:) - avgname(6) = 'U2ES'; dmask(:,:,:,6) = emask(:,:,:) - avgname(7) = 'N2TS'; dmask(:,:,:,7) = tmask(:,:,:) - avgname(8) = 'N2US'; dmask(:,:,:,8) = umask(:,:,:) - avgname(9) = 'N2ES'; dmask(:,:,:,9) = emask(:,:,:) - avgname(10) = 'E2TS'; dmask(:,:,:,10) = tmask(:,:,:) - avgname(11) = 'E2US'; dmask(:,:,:,11) = umask(:,:,:) - avgname(12) = 'E2NS'; dmask(:,:,:,12) = nmask(:,:,:) - if (navg > 12) then - errtolconst(13:24) = 0.008_dbl_kind - errtolijind(13:24) = 0.65_dbl_kind - errtolarea (13:24) = 0.55_dbl_kind + n=n+1; avgname(n) = 'T2TA' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'T2UA' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'T2NA' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'T2EA' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'U2TA' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'U2UA' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'U2NA' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'U2EA' ; 
dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'N2TA' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'N2UA' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'N2NA' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'N2EA' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'E2TA' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'E2UA' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'E2NA' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'E2EA' ; dmask(:,:,:,n) = emask(:,:,:) + + errtolconst(n+1:n+nbase(2)) = 0.00001_dbl_kind + errtolijind(n+1:n+nbase(2)) = 0.51_dbl_kind + errtolarea (n+1:n+nbase(2)) = 0.19_dbl_kind if (nx_global > 200 .and. ny_global > 200) then - errtolijind(13:24) = 0.25_dbl_kind - errtolarea (13:24) = 0.15_dbl_kind + errtolarea (n+1:n+nbase(2)) = 0.06_dbl_kind endif - avgname(13) = 'T2UF'; dmask(:,:,:,13) = umask(:,:,:) - avgname(14) = 'T2NF'; dmask(:,:,:,14) = nmask(:,:,:) - avgname(15) = 'T2EF'; dmask(:,:,:,15) = emask(:,:,:) - avgname(16) = 'U2TF'; dmask(:,:,:,16) = tmask(:,:,:) - avgname(17) = 'U2NF'; dmask(:,:,:,17) = nmask(:,:,:) - avgname(18) = 'U2EF'; dmask(:,:,:,18) = emask(:,:,:) - avgname(19) = 'N2TF'; dmask(:,:,:,19) = tmask(:,:,:) - avgname(20) = 'N2UF'; dmask(:,:,:,20) = umask(:,:,:) - avgname(21) = 'N2EF'; dmask(:,:,:,21) = emask(:,:,:) - avgname(22) = 'E2TF'; dmask(:,:,:,22) = tmask(:,:,:) - avgname(23) = 'E2UF'; dmask(:,:,:,23) = umask(:,:,:) - avgname(24) = 'E2NF'; dmask(:,:,:,24) = nmask(:,:,:) + n=n+1; avgname(n) = 'T2TS' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'T2US' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'T2NS' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'T2ES' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'U2TS' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'U2US' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'U2NS' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'U2ES' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; 
avgname(n) = 'N2TS' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'N2US' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'N2NS' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'N2ES' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'E2TS' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'E2US' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'E2NS' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'E2ES' ; dmask(:,:,:,n) = emask(:,:,:) + + if (nbase(3) > 0) then + errtolconst(n+1:n+nbase(3)) = 0.0065_dbl_kind + errtolijind(n+1:n+nbase(3)) = 0.65_dbl_kind + errtolarea (n+1:n+nbase(3)) = 0.04_dbl_kind + if (nx_global > 200 .and. ny_global > 200) then + errtolijind(n+1:n+nbase(3)) = 0.22_dbl_kind + errtolarea (n+1:n+nbase(3)) = 0.004_dbl_kind + endif + n=n+1; avgname(n) = 'T2TF' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'T2UF' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'T2NF' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'T2EF' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'U2TF' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'U2UF' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'U2NF' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'U2EF' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'N2TF' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'N2UF' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'N2NF' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'N2EF' ; dmask(:,:,:,n) = emask(:,:,:) + n=n+1; avgname(n) = 'E2TF' ; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'E2UF' ; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'E2NF' ; dmask(:,:,:,n) = nmask(:,:,:) + n=n+1; avgname(n) = 'E2EF' ; dmask(:,:,:,n) = emask(:,:,:) + endif + + errtolconst(n+1:n+nbase(4)) = 0.00001_dbl_kind + errtolijind(n+1:n+nbase(4)) = 0.51_dbl_kind + errtolarea (n+1:n+nbase(4)) = 0.12_dbl_kind + if (nx_global > 200 .and. 
ny_global > 200) then + errtolijind(n+1:n+nbase(4)) = 0.26_dbl_kind + errtolarea (n+1:n+nbase(4)) = 0.03_dbl_kind + endif + n=n+1; avgname(n) = 'NE2TS'; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'EN2TS'; dmask(:,:,:,n) = tmask(:,:,:) + n=n+1; avgname(n) = 'NE2US'; dmask(:,:,:,n) = umask(:,:,:) + n=n+1; avgname(n) = 'EN2US'; dmask(:,:,:,n) = umask(:,:,:) + + if (n /= mtests) then + call abort_ice(subname//' n ne mtests') endif !----------------------------------------------------------------- @@ -135,7 +210,7 @@ program gridavgchk write(6,*) ' block_size_x = ',block_size_x write(6,*) ' block_size_y = ',block_size_y write(6,*) ' nblocks_tot = ',nblocks_tot - write(6,*) ' maxtest = ',maxtest + write(6,*) ' tottest = ',tottest write(6,*) ' ' endif @@ -151,37 +226,59 @@ program gridavgchk allocate(array1x(nx_block,ny_block,max_blocks)) allocate(array1y(nx_block,ny_block,max_blocks)) + allocate(arraysx(nx_block,ny_block,max_blocks)) + allocate(arraysy(nx_block,ny_block,max_blocks)) allocate(array2x(nx_block,ny_block,max_blocks)) allocate(array2y(nx_block,ny_block,max_blocks)) allocate(array3x(nx_block,ny_block,max_blocks)) allocate(array3y(nx_block,ny_block,max_blocks)) + allocate(wght1 (nx_block,ny_block,max_blocks)) + allocate(mask1 (nx_block,ny_block,max_blocks)) + allocate(array2z(nx_block,ny_block,max_blocks)) call ice_distributionGet(distrb_info, numLocalBlocks = numBlocks) - ntest = 0 + testcnt = 0 !---------------- ! 
Test constant field !---------------- + numtests_cnt = 1 if (my_task == master_task) then write(6,*) '' - write(6,*) 'TEST constant field' + write(6,*) 'TEST constant field, test ',numtests_cnt endif array1x = testconst + arraysx = testconst - do n = 1,navg - ntest = ntest + 1 + do n = 1,mtests + testcnt = testcnt + 1 - stringflag(ntest) = trim(avgname(n))//' const' + cnt = 0 + do ng = 1,maxgroups + if (n > cnt) numgroups_cnt = ng + cnt = cnt + nbase(ng) + enddo + + errtol = errtolconst(n) + stringflag(testcnt) = trim(avgname(n))//' const' if (my_task == master_task) then write(6,*) '' - write(6,*) trim(stringflag(ntest)),' test ',ntest,errtolconst(n) + write(6,110) trim(stringflag(testcnt))//' test ',testcnt,errtol,numtests_cnt,numgroups_cnt endif array2x = c0 - call grid_average_X2Y(trim(avgname(n)),array1x,array2x) + if (len_trim(avgname(n)) == 4) then + call grid_average_X2Y(avgname(n)(4:4),array1x,avgname(n)(1:1),array2x,avgname(n)(3:3)) + else ! len_trim(avgname(n)) == 5 + if (avgname(n)(1:2) == 'NE') then + call grid_average_X2Y(avgname(n)(5:5),array1x,avgname(n)(1:1),arraysx,avgname(n)(2:2),array2x,avgname(n)(4:4)) + else ! EN, swap needed + call grid_average_X2Y(avgname(n)(5:5),arraysx,avgname(n)(1:1),array1x,avgname(n)(2:2),array2x,avgname(n)(4:4)) + endif + endif array3x = c0 do iblock = 1,numBlocks @@ -193,17 +290,16 @@ program gridavgchk je = this_block%jhi do j = jb,je jglob = this_block%j_glob(j) - errtol = errtolconst(n) * testconst do i = ib,ie iglob = this_block%i_glob(i) - array3x(i,j,iblock) = array2x(i,j,iblock) - array1x(i,j,iblock) + array3x(i,j,iblock) = (array2x(i,j,iblock) - testconst)/testconst ! if array2 is c0, then there are no valid surrounding points and ignore it if (array2x(i,j,iblock) == c0) array3x(i,j,iblock) = c0 errx = abs(array3x(i,j,iblock)) ! flag points that are active and error numerically if (dmask(i,j,iblock,n) .and. errx > errtol .and. 
array2x(i,j,iblock) /= c0) then - errorflag(ntest) = failflag - errorflag0 = failflag + errorflag(testcnt) = failflag + errorflag0 = failflag write(100+my_task,*) '' write(100+my_task,100) 'error const '//trim(avgname(n)),my_task,iblock,i,j,iglob,jglob write(100+my_task,101) 'value, error ',array2x(i,j,iblock),errx @@ -211,6 +307,8 @@ program gridavgchk enddo enddo enddo + gflag = global_maxval(errorflag(testcnt), MPI_COMM_ICE) + if (my_task == master_task .and. gflag == failflag) write(6,*) ' *** FAIL ***' amin = global_minval(array1x, distrb_info) amax = global_maxval(array1x, distrb_info) if (my_task == master_task) write(6,102) 'input min/max = ',amin,amax @@ -220,17 +318,24 @@ program gridavgchk amin = global_minval(array3x, distrb_info, dmask(:,:,:,n)) amax = global_maxval(array3x, distrb_info, dmask(:,:,:,n)) if (my_task == master_task) write(6,102) 'error min/max = ',amin,amax + amax = global_maxval(abs(array3x), distrb_info, dmask(:,:,:,n)) + errmax(numgroups_cnt,numtests_cnt) = max(errmax(numgroups_cnt,numtests_cnt), amax) enddo !---------------- ! Test global i, j fields + ! for NE2T, NE2U, inputs should result in exact calcs !---------------- + numtests_cnt = 2 if (my_task == master_task) then write(6,*) '' - write(6,*) 'TEST global i, j fields' + write(6,*) 'TEST global i, j fields, test ',numtests_cnt endif + array1x = -999. + arraysx = -999. + do iblock = 1,numBlocks call ice_distributionGetBlockID(distrb_info, iblock, blockID) this_block = get_block(blockID, blockID) @@ -246,10 +351,7 @@ program gridavgchk enddo enddo - call ice_HaloUpdate(array1x, halo_info, field_loc_center, field_type_scalar, fillval) - call ice_HaloUpdate(array1y, halo_info, field_loc_center, field_type_scalar, fillval) - - ! Overwrite the i wraparound points to deal with i/j index average on wraparound + ! 
Fill in ghost cells with locally appropriate value do iblock = 1,numBlocks call ice_distributionGetBlockID(distrb_info, iblock, blockID) this_block = get_block(blockID, blockID) @@ -257,21 +359,38 @@ program gridavgchk ie = this_block%ihi jb = this_block%jlo je = this_block%jhi - do j = 1,ny_block + ! skip corners do i = ib,ie - if (this_block%i_glob(i) == 1 ) array1x(i-1,j,iblock) = 0 - if (this_block%i_glob(i) == nx_global) array1x(i+1,j,iblock) = nx_global+1 + array1x(i,jb-1,iblock) = array1x(i,jb,iblock) + array1y(i,jb-1,iblock) = array1y(i,jb,iblock) - 1.0_dbl_kind + array1x(i,je+1,iblock) = array1x(i,je,iblock) + array1y(i,je+1,iblock) = array1y(i,je,iblock) + 1.0_dbl_kind enddo + ! set corners + do j = 1,ny_block + array1x(ib-1,j,iblock) = array1x(ib,j,iblock) - 1.0_dbl_kind + array1y(ib-1,j,iblock) = array1y(ib,j,iblock) + array1x(ie+1,j,iblock) = array1x(ie,j,iblock) + 1.0_dbl_kind + array1y(ie+1,j,iblock) = array1y(ie,j,iblock) enddo enddo - do n = 1,navg - ntest = ntest + 1 + arraysx = array1x + 0.5_dbl_kind + arraysy = array1y - 0.5_dbl_kind - stringflag(ntest) = trim(avgname(n))//' ijind' + do n = 1,mtests + testcnt = testcnt + 1 + + cnt = 0 + do ng = 1,maxgroups + if (n > cnt) numgroups_cnt = ng + cnt = cnt + nbase(ng) + enddo + + stringflag(testcnt) = trim(avgname(n))//' ijind' if (my_task == master_task) then write(6,*) '' - write(6,*) trim(stringflag(ntest)),' test ',ntest,errtolijind(n) + write(6,110) trim(stringflag(testcnt))//' test ',testcnt,errtolijind(n),numtests_cnt,numgroups_cnt endif deltax0 = 0.0_dbl_kind @@ -279,30 +398,44 @@ program gridavgchk if (avgname(n)(1:3) == 'T2U' .or. & avgname(n)(1:3) == 'T2E' .or. & avgname(n)(1:3) == 'N2U' .or. & - avgname(n)(1:3) == 'N2E') then + avgname(n)(1:3) == 'N2E' .or. & + avgname(n)(1:4) == 'NE2U'.or. & + avgname(n)(1:4) == 'EN2U') then deltax0 = 0.5_dbl_kind elseif (avgname(n)(1:3) == 'U2T' .or. & avgname(n)(1:3) == 'U2N' .or. & avgname(n)(1:3) == 'E2T' .or. 
& - avgname(n)(1:3) == 'E2N') then + avgname(n)(1:3) == 'E2N' ) then deltax0 = -0.5_dbl_kind endif if (avgname(n)(1:3) == 'T2U' .or. & avgname(n)(1:3) == 'T2N' .or. & avgname(n)(1:3) == 'E2U' .or. & - avgname(n)(1:3) == 'E2N') then + avgname(n)(1:3) == 'E2N' ) then deltay0 = 0.5_dbl_kind elseif (avgname(n)(1:3) == 'U2T' .or. & avgname(n)(1:3) == 'U2E' .or. & avgname(n)(1:3) == 'N2T' .or. & - avgname(n)(1:3) == 'N2E') then + avgname(n)(1:3) == 'N2E' .or. & + avgname(n)(1:4) == 'NE2T'.or. & + avgname(n)(1:4) == 'EN2T') then deltay0 = -0.5_dbl_kind endif array2x = c0 array2y = c0 - call grid_average_X2Y(trim(avgname(n)),array1x,array2x) - call grid_average_X2Y(trim(avgname(n)),array1y,array2y) + if (len_trim(avgname(n)) == 4) then + call grid_average_X2Y(avgname(n)(4:4),array1x,avgname(n)(1:1),array2x,avgname(n)(3:3)) + call grid_average_X2Y(avgname(n)(4:4),array1y,avgname(n)(1:1),array2y,avgname(n)(3:3)) + else ! len_trim(avgname(n)) == 5 + if (avgname(n)(1:2) == 'NE') then + call grid_average_X2Y(avgname(n)(5:5),array1x,avgname(n)(1:1),arraysx,avgname(n)(2:2),array2x,avgname(n)(4:4)) + call grid_average_X2Y(avgname(n)(5:5),array1y,avgname(n)(1:1),arraysy,avgname(n)(2:2),array2y,avgname(n)(4:4)) + else ! EN, swap needed array1 is N, arrays is E + call grid_average_X2Y(avgname(n)(5:5),arraysx,avgname(n)(1:1),array1x,avgname(n)(2:2),array2x,avgname(n)(4:4)) + call grid_average_X2Y(avgname(n)(5:5),arraysy,avgname(n)(1:1),array1y,avgname(n)(2:2),array2y,avgname(n)(4:4)) + endif + endif array3x = c0 errtol = errtolijind(n) @@ -329,8 +462,8 @@ program gridavgchk erry = abs(array3y(i,j,iblock)) ! flag points that are active and error numerically if (dmask(i,j,iblock,n) .and. (errx > errtol .or. 
erry > errtol)) then - errorflag(ntest) = failflag - errorflag0 = failflag + errorflag(testcnt) = failflag + errorflag0 = failflag write(100+my_task,*) '' write(100+my_task,100) 'error ijind '//trim(avgname(n)),my_task,iblock,i,j,iglob,jglob write(100+my_task,101) 'array2x, err',array2x(i,j,iblock),errx @@ -356,6 +489,8 @@ program gridavgchk enddo enddo + gflag = global_maxval(errorflag(testcnt), MPI_COMM_ICE) + if (my_task == master_task .and. gflag == failflag) write(6,*) ' *** FAIL ***' amin = global_minval(array1x, distrb_info) amax = global_maxval(array1x, distrb_info) if (my_task == master_task) write(6,102) 'i_glob min/max = ',amin,amax @@ -374,6 +509,10 @@ program gridavgchk amin = global_minval(array3y, distrb_info, dmask(:,:,:,n)) amax = global_maxval(array3y, distrb_info, dmask(:,:,:,n)) if (my_task == master_task) write(6,102) 'j error min/max = ',amin,amax + amax = global_maxval(abs(array3x), distrb_info, dmask(:,:,:,n)) + errmax(numgroups_cnt,numtests_cnt) = max(errmax(numgroups_cnt,numtests_cnt), amax) + amax = global_maxval(abs(array3y), distrb_info, dmask(:,:,:,n)) + errmax(numgroups_cnt,numtests_cnt) = max(errmax(numgroups_cnt,numtests_cnt), amax) enddo @@ -381,25 +520,113 @@ program gridavgchk ! Test area fields !---------------- + numtests_cnt = 3 if (my_task == master_task) then write(6,*) '' - write(6,*) 'TEST area fields' + write(6,*) 'TEST area fields, test ',numtests_cnt endif - do n = 1,navg - ntest = ntest + 1 + do n = 1,mtests + testcnt = testcnt + 1 - stringflag(ntest) = trim(avgname(n))//' area' + cnt = 0 + do ng = 1,maxgroups + if (n > cnt) numgroups_cnt = ng + cnt = cnt + nbase(ng) + enddo + + stringflag(testcnt) = trim(avgname(n))//' area' if (my_task == master_task) then write(6,*) '' - write(6,*) trim(stringflag(ntest)),' test ',ntest,errtolarea(n) + write(6,110) trim(stringflag(testcnt))//' test ',testcnt,errtolarea(n),numtests_cnt,numgroups_cnt + endif + + array1x = -999. + arraysx = -999. + mask1 = -999. + wght1 = -999. 
+ if (avgname(n)(1:2) == 'T2') then + array1x = tarea + wght1 = tarea + mask1 = hm + elseif (avgname(n)(1:2) == 'U2') then + array1x = uarea + wght1 = uarea + mask1 = uvm + elseif (avgname(n)(1:2) == 'E2') then + array1x = earea + wght1 = earea + mask1 = epm + elseif (avgname(n)(1:2) == 'N2') then + array1x = narea + wght1 = narea + mask1 = npm + elseif (avgname(n)(1:3) == 'NE2') then + array1x = narea + arraysx = earea + elseif (avgname(n)(1:3) == 'EN2') then + array1x = earea + arraysx = narea + else + call abort_ice(subname//' avgname not supported 1x = '//trim(avgname(n))) + endif + + array2y = -999. + if (avgname(n)(2:3) == '2T' .or. & + avgname(n)(3:4) == '2T') then + array2y = tarea + elseif (avgname(n)(2:3) == '2U' .or. & + avgname(n)(3:4) == '2U') then + array2y = uarea + elseif (avgname(n)(2:3) == '2E') then + array2y = earea + elseif (avgname(n)(2:3) == '2N') then + array2y = narea + else + call abort_ice(subname//' avgname not supported 2y = '//trim(avgname(n))) endif - array1x = tarea ! input - array2y = uarea ! result - call ice_HaloUpdate(array1x, halo_info, field_loc_center, field_type_scalar, fillval) array2x = c0 - call grid_average_X2Y(trim(avgname(n)),array1x,array2x) + if (len_trim(avgname(n)) == 4) then +! call grid_average_X2Y(trim(avgname(n)),array1x,array2x) + call grid_average_X2Y(avgname(n)(4:4),array1x,avgname(n)(1:1),array2x,avgname(n)(3:3)) + ! ------ + ! Extra Explicit Calc Test + ! ------ + if (avgname(n)(2:2) == '2' .and. (avgname(n)(4:4) == 'S' .or. avgname(n)(4:4) == 'A')) then + stringflag(testcnt) = trim(stringflag(testcnt))//' + explicit' + if (avgname(n)(4:4) == 'S') then + ! test direct mapping compared to S, array1x*wght1*mask1 where wght1=area and mask1=mask + call grid_average_X2Y(avgname(n)(4:4),array1x,avgname(n)(1:1),wght1,mask1,array2z,avgname(n)(3:3)) + elseif (avgname(n)(4:4) == 'A') then + ! 
test direct mapping compared to A, array1x*wght1 where wght1=area and mask1=1.0 + mask1 = c1 + call grid_average_X2Y(avgname(n)(4:4),array1x,avgname(n)(1:1),wght1,mask1,array2z,avgname(n)(3:3)) + endif + fmax = global_maxval(abs(array1x), distrb_info) + amax = global_maxval(abs(array2z-array2x), distrb_info) +! tcraig, errtol=c0 doesn't work here, diff seems smaller than roundoff? - interesting +! errtol = c0 + errtol = 1.0e-20_dbl_kind + if (amax < fmax * errtol) then + if (my_task == master_task) write(6,103) 'PASS explicit avg vs implicit avg ',errtol + else + errorflag(testcnt) = failflag + errorflag0 = failflag + if (my_task == master_task) write(6,103) 'FAIL explicit avg vs implicit avg ',amax,fmax*errtol + amin = global_minval(array2x, distrb_info) + amax = global_maxval(array2x, distrb_info) + if (my_task == master_task) write(6,103) 'output min/max = ',amin,amax + amin = global_minval(array2z, distrb_info) + amax = global_maxval(array2z, distrb_info) + if (my_task == master_task) write(6,103) 'expout min/max = ',amin,amax + endif + endif + + else ! len_trim(avgname(n)) == 5 + ! no swap needed 1x and sx set based on NE or EN + call grid_average_X2Y(avgname(n)(5:5),array1x,avgname(n)(1:1),arraysx,avgname(n)(2:2),array2x,avgname(n)(4:4)) + endif array3x = c1 array3y = c1 @@ -421,8 +648,8 @@ program gridavgchk errx = abs(array3x(i,j,iblock)) ! flag points that are active and error numerically if (dmask(i,j,iblock,n) .and. errx > errtolarea(n)) then - errorflag(ntest) = failflag - errorflag0 = failflag + errorflag(testcnt) = failflag + errorflag0 = failflag write(100+my_task,*) '' write(100+my_task,100) 'error area '//trim(avgname(n)),my_task,iblock,i,j,iglob,jglob write(100+my_task,101) 'out,exact,err',array2x(i,j,iblock),array2y(i,j,iblock),array3x(i,j,iblock) @@ -430,6 +657,8 @@ program gridavgchk enddo enddo enddo + gflag = global_maxval(errorflag(testcnt), MPI_COMM_ICE) + if (my_task == master_task .and. 
gflag == failflag) write(6,*) ' *** FAIL ***' amin = global_minval(array1x, distrb_info) amax = global_maxval(array1x, distrb_info) if (my_task == master_task) write(6,103) 'input min/max = ',amin,amax @@ -442,16 +671,29 @@ program gridavgchk amin = global_minval(array3x, distrb_info, dmask(:,:,:,n)) amax = global_maxval(array3x, distrb_info, dmask(:,:,:,n)) if (my_task == master_task) write(6,102) 'error min/max = ',amin,amax + amax = global_maxval(abs(array3x), distrb_info, dmask(:,:,:,n)) + errmax(numgroups_cnt,numtests_cnt) = max(errmax(numgroups_cnt,numtests_cnt), amax) enddo 100 format(a,10i8) 101 format(a,3g16.7) 102 format(a,3f16.7) 103 format(a,2g16.7,f16.7) +110 format(a,i8,g16.7,6i8) + + if (my_task == master_task) then + write(6,*) ' ' + write(6,*) 'Max Errors:' + do i = 1,maxgroups + do j = 1,maxtests + write(6,'(2x,a16,2x,a16,2x,f23.16)') trim(numgroups_name(i)),trim(numtests_name(j)),errmax(i,j) + enddo + enddo + endif gflag = global_maxval(errorflag0, MPI_COMM_ICE) errorflag0 = gflag - do n = 1,maxtest + do n = 1,tottest gflag = global_maxval(errorflag(n), MPI_COMM_ICE) errorflag(n) = gflag enddo @@ -459,7 +701,7 @@ program gridavgchk if (my_task == master_task) then write(6,*) ' ' write(6,*) 'GRIDAVGCHK COMPLETED SUCCESSFULLY' - do n = 1,maxtest + do n = 1,tottest if (errorflag(n) == passflag) then write(6,*) 'PASS ',trim(stringflag(n)) else diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index a5dd3c058..86efbf65e 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -60,7 +60,9 @@ &grid_nml grid_format = 'bin' grid_type = 'displaced_pole' - grid_system = 'B' + grid_ice = 'B' + grid_atm = 'A' + grid_ocn = 'A' grid_file = 'grid' kmt_type = 'file' kmt_file = 'kmt' diff --git a/configuration/scripts/options/set_nml.box2001 b/configuration/scripts/options/set_nml.box2001 index c087466c1..6039335bc 100644 --- a/configuration/scripts/options/set_nml.box2001 +++ 
b/configuration/scripts/options/set_nml.box2001 @@ -1,3 +1,5 @@ +grid_atm = 'B' +grid_ocn = 'B' days_per_year = 360 use_leap_years = .false. npt = 240 diff --git a/configuration/scripts/options/set_nml.boxadv b/configuration/scripts/options/set_nml.boxadv index 7fc70713e..f7daef019 100644 --- a/configuration/scripts/options/set_nml.boxadv +++ b/configuration/scripts/options/set_nml.boxadv @@ -1,9 +1,13 @@ +grid_ocn = 'B' nilyr = 1 ice_ic = 'default' restart_ext = .false. kcatbound = 2 ew_boundary_type = 'cyclic' ns_boundary_type = 'cyclic' +atm_data_type = 'box2001' +ocn_data_type = 'box2001' +ice_data_type = 'box2001' tr_iage = .true. tr_FY = .false. tr_lvl = .true. diff --git a/configuration/scripts/options/set_nml.boxislandse b/configuration/scripts/options/set_nml.boxislandse index d27b26a8d..561cdb2b1 100644 --- a/configuration/scripts/options/set_nml.boxislandse +++ b/configuration/scripts/options/set_nml.boxislandse @@ -17,6 +17,7 @@ ktransport = -1 coriolis = 'constant' atmbndy = 'constant' atm_data_type = 'uniform_east' +ocn_data_type = 'calm' ice_data_type = 'uniform' rotate_wind = .false. calc_strair = .false. diff --git a/configuration/scripts/options/set_nml.boxislandsn b/configuration/scripts/options/set_nml.boxislandsn index 48ee103f5..35f321ee4 100644 --- a/configuration/scripts/options/set_nml.boxislandsn +++ b/configuration/scripts/options/set_nml.boxislandsn @@ -17,6 +17,7 @@ ktransport = -1 coriolis = 'constant' atmbndy = 'constant' atm_data_type = 'uniform_north' +ocn_data_type = 'calm' ice_data_type = 'uniform' rotate_wind = .false. calc_strair = .false. 
diff --git a/configuration/scripts/options/set_nml.boxislandsne b/configuration/scripts/options/set_nml.boxislandsne index 3dec1d246..c572e7e2c 100644 --- a/configuration/scripts/options/set_nml.boxislandsne +++ b/configuration/scripts/options/set_nml.boxislandsne @@ -17,6 +17,7 @@ ktransport = -1 coriolis = 'constant' atmbndy = 'constant' atm_data_type = 'uniform_northeast' +ocn_data_type = 'calm' ice_data_type = 'uniform' rotate_wind = .false. calc_strair = .false. diff --git a/configuration/scripts/options/set_nml.boxnodyn b/configuration/scripts/options/set_nml.boxnodyn index 8e5d4a692..25ef3cbd4 100644 --- a/configuration/scripts/options/set_nml.boxnodyn +++ b/configuration/scripts/options/set_nml.boxnodyn @@ -52,5 +52,6 @@ krdg_redist = 1 seabed_stress = .true. atm_data_type = 'calm' ocn_data_type = 'calm' +ice_data_type = 'box2001' shortwave = 'ccsm3' albedo_type = 'constant' diff --git a/configuration/scripts/options/set_nml.boxrestore b/configuration/scripts/options/set_nml.boxrestore index ac0266aeb..a3bacd6d3 100644 --- a/configuration/scripts/options/set_nml.boxrestore +++ b/configuration/scripts/options/set_nml.boxrestore @@ -1,3 +1,4 @@ +grid_ocn = 'B' nilyr = 1 ice_ic = 'default' restart_ext = .true. @@ -6,6 +7,9 @@ ndtd = 2 kcatbound = 1 ew_boundary_type = 'cyclic' ns_boundary_type = 'open' +atm_data_type = 'box2001' +ocn_data_type = 'box2001' +ice_data_type = 'box2001' histfreq = 'd','x','x','x','x' histfreq_n = 1,1,1,1,1 f_aice = 'd' diff --git a/configuration/scripts/options/set_nml.boxslotcyl b/configuration/scripts/options/set_nml.boxslotcyl index b38d0efce..9bbe59dd6 100644 --- a/configuration/scripts/options/set_nml.boxslotcyl +++ b/configuration/scripts/options/set_nml.boxslotcyl @@ -1,3 +1,5 @@ +grid_atm = 'B' +grid_ocn = 'B' nilyr = 1 ice_ic = 'default' restart_ext = .false. 
@@ -17,6 +19,8 @@ ktherm = -1 kdyn = -1 kridge = -1 ktransport = 1 +atm_data_type = 'box2001' +ocn_data_type = 'box2001' ice_data_type = 'boxslotcyl' histfreq = 'h','x','x','x','x' histfreq_n = 6 , 1 , 1 , 1 , 1 diff --git a/configuration/scripts/options/set_nml.gbox128 b/configuration/scripts/options/set_nml.gbox128 index 2371b65ed..40e81553f 100644 --- a/configuration/scripts/options/set_nml.gbox128 +++ b/configuration/scripts/options/set_nml.gbox128 @@ -1,5 +1,7 @@ +grid_ocn = 'B' ice_ic = 'default' grid_type = 'rectangular' atm_data_type = 'box2001' ocn_data_type = 'box2001' ice_data_type = 'box2001' + diff --git a/configuration/scripts/options/set_nml.gridb b/configuration/scripts/options/set_nml.gridb index 2a209410b..eadfc15ce 100644 --- a/configuration/scripts/options/set_nml.gridb +++ b/configuration/scripts/options/set_nml.gridb @@ -1,2 +1,2 @@ -grid_system = 'B' +grid_ice = 'B' diff --git a/configuration/scripts/options/set_nml.gridcd b/configuration/scripts/options/set_nml.gridcd index 9426056e9..c4198f382 100644 --- a/configuration/scripts/options/set_nml.gridcd +++ b/configuration/scripts/options/set_nml.gridcd @@ -1,2 +1,2 @@ -grid_system = 'CD' +grid_ice = 'CD' diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index da4f1280a..8ca6e5414 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -283,9 +283,20 @@ either Celsius or Kelvin units). 
"fyear_init", "initial forcing data year", "" "**G**", "", "" "gravit", "gravitational acceleration", "9.80616 m/s\ :math:`^2`" + "grid_atm", "grid structure for atm forcing/coupling fields, 'A', 'B', 'C', etc", "" + "grid_atm_dynu", "grid for atm dynamic-u forcing/coupling fields, 'T', 'U', 'N', 'E'", "" + "grid_atm_dynv", "grid for atm dynamic-v forcing/coupling fields, 'T', 'U', 'N', 'E'", "" + "grid_atm_thrm", "grid for atm thermodynamic forcing/coupling fields, 'T', 'U', 'N', 'E'", "" "grid_file", "input file for grid info", "" "grid_format", "format of grid files", "" - "grid_system", "structure of the grid, ‘B’, ‘CD’, etc", "" + "grid_ice", "structure of the model ice grid, ‘B’, ‘CD’, etc", "" + "grid_ice_dynu", "grid for ice dynamic-u model fields, 'T', 'U', 'N', 'E'", "" + "grid_ice_dynv", "grid for ice dynamic-v model fields, 'T', 'U', 'N', 'E'", "" + "grid_ice_thrm", "grid for ice thermodynamic model fields, 'T', 'U', 'N', 'E'", "" + "grid_ocn", "grid structure for ocn forcing/coupling fields, 'A', 'B', 'C', etc", "" + "grid_ocn_dynu", "grid for ocn dynamic-u forcing/coupling fields, 'T', 'U', 'N', 'E'", "" + "grid_ocn_dynv", "grid for ocn dynamic-v forcing/coupling fields, 'T', 'U', 'N', 'E'", "" + "grid_ocn_thrm", "grid for ocn thermodynamic forcing/coupling fields, 'T', 'U', 'N', 'E'", "" "grid_type", "‘rectangular’, ‘displaced_pole’, ‘column’ or ‘regional’", "" "gridcpl_file", "input file for coupling grid info", "" "grow_net", "specific biogeochemistry growth rate per grid cell", "s :math:`^{-1}`" diff --git a/doc/source/master_list.bib b/doc/source/master_list.bib index e4afa0c46..295f5df9d 100644 --- a/doc/source/master_list.bib +++ b/doc/source/master_list.bib @@ -950,13 +950,27 @@ @Article{Roberts18 } @article{Roach19, -author = "L.A. Roach and C. M. Bitz and C. Horvat and S. M. 
Dean", -title = {{Advances in modelling interactions between sea ice and ocean surface waves}}, -journal = {Journal of Advances in Modeling Earth Systems}, -url = {http://doi.wiley.com/10.1029/2019MS001836}, -year={2019} + author = "L.A. Roach and C. M. Bitz and C. Horvat and S. M. Dean", + title = {{Advances in modelling interactions between sea ice and ocean surface waves}}, + journal = {Journal of Advances in Modeling Earth Systems}, + url = {http://doi.wiley.com/10.1029/2019MS001836}, + year={2019} } +@incollection{Arakawa77, + author = "A. Arakawa and V.R. Lamb", + title = "Computational Design of the Basic Dynamical Processes of the UCLA General Circulation Model", + editor = "Julius Chang", + series = "Methods in Computational Physics: Advances in Research and Applications", + publisher = {Elsevier}, + volume = {17}, + pages = {173-265}, + year = {1977}, + booktitle = "General Circulation Models of the Atmosphere", + issn = {0076-6860}, + doi = {https://doi.org/10.1016/B978-0-12-460817-7.50009-4}, + url = {https://www.sciencedirect.com/science/article/pii/B9780124608177500094}, +} ======= @article{Horvat15, diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 6df4c228d..8f763db2f 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -236,12 +236,19 @@ grid_nml "``dxrect``", "real", "x-direction grid spacing for rectangular grid in cm", "0.0" "``dyrect``", "real", "y-direction grid spacing for rectangular grid in cm", "0.0" "``gridcpl_file``", "string", "input file for coupling grid info", "'unknown_gridcpl_file'" + "``grid_atm``", "``A``", "atm forcing/coupling grid, all fields on T grid", "``A``" + "", "``B``", "atm forcing/coupling grid, thermo fields on T grid, dyn fields on U grid", "" + "", "``C``", "atm forcing/coupling grid, thermo fields on T grid, dynu fields on E grid, dynv fields on N grid", "" + "", "``CD``", "atm forcing/coupling grid, 
thermo fields on T grid, dyn fields on N and E grid", "" "``grid_file``", "string", "name of grid file to be read", "'unknown_grid_file'" "``grid_format``", "``bin``", "read direct access grid and kmt files", "``bin``" "", "``nc``", "read grid and kmt files", "" - "``grid_system``", "``B``", "use B grid structure with T at center and U at NE corner, "``B``" - "", "``C``", "use C grid structure with T at center, U at E edge, and V at N edge", "" + "``grid_ice``", "``B``", "use B grid structure with T at center and U at NE corner", "``B``" "", "``CD``", "use CD grid structure with T at center and U/V at N and E edge", "" + "``grid_ocn``", "``A``", "ocn forcing/coupling grid, all fields on T grid", "``A``" + "", "``B``", "ocn forcing/coupling grid, thermo fields on T grid, dyn fields on U grid", "" + "", "``C``", "ocn forcing/coupling grid, thermo fields on T grid, dynu fields on E grid, dynv fields on N grid", "" + "", "``CD``", "ocn forcing/coupling grid, thermo fields on T grid, dyn fields on N and E grid", "" "``grid_type``", "``displaced_pole``", "read from file in *popgrid*", "``rectangular``" "", "``rectangular``", "defined in *rectgrid*", "" "", "``regional``", "read from file in *popgrid*", "" @@ -252,7 +259,7 @@ grid_nml "", "``2``", "WMO standard categories", "" "", "``3``", "asymptotic scheme", "" "``kmt_type``", "string", "file, default or boxislands", "file" - "``kmt_file``", "string", "name of land mask file to be read", "'unknown_kmt_file'" + "``kmt_file``", "string", "name of land mask file to be read", "``unknown_kmt_file``" "``nblyr``", "integer", "number of zbgc layers", "0" "``ncat``", "integer", "number of ice thickness categories", "0" "``nfsd``", "integer", "number of floe size categories", "1" diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index 5bccd33e1..a838f887b 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -419,6 
+419,122 @@ or southern hemispheres, respectively. Special constants (``spval`` and points in the history files and diagnostics. +.. _interpolation: + +**************************** +Interpolating between grids +**************************** + +Fields in CICE are generally defined at particular grid locations, such as T cell centers, +U corners, or N or E edges. These are assigned internally in CICE based on the ``grid_ice`` +namelist variable. Forcing/coupling fields are also associated with a +specific set of grid locations that may or may not be the same as on the internal CICE model grid. +The namelist variables ``grid_atm`` and ``grid_ocn`` define the forcing/coupling grids. +The ``grid_ice``, ``grid_atm``, and ``grid_ocn`` variables are independent and take +values like ``A``, ``B``, ``C``, or ``CD`` consistent with the Arakawa grid convention :cite:`Arakawa77`. +The relationship between the grid system and the internal grids is shown in :ref:`tab-gridsys`. + +.. _tab-gridsys: + +.. table:: Grid System and Type Definitions + :align: center + + +--------------+----------------+----------------+----------------+ + | grid system | thermo grid | u dynamic grid | v dynamic grid | + +==============+================+================+================+ + | A | T | T | T | + +--------------+----------------+----------------+----------------+ + | B | T | U | U | + +--------------+----------------+----------------+----------------+ + | C | T | E | N | + +--------------+----------------+----------------+----------------+ + | CD | T | N+E | N+E | + +--------------+----------------+----------------+----------------+ + +For all grid systems, thermodynamic variables are always defined on the ``T`` grid for the model and +model forcing/coupling fields. However, the dynamics u and v fields vary. +In the ``CD`` grid, there are twice as many u and v fields as on the other grids. 
Within the CICE model, +the variables ``grid_ice_thrm``, ``grid_ice_dynu``, ``grid_ice_dynv``, ``grid_atm_thrm``, +``grid_atm_dynu``, ``grid_atm_dynv``, ``grid_ocn_thrm``, ``grid_ocn_dynu``, and ``grid_ocn_dynv`` are +character strings (``T``, ``U``, ``N``, ``E`` , ``NE``) derived from the ``grid_ice``, ``grid_atm``, +and ``grid_ocn`` namelist values. + +The CICE model has several internal methods that will interpolate (a.k.a. map or average) fields on +(``T``, ``U``, ``N``, ``E``, ``NE``) grids to (``T``, ``U``, ``N``, ``E``). An interpolation +to an identical grid results in a field copy. The generic interface to this method is ``grid_average_X2Y``, +and there are several forms. + +.. code-block:: fortran + + subroutine grid_average_X2Y(type,work1,grid1,work2,grid2) + character(len=*) , intent(in) :: type ! mapping type (S, A, F) + real (kind=dbl_kind), intent(in) :: work1(:,:,:) ! input field(nx_block, ny_block, max_blocks) + character(len=*) , intent(in) :: grid1 ! work1 grid (T, U, N, E) + real (kind=dbl_kind), intent(out) :: work2(:,:,:) ! output field(nx_block, ny_block, max_blocks) + character(len=*) , intent(in) :: grid2 ! work2 grid (T, U, N, E) + +where type is an interpolation type with the following valid values, + +type = ``S`` is a normalized, masked, area-weighted interpolation + +.. math:: + work2 = \frac{\sum_{i=1}^{n} (M_{1i}A_{1i}work1_{i})} {\sum_{i=1}^{n} (M_{1i}A_{1i})} + +type = ``A`` is a normalized, unmasked, area-weighted interpolation + +.. math:: + work2 = \frac{\sum_{i=1}^{n} (A_{1i}work1_{i})} {\sum_{i=1}^{n} (A_{1i})} + +type = ``F`` is a normalized, unmasked, conservative flux interpolation + +.. math:: + work2 = \frac{\sum_{i=1}^{n} (A_{1i}work1_{i})} {n*A_{2}} + +with A defined as the appropriate gridcell area and M as the gridcell mask. +Another form of the ``grid_average_X2Y`` is + +.. code-block:: fortran + + subroutine grid_average_X2Y(type,work1,grid1,wght1,mask1,work2,grid2) + character(len=*) , intent(in) :: type ! 
mapping type (S, A, F) + real (kind=dbl_kind), intent(in) :: work1(:,:,:) ! input field(nx_block, ny_block, max_blocks) + real (kind=dbl_kind), intent(in) :: wght1(:,:,:) ! input weight(nx_block, ny_block, max_blocks) + real (kind=dbl_kind), intent(in) :: mask1(:,:,:) ! input mask(nx_block, ny_block, max_blocks) + character(len=*) , intent(in) :: grid1 ! work1 grid (T, U, N, E) + real (kind=dbl_kind), intent(out) :: work2(:,:,:) ! output field(nx_block, ny_block, max_blocks) + character(len=*) , intent(in) :: grid2 ! work2 grid (T, U, N, E) + +In this case, the input arrays `wght1` and `mask1` are used in the interpolation equations instead of gridcell +area and mask. This version allows the user to define the weights and mask +explicitly. This implementation is supported only for type = ``S`` or ``A`` interpolations. + +A final form of the ``grid_average_X2Y`` interface is + +.. code-block:: fortran + + subroutine grid_average_X2Y(type,work1a,grid1a,work1b,grid1b,work2,grid2) + character(len=*) , intent(in) :: type ! mapping type (S, A, F) + real (kind=dbl_kind), intent(in) :: work1a(:,:,:) ! input field(nx_block, ny_block, max_blocks) + character(len=*) , intent(in) :: grid1a ! work1 grid (N, E) + real (kind=dbl_kind), intent(in) :: work1b(:,:,:) ! input field(nx_block, ny_block, max_blocks) + character(len=*) , intent(in) :: grid1b ! work1 grid (N, E) + real (kind=dbl_kind), intent(out) :: work2(:,:,:) ! output field(nx_block, ny_block, max_blocks) + character(len=*) , intent(in) :: grid2 ! work2 grid (T, U) + +This version supports mapping from an ``NE`` grid to a ``T`` or ``U`` grid. In this case, the ``1a`` arguments +are for either the `N` or `E` field and the 1b arguments are for the complementary field (``E`` or ``N`` respectively). +At present, only ``S`` type mappings are supported with this interface. + +In all cases, the work1, wght1, and mask1 input arrays should have correct halo values when called. 
Examples of usage +can be found in the source code, but the following example maps the uocn and vocn fields from their native +forcing/coupling grid to the ``U`` grid using a masked, area-weighted, average method. + +.. code-block:: fortran + + call grid_average_X2Y('S', uocn, grid_ocn_dynu, uocnU, 'U') + call grid_average_X2Y('S', vocn, grid_ocn_dynv, vocnU, 'U') + + .. _performance: ***************