diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 6256f9bc6..77cf56ddc 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -18,6 +18,9 @@ please refer to: umax_stab) then !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - if (abs(work1(i,j,iblk) - umaxn) < puny) then - write(nu_diag,*) ' ' - write(nu_diag,*) 'Warning, large ice speed' - write(nu_diag,*) 'my_task, iblk, i, j, umaxn:', & - my_task, iblk, i, j, umaxn - endif - enddo - enddo + do j = 1, ny_block + do i = 1, nx_block + if (abs(work1(i,j,iblk) - umaxn) < puny) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'Warning, large ice speed' + write(nu_diag,*) 'my_task, iblk, i, j, umaxn:', & + my_task, iblk, i, j, umaxn + endif + enddo + enddo enddo !$OMP END PARALLEL DO elseif (umaxs > umax_stab) then !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - if (abs(work1(i,j,iblk) - umaxs) < puny) then - write(nu_diag,*) ' ' - write(nu_diag,*) 'Warning, large ice speed' - write(nu_diag,*) 'my_task, iblk, i, j, umaxs:', & - my_task, iblk, i, j, umaxs - endif - enddo - enddo + do j = 1, ny_block + do i = 1, nx_block + if (abs(work1(i,j,iblk) - umaxs) < puny) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'Warning, large ice speed' + write(nu_diag,*) 'my_task, iblk, i, j, umaxs:', & + my_task, iblk, i, j, umaxs + endif + enddo + enddo enddo !$OMP END PARALLEL DO endif ! umax @@ -1357,14 +1349,14 @@ subroutine init_mass_diags do n=1,n_aero !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = trcr(i,j,nt_aero +4*(n-1),iblk)*vsno(i,j,iblk) & - + trcr(i,j,nt_aero+1+4*(n-1),iblk)*vsno(i,j,iblk) & - + trcr(i,j,nt_aero+2+4*(n-1),iblk)*vice(i,j,iblk) & - + trcr(i,j,nt_aero+3+4*(n-1),iblk)*vice(i,j,iblk) - enddo - enddo + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = trcr(i,j,nt_aero +4*(n-1),iblk)*vsno(i,j,iblk) & + + trcr(i,j,nt_aero+1+4*(n-1),iblk)*vsno(i,j,iblk) & + + trcr(i,j,nt_aero+2+4*(n-1),iblk)*vice(i,j,iblk) & + + trcr(i,j,nt_aero+3+4*(n-1),iblk)*vice(i,j,iblk) + enddo + enddo enddo !$OMP END PARALLEL DO totaeron(n)= global_sum(work1, distrb_info, field_loc_center, tarean) @@ -1377,17 +1369,17 @@ subroutine init_mass_diags totps = c0 !$OMP PARALLEL DO PRIVATE(iblk,i,j,n) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = c0 - do n = 1, ncat - work1(i,j,iblk) = work1(i,j,iblk) & - + aicen(i,j,n,iblk) & - * trcrn(i,j,nt_apnd,n,iblk) & - * trcrn(i,j,nt_hpnd,n,iblk) + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + do n = 1, ncat + work1(i,j,iblk) = work1(i,j,iblk) & + + aicen(i,j,n,iblk) & + * trcrn(i,j,nt_apnd,n,iblk) & + * trcrn(i,j,nt_hpnd,n,iblk) + enddo + enddo enddo - enddo - enddo enddo !$OMP END PARALLEL DO totpn = global_sum(work1, distrb_info, field_loc_center, tarean) diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90 index caaa56295..f5e7d0d16 100644 --- a/cicecore/cicedynB/analysis/ice_history.F90 +++ b/cicecore/cicedynB/analysis/ice_history.F90 @@ -3546,7 +3546,7 @@ subroutine accum_hist (dt) call accum_hist_drag (iblk) ! floe size distribution - call accum_hist_fsd (iblk) + call accum_hist_fsd (dt, iblk) ! 
advanced snow physics call accum_hist_snow (iblk) diff --git a/cicecore/cicedynB/analysis/ice_history_fsd.F90 b/cicecore/cicedynB/analysis/ice_history_fsd.F90 index 50fee99e7..b52db4e05 100644 --- a/cicecore/cicedynB/analysis/ice_history_fsd.F90 +++ b/cicecore/cicedynB/analysis/ice_history_fsd.F90 @@ -137,7 +137,7 @@ subroutine init_hist_fsd_2D do ns = 1, nstreams if (f_wave_sig_ht(1:1) /= 'x') & - call define_hist_field(n_wave_sig_ht,"wave_sig_ht","1",tstr2D, tcstr, & + call define_hist_field(n_wave_sig_ht,"wave_sig_ht","m",tstr2D, tcstr, & "significant height of wind and swell waves", & "from attenuated spectrum in ice", c1, c0, & ns, f_wave_sig_ht) @@ -147,7 +147,7 @@ subroutine init_hist_fsd_2D "for waves", c1, c0, & ns, f_aice_ww) if (f_diam_ww(1:1) /= 'x') & - call define_hist_field(n_diam_ww,"diam_ww","1",tstr2D, tcstr, & + call define_hist_field(n_diam_ww,"diam_ww","m",tstr2D, tcstr, & "Average (number) diameter of floes > Dmin", & "for waves", c1, c0, & ns, f_diam_ww) @@ -216,27 +216,27 @@ subroutine init_hist_fsd_3Df if (histfreq(ns) /= 'x') then if (f_afsd(1:1) /= 'x') & - call define_hist_field(n_afsd,"afsd", "1", tstr3Df, tcstr, & + call define_hist_field(n_afsd,"afsd", "1/m", tstr3Df, tcstr, & "areal floe size distribution", & "per unit bin width ", c1, c0, ns, f_afsd) if (f_dafsd_newi(1:1) /= 'x') & - call define_hist_field(n_dafsd_newi,"dafsd_newi","1",tstr3Df, tcstr, & + call define_hist_field(n_dafsd_newi,"dafsd_newi","1/s",tstr3Df, tcstr, & "Change in fsd: new ice", & "Avg over freq period", c1, c0, ns, f_dafsd_newi) if (f_dafsd_latg(1:1) /= 'x') & - call define_hist_field(n_dafsd_latg,"dafsd_latg","1",tstr3Df, tcstr, & + call define_hist_field(n_dafsd_latg,"dafsd_latg","1/s",tstr3Df, tcstr, & "Change in fsd: lateral growth", & "Avg over freq period", c1, c0, ns, f_dafsd_latg) if (f_dafsd_latm(1:1) /= 'x') & - call define_hist_field(n_dafsd_latm,"dafsd_latm","1",tstr3Df, tcstr, & + call define_hist_field(n_dafsd_latm,"dafsd_latm","1/s",tstr3Df, tcstr, & "Change in fsd: lateral melt", & "Avg over freq period", c1, c0, ns, f_dafsd_latm) if (f_dafsd_wave(1:1) /= 'x') & - call define_hist_field(n_dafsd_wave,"dafsd_wave","1",tstr3Df, tcstr, & + call define_hist_field(n_dafsd_wave,"dafsd_wave","1/s",tstr3Df, tcstr, & "Change in fsd: waves", & "Avg over freq period", c1, c0, ns, f_dafsd_wave) if (f_dafsd_weld(1:1) /= 'x') & - call define_hist_field(n_dafsd_weld,"dafsd_weld","1",tstr3Df, tcstr, & + call define_hist_field(n_dafsd_weld,"dafsd_weld","1/s",tstr3Df, tcstr, & "Change in fsd: welding", & "Avg over freq period", c1, c0, ns, f_dafsd_weld) endif ! if (histfreq(ns) /= 'x') @@ -272,7 +272,7 @@ subroutine init_hist_fsd_4Df if (histfreq(ns) /= 'x') then if (f_afsdn(1:1) /= 'x') & - call define_hist_field(n_afsdn,"afsdn","1",tstr4Df, tcstr, & + call define_hist_field(n_afsdn,"afsdn","1/m",tstr4Df, tcstr, & "areal floe size and thickness distribution", & "per unit bin width", c1, c0, ns, f_afsdn) @@ -288,16 +288,19 @@ end subroutine init_hist_fsd_4Df ! accumulate average ice quantities or snapshots ! author: Elizabeth C. 
Hunke, LANL - subroutine accum_hist_fsd (iblk) + subroutine accum_hist_fsd (dt, iblk) use ice_blocks, only: nx_block, ny_block use ice_constants, only: c0, c1, c2, c4 use ice_history_shared, only: a2D, a3Df, a4Df, nfsd_hist, & ncat_hist, accum_hist_field, n3Dacum, n4Dscum - use ice_state, only: trcrn, aicen_init, vicen, aice_init + use ice_state, only: trcrn, aicen, vicen, aice use ice_arrays_column, only: wave_sig_ht, floe_rad_c, floe_binwidth, & d_afsd_newi, d_afsd_latg, d_afsd_latm, d_afsd_wave, d_afsd_weld + real (kind=dbl_kind), intent(in) :: & + dt ! time step + integer (kind=int_kind), intent(in) :: & iblk ! block index @@ -342,7 +345,7 @@ subroutine accum_hist_fsd (iblk) worka(i,j) = c0 do n = 1, ncat_hist do k = 1, nfsd_hist - worka(i,j) = worka(i,j) + aicen_init(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) + worka(i,j) = worka(i,j) + aicen(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) end do end do end do @@ -357,7 +360,7 @@ subroutine accum_hist_fsd (iblk) workb = c0 do n = 1, ncat_hist do k = 1, nfsd_hist - workc = aicen_init(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) & + workc = aicen(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) & / (c4*floeshape*floe_rad_c(k)**2) ! number-mean radius worka(i,j) = worka(i,j) + workc * floe_rad_c(k) @@ -380,7 +383,7 @@ subroutine accum_hist_fsd (iblk) workb = c0 do n = 1, ncat_hist do k = 1, nfsd_hist - workb = workb + aicen_init(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) + workb = workb + aicen(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) worka(i,j) = worka(i,j) + vicen(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) end do end do @@ -398,12 +401,12 @@ subroutine accum_hist_fsd (iblk) do j = 1, ny_block do i = 1, nx_block worka(i,j) = c0 - if (aice_init(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then do k = 1, nfsd_hist do n = 1, ncat_hist worka(i,j) = worka(i,j) & + (trcrn(i,j,nt_fsd+k-1,n,iblk) * floe_rad_c(k) & - * aicen_init(i,j,n,iblk)/aice_init(i,j,iblk)) + * aicen(i,j,n,iblk)/aice(i,j,iblk)) end do end do endif @@ -416,12 +419,12 @@ subroutine accum_hist_fsd (iblk) do j = 1, ny_block do i = 1, nx_block worka(i,j) = c0 - if (aice_init(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then do k = 1, nfsd_hist do n = 1, ncat_hist worka(i,j) = worka(i,j) & + (c8*floeshape*trcrn(i,j,nt_fsd+k-1,n,iblk)*floe_rad_c(k) & - *aicen_init(i,j,n,iblk)/(c4*floeshape*floe_rad_c(k)**2 *aice_init(i,j,iblk))) + *aicen(i,j,n,iblk)/(c4*floeshape*floe_rad_c(k)**2 *aice(i,j,iblk))) end do end do endif @@ -442,7 +445,7 @@ subroutine accum_hist_fsd (iblk) worke(i,j,k)=c0 do n = 1, ncat_hist worke(i,j,k) = worke(i,j,k) + (trcrn(i,j,nt_fsd+k-1,n,iblk) & - * aicen_init(i,j,n,iblk)/floe_binwidth(k)) + * aicen(i,j,n,iblk)/floe_binwidth(k)) end do end do end do @@ -452,19 +455,19 @@ subroutine accum_hist_fsd (iblk) if (f_dafsd_newi(1:1)/= 'x') & call accum_hist_field(n_dafsd_newi-n3Dacum, iblk, nfsd_hist, & - d_afsd_newi(:,:,1:nfsd_hist,iblk), a3Df) + d_afsd_newi(:,:,1:nfsd_hist,iblk)/dt, a3Df) if (f_dafsd_latg(1:1)/= 'x') & call accum_hist_field(n_dafsd_latg-n3Dacum, iblk, nfsd_hist, & - d_afsd_latg(:,:,1:nfsd_hist,iblk), a3Df) + d_afsd_latg(:,:,1:nfsd_hist,iblk)/dt, a3Df) if (f_dafsd_latm(1:1)/= 'x') & call accum_hist_field(n_dafsd_latm-n3Dacum, iblk, nfsd_hist, & - d_afsd_latm(:,:,1:nfsd_hist,iblk), a3Df) + d_afsd_latm(:,:,1:nfsd_hist,iblk)/dt, a3Df) if (f_dafsd_wave(1:1)/= 'x') & call accum_hist_field(n_dafsd_wave-n3Dacum, iblk, nfsd_hist, & - d_afsd_wave(:,:,1:nfsd_hist,iblk), a3Df) + d_afsd_wave(:,:,1:nfsd_hist,iblk)/dt, a3Df) if (f_dafsd_weld(1:1)/= 'x') & call 
accum_hist_field(n_dafsd_weld-n3Dacum, iblk, nfsd_hist, & - d_afsd_weld(:,:,1:nfsd_hist,iblk), a3Df) + d_afsd_weld(:,:,1:nfsd_hist,iblk)/dt, a3Df) endif ! a3Df allocated ! 4D floe size, thickness category fields @@ -476,7 +479,7 @@ subroutine accum_hist_fsd (iblk) do j = 1, ny_block do i = 1, nx_block workd(i,j,k,n) = trcrn(i,j,nt_fsd+k-1,n,iblk) & - * aicen_init(i,j,n,iblk)/floe_binwidth(k) + * aicen(i,j,n,iblk)/floe_binwidth(k) end do end do end do diff --git a/cicecore/cicedynB/analysis/ice_history_pond.F90 b/cicecore/cicedynB/analysis/ice_history_pond.F90 index f6e4b8737..8818ff94e 100644 --- a/cicecore/cicedynB/analysis/ice_history_pond.F90 +++ b/cicecore/cicedynB/analysis/ice_history_pond.F90 @@ -268,13 +268,8 @@ subroutine accum_hist_pond (iblk) integer (kind=int_kind) :: & nt_apnd, nt_hpnd, nt_alvl, nt_ipnd -#ifdef UNDEPRECATE_CESMPONDS - logical (kind=log_kind) :: & - tr_pond_cesm, tr_pond_lvl, tr_pond_topo -#else logical (kind=log_kind) :: & tr_pond_lvl, tr_pond_topo -#endif real (kind=dbl_kind) :: & puny @@ -289,13 +284,8 @@ subroutine accum_hist_pond (iblk) !--------------------------------------------------------------- call icepack_query_parameters(puny_out=puny) -#ifdef UNDEPRECATE_CESMPONDS - call icepack_query_tracer_flags(tr_pond_cesm_out=tr_pond_cesm, & - tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo) -#else call icepack_query_tracer_flags(tr_pond_lvl_out=tr_pond_lvl, & tr_pond_topo_out=tr_pond_topo) -#endif call icepack_query_tracer_indices(nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd, & nt_alvl_out=nt_alvl, nt_ipnd_out=nt_ipnd) call icepack_warnings_flush(nu_diag) @@ -303,27 +293,9 @@ subroutine accum_hist_pond (iblk) file=__FILE__, line=__LINE__) if (allocated(a2D)) then -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - if (f_apond(1:1)/= 'x') & - call accum_hist_field(n_apond, iblk, & - trcr(:,:,nt_apnd,iblk), a2D) - if (f_apond_ai(1:1)/= 'x') & - call accum_hist_field(n_apond_ai, iblk, & - aice(:,:,iblk) * trcr(:,:,nt_apnd,iblk), a2D) - if (f_hpond(1:1)/= 'x') & - call accum_hist_field(n_hpond, iblk, & - trcr(:,:,nt_apnd,iblk) & - * trcr(:,:,nt_hpnd,iblk), a2D) - if (f_hpond_ai(1:1)/= 'x') & - call accum_hist_field(n_hpond_ai, iblk, & - aice(:,:,iblk) * trcr(:,:,nt_apnd,iblk) & - * trcr(:,:,nt_hpnd,iblk), a2D) - elseif (tr_pond_lvl) then -#else if (tr_pond_lvl) then -#endif + if (f_apond(1:1)/= 'x') & call accum_hist_field(n_apond, iblk, & trcr(:,:,nt_alvl,iblk) * trcr(:,:,nt_apnd,iblk), a2D) diff --git a/cicecore/cicedynB/analysis/ice_history_shared.F90 b/cicecore/cicedynB/analysis/ice_history_shared.F90 index ee48a9996..d9c62edde 100644 --- a/cicecore/cicedynB/analysis/ice_history_shared.F90 +++ b/cicecore/cicedynB/analysis/ice_history_shared.F90 @@ -156,57 +156,25 @@ module ice_history_shared igrdz(nvar_grdz) ! true if category/vertical grid field is written character (len=25), public, parameter :: & - tcstr = 'area: tarea' , & ! vcellmeas for T cell quantities - ucstr = 'area: uarea' , & ! vcellmeas for U cell quantities - ncstr = 'area: narea' , & ! vcellmeas for N cell quantities - ecstr = 'area: earea' , & ! vcellmeas for E cell quantities - tstr2D = 'TLON TLAT time' , & ! vcoord for T cell quantities, 2D - ustr2D = 'ULON ULAT time' , & ! vcoord for U cell quantities, 2D - nstr2D = 'NLON NLAT time' , & ! vcoord for N cell quantities, 2D - estr2D = 'ELON ELAT time' , & ! vcoord for E cell quantities, 2D - tstr3Dz = 'TLON TLAT VGRDi time',& ! vcoord for T cell quantities, 3D - ustr3Dz = 'ULON ULAT VGRDi time',& ! 
vcoord for U cell quantities, 3D - nstr3Dz = 'NLON NLAT VGRDi time',& ! vcoord for N cell quantities, 3D - estr3Dz = 'ELON ELAT VGRDi time',& ! vcoord for E cell quantities, 3D - tstr3Dc = 'TLON TLAT NCAT time',& ! vcoord for T cell quantities, 3D - ustr3Dc = 'ULON ULAT NCAT time',& ! vcoord for U cell quantities, 3D - nstr3Dc = 'NLON NLAT NCAT time',& ! vcoord for N cell quantities, 3D - estr3Dc = 'ELON ELAT NCAT time',& ! vcoord for E cell quantities, 3D - tstr3Db = 'TLON TLAT VGRDb time',& ! vcoord for T cell quantities, 3D - ustr3Db = 'ULON ULAT VGRDb time',& ! vcoord for U cell quantities, 3D - nstr3Db = 'NLON NLAT VGRDb time',& ! vcoord for N cell quantities, 3D - estr3Db = 'ELON ELAT VGRDb time',& ! vcoord for E cell quantities, 3D - tstr3Da = 'TLON TLAT VGRDa time',& ! vcoord for T cell quantities, 3D - ustr3Da = 'ULON ULAT VGRDa time',& ! vcoord for U cell quantities, 3D - nstr3Da = 'NLON NLAT VGRDa time',& ! vcoord for N cell quantities, 3D - estr3Da = 'ELON ELAT VGRDa time',& ! vcoord for E cell quantities, 3D - tstr3Df = 'TLON TLAT NFSD time',& ! vcoord for T cell quantities, 3D - ustr3Df = 'ULON ULAT NFSD time',& ! vcoord for U cell quantities, 3D - nstr3Df = 'NLON NLAT NFSD time',& ! vcoord for N cell quantities, 3D - estr3Df = 'ELON ELAT NFSD time',& ! vcoord for E cell quantities, 3D - -!ferret + ! T grids + tcstr = 'area: tarea' , & ! vcellmeas for T cell quantities + tstr2D = 'TLON TLAT time' , & ! vcoord for T cell, 2D + tstr3Dc = 'TLON TLAT NCAT time', & ! vcoord for T cell, 3D, ncat + tstr3Da = 'TLON TLAT VGRDa time', & ! vcoord for T cell, 3D, ice-snow-bio + tstr3Db = 'TLON TLAT VGRDb time', & ! vcoord for T cell, 3D, ice-bio + tstr3Df = 'TLON TLAT NFSD time', & ! vcoord for T cell, 3D, fsd tstr4Di = 'TLON TLAT VGRDi NCAT', & ! vcoord for T cell, 4D, ice - ustr4Di = 'ULON ULAT VGRDi NCAT', & ! vcoord for U cell, 4D, ice - nstr4Di = 'NLON NLAT VGRDi NCAT', & ! vcoord for N cell, 4D, ice - estr4Di = 'ELON ELAT VGRDi NCAT', & ! vcoord for E cell, 4D, ice tstr4Ds = 'TLON TLAT VGRDs NCAT', & ! vcoord for T cell, 4D, snow - ustr4Ds = 'ULON ULAT VGRDs NCAT', & ! vcoord for U cell, 4D, snow - nstr4Ds = 'NLON NLAT VGRDs NCAT', & ! vcoord for N cell, 4D, snow - estr4Ds = 'ELON ELAT VGRDs NCAT', & ! vcoord for E cell, 4D, snow tstr4Df = 'TLON TLAT NFSD NCAT', & ! vcoord for T cell, 4D, fsd - ustr4Df = 'ULON ULAT NFSD NCAT', & ! vcoord for U cell, 4D, fsd - nstr4Df = 'NLON NLAT NFSD NCAT', & ! vcoord for N cell, 4D, fsd - estr4Df = 'ELON ELAT NFSD NCAT' ! vcoord for E cell, 4D, fsd -!ferret -! tstr4Di = 'TLON TLAT VGRDi NCAT time', & ! ferret can not handle time -! ustr4Di = 'ULON ULAT VGRDi NCAT time', & ! index on 4D variables. -! tstr4Ds = 'TLON TLAT VGRDs NCAT time', & ! Use 'ferret' lines instead -! ustr4Ds = 'ULON ULAT VGRDs NCAT time', & ! (below also) -! tstr4Db = 'TLON TLAT VGRDb NCAT time', & -! ustr4Db = 'ULON ULAT VGRDb NCAT time', & -! tstr4Df = 'TLON TLAT NFSD NCAT time', & -! ustr4Df = 'ULON ULAT NFSD NCAT time', & + ! U grids + ucstr = 'area: uarea' , & ! vcellmeas for U cell quantities + ustr2D = 'ULON ULAT time' , & ! vcoord for U cell, 2D + ! N grids + ncstr = 'area: narea' , & ! vcellmeas for N cell quantities + nstr2D = 'NLON NLAT time' , & ! vcoord for N cell, 2D + ! E grids + ecstr = 'area: earea' , & ! vcellmeas for E cell quantities + estr2D = 'ELON ELAT time' ! vcoord for E cell, 2D !--------------------------------------------------------------- ! 
flags: write to output file if true or histfreq value diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index f71d959da..28a047c4e 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -134,19 +134,19 @@ subroutine eap (dt) dyn_prep1, dyn_prep2, stepu, dyn_finish, & seabed_stress_factor_LKD, seabed_stress_factor_prob, & seabed_stress_method, seabed_stress, & - stack_fields, unstack_fields + stack_fields, unstack_fields, iceTmask, iceUmask use ice_flux, only: rdg_conv, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & - strocnxT, strocnyT, strax, stray, & + strax, stray, & TbU, hwater, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_grid, only: tmask, umask, dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, & - tarear, uarear, grid_average_X2Y, iceumask, & + tarear, uarear, grid_average_X2Y, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv - use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & + use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, divu, shear, & aice_init, aice0, aicen, vicen, strength use ice_timers, only: timer_dynamics, timer_bound, & ice_timer_start, ice_timer_stop @@ -163,31 +163,33 @@ subroutine eap (dt) i, j, ij integer (kind=int_kind), dimension(max_blocks) :: & - icellt , & ! no. of cells where icetmask = 1 - icellu ! no. of cells where iceumask = 1 + icellT , & ! no. of cells where iceTmask = .true. + icellU ! no. of cells where iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks) :: & - indxti , & ! compressed index in i-direction - indxtj , & ! compressed index in j-direction - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj , & ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & uocnU , & ! i ocean current (m/s) vocnU , & ! j ocean current (m/s) ss_tltxU , & ! sea surface slope, x-direction (m/m) ss_tltyU , & ! sea surface slope, y-direction (m/m) + cdn_ocnU , & ! ocn drag coefficient tmass , & ! total mass of ice and snow (kg/m^2) waterxU , & ! for ocean stress calculation, x (m/s) wateryU , & ! for ocean stress calculation, y (m/s) forcexU , & ! work array: combined atm stress and ocn tilt, x forceyU , & ! work array: combined atm stress and ocn tilt, y - aiU , & ! ice fraction on u-grid umass , & ! total mass of ice and snow (u grid) umassdti ! mass of U-cell/dte (kg/m^2 s) real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:) ! temporary for stacking fields for halo update + fld2(:,:,:,:), & ! temporary for stacking fields for halo update + fld3(:,:,:,:), & ! temporary for stacking fields for halo update + fld4(:,:,:,:) ! temporary for stacking fields for halo update real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & strtmp ! stress combinations for momentum equation @@ -196,7 +198,6 @@ subroutine eap (dt) calc_strair integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & - icetmask , & ! ice extent mask (T-cell) halomask ! ice mask for halo update type (ice_halo) :: & @@ -205,10 +206,6 @@ subroutine eap (dt) type (block) :: & this_block ! 
block information for current block - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1 , & ! temporary - work2 ! temporary - character(len=*), parameter :: subname = '(eap)' call ice_timer_start(timer_dynamics) ! dynamics @@ -218,6 +215,8 @@ subroutine eap (dt) !----------------------------------------------------------------- allocate(fld2(nx_block,ny_block,2,max_blocks)) + allocate(fld3(nx_block,ny_block,3,max_blocks)) + allocate(fld4(nx_block,ny_block,4,max_blocks)) ! This call is needed only if dt changes during runtime. ! call set_evp_parameters (dt) @@ -256,13 +255,13 @@ subroutine eap (dt) ilo, ihi, jlo, jhi, & aice (:,:,iblk), vice (:,:,iblk), & vsno (:,:,iblk), tmask (:,:,iblk), & - tmass (:,:,iblk), icetmask(:,:,iblk)) + tmass (:,:,iblk), iceTmask(:,:,iblk)) enddo ! iblk !$OMP END PARALLEL DO call ice_timer_start(timer_bound) - call ice_HaloUpdate (icetmask, halo_info, & + call ice_HaloUpdate (iceTmask, halo_info, & field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) @@ -270,8 +269,18 @@ subroutine eap (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('F', tmass , 'T' , umass, 'U') - call grid_average_X2Y('F', aice_init, 'T' , aiU , 'U') + call stack_fields(tmass, aice_init, cdn_ocn, fld3) + call ice_HaloUpdate (fld3, halo_info, & + field_loc_center, field_type_scalar) + call stack_fields(uocn, vocn, ss_tltx, ss_tlty, fld4) + call ice_HaloUpdate (fld4, halo_info, & + field_loc_center, field_type_vector) + call unstack_fields(fld3, tmass, aice_init, cdn_ocn) + call unstack_fields(fld4, uocn, vocn, ss_tltx, ss_tlty) + + call grid_average_X2Y('S', tmass , 'T' , umass , 'U') + call grid_average_X2Y('S', aice_init, 'T' , aiU , 'U') + call grid_average_X2Y('S', cdn_ocn , 'T' , cdn_ocnU, 'U') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnU , 'U') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnU , 'U') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxU, 'U') @@ -314,16 +323,16 @@ subroutine eap (dt) call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt (iblk), icellu (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellT (iblk), icellU (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & - icetmask (:,:,iblk), iceumask (:,:,iblk), & + iceTmask (:,:,iblk), iceUmask (:,:,iblk), & fmU (:,:,iblk), dt, & strtltxU (:,:,iblk), strtltyU (:,:,iblk), & strocnxU (:,:,iblk), strocnyU (:,:,iblk), & @@ -347,7 +356,7 @@ subroutine eap (dt) do j = 1, ny_block do i = 1, nx_block - if (icetmask(i,j,iblk)==0) then + if (.not.iceTmask(i,j,iblk)) then if (tmask(i,j,iblk)) then ! structure tensor a11_1(i,j,iblk) = p5 @@ -364,7 +373,7 @@ subroutine eap (dt) a12_2(i,j,iblk) = c0 a12_3(i,j,iblk) = c0 a12_4(i,j,iblk) = c0 - endif ! icetmask + endif ! iceTmask enddo ! i enddo ! j @@ -374,9 +383,9 @@ subroutine eap (dt) !----------------------------------------------------------------- strength(:,:,iblk) = c0 ! 
initialize - do ij = 1, icellt(iblk) - i = indxti(ij, iblk) - j = indxtj(ij, iblk) + do ij = 1, icellT(iblk) + i = indxTi(ij, iblk) + j = indxTj(ij, iblk) call icepack_ice_strength(ncat=ncat, & aice = aice (i,j, iblk), & vice = vice (i,j, iblk), & @@ -405,7 +414,7 @@ subroutine eap (dt) if (maskhalo_dyn) then call ice_timer_start(timer_bound) halomask = 0 - where (iceumask) halomask = 1 + where (iceUmask) halomask = 1 call ice_HaloUpdate (halomask, halo_info, & field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) @@ -421,8 +430,8 @@ subroutine eap (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_LKD (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj(:,iblk), & vice (:,:,iblk), aice(:,:,iblk), & hwater(:,:,iblk), TbU (:,:,iblk)) enddo @@ -432,8 +441,8 @@ subroutine eap (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_prob (nx_block , ny_block , & - icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & - icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + icellT(iblk), indxTi(:,iblk), indxTj(:,iblk), & + icellU(iblk), indxUi(:,iblk), indxUj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk), & hwater (:,:,iblk), TbU (:,:,iblk)) enddo @@ -453,8 +462,8 @@ subroutine eap (dt) ! call ice_timer_start(timer_tmp1,iblk) call stress_eap (nx_block, ny_block, & ksub, ndte, & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & arlx1i, denom1, & uvel (:,:,iblk), vvel (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & @@ -491,8 +500,8 @@ subroutine eap (dt) ! call ice_timer_start(timer_tmp2,iblk) call stepu (nx_block, ny_block, & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocnU (:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), strtmp (:,:,:), & uocnU (:,:,iblk), vocnU (:,:,iblk), & waterxU (:,:,iblk), wateryU (:,:,iblk), & @@ -513,8 +522,8 @@ subroutine eap (dt) ! call ice_timer_start(timer_tmp3,iblk) if (mod(ksub,10) == 1) then ! only called every 10th timestep call stepa (nx_block , ny_block , & - dtei , icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + dtei , icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & a11 (:,:,iblk), a12 (:,:,iblk), & a11_1 (:,:,iblk), a11_2 (:,:,iblk), & a11_3 (:,:,iblk), a11_4 (:,:,iblk), & @@ -545,7 +554,7 @@ subroutine eap (dt) enddo ! subcycling - deallocate(fld2) + deallocate(fld2,fld3,fld4) if (maskhalo_dyn) call ice_HaloDestroy(halo_info_mask) !----------------------------------------------------------------- @@ -557,8 +566,8 @@ subroutine eap (dt) call dyn_finish & (nx_block, ny_block, & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocnU(:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & aiU (:,:,iblk), fmU (:,:,iblk), & @@ -567,27 +576,6 @@ subroutine eap (dt) enddo !$OMP END PARALLEL DO - ! strocn computed on U, N, E as needed. Map strocn U divided by aiU to T - ! TODO: This should be done elsewhere as part of generalization? - ! 
conservation requires aiU be divided before averaging - work1 = c0 - work2 = c0 - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij,iblk) - j = indxuj(ij,iblk) - work1(i,j,iblk) = strocnxU(i,j,iblk)/aiU(i,j,iblk) - work2(i,j,iblk) = strocnyU(i,j,iblk)/aiU(i,j,iblk) - enddo - enddo - call ice_HaloUpdate (work1, halo_info, & - field_loc_NEcorner, field_type_vector) - call ice_HaloUpdate (work2, halo_info, & - field_loc_NEcorner, field_type_vector) - call grid_average_X2Y('F', work1, 'U', strocnxT, 'T') ! shift - call grid_average_X2Y('F', work2, 'U', strocnyT, 'T') - call ice_timer_stop(timer_dynamics) ! dynamics end subroutine eap @@ -1165,8 +1153,8 @@ end FUNCTION s22ks subroutine stress_eap (nx_block, ny_block, & ksub, ndte, & - icellt, & - indxti, indxtj, & + icellT, & + indxTi, indxTj, & arlx1i, denom1, & uvel, vvel, & dxT, dyT, & @@ -1198,11 +1186,11 @@ subroutine stress_eap (nx_block, ny_block, & nx_block, ny_block, & ! block dimensions ksub , & ! subcycling step ndte , & ! number of subcycles - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), intent(in) :: & arlx1i , & ! dte/2T (original) or 1/alpha1 (revised) @@ -1285,9 +1273,9 @@ subroutine stress_eap (nx_block, ny_block, & strtmp(:,:,:) = c0 - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! strain rates @@ -1889,8 +1877,8 @@ end subroutine update_stress_rdg ! Solves evolution equation for structure tensor (A19, A20) subroutine stepa (nx_block, ny_block, & - dtei, icellt, & - indxti, indxtj, & + dtei, icellT, & + indxTi, indxTj, & a11, a12, & a11_1, a11_2, a11_3, a11_4, & a12_1, a12_2, a12_3, a12_4, & @@ -1903,14 +1891,14 @@ subroutine stepa (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. real (kind=dbl_kind), intent(in) :: & dtei ! 1/dte, where dte is subcycling timestep (1/s) integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & ! ice stress tensor (kg/s^2) in each corner of T cell @@ -1940,9 +1928,9 @@ subroutine stepa (nx_block, ny_block, & dteikth = c1 / (dtei + kth) p5kth = p5 * kth - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) ! 
ne call calc_ffrac(stressp_1(i,j), stressm_1(i,j), & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index c2060285a..8eab5e260 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -85,7 +85,7 @@ subroutine evp (dt) use ice_flux, only: rdg_conv, rdg_shear, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & - strocnxT, strocnyT, strax, stray, & + strax, stray, & TbU, hwater, & strairxN, strairyN, fmN, & strtltxN, strtltyN, strocnxN, strocnyN, strintxN, strintyN, taubxN, taubyN, & @@ -99,14 +99,13 @@ subroutine evp (dt) stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_grid, only: tmask, umask, umaskCD, nmask, emask, uvm, epm, npm, & - iceumask, iceemask, icenmask, & dxE, dxN, dxT, dxU, dyE, dyN, dyT, dyU, & ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & dxhy, dyhx, cxp, cyp, cxm, cym, & tarear, uarear, earear, narear, grid_average_X2Y, uarea, & grid_type, grid_ice, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv - use ice_state, only: aice, vice, vsno, uvel, vvel, uvelN, vvelN, & + use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, uvelN, vvelN, & uvelE, vvelE, divu, shear, & aice_init, aice0, aicen, vicen, strength use ice_timers, only: timer_dynamics, timer_bound, & @@ -116,6 +115,7 @@ subroutine evp (dt) use ice_dyn_shared, only: evp_algorithm, stack_fields, unstack_fields, & DminTarea, visc_method, deformations, deformationsC_T, deformationsCD_T, & strain_rates_U, & + iceTmask, iceUmask, iceEmask, iceNmask, & dyn_haloUpdate real (kind=dbl_kind), intent(in) :: & @@ -130,32 +130,32 @@ subroutine evp (dt) i, j, ij ! local indices integer (kind=int_kind), dimension(max_blocks) :: & - icellt , & ! no. of cells where icetmask = 1 - icelln , & ! no. of cells where icenmask = .true. - icelle , & ! no. of cells where iceemask = .true. - icellu ! no. of cells where iceumask = .true. + icellT , & ! no. of cells where iceTmask = .true. + icellN , & ! no. of cells where iceNmask = .true. + icellE , & ! no. of cells where iceEmask = .true. + icellU ! no. of cells where iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks) :: & - indxti , & ! compressed index in i-direction - indxtj , & ! compressed index in j-direction - indxei , & ! compressed index in i-direction - indxej , & ! compressed index in j-direction - indxni , & ! compressed index in i-direction - indxnj , & ! compressed index in j-direction - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj , & ! compressed index in j-direction + indxEi , & ! compressed index in i-direction + indxEj , & ! compressed index in j-direction + indxNi , & ! compressed index in i-direction + indxNj , & ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & uocnU , & ! i ocean current (m/s) vocnU , & ! j ocean current (m/s) ss_tltxU , & ! sea surface slope, x-direction (m/m) ss_tltyU , & ! sea surface slope, y-direction (m/m) + cdn_ocnU , & ! ocn drag coefficient tmass , & ! total mass of ice and snow (kg/m^2) waterxU , & ! for ocean stress calculation, x (m/s) wateryU , & ! for ocean stress calculation, y (m/s) forcexU , & ! 
work array: combined atm stress and ocn tilt, x forceyU , & ! work array: combined atm stress and ocn tilt, y - aiU , & ! ice fraction on u-grid umass , & ! total mass of ice and snow (u grid) umassdti ! mass of U-cell/dte (kg/m^2 s) @@ -164,6 +164,7 @@ subroutine evp (dt) vocnN , & ! j ocean current (m/s) ss_tltxN , & ! sea surface slope, x-direction (m/m) ss_tltyN , & ! sea surface slope, y-direction (m/m) + cdn_ocnN , & ! ocn drag coefficient waterxN , & ! for ocean stress calculation, x (m/s) wateryN , & ! for ocean stress calculation, y (m/s) forcexN , & ! work array: combined atm stress and ocn tilt, x @@ -177,6 +178,7 @@ subroutine evp (dt) vocnE , & ! j ocean current (m/s) ss_tltxE , & ! sea surface slope, x-direction (m/m) ss_tltyE , & ! sea surface slope, y-direction (m/m) + cdn_ocnE , & ! ocn drag coefficient waterxE , & ! for ocean stress calculation, x (m/s) wateryE , & ! for ocean stress calculation, y (m/s) forcexE , & ! work array: combined atm stress and ocn tilt, x @@ -186,9 +188,9 @@ subroutine evp (dt) emassdti ! mass of E-cell/dte (kg/m^2 s) real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:) , & ! 2 bundled fields - fld3(:,:,:,:) , & ! 3 bundled fields - fld4(:,:,:,:) ! 4 bundled fields + fld2(:,:,:,:), & ! 2 bundled fields + fld3(:,:,:,:), & ! 3 bundled fields + fld4(:,:,:,:) ! 4 bundled fields real (kind=dbl_kind), allocatable :: & strengthU(:,:,:), & ! strength averaged to U points @@ -208,7 +210,6 @@ subroutine evp (dt) calc_strair ! calculate air/ice stress integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & - icetmask, & ! ice extent mask (T-cell) halomask ! generic halo mask type (ice_halo) :: & @@ -217,10 +218,6 @@ subroutine evp (dt) type (block) :: & this_block ! block information for current block - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1, & ! temporary - work2 ! temporary - logical (kind=log_kind), save :: & first_time = .true. ! first time logical @@ -303,13 +300,13 @@ subroutine evp (dt) ilo, ihi, jlo, jhi, & aice (:,:,iblk), vice (:,:,iblk), & vsno (:,:,iblk), tmask (:,:,iblk), & - tmass (:,:,iblk), icetmask(:,:,iblk)) + tmass (:,:,iblk), iceTmask(:,:,iblk)) enddo ! iblk !$OMP END PARALLEL DO call ice_timer_start(timer_bound) - call ice_HaloUpdate (icetmask, halo_info, & + call ice_HaloUpdate (iceTmask, halo_info, & field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) @@ -317,22 +314,34 @@ subroutine evp (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('F', tmass , 'T' , umass , 'U') - call grid_average_X2Y('F', aice_init, 'T' , aiU , 'U') + call stack_fields(tmass, aice_init, cdn_ocn, fld3) + call ice_HaloUpdate (fld3, halo_info, & + field_loc_center, field_type_scalar) + call stack_fields(uocn, vocn, ss_tltx, ss_tlty, fld4) + call ice_HaloUpdate (fld4, halo_info, & + field_loc_center, field_type_vector) + call unstack_fields(fld3, tmass, aice_init, cdn_ocn) + call unstack_fields(fld4, uocn, vocn, ss_tltx, ss_tlty) + + call grid_average_X2Y('S', tmass , 'T' , umass , 'U') + call grid_average_X2Y('S', aice_init, 'T' , aiU , 'U') + call grid_average_X2Y('S', cdn_ocn , 'T' , cdn_ocnU, 'U') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnU , 'U') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnU , 'U') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxU, 'U') call grid_average_X2Y('S', ss_tlty , grid_ocn_dynv, ss_tltyU, 'U') if (grid_ice == 'CD' .or. 
grid_ice == 'C') then - call grid_average_X2Y('F', tmass , 'T' , emass , 'E') - call grid_average_X2Y('F', aice_init, 'T' , aie , 'E') + call grid_average_X2Y('S', tmass , 'T' , emass , 'E') + call grid_average_X2Y('S', aice_init, 'T' , aie , 'E') + call grid_average_X2Y('S', cdn_ocn , 'T' , cdn_ocnE, 'E') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnE , 'E') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnE , 'E') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxE, 'E') call grid_average_X2Y('S', ss_tlty , grid_ocn_dynv, ss_tltyE, 'E') - call grid_average_X2Y('F', tmass , 'T' , nmass , 'N') - call grid_average_X2Y('F', aice_init, 'T' , ain , 'N') + call grid_average_X2Y('S', tmass , 'T' , nmass , 'N') + call grid_average_X2Y('S', aice_init, 'T' , ain , 'N') + call grid_average_X2Y('S', cdn_ocn , 'T' , cdn_ocnN, 'N') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnN , 'N') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnN , 'N') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxN, 'N') @@ -390,16 +399,16 @@ subroutine evp (dt) if (trim(grid_ice) == 'B') then call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt (iblk), icellu (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellT (iblk), icellU (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & - icetmask (:,:,iblk), iceumask (:,:,iblk), & + iceTmask (:,:,iblk), iceUmask (:,:,iblk), & fmU (:,:,iblk), dt, & strtltxU (:,:,iblk), strtltyU (:,:,iblk), & strocnxU (:,:,iblk), strocnyU (:,:,iblk), & @@ -420,16 +429,16 @@ subroutine evp (dt) elseif (trim(grid_ice) == 'CD' .or. grid_ice == 'C') then call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt (iblk), icellu (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellT (iblk), icellU (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umaskCD (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & - icetmask (:,:,iblk), iceumask (:,:,iblk), & + iceTmask (:,:,iblk), iceUmask (:,:,iblk), & fmU (:,:,iblk), dt, & strtltxU (:,:,iblk), strtltyU (:,:,iblk), & strocnxU (:,:,iblk), strocnyU (:,:,iblk), & @@ -453,9 +462,9 @@ subroutine evp (dt) !----------------------------------------------------------------- strength(:,:,iblk) = c0 ! 
initialize - do ij = 1, icellt(iblk) - i = indxti(ij, iblk) - j = indxtj(ij, iblk) + do ij = 1, icellT(iblk) + i = indxTi(ij, iblk) + j = indxTj(ij, iblk) call icepack_ice_strength(ncat = ncat, & aice = aice (i,j, iblk), & vice = vice (i,j, iblk), & @@ -485,16 +494,16 @@ subroutine evp (dt) call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt (iblk), icelln (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellT (iblk), icellN (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxNi (:,iblk), indxNj (:,iblk), & aiN (:,:,iblk), nmass (:,:,iblk), & nmassdti (:,:,iblk), fcorN_blk (:,:,iblk), & nmask (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & strairxN (:,:,iblk), strairyN (:,:,iblk), & ss_tltxN (:,:,iblk), ss_tltyN (:,:,iblk), & - icetmask (:,:,iblk), icenmask (:,:,iblk), & + iceTmask (:,:,iblk), iceNmask (:,:,iblk), & fmN (:,:,iblk), dt, & strtltxN (:,:,iblk), strtltyN (:,:,iblk), & strocnxN (:,:,iblk), strocnyN (:,:,iblk), & @@ -518,16 +527,16 @@ subroutine evp (dt) call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt (iblk), icelle (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellT (iblk), icellE (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxEi (:,iblk), indxEj (:,iblk), & aiE (:,:,iblk), emass (:,:,iblk), & emassdti (:,:,iblk), fcorE_blk (:,:,iblk), & emask (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & strairxE (:,:,iblk), strairyE (:,:,iblk), & ss_tltxE (:,:,iblk), ss_tltyE (:,:,iblk), & - icetmask (:,:,iblk), iceemask (:,:,iblk), & + iceTmask (:,:,iblk), iceEmask (:,:,iblk), & fmE (:,:,iblk), dt, & strtltxE (:,:,iblk), strtltyE (:,:,iblk), & strocnxE (:,:,iblk), strocnyE (:,:,iblk), & @@ -548,12 +557,12 @@ subroutine evp (dt) do i=1,nx_block do j=1,ny_block - if (.not.iceumask(i,j,iblk)) then + if (.not.iceUmask(i,j,iblk)) then stresspU (i,j,iblk) = c0 stressmU (i,j,iblk) = c0 stress12U(i,j,iblk) = c0 endif - if (icetmask(i,j,iblk) == 0) then + if (.not.iceTmask(i,j,iblk)) then stresspT (i,j,iblk) = c0 stressmT (i,j,iblk) = c0 stress12T(i,j,iblk) = c0 @@ -612,7 +621,7 @@ subroutine evp (dt) if (maskhalo_dyn) then halomask = 0 if (grid_ice == 'B') then - where (iceumask) halomask = 1 + where (iceUmask) halomask = 1 elseif (grid_ice == 'C' .or. grid_ice == 'CD') then !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,i,j) SCHEDULE(runtime) do iblk = 1, nblocks @@ -623,11 +632,11 @@ subroutine evp (dt) jhi = this_block%jhi do j = jlo,jhi do i = ilo,ihi - if (icetmask(i ,j ,iblk) /= 0 .or. & - icetmask(i-1,j ,iblk) /= 0 .or. & - icetmask(i+1,j ,iblk) /= 0 .or. & - icetmask(i ,j-1,iblk) /= 0 .or. & - icetmask(i ,j+1,iblk) /= 0) then + if (iceTmask(i ,j ,iblk) .or. & + iceTmask(i-1,j ,iblk) .or. & + iceTmask(i+1,j ,iblk) .or. & + iceTmask(i ,j-1,iblk) .or. 
& + iceTmask(i ,j+1,iblk)) then halomask(i,j,iblk) = 1 endif enddo @@ -654,8 +663,8 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_LKD (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj(:,iblk), & vice (:,:,iblk), aice(:,:,iblk), & hwater(:,:,iblk), TbU (:,:,iblk)) enddo @@ -665,8 +674,8 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_prob (nx_block , ny_block , & - icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & - icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + icellT(iblk), indxTi(:,iblk), indxTj(:,iblk), & + icellU(iblk), indxUi(:,iblk), indxUj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk) , & hwater (:,:,iblk), TbU (:,:,iblk)) enddo @@ -679,13 +688,13 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_LKD (nx_block , ny_block, & - icelle (iblk), & - indxei (:,iblk), indxej(:,iblk), & + icellE (iblk), & + indxEi (:,iblk), indxEj(:,iblk), & vice (:,:,iblk), aice(:,:,iblk), & hwater(:,:,iblk), TbE (:,:,iblk)) call seabed_stress_factor_LKD (nx_block , ny_block, & - icelln (iblk), & - indxni (:,iblk), indxnj(:,iblk), & + icellN (iblk), & + indxNi (:,iblk), indxNj(:,iblk), & vice (:,:,iblk), aice(:,:,iblk), & hwater(:,:,iblk), TbN (:,:,iblk)) enddo @@ -695,13 +704,13 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_prob (nx_block , ny_block , & - icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & - icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + icellT(iblk), indxTi(:,iblk), indxTj(:,iblk), & + icellU(iblk), indxUi(:,iblk), indxUj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk) , & hwater (:,:,iblk), TbU (:,:,iblk) , & TbE (:,:,iblk), TbN (:,:,iblk) , & - icelle(iblk), indxei(:,iblk), indxej(:,iblk), & - icelln(iblk), indxni(:,iblk), indxnj(:,iblk) ) + icellE(iblk), indxEi(:,iblk), indxEj(:,iblk), & + icellN(iblk), indxNi(:,iblk), indxNj(:,iblk) ) enddo !$OMP END PARALLEL DO endif @@ -724,8 +733,8 @@ subroutine evp (dt) call ice_timer_start(timer_evp_1d) call ice_dyn_evp_1d_copyin( & nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost, & - icetmask, iceumask, & - cdn_ocn,aiU,uocnU,vocnU,forcexU,forceyU,TbU, & + iceTmask, iceUmask, & + cdn_ocnU,aiU,uocnU,vocnU,forcexU,forceyU,TbU, & umassdti,fmU,uarear,tarear,strintxU,strintyU,uvel_init,vvel_init,& strength,uvel,vvel,dxT,dyT, & stressp_1 ,stressp_2, stressp_3, stressp_4, & @@ -745,9 +754,10 @@ subroutine evp (dt) else ! evp_algorithm == standard_2d (Standard CICE) call ice_timer_start(timer_evp_2d) - do ksub = 1,ndte ! subcycling - if (grid_ice == "B") then + if (grid_ice == "B") then + + do ksub = 1,ndte ! subcycling !$OMP PARALLEL DO PRIVATE(iblk,strtmp) SCHEDULE(runtime) do iblk = 1, nblocks @@ -756,8 +766,8 @@ subroutine evp (dt) ! stress tensor equation, total surface stress !----------------------------------------------------------------- call stress (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & dxhy (:,:,iblk), dyhx (:,:,iblk), & @@ -773,28 +783,12 @@ subroutine evp (dt) stress12_3(:,:,iblk), stress12_4(:,:,iblk), & strtmp (:,:,:) ) - !----------------------------------------------------------------- - ! 
on last subcycle, save quantities for mechanical redistribution - !----------------------------------------------------------------- - if (ksub == ndte) then - call deformations (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - cxp (:,:,iblk), cyp (:,:,iblk), & - cxm (:,:,iblk), cym (:,:,iblk), & - tarear (:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv(:,:,iblk), rdg_shear(:,:,iblk) ) - endif - !----------------------------------------------------------------- ! momentum equation !----------------------------------------------------------------- call stepu (nx_block , ny_block , & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocnU(:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), strtmp (:,:,:), & uocnU (:,:,iblk), vocnU (:,:,iblk), & waterxU (:,:,iblk), wateryU (:,:,iblk), & @@ -810,7 +804,37 @@ subroutine evp (dt) enddo ! iblk !$OMP END PARALLEL DO - elseif (grid_ice == "C") then + ! U fields at NE corner + ! calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner, field_type_vector, & + uvel, vvel) + + enddo ! sub cycling + + !----------------------------------------------------------------- + ! save quantities for mechanical redistribution + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + call deformations (nx_block , ny_block , & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + cxp (:,:,iblk), cyp (:,:,iblk), & + cxm (:,:,iblk), cym (:,:,iblk), & + tarear (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk) ) + enddo + !$OMP END PARALLEL DO + + + elseif (grid_ice == "C") then + + do ksub = 1,ndte ! subcycling !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -820,8 +844,8 @@ subroutine evp (dt) ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- call strain_rates_U (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & @@ -837,15 +861,15 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_NEcorner, field_type_scalar, & shearU) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call stressC_T (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & dxN (:,:,iblk), dyE (:,:,iblk), & @@ -855,29 +879,11 @@ subroutine evp (dt) zetax2T (:,:,iblk), etax2T (:,:,iblk), & stresspT (:,:,iblk), stressmT (:,:,iblk)) - !----------------------------------------------------------------- - ! 
on last subcycle, save quantities for mechanical redistribution - !----------------------------------------------------------------- - if (ksub == ndte) then - - call deformationsC_T (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - dxN (:,:,iblk), dyE (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - tarear (:,:,iblk), uarea (:,:,iblk), & - shearU (:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) - - endif enddo !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_center, field_type_scalar, & zetax2T, etax2T, stresspT, stressmT) @@ -890,8 +896,8 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call stressC_U (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uarea (:,:,iblk), & etax2U (:,:,iblk), deltaU (:,:,iblk), & strengthU (:,:,iblk), shearU (:,:,iblk), & @@ -900,7 +906,7 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info , halo_info_mask, & + call dyn_haloUpdate (halo_info , halo_info_mask, & field_loc_NEcorner, field_type_scalar, & stress12U) @@ -908,8 +914,8 @@ subroutine evp (dt) do iblk = 1, nblocks call div_stress_Ex (nx_block , ny_block , & - icelle (iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), & + indxEi (:,iblk), indxEj (:,iblk), & dxE (:,:,iblk), dyE (:,:,iblk), & dxU (:,:,iblk), dyT (:,:,iblk), & earear (:,:,iblk) , & @@ -917,8 +923,8 @@ subroutine evp (dt) stress12U (:,:,iblk), strintxE (:,:,iblk) ) call div_stress_Ny (nx_block , ny_block , & - icelln (iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), & + indxNi (:,iblk), indxNj (:,iblk), & dxN (:,:,iblk), dyN (:,:,iblk), & dxT (:,:,iblk), dyU (:,:,iblk), & narear (:,:,iblk) , & @@ -932,8 +938,8 @@ subroutine evp (dt) do iblk = 1, nblocks call stepu_C (nx_block , ny_block , & ! u, E point - icelle (iblk), Cdn_ocn (:,:,iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), Cdn_ocnE (:,:,iblk), & + indxEi (:,iblk), indxEj (:,iblk), & aiE (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & waterxE (:,:,iblk), forcexE (:,:,iblk), & @@ -944,8 +950,8 @@ subroutine evp (dt) TbE (:,:,iblk)) call stepv_C (nx_block, ny_block, & ! v, N point - icelln (iblk), Cdn_ocn (:,:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), Cdn_ocnN (:,:,iblk), & + indxNi (:,iblk), indxNj (:,iblk), & aiN (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & wateryN (:,:,iblk), forceyN (:,:,iblk), & @@ -958,10 +964,10 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Eface, field_type_vector, & uvelE) - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Nface, field_type_vector, & vvelN) @@ -971,10 +977,10 @@ subroutine evp (dt) vvelE(:,:,:) = vvelE(:,:,:)*epm(:,:,:) ! 
calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Nface, field_type_vector, & uvelN) - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Eface, field_type_vector, & vvelE) @@ -983,14 +989,43 @@ subroutine evp (dt) uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) + ! U fields at NE corner + ! calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner, field_type_vector, & + uvel, vvel) + + enddo ! subcycling - elseif (grid_ice == "CD") then + !----------------------------------------------------------------- + ! save quantities for mechanical redistribution + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + call deformationsC_T (nx_block , ny_block , & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + tarear (:,:,iblk), uarea (:,:,iblk), & + shearU (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) + enddo + !$OMP END PARALLEL DO + + elseif (grid_ice == "CD") then + + do ksub = 1,ndte ! subcycling !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call stressCD_T (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & dxN (:,:,iblk), dyE (:,:,iblk), & @@ -1001,26 +1036,11 @@ subroutine evp (dt) stresspT (:,:,iblk), stressmT (:,:,iblk), & stress12T (:,:,iblk) ) - !----------------------------------------------------------------- - ! on last subcycle, save quantities for mechanical redistribution - !----------------------------------------------------------------- - if (ksub == ndte) then - call deformationsCD_T (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - dxN (:,:,iblk), dyE (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - tarear (:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) - endif enddo !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_center, field_type_scalar, & zetax2T, etax2T) @@ -1038,8 +1058,8 @@ subroutine evp (dt) ! 
NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- call strain_rates_U (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & @@ -1052,8 +1072,8 @@ subroutine evp (dt) shearU (:,:,iblk), DeltaU (:,:,iblk) ) call stressCD_U (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uarea (:,:,iblk), & zetax2U (:,:,iblk), etax2U (:,:,iblk), & strengthU(:,:,iblk), & @@ -1065,10 +1085,10 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_center, field_type_scalar, & stresspT, stressmT, stress12T) - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_NEcorner,field_type_scalar, & stresspU, stressmU, stress12U) @@ -1076,8 +1096,8 @@ subroutine evp (dt) do iblk = 1, nblocks call div_stress_Ex (nx_block , ny_block , & - icelle (iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), & + indxEi (:,iblk), indxEj (:,iblk), & dxE (:,:,iblk), dyE (:,:,iblk), & dxU (:,:,iblk), dyT (:,:,iblk), & earear (:,:,iblk) , & @@ -1085,8 +1105,8 @@ subroutine evp (dt) stress12U (:,:,iblk), strintxE (:,:,iblk) ) call div_stress_Ey (nx_block , ny_block , & - icelle (iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), & + indxEi (:,iblk), indxEj (:,iblk), & dxE (:,:,iblk), dyE (:,:,iblk), & dxU (:,:,iblk), dyT (:,:,iblk), & earear (:,:,iblk) , & @@ -1094,8 +1114,8 @@ subroutine evp (dt) stress12T (:,:,iblk), strintyE (:,:,iblk) ) call div_stress_Nx (nx_block , ny_block , & - icelln (iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), & + indxNi (:,iblk), indxNj (:,iblk), & dxN (:,:,iblk), dyN (:,:,iblk), & dxT (:,:,iblk), dyU (:,:,iblk), & narear (:,:,iblk) , & @@ -1103,8 +1123,8 @@ subroutine evp (dt) stress12T (:,:,iblk), strintxN (:,:,iblk) ) call div_stress_Ny (nx_block , ny_block , & - icelln (iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), & + indxNi (:,iblk), indxNj (:,iblk), & dxN (:,:,iblk), dyN (:,:,iblk), & dxT (:,:,iblk), dyU (:,:,iblk), & narear (:,:,iblk) , & @@ -1118,8 +1138,8 @@ subroutine evp (dt) do iblk = 1, nblocks call stepuv_CD (nx_block , ny_block , & ! E point - icelle (iblk), Cdn_ocn (:,:,iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), Cdn_ocnE (:,:,iblk), & + indxEi (:,iblk), indxEj (:,iblk), & aiE (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & waterxE (:,:,iblk), wateryE (:,:,iblk), & @@ -1132,8 +1152,8 @@ subroutine evp (dt) TbE (:,:,iblk)) call stepuv_CD (nx_block , ny_block , & ! N point - icelln (iblk), Cdn_ocn (:,:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), Cdn_ocnN (:,:,iblk), & + indxNi (:,iblk), indxNj (:,iblk), & aiN (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & waterxN (:,:,iblk), wateryN (:,:,iblk), & @@ -1148,10 +1168,10 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! 
calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Eface, field_type_vector, & uvelE, vvelE) - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Nface, field_type_vector, & uvelN, vvelN) @@ -1160,16 +1180,34 @@ subroutine evp (dt) uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) + ! U fields at NE corner + ! calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner, field_type_vector, & + uvel, vvel) + + enddo ! subcycling - endif ! grid_ice + !----------------------------------------------------------------- + ! save quantities for mechanical redistribution + !----------------------------------------------------------------- - ! U fields at NE corner - ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & - field_loc_NEcorner, field_type_vector, & - uvel, vvel) + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + call deformationsCD_T (nx_block , ny_block , & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + tarear (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) + enddo + !$OMP END PARALLEL DO + endif ! grid_ice - enddo ! subcycling call ice_timer_stop(timer_evp_2d) endif ! evp_algorithm @@ -1260,8 +1298,8 @@ subroutine evp (dt) do iblk = 1, nblocks call dyn_finish & (nx_block , ny_block , & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocnU(:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & aiU (:,:,iblk), fmU (:,:,iblk), & @@ -1276,8 +1314,8 @@ subroutine evp (dt) call dyn_finish & (nx_block , ny_block , & - icelln (iblk), Cdn_ocn (:,:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), Cdn_ocnN(:,:,iblk), & + indxNi (:,iblk), indxNj (:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & aiN (:,:,iblk), fmN (:,:,iblk), & @@ -1285,8 +1323,8 @@ subroutine evp (dt) call dyn_finish & (nx_block , ny_block , & - icelle (iblk), Cdn_ocn (:,:,iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), Cdn_ocnE(:,:,iblk), & + indxEi (:,iblk), indxEj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & aiE (:,:,iblk), fmE (:,:,iblk), & @@ -1297,30 +1335,12 @@ subroutine evp (dt) endif - ! strocn computed on U, N, E as needed. Map strocn U divided by aiU to T - ! TODO: This should be done elsewhere as part of generalization? - ! TODO: Rename strocn[x,y]T since it's different than strocn[x,y][U,N,E] - ! 
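!=======================================================================
! Sketch: the restructuring above moves the deformation diagnostics out
! of the subcycling loop. Instead of guarding the call with
! "if (ksub == ndte)" on every subcycle, the loop now closes first and
! deformationsC_T/deformationsCD_T run exactly once. Schematic toy
! version of the control flow (values are placeholders):
!=======================================================================

   program subcycle_sketch
      implicit none
      integer :: ksub, ndte
      real    :: u, diag

      ndte = 120                ! number of EVP subcycles (illustrative)
      u    = 0.0
      do ksub = 1, ndte         ! subcycling
         u = u + 1.0            ! stands in for stress/momentum updates
      enddo                     ! subcycling
      diag = 2.0*u              ! diagnostics computed once, after the loop
      print *, 'diagnostic =', diag
   end program subcycle_sketch

!=======================================================================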
conservation requires aiU be divided before averaging - work1 = c0 - work2 = c0 - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) SCHEDULE(runtime) - do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij,iblk) - j = indxuj(ij,iblk) - work1(i,j,iblk) = strocnxU(i,j,iblk)/aiU(i,j,iblk) - work2(i,j,iblk) = strocnyU(i,j,iblk)/aiU(i,j,iblk) - enddo - enddo - !$OMP END PARALLEL DO - call ice_HaloUpdate (work1, halo_info, & - field_loc_NEcorner, field_type_vector) - call ice_HaloUpdate (work2, halo_info, & - field_loc_NEcorner, field_type_vector) - call grid_average_X2Y('F', work1, 'U', strocnxT, 'T') ! shift - call grid_average_X2Y('F', work2, 'U', strocnyT, 'T') - if (grid_ice == 'CD' .or. grid_ice == 'C') then + call ice_HaloUpdate (strintxE, halo_info, & + field_loc_Eface, field_type_vector) + call ice_HaloUpdate (strintyN, halo_info, & + field_loc_Nface, field_type_vector) + call ice_timer_stop(timer_bound) call grid_average_X2Y('S', strintxE, 'E', strintxU, 'U') ! diagnostic call grid_average_X2Y('S', strintyN, 'N', strintyU, 'U') ! diagnostic endif @@ -1337,8 +1357,8 @@ end subroutine evp ! author: Elizabeth C. Hunke, LANL subroutine stress (nx_block, ny_block, & - icellt, & - indxti, indxtj, & + icellT, & + indxTi, indxTj, & uvel, vvel, & dxT, dyT, & dxhy, dyhx, & @@ -1358,11 +1378,11 @@ subroutine stress (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & strength , & ! ice strength (N/m) @@ -1418,9 +1438,9 @@ subroutine stress (nx_block, ny_block, & str(:,:,:) = c0 - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! strain rates @@ -1639,8 +1659,8 @@ end subroutine stress ! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. subroutine stressC_T (nx_block, ny_block , & - icellt , & - indxti , indxtj , & + icellT , & + indxTi , indxTj , & uvelE , vvelE , & uvelN , vvelN , & dxN , dyE , & @@ -1655,11 +1675,11 @@ subroutine stressC_T (nx_block, ny_block , & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvelE , & ! 
x-component of velocity (m/s) at the E point @@ -1702,17 +1722,17 @@ subroutine stressC_T (nx_block, ny_block , & !----------------------------------------------------------------- call strain_rates_T (nx_block , ny_block , & - icellt , & - indxti(:) , indxtj (:) , & + icellT , & + indxTi(:) , indxTj (:) , & uvelE (:,:), vvelE (:,:), & uvelN (:,:), vvelN (:,:), & dxN (:,:), dyE (:,:), & dxT (:,:), dyT (:,:), & divT (:,:), tensionT(:,:) ) - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! Square of shear strain rate at T obtained from interpolation of @@ -1764,8 +1784,8 @@ end subroutine stressC_T ! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. subroutine stressC_U (nx_block , ny_block, & - icellu, & - indxui , indxuj, & + icellU, & + indxUi , indxUj, & uarea , & etax2U , deltaU, & strengthU, shearU, & @@ -1776,11 +1796,11 @@ subroutine stressC_U (nx_block , ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! no. of cells where iceumask = 1 + icellU ! no. of cells where iceUmask = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uarea , & ! area of U point @@ -1813,17 +1833,17 @@ subroutine stressC_U (nx_block , ny_block, & !----------------------------------------------------------------- if (visc_method == 'avg_zeta') then - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) & + arlx1i*p5*etax2U(i,j)*shearU(i,j)) * denom1 enddo elseif (visc_method == 'avg_strength') then - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) DminUarea = deltaminEVP*uarea(i,j) ! only need etax2U here, but other terms are calculated with etax2U ! minimal extra calculations here even though it seems like there is @@ -1844,8 +1864,8 @@ end subroutine stressC_U ! Nov 2021 subroutine stressCD_T (nx_block, ny_block, & - icellt, & - indxti, indxtj, & + icellT, & + indxTi, indxTj, & uvelE, vvelE, & uvelN, vvelN, & dxN, dyE, & @@ -1861,11 +1881,11 @@ subroutine stressCD_T (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvelE , & ! 
x-component of velocity (m/s) at the E point @@ -1908,8 +1928,8 @@ subroutine stressCD_T (nx_block, ny_block, & !----------------------------------------------------------------- call strain_rates_T (nx_block , ny_block , & - icellt , & - indxti(:) , indxtj (:) , & + icellT , & + indxTi(:) , indxTj (:) , & uvelE (:,:), vvelE (:,:), & uvelN (:,:), vvelN (:,:), & dxN (:,:), dyE (:,:), & @@ -1917,9 +1937,9 @@ subroutine stressCD_T (nx_block, ny_block, & divT (:,:), tensionT(:,:), & shearT(:,:), DeltaT (:,:) ) - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! viscosities and replacement pressure at T point @@ -1954,8 +1974,8 @@ end subroutine stressCD_T ! Nov 2021 subroutine stressCD_U (nx_block, ny_block, & - icellu, & - indxui, indxuj, & + icellU, & + indxUi, indxUj, & uarea, & zetax2U, etax2U, & strengthU, & @@ -1970,11 +1990,11 @@ subroutine stressCD_U (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! no. of cells where iceumask = 1 + icellU ! no. of cells where iceUmask = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uarea , & ! area of U-cell (m^2) @@ -2004,9 +2024,9 @@ subroutine stressCD_U (nx_block, ny_block, & character(len=*), parameter :: subname = '(stressCD_U)' - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) !----------------------------------------------------------------- ! viscosities and replacement pressure at U point diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 index fe04a3d63..e874611bd 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 @@ -1017,7 +1017,7 @@ end subroutine dealloc1d !======================================================================= subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, & - I_icetmask, I_iceumask, I_cdn_ocn, I_aiu, I_uocn, I_vocn, & + I_iceTmask, I_iceUmask, I_cdn_ocn, I_aiu, I_uocn, I_vocn, & I_forcex, I_forcey, I_Tbu, I_umassdti, I_fm, I_uarear, I_tarear, & I_strintx, I_strinty, I_uvel_init, I_vvel_init, I_strength, & I_uvel, I_vvel, I_dxT, I_dyT, I_stressp_1, I_stressp_2, & @@ -1035,9 +1035,7 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, & integer(int_kind), intent(in) :: nx, ny, nblk, nx_glob, ny_glob logical(kind=log_kind), dimension(nx, ny, nblk), intent(in) :: & - I_iceumask - integer(kind=int_kind), dimension(nx, ny, nblk), intent(in) :: & - I_icetmask + I_iceTmask, I_iceUmask real(kind=dbl_kind), dimension(nx, ny, nblk), intent(in) :: & I_cdn_ocn, I_aiu, I_uocn, I_vocn, I_forcex, I_forcey, I_Tbu, & I_umassdti, I_fm, I_uarear, I_tarear, I_strintx, I_strinty, & @@ -1049,9 +1047,7 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, & ! 
local variables logical(kind=log_kind), dimension(nx_glob, ny_glob) :: & - G_iceumask - integer(kind=int_kind), dimension(nx_glob, ny_glob) :: & - G_icetmask + G_iceTmask, G_iceUmask real(kind=dbl_kind), dimension(nx_glob, ny_glob) :: & G_cdn_ocn, G_aiu, G_uocn, G_vocn, G_forcex, G_forcey, G_Tbu, & G_umassdti, G_fm, G_uarear, G_tarear, G_strintx, G_strinty, & @@ -1063,8 +1059,8 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, & character(len=*), parameter :: & subname = '(ice_dyn_evp_1d_copyin)' - call gather_global_ext(G_icetmask, I_icetmask, master_task, distrb_info ) - call gather_global_ext(G_iceumask, I_iceumask, master_task, distrb_info ) + call gather_global_ext(G_iceTmask, I_iceTmask, master_task, distrb_info ) + call gather_global_ext(G_iceUmask, I_iceUmask, master_task, distrb_info ) call gather_global_ext(G_cdn_ocn, I_cdn_ocn, master_task, distrb_info ) call gather_global_ext(G_aiu, I_aiu, master_task, distrb_info ) call gather_global_ext(G_uocn, I_uocn, master_task, distrb_info ) @@ -1101,9 +1097,9 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, & ! all calculations id done on master task if (my_task == master_task) then ! find number of active points and allocate 1D vectors - call calc_na(nx_glob, ny_glob, NA_len, G_icetmask, G_iceumask) + call calc_na(nx_glob, ny_glob, NA_len, G_iceTmask, G_iceUmask) call alloc1d(NA_len) - call calc_2d_indices(nx_glob, ny_glob, NA_len, G_icetmask, G_iceumask) + call calc_2d_indices(nx_glob, ny_glob, NA_len, G_iceTmask, G_iceUmask) call calc_navel(nx_glob, ny_glob, NA_len, NAVEL_len) call alloc1d_navel(NAVEL_len) ! initialize OpenMP. FIXME: ought to be called from main @@ -1120,7 +1116,7 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, & G_stressp_2, G_stressp_3, G_stressp_4, G_stressm_1, & G_stressm_2, G_stressm_3, G_stressm_4, G_stress12_1, & G_stress12_2, G_stress12_3, G_stress12_4) - call calc_halo_parent(nx_glob, ny_glob, NA_len, NAVEL_len, G_icetmask) + call calc_halo_parent(nx_glob, ny_glob, NA_len, NAVEL_len, G_iceTmask) end if end subroutine ice_dyn_evp_1d_copyin @@ -1336,7 +1332,7 @@ end subroutine ice_dyn_evp_1d_kernel !======================================================================= - subroutine calc_na(nx, ny, na, icetmask, iceumask) + subroutine calc_na(nx, ny, na, iceTmask, iceUmask) ! Calculate number of active points use ice_blocks, only : nghost @@ -1344,10 +1340,8 @@ subroutine calc_na(nx, ny, na, icetmask, iceumask) implicit none integer(kind=int_kind), intent(in) :: nx, ny - integer(kind=int_kind), dimension(nx, ny), intent(in) :: & - icetmask logical(kind=log_kind), dimension(nx, ny), intent(in) :: & - iceumask + iceTmask, iceUmask integer(kind=int_kind), intent(out) :: na ! local variables @@ -1360,7 +1354,7 @@ subroutine calc_na(nx, ny, na, icetmask, iceumask) ! NOTE: T mask includes northern and eastern ghost cells do j = 1 + nghost, ny do i = 1 + nghost, nx - if (icetmask(i,j) == 1 .or. iceumask(i,j)) na = na + 1 + if (iceTmask(i,j) .or. 
iceUmask(i,j)) na = na + 1 end do end do @@ -1368,17 +1362,15 @@ end subroutine calc_na !======================================================================= - subroutine calc_2d_indices(nx, ny, na, icetmask, iceumask) + subroutine calc_2d_indices(nx, ny, na, iceTmask, iceUmask) use ice_blocks, only : nghost implicit none integer(kind=int_kind), intent(in) :: nx, ny, na - integer(kind=int_kind), dimension(nx, ny), intent(in) :: & - icetmask logical(kind=log_kind), dimension(nx, ny), intent(in) :: & - iceumask + iceTmask, iceUmask ! local variables @@ -1394,12 +1386,12 @@ subroutine calc_2d_indices(nx, ny, na, icetmask, iceumask) ! NOTE: T mask includes northern and eastern ghost cells do j = 1 + nghost, ny do i = 1 + nghost, nx - if (icetmask(i,j) == 1 .or. iceumask(i,j)) then + if (iceTmask(i,j) .or. iceUmask(i,j)) then Nmaskt = Nmaskt + 1 indi(Nmaskt) = i indj(Nmaskt) = j - if (icetmask(i,j) /= 1) skiptcell(Nmaskt) = .true. - if (.not. iceumask(i,j)) skipucell(Nmaskt) = .true. + if (.not. iceTmask(i,j)) skiptcell(Nmaskt) = .true. + if (.not. iceUmask(i,j)) skipucell(Nmaskt) = .true. ! NOTE: U mask does not include northern and eastern ! ghost cells. Skip northern and eastern ghost cells if (i == nx) skipucell(Nmaskt) = .true. @@ -1588,13 +1580,13 @@ end subroutine convert_2d_1d !======================================================================= - subroutine calc_halo_parent(nx, ny, na, navel, I_icetmask) + subroutine calc_halo_parent(nx, ny, na, navel, I_iceTmask) implicit none integer(kind=int_kind), intent(in) :: nx, ny, na, navel - integer(kind=int_kind), dimension(nx, ny), intent(in) :: & - I_icetmask + logical(kind=log_kind), dimension(nx, ny), intent(in) :: & + I_iceTmask ! local variables @@ -1619,10 +1611,10 @@ subroutine calc_halo_parent(nx, ny, na, navel, I_icetmask) j = int((indij(iw) - 1) / (nx)) + 1 i = indij(iw) - (j - 1) * nx ! if within ghost zone - if (i == nx .and. I_icetmask(2, j) == 1) Ihalo(iw) = 2 + (j - 1) * nx - if (i == 1 .and. I_icetmask(nx - 1, j) == 1) Ihalo(iw) = (nx - 1) + (j - 1) * nx - if (j == ny .and. I_icetmask(i, 2) == 1) Ihalo(iw) = i + nx - if (j == 1 .and. I_icetmask(i, ny - 1) == 1) Ihalo(iw) = i + (ny - 2) * nx + if (i == nx .and. I_iceTmask(2, j) ) Ihalo(iw) = 2 + (j - 1) * nx + if (i == 1 .and. I_iceTmask(nx - 1, j) ) Ihalo(iw) = (nx - 1) + (j - 1) * nx + if (j == ny .and. I_iceTmask(i, 2) ) Ihalo(iw) = i + nx + if (j == 1 .and. I_iceTmask(i, ny - 1) ) Ihalo(iw) = i + (ny - 2) * nx end do ! relate halo indices to indij vector diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 95d2eedb1..187ec55cc 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -106,6 +106,12 @@ module ice_dyn_shared uvelE_init , & ! x-component of velocity (m/s), beginning of timestep vvelE_init ! y-component of velocity (m/s), beginning of timestep + logical (kind=log_kind), dimension (:,:,:), allocatable, public :: & + iceTmask, & ! ice extent mask (T-cell) + iceUmask, & ! ice extent mask (U-cell) + iceNmask, & ! ice extent mask (N-cell) + iceEmask ! ice extent mask (E-cell) + real (kind=dbl_kind), allocatable, public :: & DminTarea(:,:,:) ! deltamin * tarea (m^2/s) @@ -168,6 +174,8 @@ subroutine alloc_dyn_shared allocate( & uvel_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep vvel_init (nx_block,ny_block,max_blocks), & ! 
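!=======================================================================
! Sketch: with iceTmask now logical, the 1D solver's active-point scans
! in calc_na and calc_2d_indices (above) reduce to plain logical
! expressions; the old code mixed "icetmask(i,j) == 1" with a logical
! U mask. Self-contained toy version of the counting loop:
!=======================================================================

   program mask_sketch
      implicit none
      logical :: iceTmask(2,2), iceUmask(2,2)
      integer :: i, j, na

      iceTmask = reshape((/.true., .false., .true., .false./), (/2,2/))
      iceUmask = reshape((/.false., .false., .true., .true. /), (/2,2/))
      na = 0
      do j = 1, 2
         do i = 1, 2
            ! was: icetmask(i,j) == 1 .or. iceumask(i,j)
            if (iceTmask(i,j) .or. iceUmask(i,j)) na = na + 1
         enddo
      enddo
      print *, 'active points:', na   ! prints 3
   end program mask_sketch

!=======================================================================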
y-component of velocity (m/s), beginning of timestep
+         iceTmask   (nx_block,ny_block,max_blocks), & ! T mask for dynamics
+         iceUmask   (nx_block,ny_block,max_blocks), & ! U mask for dynamics
          stat=ierr)
       if (ierr/=0) call abort_ice(subname//': Out of memory')
 
@@ -177,6 +185,8 @@ subroutine alloc_dyn_shared
          vvelE_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep
          uvelN_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep
          vvelN_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep
+         iceEmask   (nx_block,ny_block,max_blocks), & ! E mask for dynamics
+         iceNmask   (nx_block,ny_block,max_blocks), & ! N mask for dynamics
          stat=ierr)
       if (ierr/=0) call abort_ice(subname//': Out of memory')
       endif
@@ -199,7 +209,7 @@ subroutine init_dyn (dt)
          stresspT, stressmT, stress12T, &
          stresspU, stressmU, stress12U
       use ice_state, only: uvel, vvel, uvelE, vvelE, uvelN, vvelN, divu, shear
-      use ice_grid, only: ULAT, NLAT, ELAT, tarea, iceumask, iceemask, icenmask
+      use ice_grid, only: ULAT, NLAT, ELAT, tarea
 
       real (kind=dbl_kind), intent(in) :: &
          dt      ! time step
@@ -310,10 +320,10 @@
          endif
 
          ! ice extent mask on velocity points
-         iceumask(i,j,iblk) = .false.
+         iceUmask(i,j,iblk) = .false.
          if (grid_ice == 'CD' .or. grid_ice == 'C') then
-            iceemask(i,j,iblk) = .false.
-            icenmask(i,j,iblk) = .false.
+            iceEmask(i,j,iblk) = .false.
+            iceNmask(i,j,iblk) = .false.
          end if
       enddo                     ! i
       enddo                     ! j
@@ -394,7 +404,7 @@ subroutine dyn_prep1 (nx_block, ny_block, &
      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: &
         Tmass     ! total mass of ice and snow (kg/m^2)
 
-     integer (kind=int_kind), dimension (nx_block,ny_block), intent(out) :: &
+     logical (kind=log_kind), dimension (nx_block,ny_block), intent(out) :: &
        iceTmask   ! ice extent mask (T-cell)
 
      ! local variables
@@ -438,7 +448,7 @@ subroutine dyn_prep1 (nx_block, ny_block, &
         !-----------------------------------------------------------------
         ! augmented mask (land + open ocean)
         !-----------------------------------------------------------------
-        iceTmask (i,j) = 0
+        iceTmask (i,j) = .false.
      enddo
      enddo
 
@@ -450,10 +460,10 @@ subroutine dyn_prep1 (nx_block, ny_block, &
            if (tmphm(i-1,j+1) .or. tmphm(i,j+1) .or. tmphm(i+1,j+1) .or. &
                tmphm(i-1,j)   .or. tmphm(i,j)   .or. tmphm(i+1,j)   .or. &
                tmphm(i-1,j-1) .or. tmphm(i,j-1) .or. tmphm(i+1,j-1) ) then
-              iceTmask(i,j) = 1
+              iceTmask(i,j) = .true.
            endif
 
-           if (.not.Tmask(i,j)) iceTmask(i,j) = 0
+           if (.not.Tmask(i,j)) iceTmask(i,j) = .false.
      enddo
      enddo
 
@@ -504,8 +514,8 @@ subroutine dyn_prep2 (nx_block, ny_block, &
        ilo,ihi,jlo,jhi   ! beginning and end of physical domain
 
      integer (kind=int_kind), intent(out) :: &
-       icellT    , & ! no. of cells where iceTmask = 1
-       icellX        ! no. of cells where iceXmask = 1
+       icellT    , & ! no. of cells where iceTmask = .true.
+       icellX        ! no. of cells where iceXmask = .true.
 
      integer (kind=int_kind), dimension (nx_block*ny_block), intent(out) :: &
        indxTi    , & ! compressed index in i-direction on T grid
       indxTj    , & ! compressed index in j-direction on T grid
       indxXi    , & ! compressed index in i-direction on X grid
       indxXj        ! compressed index in j-direction on X grid
 
     logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: &
       Xmask      ! land/boundary mask, thickness (X-grid-cell)
 
-    integer (kind=int_kind), dimension (nx_block,ny_block), intent(in) :: &
+    logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: &
       iceTmask   ! 
ice extent mask (T-cell) logical (kind=log_kind), dimension (nx_block,ny_block), intent(inout) :: & @@ -590,7 +600,7 @@ subroutine dyn_prep2 (nx_block, ny_block, & taubx (i,j) = c0 tauby (i,j) = c0 - if (iceTmask(i,j)==0) then + if (.not.iceTmask(i,j)) then stressp_1 (i,j) = c0 stressp_2 (i,j) = c0 stressp_3 (i,j) = c0 @@ -608,7 +618,7 @@ subroutine dyn_prep2 (nx_block, ny_block, & enddo ! j !----------------------------------------------------------------- - ! Identify cells where iceTmask = 1 + ! Identify cells where iceTmask = .true. ! Note: The icellT mask includes north and east ghost cells ! where stresses are needed. !----------------------------------------------------------------- @@ -616,7 +626,7 @@ subroutine dyn_prep2 (nx_block, ny_block, & icellT = 0 do j = jlo, jhi+1 do i = ilo, ihi+1 - if (iceTmask(i,j) == 1) then + if (iceTmask(i,j)) then icellT = icellT + 1 indxTi(icellT) = i indxTj(icellT) = j @@ -729,7 +739,7 @@ subroutine stepu (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellU ! total count when iceumask is true + icellU ! total count when iceUmask is true integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxUi , & ! compressed index in i-direction @@ -1166,7 +1176,7 @@ subroutine dyn_finish (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellU ! total count when iceumask is true + icellU ! total count when iceUmask is true integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxUi , & ! compressed index in i-direction @@ -1635,7 +1645,7 @@ subroutine deformations (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellT ! no. of cells where iceTmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxTi , & ! compressed index in i-direction @@ -1733,7 +1743,7 @@ subroutine deformationsCD_T (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellT ! no. of cells where iceTmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxTi , & ! compressed index in i-direction @@ -1830,7 +1840,7 @@ subroutine deformationsC_T (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellT ! no. of cells where iceTmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxTi , & ! compressed index in i-direction diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 17fd0b73f..6534e7568 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -51,7 +51,7 @@ module ice_dyn_vp seabed_stress, Ktens, stack_fields, unstack_fields use ice_fileunits, only: nu_diag use ice_flux, only: fmU - use ice_global_reductions, only: global_sum, global_allreduce_sum + use ice_global_reductions, only: global_sum use ice_grid, only: dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, uarear use ice_exit, only: abort_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted @@ -96,17 +96,19 @@ module ice_dyn_vp ! module variables integer (kind=int_kind), allocatable :: & - icellt(:) , & ! no. of cells where icetmask = 1 - icellu(:) ! no. 
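!=======================================================================
! Sketch: dyn_prep1 (above) builds iceTmask by dilating the ice-presence
! mask one cell in every direction (a 3x3 "any" stencil) and then
! removing land points; dyn_prep2 turns the result into the compressed
! icellT/indxTi/indxTj lists. Toy version of the dilation step:
!=======================================================================

   program dilate_sketch
      implicit none
      integer, parameter :: nx = 6, ny = 6
      logical :: tmphm(nx,ny), Tmask(nx,ny), iceTmask(nx,ny)
      integer :: i, j

      tmphm = .false.
      tmphm(3,3) = .true.            ! a single icy cell
      Tmask = .true.                 ! all ocean, no land
      iceTmask = .false.
      do j = 2, ny-1
         do i = 2, nx-1
            if (any(tmphm(i-1:i+1,j-1:j+1))) iceTmask(i,j) = .true.
            if (.not. Tmask(i,j)) iceTmask(i,j) = .false.
         enddo
      enddo
      print *, 'active T cells:', count(iceTmask)   ! prints 9
   end program dilate_sketch

!=======================================================================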
of cells where iceumask = 1 + icellT(:) , & ! no. of cells where iceTmask = .true. + icellU(:) ! no. of cells where iceUmask = .true. integer (kind=int_kind), allocatable :: & - indxti(:,:) , & ! compressed index in i-direction - indxtj(:,:) , & ! compressed index in j-direction - indxui(:,:) , & ! compressed index in i-direction - indxuj(:,:) ! compressed index in j-direction + indxTi(:,:) , & ! compressed index in i-direction + indxTj(:,:) , & ! compressed index in j-direction + indxUi(:,:) , & ! compressed index in i-direction + indxUj(:,:) ! compressed index in j-direction real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:) ! work array for boundary updates + fld2(:,:,:,:), & ! work array for boundary updates + fld3(:,:,:,:), & ! work array for boundary updates + fld4(:,:,:,:) ! work array for boundary updates !======================================================================= @@ -136,12 +138,14 @@ subroutine init_vp this_block ! block information for current block ! Initialize module variables - allocate(icellt(max_blocks), icellu(max_blocks)) - allocate(indxti(nx_block*ny_block, max_blocks), & - indxtj(nx_block*ny_block, max_blocks), & - indxui(nx_block*ny_block, max_blocks), & - indxuj(nx_block*ny_block, max_blocks)) + allocate(icellT(max_blocks), icellU(max_blocks)) + allocate(indxTi(nx_block*ny_block, max_blocks), & + indxTj(nx_block*ny_block, max_blocks), & + indxUi(nx_block*ny_block, max_blocks), & + indxUj(nx_block*ny_block, max_blocks)) allocate(fld2(nx_block,ny_block,2,max_blocks)) + allocate(fld3(nx_block,ny_block,3,max_blocks)) + allocate(fld4(nx_block,ny_block,4,max_blocks)) end subroutine init_vp @@ -167,19 +171,19 @@ subroutine implicit_solver (dt) use ice_blocks, only: block, get_block, nx_block, ny_block use ice_domain, only: blocks_ice, halo_info, maskhalo_dyn use ice_domain_size, only: max_blocks, ncat - use ice_dyn_shared, only: deformations + use ice_dyn_shared, only: deformations, iceTmask, iceUmask use ice_flux, only: rdg_conv, rdg_shear, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & - strocnxT, strocnyT, strax, stray, & + strax, stray, & TbU, hwater, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_grid, only: tmask, umask, dxT, dyT, cxp, cyp, cxm, cym, & - tarear, grid_type, grid_average_X2Y, iceumask, & + tarear, grid_type, grid_average_X2Y, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv - use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & + use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, divu, shear, & aice_init, aice0, aicen, vicen, strength use ice_timers, only: timer_dynamics, timer_bound, & ice_timer_start, ice_timer_stop @@ -200,6 +204,7 @@ subroutine implicit_solver (dt) vocnU , & ! j ocean current (m/s) ss_tltxU , & ! sea surface slope, x-direction (m/m) ss_tltyU , & ! sea surface slope, y-direction (m/m) + cdn_ocnU , & ! ocn drag coefficient tmass , & ! total mass of ice and snow (kg/m^2) waterxU , & ! for ocean stress calculation, x (m/s) wateryU , & ! for ocean stress calculation, y (m/s) @@ -210,7 +215,6 @@ subroutine implicit_solver (dt) Cb , & ! seabed stress coefficient fpresx , & ! fixed point residual vector, x components: fx = uvel - uprev_k fpresy , & ! fixed point residual vector, y components: fy = vvel - vprev_k - aiU , & ! ice fraction on u-grid umass , & ! 
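!=======================================================================
! Sketch: the fld3/fld4 work arrays allocated above let several 2D
! fields share one halo update: stack_fields packs them into a single
! buffer, one ice_HaloUpdate call exchanges them all, and
! unstack_fields unpacks. The packing step itself is just array copies,
! as in this toy version (sizes illustrative):
!=======================================================================

   program stack_sketch
      implicit none
      integer, parameter :: nx = 4, ny = 4
      real :: a(nx,ny), b(nx,ny), fld2(nx,ny,2)

      call random_number(a)
      call random_number(b)
      fld2(:,:,1) = a                ! stack: one buffer ...
      fld2(:,:,2) = b
      ! ... a single halo exchange on fld2 would happen here ...
      a = fld2(:,:,1)                ! unstack
      b = fld2(:,:,2)
      print *, 'round trip ok:', all(a == fld2(:,:,1)), all(b == fld2(:,:,2))
   end program stack_sketch

!=======================================================================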
total mass of ice and snow (u grid) umassdti ! mass of U-cell/dte (kg/m^2 s) @@ -222,7 +226,6 @@ subroutine implicit_solver (dt) logical (kind=log_kind) :: calc_strair integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & - icetmask, & ! ice extent mask (T-cell) halomask ! generic halo mask type (ice_halo) :: & @@ -234,10 +237,6 @@ subroutine implicit_solver (dt) real (kind=dbl_kind), allocatable :: & sol(:) ! solution vector - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1, & ! temporary - work2 ! temporary - character(len=*), parameter :: subname = '(implicit_solver)' call ice_timer_start(timer_dynamics) ! dynamics @@ -290,13 +289,13 @@ subroutine implicit_solver (dt) ilo, ihi, jlo, jhi, & aice (:,:,iblk), vice (:,:,iblk), & vsno (:,:,iblk), tmask (:,:,iblk), & - tmass (:,:,iblk), icetmask(:,:,iblk)) + tmass (:,:,iblk), iceTmask(:,:,iblk)) enddo ! iblk !$OMP END PARALLEL DO call ice_timer_start(timer_bound) - call ice_HaloUpdate (icetmask, halo_info, & + call ice_HaloUpdate (iceTmask, halo_info, & field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) @@ -304,12 +303,22 @@ subroutine implicit_solver (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('F',tmass , 'T', umass, 'U') - call grid_average_X2Y('F',aice_init, 'T', aiU , 'U') - call grid_average_X2Y('S',uocn , grid_ocn_dynu, uocnU , 'U') - call grid_average_X2Y('S',vocn , grid_ocn_dynv, vocnU , 'U') - call grid_average_X2Y('S',ss_tltx, grid_ocn_dynu, ss_tltxU, 'U') - call grid_average_X2Y('S',ss_tlty, grid_ocn_dynv, ss_tltyU, 'U') + call stack_fields(tmass, aice_init, cdn_ocn, fld3) + call ice_HaloUpdate (fld3, halo_info, & + field_loc_center, field_type_scalar) + call stack_fields(uocn, vocn, ss_tltx, ss_tlty, fld4) + call ice_HaloUpdate (fld4, halo_info, & + field_loc_center, field_type_vector) + call unstack_fields(fld3, tmass, aice_init, cdn_ocn) + call unstack_fields(fld4, uocn, vocn, ss_tltx, ss_tlty) + + call grid_average_X2Y('S',tmass , 'T' , umass , 'U') + call grid_average_X2Y('S',aice_init, 'T' , aiU , 'U') + call grid_average_X2Y('S',cdn_ocn , 'T' , cdn_ocnU, 'U') + call grid_average_X2Y('S',uocn , grid_ocn_dynu, uocnU , 'U') + call grid_average_X2Y('S',vocn , grid_ocn_dynv, vocnU , 'U') + call grid_average_X2Y('S',ss_tltx , grid_ocn_dynu, ss_tltxU, 'U') + call grid_average_X2Y('S',ss_tlty , grid_ocn_dynv, ss_tltyU, 'U') !---------------------------------------------------------------- ! Set wind stress to values supplied via NEMO or other forcing @@ -332,9 +341,7 @@ subroutine implicit_solver (dt) call grid_average_X2Y('F',strairyT,'T',strairyU,'U') endif -! tcraig, tcx, threading here leads to some non-reproducbile results and failures in icepack_ice_strength -! 
need to do more debugging - !$TCXOMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) do iblk = 1, nblocks !----------------------------------------------------------------- @@ -349,16 +356,16 @@ subroutine implicit_solver (dt) call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt(iblk), icellu(iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellT(iblk), icellU(iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & - icetmask (:,:,iblk), iceumask (:,:,iblk), & + iceTmask (:,:,iblk), iceUmask (:,:,iblk), & fmU (:,:,iblk), dt, & strtltxU (:,:,iblk), strtltyU (:,:,iblk), & strocnxU (:,:,iblk), strocnyU (:,:,iblk), & @@ -377,8 +384,8 @@ subroutine implicit_solver (dt) TbU (:,:,iblk)) call calc_bfix (nx_block , ny_block , & - icellu(iblk) , & - indxui (:,iblk), indxuj (:,iblk), & + icellU(iblk) , & + indxUi (:,iblk), indxUj (:,iblk), & umassdti (:,:,iblk), & forcexU (:,:,iblk), forceyU (:,:,iblk), & uvel_init (:,:,iblk), vvel_init (:,:,iblk), & @@ -389,9 +396,9 @@ subroutine implicit_solver (dt) !----------------------------------------------------------------- strength(:,:,iblk) = c0 ! initialize - do ij = 1, icellt(iblk) - i = indxti(ij, iblk) - j = indxtj(ij, iblk) + do ij = 1, icellT(iblk) + i = indxTi(ij, iblk) + j = indxTj(ij, iblk) call icepack_ice_strength (ncat, & aice (i,j, iblk), & vice (i,j, iblk), & @@ -402,7 +409,7 @@ subroutine implicit_solver (dt) enddo ! ij enddo ! iblk - !$TCXOMP END PARALLEL DO + !$OMP END PARALLEL DO call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & @@ -421,7 +428,7 @@ subroutine implicit_solver (dt) if (maskhalo_dyn) then call ice_timer_start(timer_bound) halomask = 0 - where (iceumask) halomask = 1 + where (iceUmask) halomask = 1 call ice_HaloUpdate (halomask, halo_info, & field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) @@ -436,8 +443,8 @@ subroutine implicit_solver (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call seabed_stress_factor_LKD (nx_block, ny_block, & - icellu (iblk), & - indxui(:,iblk), indxuj(:,iblk), & + icellU (iblk), & + indxUi(:,iblk), indxUj(:,iblk), & vice(:,:,iblk), aice(:,:,iblk), & hwater(:,:,iblk), TbU(:,:,iblk)) enddo @@ -448,8 +455,8 @@ subroutine implicit_solver (dt) do iblk = 1, nblocks call seabed_stress_factor_prob (nx_block, ny_block, & - icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & - icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + icellT(iblk), indxTi(:,iblk), indxTj(:,iblk), & + icellU(iblk), indxUi(:,iblk), indxUj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk), & hwater(:,:,iblk), TbU(:,:,iblk)) enddo @@ -465,7 +472,7 @@ subroutine implicit_solver (dt) ntot = 0 do iblk = 1, nblocks - ntot = ntot + icellu(iblk) + ntot = ntot + icellU(iblk) enddo ntot = 2 * ntot ! times 2 because of u and v @@ -474,9 +481,9 @@ subroutine implicit_solver (dt) !----------------------------------------------------------------- ! 
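!=======================================================================
! Sketch: the nonlinear iteration that follows solves
! F(u) = A(u)u - b(u) = 0 with fixed-point (Picard) passes, optionally
! accelerated with Anderson mixing; each pass monitors the PDE residual
! norm and the progress norm |u_new - u_old|. Scalar toy version of
! plain Picard on the fixed-point problem u = cos(u):
!=======================================================================

   program picard_sketch
      implicit none
      integer :: it
      real(8) :: u, unew

      u = 1.0d0
      do it = 1, 100
         unew = cos(u)                       ! fixed-point map g(u)
         if (abs(unew - u) < 1.0d-12) exit   ! progress norm, cf. prog_norm
         u = unew
      enddo
      print *, 'iterations:', it, '  u =', u   ! u ~ 0.7390851
   end program picard_sketch

!=======================================================================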
Start of nonlinear iteration !----------------------------------------------------------------- - call anderson_solver (icellt , icellu , & - indxti , indxtj , & - indxui , indxuj , & + call anderson_solver (icellT , icellU , & + indxTi , indxTj , & + indxUi , indxUj , & aiU , ntot , & uocnU , vocnU , & waterxU , wateryU, & @@ -484,7 +491,7 @@ subroutine implicit_solver (dt) umassdti, sol , & fpresx , fpresy , & zetax2 , etax2 , & - rep_prs , & + rep_prs , cdn_ocnU,& Cb, halo_info_mask) !----------------------------------------------------------------- ! End of nonlinear iteration @@ -500,8 +507,8 @@ subroutine implicit_solver (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call stress_vp (nx_block , ny_block , & - icellt(iblk) , & - indxti (:,iblk), indxtj (:,iblk), & + icellT(iblk) , & + indxTi (:,iblk), indxTj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & @@ -523,8 +530,8 @@ subroutine implicit_solver (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call deformations (nx_block , ny_block , & - icellt(iblk) , & - indxti (:,iblk), indxtj (:,iblk), & + icellT(iblk) , & + indxTi (:,iblk), indxTj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & @@ -542,8 +549,8 @@ subroutine implicit_solver (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call calc_seabed_stress (nx_block , ny_block , & - icellu(iblk) , & - indxui (:,iblk), indxuj (:,iblk), & + icellU(iblk) , & + indxUi (:,iblk), indxUj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & Cb (:,:,iblk), & taubxU (:,:,iblk), taubyU (:,:,iblk)) @@ -628,8 +635,8 @@ subroutine implicit_solver (dt) call dyn_finish & (nx_block, ny_block, & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocnU(:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & aiU (:,:,iblk), fmU (:,:,iblk), & @@ -640,27 +647,6 @@ subroutine implicit_solver (dt) enddo !$OMP END PARALLEL DO - ! strocn computed on U, N, E as needed. Map strocn U divided by aiU to T - ! TODO: This should be done elsewhere as part of generalization? - ! conservation requires aiU be divided before averaging - work1 = c0 - work2 = c0 - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij,iblk) - j = indxuj(ij,iblk) - work1(i,j,iblk) = strocnxU(i,j,iblk)/aiU(i,j,iblk) - work2(i,j,iblk) = strocnyU(i,j,iblk)/aiU(i,j,iblk) - enddo - enddo - call ice_HaloUpdate (work1, halo_info, & - field_loc_NEcorner, field_type_vector) - call ice_HaloUpdate (work2, halo_info, & - field_loc_NEcorner, field_type_vector) - call grid_average_X2Y('F',work1,'U',strocnxT,'T') ! shift - call grid_average_X2Y('F',work2,'U',strocnyT,'T') - ! shift velocity components from CD grid locations (N, E) to B grid location (U) for transport ! commented out in order to focus on EVP for now within the cdgrid ! should be used when routine is ready @@ -685,9 +671,9 @@ end subroutine implicit_solver ! H. F. Walker, “Anderson Acceleration: Algorithms and Implementations” ! [Online]. 
Available: https://users.wpi.edu/~walker/Papers/anderson_accn_algs_imps.pdf - subroutine anderson_solver (icellt , icellu , & - indxti , indxtj , & - indxui , indxuj , & + subroutine anderson_solver (icellT , icellU , & + indxTi , indxTj , & + indxUi , indxUj , & aiU , ntot , & uocn , vocn , & waterxU , wateryU, & @@ -695,10 +681,9 @@ subroutine anderson_solver (icellt , icellu , & umassdti, sol , & fpresx , fpresy , & zetax2 , etax2 , & - rep_prs , & + rep_prs , cdn_ocn, & Cb, halo_info_mask) - use ice_arrays_column, only: Cdn_ocn use ice_blocks, only: nx_block, ny_block use ice_boundary, only: ice_HaloUpdate use ice_constants, only: c1 @@ -715,19 +700,20 @@ subroutine anderson_solver (icellt , icellu , & ntot ! size of problem for Anderson integer (kind=int_kind), dimension(max_blocks), intent(in) :: & - icellt , & ! no. of cells where icetmask = 1 - icellu ! no. of cells where iceumask = 1 + icellT , & ! no. of cells where iceTmask = .true. + icellU ! no. of cells where iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj , & ! compressed index in j-direction - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj , & ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & aiU , & ! ice fraction on u-grid uocn , & ! i ocean current (m/s) vocn , & ! j ocean current (m/s) + cdn_ocn , & ! ocn drag coefficient waterxU , & ! for ocean stress calculation, x (m/s) wateryU , & ! for ocean stress calculation, y (m/s) bxfix , & ! part of bx that is constant during Picard @@ -783,9 +769,6 @@ subroutine anderson_solver (icellt , icellu , & stress_Pr, & ! x,y-derivatives of the replacement pressure diag_rheo ! contributions of the rhelogy term to the diagonal - real (kind=dbl_kind), dimension (max_blocks) :: & - L2norm ! array used to compute l^2 norm of grid function - real (kind=dbl_kind), dimension (ntot) :: & res , & ! current residual res_old , & ! previous residual @@ -821,7 +804,6 @@ subroutine anderson_solver (icellt , icellu , & ! Initialization res_num = 0 - L2norm = c0 !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -850,8 +832,8 @@ subroutine anderson_solver (icellt , icellu , & vprev_k(:,:,iblk) = vvel(:,:,iblk) call calc_zeta_dPr (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & uprev_k (:,:,iblk), vprev_k (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & dxhy (:,:,iblk), dyhx (:,:,iblk), & @@ -862,8 +844,8 @@ subroutine anderson_solver (icellt , icellu , & rep_prs(:,:,iblk,:), stress_Pr (:,:,:)) call calc_vrel_Cb (nx_block , ny_block , & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocn (:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), TbU (:,:,iblk), & uocn (:,:,iblk), vocn (:,:,iblk), & ulin (:,:,iblk), vlin (:,:,iblk), & @@ -871,8 +853,8 @@ subroutine anderson_solver (icellt , icellu , & ! 
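!=======================================================================
! Sketch: each outer pass below freezes vrel, Cb and the viscosities,
! which turns the momentum equation into a linear system A(u_k) x = b(u_k):
! matvec applies A, calc_bvec builds b, and FGMRES solves the system.
! Toy stand-in for that linear stage (Richardson iteration on a 2x2
! symmetric system; FGMRES itself is far more capable):
!=======================================================================

   program linear_stage_sketch
      implicit none
      real(8) :: A(2,2), b(2), x(2), r(2)
      integer :: k

      A = reshape((/4.0d0, 1.0d0, 1.0d0, 3.0d0/), (/2,2/))
      b = (/1.0d0, 2.0d0/)
      x = 0.0d0
      do k = 1, 200
         r = b - matmul(A, x)            ! residual, cf. residual_vec
         if (maxval(abs(r)) < 1.0d-12) exit
         x = x + 0.2d0*r                 ! Richardson update
      enddo
      print *, 'x =', x                  ! approx (0.0909..., 0.6363...)
   end program linear_stage_sketch

!=======================================================================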
prepare b vector (RHS) call calc_bvec (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & stress_Pr (:,:,:), uarear (:,:,iblk), & waterxU (:,:,iblk), wateryU (:,:,iblk), & bxfix (:,:,iblk), byfix (:,:,iblk), & @@ -881,9 +863,9 @@ subroutine anderson_solver (icellt , icellu , & ! Compute nonlinear residual norm (PDE residual) call matvec (nx_block , ny_block , & - icellu (iblk) , icellt (iblk), & - indxui (:,iblk) , indxuj (:,iblk), & - indxti (:,iblk) , indxtj (:,iblk), & + icellU (iblk) , icellT (iblk), & + indxUi (:,iblk) , indxUj (:,iblk), & + indxTi (:,iblk) , indxTj (:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx (:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -895,15 +877,17 @@ subroutine anderson_solver (icellt , icellu , & uarear (:,:,iblk) , & Au (:,:,iblk) , Av (:,:,iblk)) call residual_vec (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & bx (:,:,iblk), by (:,:,iblk), & Au (:,:,iblk), Av (:,:,iblk), & - Fx (:,:,iblk), Fy (:,:,iblk), & - L2norm (iblk)) + Fx (:,:,iblk), Fy (:,:,iblk)) enddo !$OMP END PARALLEL DO - nlres_norm = sqrt(global_sum(sum(L2norm), distrb_info)) + nlres_norm = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + Fx , Fy ) if (my_task == master_task .and. monitor_nonlin) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_nonlin: iter_nonlin= ", it_nl, & " nonlin_res_L2norm= ", nlres_norm @@ -923,8 +907,8 @@ subroutine anderson_solver (icellt , icellu , & soly = vprev_k call arrays_to_vec (nx_block , ny_block , & nblocks , max_blocks , & - icellu (:), ntot , & - indxui (:,:), indxuj (:,:), & + icellU (:), ntot , & + indxUi (:,:), indxUj (:,:), & uprev_k (:,:,:), vprev_k (:,:,:), & sol (:)) @@ -938,8 +922,8 @@ subroutine anderson_solver (icellt , icellu , & do iblk = 1, nblocks ! first compute diagonal contributions due to rheology term call formDiag_step1 (nx_block , ny_block , & - icellu (iblk) , & - indxui (:,iblk) , indxuj(:,iblk), & + icellU (iblk) , & + indxUi (:,iblk) , indxUj(:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx(:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -948,8 +932,8 @@ subroutine anderson_solver (icellt , icellu , & diag_rheo(:,:,:)) ! second compute the full diagonal call formDiag_step2 (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & diag_rheo (:,:,:), vrel (:,:,iblk), & umassdti (:,:,iblk), & uarear (:,:,iblk), Cb (:,:,iblk), & @@ -972,8 +956,8 @@ subroutine anderson_solver (icellt , icellu , & ! 
Put FGMRES solution solx,soly in fpfunc vector (needed for Anderson) call arrays_to_vec (nx_block , ny_block , & nblocks , max_blocks , & - icellu (:), ntot , & - indxui (:,:), indxuj (:,:), & + icellU (:), ntot , & + indxUi (:,:), indxUj (:,:), & solx (:,:,:), soly (:,:,:), & fpfunc (:)) elseif (fpfunc_andacc == 2) then @@ -989,20 +973,14 @@ subroutine anderson_solver (icellt , icellu , & #else call vec_to_arrays (nx_block , ny_block , & nblocks , max_blocks , & - icellu (:), ntot , & - indxui (:,:), indxuj(:,:) , & + icellU (:), ntot , & + indxUi (:,:), indxUj(:,:) , & res (:), & fpresx (:,:,:), fpresy (:,:,:)) - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call calc_L2norm_squared (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & - fpresx(:,:,iblk), fpresy(:,:,iblk), & - L2norm (iblk)) - enddo - !$OMP END PARALLEL DO - fpres_norm = sqrt(global_sum(sum(L2norm), distrb_info)) + fpres_norm = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + fpresx , fpresy ) #endif if (my_task == master_task .and. monitor_nonlin) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_nonlin: iter_nonlin= ", it_nl, & @@ -1108,8 +1086,8 @@ subroutine anderson_solver (icellt , icellu , & !----------------------------------------------------------------------- call vec_to_arrays (nx_block , ny_block , & nblocks , max_blocks , & - icellu (:), ntot , & - indxui (:,:), indxuj (:,:), & + icellU (:), ntot , & + indxUi (:,:), indxUj (:,:), & sol (:), & uvel (:,:,:), vvel (:,:,:)) @@ -1131,14 +1109,12 @@ subroutine anderson_solver (icellt , icellu , & do iblk = 1, nblocks fpresx(:,:,iblk) = uvel(:,:,iblk) - uprev_k(:,:,iblk) fpresy(:,:,iblk) = vvel(:,:,iblk) - vprev_k(:,:,iblk) - call calc_L2norm_squared (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & - fpresx(:,:,iblk), fpresy(:,:,iblk), & - L2norm (iblk)) enddo !$OMP END PARALLEL DO - prog_norm = sqrt(global_sum(sum(L2norm), distrb_info)) + prog_norm = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + fpresx , fpresy ) if (my_task == master_task .and. monitor_nonlin) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_nonlin: iter_nonlin= ", it_nl, & " progress_res_L2norm= ", prog_norm @@ -1153,8 +1129,8 @@ end subroutine anderson_solver ! Computes the viscosities and dPr/dx, dPr/dy subroutine calc_zeta_dPr (nx_block, ny_block, & - icellt , & - indxti , indxtj , & + icellT , & + indxTi , indxTj , & uvel , vvel , & dxT , dyT , & dxhy , dyhx , & @@ -1169,11 +1145,11 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & strength , & ! ice strength (N/m) @@ -1215,14 +1191,14 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & character(len=*), parameter :: subname = '(calc_zeta_dPr)' ! Initialize stPr, zetax2 and etax2 to zero - ! (for cells where icetmask is false) + ! 
(for cells where iceTmask is false) stPr = c0 zetax2 = c0 etax2 = c0 - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! strain rates @@ -1348,8 +1324,8 @@ end subroutine calc_zeta_dPr ! viscous-plastic sea ice stresses, Geosci. Model Dev., 13, 1763–1769, subroutine stress_vp (nx_block , ny_block , & - icellt , & - indxti , indxtj , & + icellT , & + indxTi , indxTj , & uvel , vvel , & dxT , dyT , & cxp , cyp , & @@ -1367,11 +1343,11 @@ subroutine stress_vp (nx_block , ny_block , & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvel , & ! x-component of velocity (m/s) @@ -1406,9 +1382,9 @@ subroutine stress_vp (nx_block , ny_block , & character(len=*), parameter :: subname = '(stress_vp)' - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! strain rates @@ -1458,8 +1434,8 @@ end subroutine stress_vp ! Compute vrel and seabed stress coefficients subroutine calc_vrel_Cb (nx_block, ny_block, & - icellu , Cw , & - indxui , indxuj , & + icellU , Cw , & + indxUi , indxUj , & aiU , TbU , & uocn , vocn , & uvel , vvel , & @@ -1469,11 +1445,11 @@ subroutine calc_vrel_Cb (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & TbU, & ! seabed stress factor (N/m^2) @@ -1505,9 +1481,9 @@ subroutine calc_vrel_Cb (nx_block, ny_block, & if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) ! (magnitude of relative ocean current)*rhow*drag*aice vrel(i,j) = aiU(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uvel(i,j))**2 + & @@ -1523,19 +1499,19 @@ end subroutine calc_vrel_Cb ! Compute seabed stress (diagnostic) subroutine calc_seabed_stress (nx_block, ny_block, & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & uvel , vvel , & Cb , & taubxU , taubyU) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvel , & ! 
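!=======================================================================
! Note: in calc_vrel_Cb above, the linearized ice-ocean drag entering
! the implicit solve is (LaTeX notation)
!
!    \mathrm{vrel} = a_{iU} \, \rho_w \, C_w
!                    \sqrt{(u_o - u)^2 + (v_o - v)^2}
!
! i.e. the magnitude of the relative ocean current times the water
! density, the drag coefficient and the ice fraction; Cb is the
! analogous linearized seabed stress coefficient built from TbU, and
! both are refreshed on each nonlinear pass from the previous velocity
! iterate.
!=======================================================================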
x-component of velocity (m/s) @@ -1553,9 +1529,9 @@ subroutine calc_seabed_stress (nx_block, ny_block, & character(len=*), parameter :: subname = '(calc_seabed_stress)' - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) taubxU(i,j) = -uvel(i,j)*Cb(i,j) taubyU(i,j) = -vvel(i,j)*Cb(i,j) @@ -1570,9 +1546,9 @@ end subroutine calc_seabed_stress ! Av = A(u,v)_[y] * vvel (y components of A(u,v) * (u,v)) subroutine matvec (nx_block, ny_block, & - icellu , icellt , & - indxui , indxuj , & - indxti , indxtj , & + icellU , icellT , & + indxUi , indxUj , & + indxTi , indxTj , & dxT , dyT , & dxhy , dyhx , & cxp , cyp , & @@ -1588,14 +1564,14 @@ subroutine matvec (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu, & ! total count when iceumask is true - icellt ! no. of cells where icetmask = 1 + icellU, & ! total count when iceUmask = .true. + icellT ! total count when iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj , & ! compressed index in j-direction - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj , & ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & dxT , & ! width of T-cell through the middle (m) @@ -1664,9 +1640,9 @@ subroutine matvec (nx_block, ny_block, & str(:,:,:) = c0 - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! strain rates @@ -1807,15 +1783,15 @@ subroutine matvec (nx_block, ny_block, & str(i,j,8) = strp_tmp - strm_tmp + str12sn & - dyhx(i,j)*(csigpsw + csigmsw) + dxhy(i,j)*csig12sw - enddo ! ij - icellt + enddo ! ij - icellT !----------------------------------------------------------------- ! Form Au and Av !----------------------------------------------------------------- - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) ccaimp = umassdti(i,j) + vrel(i,j) * cosw + Cb(i,j) ! kg/m^2 s @@ -1829,7 +1805,7 @@ subroutine matvec (nx_block, ny_block, & Au(i,j) = ccaimp*uvel(i,j) - ccb*vvel(i,j) - strintx Av(i,j) = ccaimp*vvel(i,j) + ccb*uvel(i,j) - strinty - enddo ! ij - icellu + enddo ! ij - icellU end subroutine matvec @@ -1839,8 +1815,8 @@ end subroutine matvec ! does not depend on (u,v) and thus do not change during the nonlinear iteration subroutine calc_bfix (nx_block , ny_block , & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & umassdti , & forcexU , forceyU , & uvel_init, vvel_init, & @@ -1848,11 +1824,11 @@ subroutine calc_bfix (nx_block , ny_block , & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! no. of cells where iceumask = 1 + icellU ! no. of cells where iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvel_init,& ! 
x-component of velocity (m/s), beginning of time step @@ -1872,9 +1848,9 @@ subroutine calc_bfix (nx_block , ny_block , & character(len=*), parameter :: subname = '(calc_bfix)' - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) bxfix(i,j) = umassdti(i,j)*uvel_init(i,j) + forcexU(i,j) byfix(i,j) = umassdti(i,j)*vvel_init(i,j) + forceyU(i,j) @@ -1889,8 +1865,8 @@ end subroutine calc_bfix ! depending on (u,v) subroutine calc_bvec (nx_block, ny_block, & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & stPr , uarear , & waterxU , wateryU , & bxfix , byfix , & @@ -1899,11 +1875,11 @@ subroutine calc_bvec (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uarear , & ! 1/uarea @@ -1941,9 +1917,9 @@ subroutine calc_bvec (nx_block, ny_block, & if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) ! ice/ocean stress taux = vrel(i,j)*waterxU(i,j) ! NOTE this is not the entire @@ -1969,20 +1945,19 @@ end subroutine calc_bvec ! Av = A(u,v)_[y] * vvel (y components of A(u,v) * (u,v)) subroutine residual_vec (nx_block , ny_block, & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & bx , by , & Au , Av , & - Fx , Fy , & - sum_squared) + Fx , Fy ) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & bx , & ! b vector, bx = taux + bxfix (N/m^2) @@ -1994,9 +1969,6 @@ subroutine residual_vec (nx_block , ny_block, & Fx , & ! x residual vector, Fx = bx - Au (N/m^2) Fy ! y residual vector, Fy = by - Av (N/m^2) - real (kind=dbl_kind), intent(out), optional :: & - sum_squared ! sum of squared residual vector components - ! local variables integer (kind=int_kind) :: & @@ -2005,22 +1977,15 @@ subroutine residual_vec (nx_block , ny_block, & character(len=*), parameter :: subname = '(residual_vec)' !----------------------------------------------------------------- - ! compute residual and sum its squared components + ! compute residual !----------------------------------------------------------------- - if (present(sum_squared)) then - sum_squared = c0 - endif - - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) Fx(i,j) = bx(i,j) - Au(i,j) Fy(i,j) = by(i,j) - Av(i,j) - if (present(sum_squared)) then - sum_squared = sum_squared + Fx(i,j)**2 + Fy(i,j)**2 - endif enddo ! ij end subroutine residual_vec @@ -2031,8 +1996,8 @@ end subroutine residual_vec ! 
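!=======================================================================
! Sketch: residual_vec (above) now only forms Fx = bx - Au and
! Fy = by - Av; the squared-sum bookkeeping moved into global_norm /
! global_dot_product, which accumulate per-block partial sums and leave
! the final reduction to global_sum. Serial toy version showing the two
! equivalent formulations:
!=======================================================================

   program norm_sketch
      implicit none
      real(8) :: Fx(3), Fy(3), sum_squared, nrm
      integer :: ij

      Fx = (/1.0d0, 2.0d0, 2.0d0/)
      Fy = (/0.0d0, 1.0d0, 2.0d0/)

      sum_squared = 0.0d0
      do ij = 1, 3                   ! old style: accumulate in place
         sum_squared = sum_squared + Fx(ij)**2 + Fy(ij)**2
      enddo
      ! new style: norm as the square root of a dot product
      nrm = sqrt(dot_product(Fx,Fx) + dot_product(Fy,Fy))
      print *, sqrt(sum_squared), nrm   ! both print 3.7416573...
   end program norm_sketch

!=======================================================================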
Part 1: compute the contributions to the diagonal from the rheology term subroutine formDiag_step1 (nx_block, ny_block, & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & dxT , dyT , & dxhy , dyhx , & cxp , cyp , & @@ -2042,11 +2007,11 @@ subroutine formDiag_step1 (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! no. of cells where icetmask = 1 + icellU ! no. of cells where iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & dxT , & ! width of T-cell through the middle (m) @@ -2202,10 +2167,10 @@ subroutine formDiag_step1 (nx_block, ny_block, & dj = 1 endif - do ij = 1, icellu + do ij = 1, icellU - iu = indxui(ij) - ju = indxuj(ij) + iu = indxUi(ij) + ju = indxUj(ij) i = iu + di j = ju + dj @@ -2406,8 +2371,8 @@ end subroutine formDiag_step1 ! Part 2: compute diagonal subroutine formDiag_step2 (nx_block, ny_block, & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & Drheo , vrel , & umassdti, & uarear , Cb , & @@ -2415,11 +2380,11 @@ subroutine formDiag_step2 (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & vrel, & ! coefficient for tauw @@ -2465,9 +2430,9 @@ subroutine formDiag_step2 (nx_block, ny_block, & ! Drheo(i,j,7) corresponds to str(i+1,j,7) ! Drheo(i,j,8) corresponds to str(i+1,j+1,8)) - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) ccaimp = umassdti(i,j) + vrel(i,j) * cosw + Cb(i,j) ! kg/m^2 s @@ -2484,50 +2449,121 @@ end subroutine formDiag_step2 !======================================================================= -! Compute squared l^2 norm of a grid function (tpu,tpv) +! Compute global l^2 norm of a vector field (field_x, field_y) - subroutine calc_L2norm_squared (nx_block, ny_block, & - icellu , & - indxui , indxuj , & - tpu , tpv , & - L2norm) + function global_norm (nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + field_x , field_y ) & + result(norm) + + use ice_domain, only: distrb_info + use ice_domain_size, only: max_blocks integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + nx_block, ny_block ! block dimensions - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + integer (kind=int_kind), dimension (max_blocks), intent(in) :: & + icellU ! total count when iceumask is true - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - tpu , & ! x-component of vector grid function - tpv ! y-component of vector grid function + integer (kind=int_kind), dimension (nx_block*ny_block,max_blocks), intent(in) :: & + indxUi , & ! 
compressed index in i-direction
+         indxUj        ! compressed index in j-direction
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: &
+         field_x , & ! x-component of vector field
+         field_y     ! y-component of vector field

-      real (kind=dbl_kind), intent(out) :: &
-         L2norm        ! squared l^2 norm of vector grid function (tpu,tpv)
+      real (kind=dbl_kind) :: &
+         norm          ! l^2 norm of vector field

       ! local variables

       integer (kind=int_kind) :: &
-         i, j, ij
+         i, j, ij, iblk

-      character(len=*), parameter :: subname = '(calc_L2norm_squared)'
+      real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: &
+         squared       ! temporary array for summed squared components

-      !-----------------------------------------------------------------
-      ! compute squared l^2 norm of vector grid function (tpu,tpv)
-      !-----------------------------------------------------------------
+      character(len=*), parameter :: subname = '(global_norm)'

-      L2norm = c0
+      norm = sqrt(global_dot_product (nx_block , ny_block , &
+                                      icellU   ,            &
+                                      indxUi   , indxUj   , &
+                                      field_x  , field_y  , &
+                                      field_x  , field_y  ))

-      do ij = 1, icellu
-         i = indxui(ij)
-         j = indxuj(ij)
+      end function global_norm

-         L2norm = L2norm + tpu(i,j)**2 + tpv(i,j)**2
-      enddo ! ij
+!=======================================================================
+
+! Compute global dot product of two grid vectors, each split into X and Y components
+
+      function global_dot_product (nx_block  , ny_block  , &
+                                   icellU    ,             &
+                                   indxUi    , indxUj    , &
+                                   vector1_x , vector1_y , &
+                                   vector2_x , vector2_y ) &
+               result(dot_product)
+
+      use ice_domain, only: distrb_info
+      use ice_domain_size, only: max_blocks
+      use ice_fileunits, only: bfbflag
+
+      integer (kind=int_kind), intent(in) :: &
+         nx_block, ny_block  ! block dimensions
+
+      integer (kind=int_kind), dimension (max_blocks), intent(in) :: &
+         icellU              ! total count when iceUmask = .true.
+
+      integer (kind=int_kind), dimension (nx_block*ny_block,max_blocks), intent(in) :: &
+         indxUi , & ! compressed index in i-direction
+         indxUj     ! compressed index in j-direction
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: &
+         vector1_x , & ! x-component of first vector
+         vector1_y , & ! y-component of first vector
+         vector2_x , & ! x-component of second vector
+         vector2_y     ! y-component of second vector
+
+      real (kind=dbl_kind) :: &
+         dot_product    ! dot product of the two vector fields
+
+      ! local variables
+
+      integer (kind=int_kind) :: &
+         i, j, ij, iblk
+
+      real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: &
+         prod           ! temporary array of element-wise products
+
+      real (kind=dbl_kind), dimension(max_blocks) :: &
+         dot            ! temporary array accumulating the result per block
+
+      character(len=*), parameter :: subname = '(global_dot_product)'
+
+      prod = c0
+      dot  = c0
+
+      !$OMP PARALLEL DO PRIVATE(i, j, ij, iblk)
+      do iblk = 1, nblocks
+         do ij = 1, icellU(iblk)
+            i = indxUi(ij, iblk)
+            j = indxUj(ij, iblk)
+            prod(i,j,iblk) = vector1_x(i,j,iblk)*vector2_x(i,j,iblk) + vector1_y(i,j,iblk)*vector2_y(i,j,iblk)
+            dot(iblk) = dot(iblk) + prod(i,j,iblk)
+         enddo ! ij
+      enddo
+      !$OMP END PARALLEL DO
+
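+      ! Floating-point addition is not associative, so the fast path below
+      ! (summing per-block partial sums) is not bit-for-bit reproducible
+      ! across different block decompositions; when bfbflag is active, the
+      ! full 'prod' array is passed to global_sum instead, so a reproducible
+      ! summation algorithm can be used.
+
+      ! 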
Use local summation result unless bfbflag is active + if (bfbflag == 'off') then + dot_product = global_sum(sum(dot), distrb_info) + else + dot_product = global_sum(prod, distrb_info, field_loc_NEcorner) + endif - end subroutine calc_L2norm_squared + end function global_dot_product !======================================================================= @@ -2535,8 +2571,8 @@ end subroutine calc_L2norm_squared subroutine arrays_to_vec (nx_block, ny_block , & nblocks , max_blocks, & - icellu , ntot , & - indxui , indxuj , & + icellU , ntot , & + indxUi , indxUj , & tpu , tpv , & outvec) @@ -2547,11 +2583,11 @@ subroutine arrays_to_vec (nx_block, ny_block , & ntot ! size of problem for Anderson integer (kind=int_kind), dimension (max_blocks), intent(in) :: & - icellu + icellU integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block, max_blocks), intent(in) :: & tpu , & ! x-component of vector @@ -2575,9 +2611,9 @@ subroutine arrays_to_vec (nx_block, ny_block , & tot = 0 do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) tot = tot + 1 outvec(tot) = tpu(i, j, iblk) tot = tot + 1 @@ -2593,8 +2629,8 @@ end subroutine arrays_to_vec subroutine vec_to_arrays (nx_block, ny_block , & nblocks , max_blocks, & - icellu , ntot , & - indxui , indxuj , & + icellU , ntot , & + indxUi , indxUj , & invec , & tpu , tpv) @@ -2605,11 +2641,11 @@ subroutine vec_to_arrays (nx_block, ny_block , & ntot ! size of problem for Anderson integer (kind=int_kind), dimension (max_blocks), intent(in) :: & - icellu + icellU integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (ntot), intent(in) :: & invec ! input 1D vector @@ -2634,9 +2670,9 @@ subroutine vec_to_arrays (nx_block, ny_block , & tot = 0 do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) tot = tot + 1 tpu(i, j, iblk) = invec(tot) tot = tot + 1 @@ -2780,6 +2816,7 @@ subroutine fgmres (zetax2 , etax2 , & real (kind=dbl_kind) :: & norm_residual , & ! current L^2 norm of residual vector inverse_norm , & ! inverse of the norm of a vector + norm_rhs , & ! L^2 norm of right-hand-side vector nu, t ! local temporary values integer (kind=int_kind) :: & @@ -2819,14 +2856,25 @@ subroutine fgmres (zetax2 , etax2 , & arnoldi_basis_x = c0 arnoldi_basis_y = c0 + ! solution is zero if RHS is zero + norm_rhs = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + bx , by ) + if (norm_rhs == c0) then + solx = bx + soly = by + return + endif + ! 
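+      ! (With b = 0 the system A x = b has the trivial solution x = 0;
+      ! returning bx and by, which are zero fields here, gives that
+      ! solution without entering the Krylov iteration.)

      ! 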
Residual of the initial iterate !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & - icellu (iblk) , icellt (iblk), & - indxui (:,iblk) , indxuj (:,iblk), & - indxti (:,iblk) , indxtj (:,iblk), & + icellU (iblk) , icellT (iblk), & + indxUi (:,iblk) , indxUj (:,iblk), & + indxTi (:,iblk) , indxTj (:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx (:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -2838,8 +2886,8 @@ subroutine fgmres (zetax2 , etax2 , & uarear (:,:,iblk) , & workspace_x(:,:,iblk) , workspace_y(:,:,iblk)) call residual_vec (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & bx (:,:,iblk), by (:,:,iblk), & workspace_x(:,:,iblk), workspace_y(:,:,iblk), & arnoldi_basis_x (:,:,iblk, 1), & @@ -2850,18 +2898,11 @@ subroutine fgmres (zetax2 , etax2 , & ! Start outer (restarts) loop do ! Compute norm of initial residual - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call calc_L2norm_squared(nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:,iblk), & - arnoldi_basis_x(:,:,iblk, 1) , & - arnoldi_basis_y(:,:,iblk, 1) , & - norm_squared(iblk)) - - enddo - !$OMP END PARALLEL DO - norm_residual = sqrt(global_sum(sum(norm_squared), distrb_info)) + norm_residual = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, 1), & + arnoldi_basis_y(:,:,:, 1)) if (my_task == master_task .and. monitor_fgmres) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_fgmres: iter_fgmres= ", nbiter, & @@ -2877,9 +2918,9 @@ subroutine fgmres (zetax2 , etax2 , & inverse_norm = c1 / norm_residual !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, 1) = arnoldi_basis_x(i, j, iblk, 1) * inverse_norm arnoldi_basis_y(i, j, iblk, 1) = arnoldi_basis_y(i, j, iblk, 1) * inverse_norm @@ -2907,6 +2948,7 @@ subroutine fgmres (zetax2 , etax2 , & call precondition(zetax2 , etax2 , & Cb , vrel , & umassdti , & + halo_info_mask, & arnoldi_basis_x(:,:,:,initer), & arnoldi_basis_y(:,:,:,initer), & diagx , diagy , & @@ -2931,9 +2973,9 @@ subroutine fgmres (zetax2 , etax2 , & !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & - icellu (iblk) , icellt (iblk), & - indxui (:,iblk) , indxuj (:,iblk), & - indxti (:,iblk) , indxtj (:,iblk), & + icellU (iblk) , icellT (iblk), & + indxUi (:,iblk) , indxUj (:,iblk), & + indxTi (:,iblk) , indxTj (:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx (:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -2955,17 +2997,11 @@ subroutine fgmres (zetax2 , etax2 , & hessenberg) ! Compute norm of new Arnoldi vector and update Hessenberg matrix - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call calc_L2norm_squared(nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:, iblk) , & - arnoldi_basis_x(:,:,iblk, nextit), & - arnoldi_basis_y(:,:,iblk, nextit), & - norm_squared(iblk)) - enddo - !$OMP END PARALLEL DO - hessenberg(nextit,initer) = sqrt(global_sum(sum(norm_squared), distrb_info)) + hessenberg(nextit,initer) = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, nextit), & + arnoldi_basis_y(:,:,:, nextit)) ! Watch out for happy breakdown if (.not. 
almost_zero( hessenberg(nextit,initer) ) ) then @@ -2973,9 +3009,9 @@ subroutine fgmres (zetax2 , etax2 , & inverse_norm = c1 / hessenberg(nextit,initer) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit)*inverse_norm arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit)*inverse_norm @@ -3039,9 +3075,9 @@ subroutine fgmres (zetax2 , etax2 , & t = rhs_hess(it) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) solx(i, j, iblk) = solx(i, j, iblk) + t * orig_basis_x(i, j, iblk, it) soly(i, j, iblk) = soly(i, j, iblk) + t * orig_basis_y(i, j, iblk, it) @@ -3083,9 +3119,9 @@ subroutine fgmres (zetax2 , etax2 , & do it = 1, nextit !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) workspace_x(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_x(i, j, iblk, it) workspace_y(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_y(i, j, iblk, it) @@ -3109,6 +3145,7 @@ end subroutine fgmres subroutine pgmres (zetax2 , etax2 , & Cb , vrel , & umassdti , & + halo_info_mask, & bx , by , & diagx , diagy , & tolerance, maxinner, & @@ -3116,6 +3153,11 @@ subroutine pgmres (zetax2 , etax2 , & solx , soly , & nbiter) + use ice_boundary, only: ice_HaloUpdate + use ice_domain, only: maskhalo_dyn, halo_info + use ice_fileunits, only: bfbflag + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) etax2 ! etax2 = 2*eta (shear viscosity) @@ -3125,6 +3167,9 @@ subroutine pgmres (zetax2 , etax2 , & Cb , & ! seabed stress coefficient umassdti ! mass of U-cell/dte (kg/m^2 s) + type (ice_halo), intent(in) :: & + halo_info_mask ! ghost cell update info for masked halo + real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & bx , & ! Right hand side of the linear system (x components) by ! Right hand side of the linear system (y components) @@ -3217,9 +3262,9 @@ subroutine pgmres (zetax2 , etax2 , & !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & - icellu (iblk) , icellt (iblk), & - indxui (:,iblk) , indxuj (:,iblk), & - indxti (:,iblk) , indxtj (:,iblk), & + icellU (iblk) , icellT (iblk), & + indxUi (:,iblk) , indxUj (:,iblk), & + indxTi (:,iblk) , indxTj (:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx (:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -3231,8 +3276,8 @@ subroutine pgmres (zetax2 , etax2 , & uarear (:,:,iblk) , & workspace_x(:,:,iblk) , workspace_y(:,:,iblk)) call residual_vec (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & bx (:,:,iblk), by (:,:,iblk), & workspace_x(:,:,iblk), workspace_y(:,:,iblk), & arnoldi_basis_x (:,:,iblk, 1), & @@ -3243,18 +3288,11 @@ subroutine pgmres (zetax2 , etax2 , & ! Start outer (restarts) loop do ! 
Compute norm of initial residual - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call calc_L2norm_squared(nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:, iblk), & - arnoldi_basis_x(:,:,iblk, 1), & - arnoldi_basis_y(:,:,iblk, 1), & - norm_squared(iblk)) - - enddo - !$OMP END PARALLEL DO - norm_residual = sqrt(global_sum(sum(norm_squared), distrb_info)) + norm_residual = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, 1), & + arnoldi_basis_y(:,:,:, 1)) if (my_task == master_task .and. monitor_pgmres) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_pgmres: iter_pgmres= ", nbiter, & @@ -3270,9 +3308,9 @@ subroutine pgmres (zetax2 , etax2 , & inverse_norm = c1 / norm_residual !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, 1) = arnoldi_basis_x(i, j, iblk, 1) * inverse_norm arnoldi_basis_y(i, j, iblk, 1) = arnoldi_basis_y(i, j, iblk, 1) * inverse_norm @@ -3301,21 +3339,36 @@ subroutine pgmres (zetax2 , etax2 , & call precondition(zetax2 , etax2 , & Cb , vrel , & umassdti , & + halo_info_mask, & arnoldi_basis_x(:,:,:,initer), & arnoldi_basis_y(:,:,:,initer), & diagx , diagy , & precond_type, & workspace_x , workspace_y) - ! NOTE: halo updates for (workspace_x, workspace_y) - ! are skipped here for efficiency since this is just a preconditioner + ! Update workspace with boundary values + ! NOTE: skipped for efficiency since this is just a preconditioner + ! unless bfbflag is active + if (bfbflag /= 'off') then + call stack_fields(workspace_x, workspace_y, fld2) + call ice_timer_start(timer_bound) + if (maskhalo_dyn) then + call ice_HaloUpdate (fld2, halo_info_mask, & + field_loc_NEcorner, field_type_vector) + else + call ice_HaloUpdate (fld2, halo_info, & + field_loc_NEcorner, field_type_vector) + endif + call ice_timer_stop(timer_bound) + call unstack_fields(fld2, workspace_x, workspace_y) + endif !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & - icellu (iblk) , icellt (iblk), & - indxui (:,iblk) , indxuj (:,iblk), & - indxti (:,iblk) , indxtj (:,iblk), & + icellU (iblk) , icellT (iblk), & + indxUi (:,iblk) , indxUj (:,iblk), & + indxTi (:,iblk) , indxTj (:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx (:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -3337,17 +3390,11 @@ subroutine pgmres (zetax2 , etax2 , & hessenberg) ! Compute norm of new Arnoldi vector and update Hessenberg matrix - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call calc_L2norm_squared(nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:, iblk) , & - arnoldi_basis_x(:,:,iblk, nextit), & - arnoldi_basis_y(:,:,iblk, nextit), & - norm_squared(iblk)) - enddo - !$OMP END PARALLEL DO - hessenberg(nextit,initer) = sqrt(global_sum(sum(norm_squared), distrb_info)) + hessenberg(nextit,initer) = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, nextit), & + arnoldi_basis_y(:,:,:, nextit)) ! Watch out for happy breakdown if (.not. 
almost_zero( hessenberg(nextit,initer) ) ) then @@ -3355,9 +3402,9 @@ subroutine pgmres (zetax2 , etax2 , & inverse_norm = c1 / hessenberg(nextit,initer) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit)*inverse_norm arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit)*inverse_norm @@ -3423,9 +3470,9 @@ subroutine pgmres (zetax2 , etax2 , & t = rhs_hess(it) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) workspace_x(i, j, iblk) = workspace_x(i, j, iblk) + t * arnoldi_basis_x(i, j, iblk, it) workspace_y(i, j, iblk) = workspace_y(i, j, iblk) + t * arnoldi_basis_y(i, j, iblk, it) @@ -3438,6 +3485,7 @@ subroutine pgmres (zetax2 , etax2 , & call precondition(zetax2 , etax2 , & Cb , vrel , & umassdti , & + halo_info_mask, & workspace_x , workspace_y, & diagx , diagy , & precond_type, & @@ -3479,9 +3527,9 @@ subroutine pgmres (zetax2 , etax2 , & do it = 1, nextit !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) workspace_x(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_x(i, j, iblk, it) workspace_y(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_y(i, j, iblk, it) @@ -3504,6 +3552,7 @@ end subroutine pgmres subroutine precondition(zetax2 , etax2, & Cb , vrel , & umassdti , & + halo_info_mask, & vx , vy , & diagx , diagy, & precond_type, & @@ -3518,6 +3567,9 @@ subroutine precondition(zetax2 , etax2, & Cb , & ! seabed stress coefficient umassdti ! mass of U-cell/dte (kg/m^2 s) + type (ice_halo), intent(in) :: & + halo_info_mask ! ghost cell update info for masked halo + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & vx , & ! input vector (x components) vy ! input vector (y components) @@ -3560,9 +3612,9 @@ subroutine precondition(zetax2 , etax2, & elseif (precond_type == 'diag') then ! Jacobi preconditioner (diagonal) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) wx(i,j,iblk) = vx(i,j,iblk) / diagx(i,j,iblk) wy(i,j,iblk) = vy(i,j,iblk) / diagy(i,j,iblk) @@ -3579,6 +3631,7 @@ subroutine precondition(zetax2 , etax2, & call pgmres (zetax2, etax2 , & Cb , vrel , & umassdti , & + halo_info_mask , & vx , vy , & diagx , diagy , & tolerance, maxinner, & @@ -3626,46 +3679,27 @@ subroutine orthogonalize(ortho_type , initer , & ij , & ! compressed index i, j ! grid indices - real (kind=dbl_kind), dimension (max_blocks) :: & - local_dot ! local array value to accumulate dot product of grid function over blocks - - real (kind=dbl_kind), dimension(maxinner) :: & - dotprod_local ! local array to accumulate several dot product computations - character(len=*), parameter :: subname = '(orthogonalize)' if (trim(ortho_type) == 'cgs') then ! Classical Gram-Schmidt ! Classical Gram-Schmidt orthogonalisation process ! 
First loop of Gram-Schmidt (compute coefficients) - dotprod_local = c0 do it = 1, initer - local_dot = c0 - - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) - - local_dot(iblk) = local_dot(iblk) + & - (arnoldi_basis_x(i, j, iblk, it) * arnoldi_basis_x(i, j, iblk, nextit)) + & - (arnoldi_basis_y(i, j, iblk, it) * arnoldi_basis_y(i, j, iblk, nextit)) - enddo ! ij - enddo - !$OMP END PARALLEL DO - - dotprod_local(it) = sum(local_dot) + hessenberg(it, initer) = global_dot_product(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, it) , & + arnoldi_basis_y(:,:,:, it) , & + arnoldi_basis_x(:,:,:, nextit), & + arnoldi_basis_y(:,:,:, nextit)) end do - - hessenberg(1:initer, initer) = global_allreduce_sum(dotprod_local(1:initer), distrb_info) - ! Second loop of Gram-Schmidt (orthonormalize) do it = 1, initer !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit) & - hessenberg(it,initer) * arnoldi_basis_x(i, j, iblk, it) @@ -3678,28 +3712,19 @@ subroutine orthogonalize(ortho_type , initer , & elseif (trim(ortho_type) == 'mgs') then ! Modified Gram-Schmidt ! Modified Gram-Schmidt orthogonalisation process do it = 1, initer - local_dot = c0 - - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) - - local_dot(iblk) = local_dot(iblk) + & - (arnoldi_basis_x(i, j, iblk, it) * arnoldi_basis_x(i, j, iblk, nextit)) + & - (arnoldi_basis_y(i, j, iblk, it) * arnoldi_basis_y(i, j, iblk, nextit)) - enddo ! ij - enddo - !$OMP END PARALLEL DO - - hessenberg(it,initer) = global_sum(sum(local_dot), distrb_info) + hessenberg(it, initer) = global_dot_product(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, it) , & + arnoldi_basis_y(:,:,:, it) , & + arnoldi_basis_x(:,:,:, nextit), & + arnoldi_basis_y(:,:,:, nextit)) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit) & - hessenberg(it,initer) * arnoldi_basis_x(i, j, iblk, it) diff --git a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 index 43fe5af13..30fe546e0 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 @@ -1534,13 +1534,8 @@ subroutine state_to_work (nx_block, ny_block, & integer (kind=int_kind) :: & nt_alvl, nt_apnd, nt_fbri -#ifdef UNDEPRECATE_CESMPONDS - logical (kind=log_kind) :: & - tr_pond_cesm, tr_pond_lvl, tr_pond_topo -#else logical (kind=log_kind) :: & tr_pond_lvl, tr_pond_topo -#endif integer (kind=int_kind) :: & i, j, n, it, & ! 
counting indices @@ -1548,13 +1543,8 @@ subroutine state_to_work (nx_block, ny_block, & character(len=*), parameter :: subname = '(state_to_work)' -#ifdef UNDEPRECATE_CESMPONDS - call icepack_query_tracer_flags(tr_pond_cesm_out=tr_pond_cesm, & - tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo) -#else call icepack_query_tracer_flags(tr_pond_lvl_out=tr_pond_lvl, & tr_pond_topo_out=tr_pond_topo) -#endif call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_apnd_out=nt_apnd, & nt_fbri_out=nt_fbri) call icepack_warnings_flush(nu_diag) @@ -1613,13 +1603,8 @@ subroutine state_to_work (nx_block, ny_block, & * trcrn(i,j,it ,n) enddo enddo -#ifdef UNDEPRECATE_CESMPONDS - elseif (trcr_depend(it) == 2+nt_apnd .and. & - tr_pond_cesm .or. tr_pond_topo) then -#else elseif (trcr_depend(it) == 2+nt_apnd .and. & tr_pond_topo) then -#endif do j = 1, ny_block do i = 1, nx_block works(i,j,narrays+it) = aicen(i,j ,n) & diff --git a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 index 6fd037b7b..286a51711 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 @@ -633,6 +633,7 @@ subroutine horizontal_remap (dt, ntrace, & endif ! nghost + ! tcraig, this OMP loop sometimes fails with cce/14.0.3, compiler bug?? !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,n, & !$OMP edgearea_e,edgearea_n,edge,iflux,jflux, & !$OMP xp,yp,indxing,indxjng,mflxe,mflxn, & diff --git a/cicecore/cicedynB/general/ice_flux.F90 b/cicecore/cicedynB/general/ice_flux.F90 index a7e5aa584..8d190753e 100644 --- a/cicecore/cicedynB/general/ice_flux.F90 +++ b/cicecore/cicedynB/general/ice_flux.F90 @@ -35,6 +35,16 @@ module ice_flux ! Dynamics component ! All variables are assumed to be on the atm or ocn thermodynamic ! grid except as noted + ! + ! scale_fluxes divides several of these by aice "in place", so + ! the state of some of these variables is not well defined. In the + ! future, we need to refactor and add "_iavg" versions of the + ! fields to clearly differentiate fields that have been divided + ! by aice and others that are not. The challenge is that we need + ! to go thru each field carefully to see which version is used. + ! For instance, in diagnostics, there are places where these + ! fields are multiplied by aice to compute things properly. + ! strocn[x,y]T_iavg is the first field defined using _iavg. !----------------------------------------------------------------- real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & @@ -56,8 +66,8 @@ module ice_flux ! out to ocean T-cell (kg/m s^2) ! Note, CICE_IN_NEMO uses strocnx and strocny for coupling - strocnxT, & ! ice-ocean stress, x-direction at T points, per ice fraction - strocnyT ! ice-ocean stress, y-direction at T points, per ice fraction + strocnxT_iavg, & ! ice-ocean stress, x-direction at T points, per ice fraction (scaled flux) + strocnyT_iavg ! ice-ocean stress, y-direction at T points, per ice fraction (scaled flux) ! diagnostic @@ -389,8 +399,8 @@ subroutine alloc_flux hwater (nx_block,ny_block,max_blocks), & ! water depth for seabed stress calc (landfast ice) strairxT (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction strairyT (nx_block,ny_block,max_blocks), & ! stress on ice by air, y-direction - strocnxT (nx_block,ny_block,max_blocks), & ! ice-ocean stress, x-direction - strocnyT (nx_block,ny_block,max_blocks), & ! 
ice-ocean stress, y-direction + strocnxT_iavg(nx_block,ny_block,max_blocks), & ! ice-ocean stress, x-direction, per ice area + strocnyT_iavg(nx_block,ny_block,max_blocks), & ! ice-ocean stress, y-direction, per ice area sig1 (nx_block,ny_block,max_blocks), & ! normalized principal stress component sig2 (nx_block,ny_block,max_blocks), & ! normalized principal stress component sigP (nx_block,ny_block,max_blocks), & ! internal ice pressure (N/m) @@ -765,8 +775,8 @@ subroutine init_coupler_flux ! fluxes sent to ocean !----------------------------------------------------------------- - strocnxT(:,:,:) = c0 ! ice-ocean stress, x-direction (T-cell) - strocnyT(:,:,:) = c0 ! ice-ocean stress, y-direction (T-cell) + strocnxT_iavg (:,:,:) = c0 ! ice-ocean stress, x-direction (T-cell) + strocnyT_iavg (:,:,:) = c0 ! ice-ocean stress, y-direction (T-cell) fresh (:,:,:) = c0 fsalt (:,:,:) = c0 fpond (:,:,:) = c0 diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90 index edff03b9f..ff79778c5 100755 --- a/cicecore/cicedynB/general/ice_forcing.F90 +++ b/cicecore/cicedynB/general/ice_forcing.F90 @@ -22,7 +22,7 @@ module ice_forcing use ice_boundary, only: ice_HaloUpdate use ice_blocks, only: nx_block, ny_block use ice_domain, only: halo_info - use ice_domain_size, only: ncat, max_blocks, nx_global, ny_global + use ice_domain_size, only: ncat, max_blocks, nx_global, ny_global, nfreq use ice_communicate, only: my_task, master_task use ice_calendar, only: istep, istep1, & msec, mday, mmonth, myear, yday, daycal, & @@ -116,6 +116,9 @@ module ice_forcing topmelt_data, & botmelt_data + real (kind=dbl_kind), dimension(:,:,:,:,:), allocatable :: & + wave_spectrum_data ! field values at 2 temporal data points + character(char_len), public :: & atm_data_format, & ! 'bin'=binary or 'nc'=netcdf ocn_data_format, & ! 'bin'=binary or 'nc'=netcdf @@ -229,6 +232,7 @@ subroutine alloc_forcing ocn_frc_m(nx_block,ny_block, max_blocks,nfld,12), & ! ocn data for 12 months topmelt_file(ncat), & botmelt_file(ncat), & + wave_spectrum_data(nx_block,ny_block,nfreq,2,max_blocks), & stat=ierr) if (ierr/=0) call abort_ice('(alloc_forcing): Out of Memory') @@ -1527,6 +1531,44 @@ subroutine interpolate_data (field_data, field) end subroutine interpolate_data +!======================================================================= + + subroutine interpolate_wavespec_data (field_data, field) + +! Linear interpolation + +! author: Elizabeth C. Hunke, LANL + + use ice_domain, only: nblocks + + real (kind=dbl_kind), dimension(nx_block,ny_block,nfreq,2,max_blocks), intent(in) :: & + field_data ! 2 values used for interpolation + + real (kind=dbl_kind), dimension(nx_block,ny_block,nfreq,max_blocks), intent(out) :: & + field ! interpolated field + + ! 
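+      ! Each output value is a linear blend of the two bracketing records:
+      !    field = c1intp*field_data(...,1,...) + c2intp*field_data(...,2,...)
+      ! where c1intp + c2intp = 1 are the time weights set by interp_coeff.

+      ! 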
local variables
+
+      integer (kind=int_kind) :: i, j, iblk, freq
+
+      character(len=*), parameter :: subname = '(interpolate_wavespec_data)'
+
+      !$OMP PARALLEL DO PRIVATE(iblk,i,j,freq)
+      do iblk = 1, nblocks
+         do j = 1, ny_block
+         do i = 1, nx_block
+            do freq = 1, nfreq
+               field(i,j,freq,iblk) = c1intp * field_data(i,j,freq,1,iblk) &
+                                    + c2intp * field_data(i,j,freq,2,iblk)
+            enddo
+         enddo
+         enddo
+      enddo
+      !$OMP END PARALLEL DO
+
+      end subroutine interpolate_wavespec_data
+
 !=======================================================================

       subroutine file_year (data_file, yr)
@@ -4129,8 +4171,8 @@ subroutine ocn_data_ncar_init_3D

             work1(:,:,:) = ocn_frc_m(:,:,:,n  ,m)
             work2(:,:,:) = ocn_frc_m(:,:,:,n+1,m)
-            call grid_average_X2Y('F',work1,'T',ocn_frc_m(:,:,:,n  ,m),'U')
-            call grid_average_X2Y('F',work2,'T',ocn_frc_m(:,:,:,n+1,m),'U')
+            call grid_average_X2Y('A',work1,'T',ocn_frc_m(:,:,:,n  ,m),'U')
+            call grid_average_X2Y('A',work2,'T',ocn_frc_m(:,:,:,n+1,m),'U')

          enddo ! month loop
       enddo ! field loop
@@ -4373,7 +4415,7 @@ subroutine ocn_data_hadgem(dt)
       use ice_domain, only: nblocks
       use ice_domain_size, only: max_blocks
       use ice_flux, only: sst, uocn, vocn
-      use ice_grid, only: grid_average_X2Y, ANGLET
+      use ice_grid, only: ANGLET

       real (kind=dbl_kind), intent(in) :: &
          dt      ! time step
@@ -5566,7 +5608,6 @@ subroutine get_wave_spec
       use ice_arrays_column, only: wave_spectrum, &
          dwavefreq, wavefreq
       use ice_constants, only: c0
-      use ice_domain_size, only: nfreq
       use ice_timers, only: ice_timer_start, ice_timer_stop, timer_fsd

       ! local variables
@@ -5592,7 +5633,6 @@ subroutine get_wave_spec

       ! if no wave data is provided, wave_spectrum is zero everywhere
       wave_spectrum(:,:,:,:) = c0
-      wave_spec_dir = ocn_data_dir
       debug_forcing = .false.

       ! wave spectrum and frequencies
@@ -5610,10 +5650,7 @@ subroutine get_wave_spec
             file=__FILE__, line=__LINE__)
       else
 #ifdef USE_NETCDF
-         call ice_open_nc(wave_spec_file,fid)
-         call ice_read_nc_xyf (fid, 1, 'efreq', wave_spectrum(:,:,:,:), debug_forcing, &
-                               field_loc_center, field_type_scalar)
-         call ice_close_nc(fid)
+         call wave_spec_data
 #else
         write (nu_diag,*) "wave spectrum file not available, requires cpp USE_NETCDF"
        write (nu_diag,*) "wave spectrum file not available, using default profile"
@@ -5628,6 +5665,146 @@ end subroutine get_wave_spec

+!=======================================================================
+!
+! Read in wave spectrum forcing as a function of time (6-hourly records).
+! Adapted by LR from the JRA55_data routine: the fields were changed, and
+! the 3-hourly record cadence was changed to 6-hourly.
+!
+      subroutine wave_spec_data
+
+      use ice_blocks, only: block, get_block
+      use ice_global_reductions, only: global_minval, global_maxval
+      use ice_domain, only: nblocks, distrb_info, blocks_ice
+      use ice_arrays_column, only: wave_spectrum, &
+          dwavefreq, wavefreq
+      use ice_read_write, only: ice_read_nc_xyf
+      use ice_grid, only: hm, tlon, tlat, tmask, umask
+      use ice_calendar, only: days_per_year, use_leap_years
+
+      integer (kind=int_kind) :: &
+         ncid            , & ! netcdf file id
+         i, j, freq      , &
+         ixm,ixx,ixp     , & ! record numbers for neighboring data records
+         recnum          , & ! record number
+         maxrec          , & ! maximum record number
+         recslot         , & ! spline slot for current record
+         midmonth        , & ! middle day of month
+         dataloc         , & ! = 1 for data located in middle of time interval
+                             ! = 2 for data located at end of time interval
+         iblk            , & ! block index
+         ilo,ihi,jlo,jhi , & ! beginning and end of physical domain
+         yr                  ! current forcing year

+      real (kind=dbl_kind) :: &
+         sec6hr          , & ! 
number of seconds in 6 hours
+         secday          , & ! number of seconds in day
+         vmin, vmax
+
+      logical (kind=log_kind) :: readm, read6, debug_n_d
+
+      type (block) :: &
+         this_block          ! block information for current block
+
+      real(kind=dbl_kind), dimension(nfreq) :: &
+         wave_spectrum_profile  ! wave spectrum
+
+      character(len=64) :: fieldname !netcdf field name
+      character(char_len_long) :: spec_file
+      character(char_len) :: wave_spec_type
+      logical (kind=log_kind) :: wave_spec
+      character(len=*), parameter :: subname = '(wave_spec_data)'
+
+      debug_n_d = .false. !usually false
+
+      call icepack_query_parameters(secday_out=secday)
+      call icepack_warnings_flush(nu_diag)
+      if (icepack_warnings_aborted()) call abort_ice(error_message=subname, &
+         file=__FILE__, line=__LINE__)
+
+      call icepack_init_wave(nfreq, &
+                             wave_spectrum_profile, &
+                             wavefreq, dwavefreq)
+
+      !spec_file = trim(ocn_data_dir)//'/'//trim(wave_spec_file)
+      spec_file = trim(wave_spec_file)
+      wave_spectrum_data = c0
+      wave_spectrum = c0
+      yr = fyear  ! current year
+
+      !-------------------------------------------------------------------
+      ! 6-hourly data
+      !
+      ! Assume that the 6-hourly value is located at the end of the
+      ! 6-hour period.  This is the convention for NCEP reanalysis data.
+      ! E.g. record 1 gives conditions at 6 am GMT on 1 January.
+      !-------------------------------------------------------------------
+
+      dataloc = 2          ! data located at end of interval
+      sec6hr = secday/c4   ! seconds in 6 hours
+      !maxrec = 2920       ! 365*8; for leap years = 366*8
+
+      if (use_leap_years) days_per_year = 366 !overrides setting of 365 in ice_calendar
+      maxrec = days_per_year*4
+
+      if(days_per_year == 365 .and. (mod(yr, 4) == 0)) then
+         call abort_ice('days_per_year should be set to 366 for leap years')
+      end if
+
+      ! current record number
+      recnum = 4*int(yday) - 3 + int(real(msec,kind=dbl_kind)/sec6hr)
+
+      ! Compute record numbers for surrounding data (2 on each side)
+
+      ixm = mod(recnum+maxrec-2,maxrec) + 1
+      ixx = mod(recnum-1,       maxrec) + 1
+
+      ! Compute interpolation coefficients
+      ! If data is located at the end of the time interval, then the
+      ! data value for the current record goes in slot 2
+
+      recslot = 2
+      ixp = -99
+      call interp_coeff (recnum, recslot, sec6hr, dataloc)
+
+      ! Read
+      read6 = .false.
+      if (istep==1 .or. oldrecnum .ne. recnum) read6 = .true.
+
+      !-------------------------------------------------------------------
+      ! File is NETCDF
+      ! file variable names are:
+      ! efreq (wave spectrum, energy as a function of wave frequency UNITS)
+      !-------------------------------------------------------------------
+      call ice_open_nc(spec_file,ncid)
+
+      call ice_read_nc_xyf(ncid,recnum,'efreq',wave_spectrum_data(:,:,:,1,:),debug_n_d, &
+                           field_loc=field_loc_center, &
+                           field_type=field_type_scalar)
+      call ice_read_nc_xyf(ncid,recnum,'efreq',wave_spectrum_data(:,:,:,2,:),debug_n_d, &
+                           field_loc=field_loc_center, &
+                           field_type=field_type_scalar)
+      call ice_close_nc(ncid)
+
+      ! Interpolate
+      call interpolate_wavespec_data (wave_spectrum_data, wave_spectrum)
+
+      ! Save record number
+      oldrecnum = recnum
+
+      if (local_debug) then
+         if (my_task == master_task) then
+            write (nu_diag,*) 'wave_spec_data ',spec_file
+            write (nu_diag,*) 'maxrec',maxrec
+            write (nu_diag,*) 'days_per_year', days_per_year
+         endif
+      endif ! local debug
+
+      end subroutine wave_spec_data
+
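The 6-hourly record arithmetic above is easiest to check with a concrete case.
Below is a minimal standalone sketch (not part of the patch; the weights are
computed inline here as stand-ins for what interp_coeff derives, and all names
are local to the example):

      program wavespec_recnum_demo
         implicit none
         integer, parameter :: dbl_kind = selected_real_kind(13)
         real (kind=dbl_kind), parameter :: secday = 86400.0_dbl_kind
         real (kind=dbl_kind) :: sec6hr, c1intp, c2intp
         integer :: recnum, yday, msec

         yday   = 1        ! 1 January
         msec   = 32400    ! 09:00 GMT
         sec6hr = secday/4.0_dbl_kind   ! 21600 s

         ! records end at 06:00, 12:00, 18:00 and 24:00 GMT, so 09:00 GMT
         ! falls in the interval that ends with record 2 (12:00)
         recnum = 4*yday - 3 + int(real(msec,kind=dbl_kind)/sec6hr)

         ! the fraction of the 6-h interval already elapsed weights the
         ! record at the end of the interval (slot 2); the remainder
         ! weights the preceding record (slot 1)
         c2intp = mod(real(msec,kind=dbl_kind), sec6hr) / sec6hr
         c1intp = 1.0_dbl_kind - c2intp

         ! at 09:00 this prints recnum = 2 and equal weights of one half
         write(*,*) 'recnum =', recnum, ' c1intp =', c1intp, ' c2intp =', c2intp
      end program wavespec_recnum_demo

!=======================================================================
! 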
initial snow aging lookup table diff --git a/cicecore/cicedynB/general/ice_forcing_bgc.F90 b/cicecore/cicedynB/general/ice_forcing_bgc.F90 index fc440834c..2f07d05f1 100644 --- a/cicecore/cicedynB/general/ice_forcing_bgc.F90 +++ b/cicecore/cicedynB/general/ice_forcing_bgc.F90 @@ -889,68 +889,116 @@ subroutine faero_optics ! this data is used in bulk aerosol treatment in dEdd radiation kaer_tab = reshape((/ & ! aerosol mass extinction cross section (m2/kg) - 11580.61872, 5535.41835, 2793.79690, & - 25798.96479, 11536.03871, 4688.24207, & - 196.49772, 204.14078, 214.42287, & - 2665.85867, 2256.71027, 820.36024, & - 840.78295, 1028.24656, 1163.03298, & - 387.51211, 414.68808, 450.29814/), & +! 11580.61872, 5535.41835, 2793.79690, & +! 25798.96479, 11536.03871, 4688.24207, & +! 196.49772, 204.14078, 214.42287, & +! 2665.85867, 2256.71027, 820.36024, & +! 840.78295, 1028.24656, 1163.03298, & +! 387.51211, 414.68808, 450.29814/), & + 11580.61872_dbl_kind, 5535.41835_dbl_kind, 2793.79690_dbl_kind, & + 25798.96479_dbl_kind, 11536.03871_dbl_kind, 4688.24207_dbl_kind, & + 196.49772_dbl_kind, 204.14078_dbl_kind, 214.42287_dbl_kind, & + 2665.85867_dbl_kind, 2256.71027_dbl_kind, 820.36024_dbl_kind, & + 840.78295_dbl_kind, 1028.24656_dbl_kind, 1163.03298_dbl_kind, & + 387.51211_dbl_kind, 414.68808_dbl_kind, 450.29814_dbl_kind/), & (/icepack_nspint,icepack_max_aero/)) waer_tab = reshape((/ & ! aerosol single scatter albedo (fraction) - 0.29003, 0.17349, 0.06613, & - 0.51731, 0.41609, 0.21324, & - 0.84467, 0.94216, 0.95666, & - 0.97764, 0.99402, 0.98552, & - 0.94146, 0.98527, 0.99093, & - 0.90034, 0.96543, 0.97678/), & +! 0.29003, 0.17349, 0.06613, & +! 0.51731, 0.41609, 0.21324, & +! 0.84467, 0.94216, 0.95666, & +! 0.97764, 0.99402, 0.98552, & +! 0.94146, 0.98527, 0.99093, & +! 0.90034, 0.96543, 0.97678/), & + 0.29003_dbl_kind, 0.17349_dbl_kind, 0.06613_dbl_kind, & + 0.51731_dbl_kind, 0.41609_dbl_kind, 0.21324_dbl_kind, & + 0.84467_dbl_kind, 0.94216_dbl_kind, 0.95666_dbl_kind, & + 0.97764_dbl_kind, 0.99402_dbl_kind, 0.98552_dbl_kind, & + 0.94146_dbl_kind, 0.98527_dbl_kind, 0.99093_dbl_kind, & + 0.90034_dbl_kind, 0.96543_dbl_kind, 0.97678_dbl_kind/), & (/icepack_nspint,icepack_max_aero/)) gaer_tab = reshape((/ & ! aerosol asymmetry parameter (cos(theta)) - 0.35445, 0.19838, 0.08857, & - 0.52581, 0.32384, 0.14970, & - 0.83162, 0.78306, 0.74375, & - 0.68861, 0.70836, 0.54171, & - 0.70239, 0.66115, 0.71983, & - 0.78734, 0.73580, 0.64411/), & +! 0.35445, 0.19838, 0.08857, & +! 0.52581, 0.32384, 0.14970, & +! 0.83162, 0.78306, 0.74375, & +! 0.68861, 0.70836, 0.54171, & +! 0.70239, 0.66115, 0.71983, & +! 0.78734, 0.73580, 0.64411/), & + 0.35445_dbl_kind, 0.19838_dbl_kind, 0.08857_dbl_kind, & + 0.52581_dbl_kind, 0.32384_dbl_kind, 0.14970_dbl_kind, & + 0.83162_dbl_kind, 0.78306_dbl_kind, 0.74375_dbl_kind, & + 0.68861_dbl_kind, 0.70836_dbl_kind, 0.54171_dbl_kind, & + 0.70239_dbl_kind, 0.66115_dbl_kind, 0.71983_dbl_kind, & + 0.78734_dbl_kind, 0.73580_dbl_kind, 0.64411_dbl_kind/), & (/icepack_nspint,icepack_max_aero/)) ! this data is used in MODAL AEROSOL treatment in dEdd radiation kaer_bc_tab = reshape((/ & ! 
aerosol mass extinction cross section (m2/kg) - 12955.44732, 5946.89461, 2772.33366, & - 12085.30664, 7438.83131, 3657.13084, & - 9753.99698, 7342.87139, 4187.79304, & - 7815.74879, 6659.65096, 4337.98863, & - 6381.28194, 5876.78408, 4254.65054, & - 5326.93163, 5156.74532, 4053.66581, & - 4538.09763, 4538.60875, 3804.10884, & - 3934.17604, 4020.20799, 3543.27199, & - 3461.20656, 3587.80962, 3289.98060, & - 3083.03396, 3226.27231, 3052.91441/), & +! 12955.44732, 5946.89461, 2772.33366, & +! 12085.30664, 7438.83131, 3657.13084, & +! 9753.99698, 7342.87139, 4187.79304, & +! 7815.74879, 6659.65096, 4337.98863, & +! 6381.28194, 5876.78408, 4254.65054, & +! 5326.93163, 5156.74532, 4053.66581, & +! 4538.09763, 4538.60875, 3804.10884, & +! 3934.17604, 4020.20799, 3543.27199, & +! 3461.20656, 3587.80962, 3289.98060, & +! 3083.03396, 3226.27231, 3052.91441/), & + 12955.4473151973_dbl_kind, 5946.89461205564_dbl_kind, 2772.33366387720_dbl_kind, & + 12085.3066388712_dbl_kind, 7438.83131367992_dbl_kind, 3657.13084442081_dbl_kind, & + 9753.99697536893_dbl_kind, 7342.87139082553_dbl_kind, 4187.79303607928_dbl_kind, & + 7815.74879345131_dbl_kind, 6659.65096365965_dbl_kind, 4337.98863414228_dbl_kind, & + 6381.28194381772_dbl_kind, 5876.78408231865_dbl_kind, 4254.65053724305_dbl_kind, & + 5326.93163497508_dbl_kind, 5156.74531505734_dbl_kind, 4053.66581550147_dbl_kind, & + 4538.09762614960_dbl_kind, 4538.60874501597_dbl_kind, 3804.10884202567_dbl_kind, & + 3934.17604000777_dbl_kind, 4020.20798667897_dbl_kind, 3543.27199302277_dbl_kind, & + 3461.20655708248_dbl_kind, 3587.80961820605_dbl_kind, 3289.98060303894_dbl_kind, & + 3083.03396032095_dbl_kind, 3226.27231329114_dbl_kind, 3052.91440681137_dbl_kind/), & (/icepack_nspint,10/)) waer_bc_tab = reshape((/ & ! aerosol single scatter albedo (fraction) - 0.26107, 0.15861, 0.06535, & - 0.37559, 0.30318, 0.19483, & - 0.42224, 0.36913, 0.27875, & - 0.44777, 0.40503, 0.33026, & - 0.46444, 0.42744, 0.36426, & - 0.47667, 0.44285, 0.38827, & - 0.48635, 0.45428, 0.40617, & - 0.49440, 0.46328, 0.42008, & - 0.50131, 0.47070, 0.43128, & - 0.50736, 0.47704, 0.44056/), & +! 0.26107, 0.15861, 0.06535, & +! 0.37559, 0.30318, 0.19483, & +! 0.42224, 0.36913, 0.27875, & +! 0.44777, 0.40503, 0.33026, & +! 0.46444, 0.42744, 0.36426, & +! 0.47667, 0.44285, 0.38827, & +! 0.48635, 0.45428, 0.40617, & +! 0.49440, 0.46328, 0.42008, & +! 0.50131, 0.47070, 0.43128, & +! 0.50736, 0.47704, 0.44056/), & + 0.261071919959011_dbl_kind, 0.158608047940651_dbl_kind, 0.0653546447770291_dbl_kind, & + 0.375593873543050_dbl_kind, 0.303181671502553_dbl_kind, 0.194832290545495_dbl_kind, & + 0.422240383488477_dbl_kind, 0.369134186611324_dbl_kind, 0.278752556671685_dbl_kind, & + 0.447772153910671_dbl_kind, 0.405033725319593_dbl_kind, 0.330260831965086_dbl_kind, & + 0.464443094570456_dbl_kind, 0.427439117980081_dbl_kind, 0.364256689383418_dbl_kind, & + 0.476668995985241_dbl_kind, 0.442854173154887_dbl_kind, 0.388270470928338_dbl_kind, & + 0.486347881475941_dbl_kind, 0.454284736567521_dbl_kind, 0.406167596922937_dbl_kind, & + 0.494397834153785_dbl_kind, 0.463279526357470_dbl_kind, 0.420084410794128_dbl_kind, & + 0.501307856563459_dbl_kind, 0.470696914968199_dbl_kind, 0.431284889617716_dbl_kind, & + 0.507362336297419_dbl_kind, 0.477038272961243_dbl_kind, 0.440559363958571_dbl_kind/), & (/icepack_nspint,10/)) gaer_bc_tab = reshape((/ & ! 
aerosol asymmetry parameter (cos(theta)) - 0.28328, 0.19644, 0.10498, & - 0.44488, 0.32615, 0.19612, & - 0.54724, 0.41611, 0.26390, & - 0.61711, 0.48475, 0.31922, & - 0.66673, 0.53923, 0.36632, & - 0.70296, 0.58337, 0.40732, & - 0.73002, 0.61960, 0.44344, & - 0.75064, 0.64959, 0.47551, & - 0.76663, 0.67461, 0.50415, & - 0.77926, 0.69561, 0.52981/),& +! 0.28328, 0.19644, 0.10498, & +! 0.44488, 0.32615, 0.19612, & +! 0.54724, 0.41611, 0.26390, & +! 0.61711, 0.48475, 0.31922, & +! 0.66673, 0.53923, 0.36632, & +! 0.70296, 0.58337, 0.40732, & +! 0.73002, 0.61960, 0.44344, & +! 0.75064, 0.64959, 0.47551, & +! 0.76663, 0.67461, 0.50415, & +! 0.77926, 0.69561, 0.52981/),& + 0.283282988564031_dbl_kind, 0.196444209821980_dbl_kind, 0.104976473902976_dbl_kind, & + 0.444877326083453_dbl_kind, 0.326147707342261_dbl_kind, 0.196121968923488_dbl_kind, & + 0.547243414035631_dbl_kind, 0.416106187964493_dbl_kind, 0.263903486903711_dbl_kind, & + 0.617111563012282_dbl_kind, 0.484745531707601_dbl_kind, 0.319218974395050_dbl_kind, & + 0.666728525631754_dbl_kind, 0.539228555802301_dbl_kind, 0.366323180358996_dbl_kind, & + 0.702956870835387_dbl_kind, 0.583372441336763_dbl_kind, 0.407316408184865_dbl_kind, & + 0.730016668453191_dbl_kind, 0.619595539349710_dbl_kind, 0.443436944107423_dbl_kind, & + 0.750635997128011_dbl_kind, 0.649589805870541_dbl_kind, 0.475512089138887_dbl_kind, & + 0.766634959089444_dbl_kind, 0.674609076223658_dbl_kind, 0.504145461809103_dbl_kind, & + 0.779256641759228_dbl_kind, 0.695614224933709_dbl_kind, 0.529805346632687_dbl_kind/), & (/icepack_nspint,10/)) bcenh(:,:,:) = c0 diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index c2cc986f8..45ae58d8b 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -73,15 +73,9 @@ subroutine input_data npt, dt, ndtd, days_per_year, use_leap_years, & write_ic, dump_last, npt_unit use ice_arrays_column, only: oceanmixed_ice -#ifdef UNDEPRECATE_CESMPONDS - use ice_restart_column, only: restart_age, restart_FY, restart_lvl, & - restart_pond_cesm, restart_pond_lvl, restart_pond_topo, restart_aero, & - restart_fsd, restart_iso, restart_snow -#else use ice_restart_column, only: restart_age, restart_FY, restart_lvl, & restart_pond_lvl, restart_pond_topo, restart_aero, & restart_fsd, restart_iso, restart_snow -#endif use ice_restart_shared, only: & restart, restart_ext, restart_coszen, restart_dir, restart_file, pointer_file, & runid, runtype, use_restart_time, restart_format, lcdf64 @@ -110,8 +104,8 @@ subroutine input_data grid_ice, grid_ice_thrm, grid_ice_dynu, grid_ice_dynv, & grid_ocn, grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv, & grid_atm, grid_atm_thrm, grid_atm_dynu, grid_atm_dynv, & - dxrect, dyrect, & - pgl_global_ext + dxrect, dyrect, dxscale, dyscale, scale_dxdy, & + lonrefrect, latrefrect, pgl_global_ext use ice_dyn_shared, only: ndte, kdyn, revised_evp, yield_curve, & evp_algorithm, visc_method, & seabed_stress, seabed_stress_method, & @@ -165,23 +159,12 @@ subroutine input_data logical (kind=log_kind) :: tr_iage, tr_FY, tr_lvl, tr_pond logical (kind=log_kind) :: tr_iso, tr_aero, tr_fsd, tr_snow -#ifdef UNDEPRECATE_CESMPONDS - logical (kind=log_kind) :: tr_pond_cesm, tr_pond_lvl, tr_pond_topo -#else logical (kind=log_kind) :: tr_pond_lvl, tr_pond_topo -#endif integer (kind=int_kind) :: numin, numax ! 
unit number limits -#ifdef UNDEPRECATE_CESMPONDS - integer (kind=int_kind) :: rpcesm, rplvl, rptopo -#else integer (kind=int_kind) :: rplvl, rptopo -#endif real (kind=dbl_kind) :: Cf, ksno, puny character (len=char_len) :: abort_list -#ifdef CESMCOUPLED - character (len=64) :: tmpstr -#endif character (len=128) :: tmpstr2 character(len=*), parameter :: subname='(input_data)' @@ -212,6 +195,8 @@ subroutine input_data bathymetry_file, use_bathymetry, nfsd, bathymetry_format, & ncat, nilyr, nslyr, nblyr, & kcatbound, gridcpl_file, dxrect, dyrect, & + dxscale, dyscale, lonrefrect, latrefrect, & + scale_dxdy, & close_boundaries, orca_halogrid, grid_ice, kmt_type, & grid_atm, grid_ocn @@ -219,9 +204,6 @@ subroutine input_data tr_iage, restart_age, & tr_FY, restart_FY, & tr_lvl, restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, restart_pond_cesm, & -#endif tr_pond_lvl, restart_pond_lvl, & tr_pond_topo, restart_pond_topo, & tr_snow, restart_snow, & @@ -398,6 +380,11 @@ subroutine input_data ksno = 0.3_dbl_kind ! snow thermal conductivity dxrect = 0.0_dbl_kind ! user defined grid spacing in cm in x direction dyrect = 0.0_dbl_kind ! user defined grid spacing in cm in y direction + lonrefrect = -156.50_dbl_kind ! lower left corner lon for rectgrid + latrefrect = 71.35_dbl_kind ! lower left corner lat for rectgrid + scale_dxdy = .false. ! apply dxscale, dyscale to rectgrid + dxscale = 1.0_dbl_kind ! user defined rectgrid x-grid scale factor (e.g., 1.02) + dyscale = 1.0_dbl_kind ! user defined rectgrid y-grid scale factor (e.g., 1.02) close_boundaries = .false. ! true = set land on edges of grid seabed_stress= .false. ! if true, seabed stress for landfast is on seabed_stress_method = 'LKD'! LKD = Lemieux et al 2015, probabilistic = Dupont et al. in prep @@ -412,7 +399,7 @@ subroutine input_data deltaminEVP = 1e-11_dbl_kind ! minimum delta for viscosities (EVP, Hunke 2001) deltaminVP = 2e-9_dbl_kind ! minimum delta for viscosities (VP, Hibler 1979) capping_method = 'max' ! method for capping of viscosities (max=Hibler 1979,sum=Kreyscher2000) - maxits_nonlin = 4 ! max nb of iteration for nonlinear solver + maxits_nonlin = 10 ! max nb of iteration for nonlinear solver precond = 'pgmres' ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), ! 'pgmres' (Jacobi-preconditioned GMRES) dim_fgmres = 50 ! size of fgmres Krylov subspace @@ -424,7 +411,7 @@ subroutine input_data monitor_pgmres = .false. ! print pgmres residual norm ortho_type = 'mgs' ! orthogonalization procedure 'cgs' or 'mgs' reltol_nonlin = 1e-8_dbl_kind ! nonlinear stopping criterion: reltol_nonlin*res(k=0) - reltol_fgmres = 1e-2_dbl_kind ! fgmres stopping criterion: reltol_fgmres*res(k) + reltol_fgmres = 1e-1_dbl_kind ! fgmres stopping criterion: reltol_fgmres*res(k) reltol_pgmres = 1e-6_dbl_kind ! pgmres stopping criterion: reltol_pgmres*res(k) algo_nonlin = 'picard' ! nonlinear algorithm: 'picard' (Picard iteration), 'anderson' (Anderson acceleration) fpfunc_andacc = 1 ! fixed point function for Anderson acceleration: @@ -438,11 +425,7 @@ subroutine input_data conserv_check = .false. ! tracer conservation check shortwave = 'ccsm3' ! 'ccsm3' or 'dEdd' (delta-Eddington) albedo_type = 'ccsm3' ! 'ccsm3' or 'constant' -#ifdef UNDEPRECATE_0LAYER - ktherm = 1 ! -1 = OFF, 0 = 0-layer, 1 = BL99, 2 = mushy thermo -#else ktherm = 1 ! -1 = OFF, 1 = BL99, 2 = mushy thermo -#endif conduct = 'bubbly' ! 'MU71' or 'bubbly' (Pringle et al 2007) coriolis = 'latitude' ! latitude dependent, or 'constant' ssh_stress = 'geostrophic' ! 
'geostrophic' or 'coupled' @@ -552,10 +535,6 @@ subroutine input_data restart_FY = .false. ! ice age restart tr_lvl = .false. ! level ice restart_lvl = .false. ! level ice restart -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm = .false. ! CESM melt ponds - restart_pond_cesm = .false. ! melt ponds restart -#endif tr_pond_lvl = .false. ! level-ice melt ponds restart_pond_lvl = .false. ! melt ponds restart tr_pond_topo = .false. ! explicit melt ponds (topographic) @@ -853,6 +832,11 @@ subroutine input_data call broadcast_scalar(grid_format, master_task) call broadcast_scalar(dxrect, master_task) call broadcast_scalar(dyrect, master_task) + call broadcast_scalar(scale_dxdy, master_task) + call broadcast_scalar(dxscale, master_task) + call broadcast_scalar(dyscale, master_task) + call broadcast_scalar(lonrefrect, master_task) + call broadcast_scalar(latrefrect, master_task) call broadcast_scalar(close_boundaries, master_task) call broadcast_scalar(grid_type, master_task) call broadcast_scalar(grid_ice, master_task) @@ -968,6 +952,7 @@ subroutine input_data call broadcast_scalar(albsnowi, master_task) call broadcast_scalar(ahmax, master_task) call broadcast_scalar(atmbndy, master_task) + call broadcast_scalar(default_season, master_task) call broadcast_scalar(fyear_init, master_task) call broadcast_scalar(ycycle, master_task) call broadcast_scalar(atm_data_format, master_task) @@ -1021,10 +1006,6 @@ subroutine input_data call broadcast_scalar(restart_FY, master_task) call broadcast_scalar(tr_lvl, master_task) call broadcast_scalar(restart_lvl, master_task) -#ifdef UNDEPRECATE_CESMPONDS - call broadcast_scalar(tr_pond_cesm, master_task) - call broadcast_scalar(restart_pond_cesm, master_task) -#endif call broadcast_scalar(tr_pond_lvl, master_task) call broadcast_scalar(restart_pond_lvl, master_task) call broadcast_scalar(tr_pond_topo, master_task) @@ -1117,9 +1098,6 @@ subroutine input_data restart_age = .false. restart_fy = .false. restart_lvl = .false. -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm = .false. -#endif restart_pond_lvl = .false. restart_pond_topo = .false. restart_snow = .false. @@ -1236,29 +1214,15 @@ subroutine input_data endif endif -#ifdef UNDEPRECATE_CESMPONDS - rpcesm = 0 -#endif rplvl = 0 rptopo = 0 -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) rpcesm = 1 -#endif if (tr_pond_lvl ) rplvl = 1 if (tr_pond_topo) rptopo = 1 tr_pond = .false. ! explicit melt ponds -#ifdef UNDEPRECATE_CESMPONDS - if (rpcesm + rplvl + rptopo > 0) tr_pond = .true. -#else if (rplvl + rptopo > 0) tr_pond = .true. -#endif -#ifdef UNDEPRECATE_CESMPONDS - if (rpcesm + rplvl + rptopo > 1) then -#else if (rplvl + rptopo > 1) then -#endif if (my_task == master_task) then write(nu_diag,*) subname//' ERROR: Must use only one melt pond scheme' endif @@ -1482,13 +1446,6 @@ subroutine input_data abort_list = trim(abort_list)//":16" endif -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: formdrag=T and frzpnd=cesm' - abort_list = trim(abort_list)//":17" - endif -#endif - if (.not. tr_lvl) then if (my_task == master_task) write(nu_diag,*) subname//' ERROR: formdrag=T and tr_lvl=F' abort_list = trim(abort_list)//":18" @@ -1570,7 +1527,12 @@ subroutine input_data wave_spec = .false. if (tr_fsd .and. (trim(wave_spec_type) /= 'none')) wave_spec = .true. - + if (tr_fsd .and. 
(trim(wave_spec_type) == 'none')) then + if (my_task == master_task) then + write(nu_diag,*) subname//' WARNING: tr_fsd=T but wave_spec=F - not recommended' + endif + end if + ! compute grid locations for thermo, u and v fields grid_ice_thrm = 'T' @@ -1907,10 +1869,6 @@ subroutine input_data tmpstr2 = ' : Bitz and Lipscomb 1999 thermo' elseif (ktherm == 2) then tmpstr2 = ' : mushy-layer thermo' -#ifdef UNDEPRECATE_0LAYER - elseif (ktherm == 0) then - tmpstr2 = ' : zero-layer thermo' -#endif elseif (ktherm < 0) then tmpstr2 = ' : Thermodynamics disabled' else @@ -2062,18 +2020,18 @@ subroutine input_data if (wave_spec) then tmpstr2 = ' : use wave spectrum for floe size distribution' else - tmpstr2 = ' : floe size distribution does not use wave spectrum' + tmpstr2 = 'WARNING : floe size distribution does not use wave spectrum' endif write(nu_diag,1010) ' wave_spec = ', wave_spec,trim(tmpstr2) if (wave_spec) then if (trim(wave_spec_type) == 'none') then tmpstr2 = ' : no wave data provided, no wave-ice interactions' elseif (trim(wave_spec_type) == 'profile') then - tmpstr2 = ' : use fixed dummy wave spectrum for testing' + tmpstr2 = ' : use fixed dummy wave spectrum for testing, sea surface height generated using constant phase (1 iteration of wave fracture)' elseif (trim(wave_spec_type) == 'constant') then - tmpstr2 = ' : constant wave spectrum data file provided for testing' + tmpstr2 = ' : wave spectrum data file provided, sea surface height generated using constant phase (1 iteration of wave fracture)' elseif (trim(wave_spec_type) == 'random') then - tmpstr2 = ' : wave data file provided, spectrum generated using random number' + tmpstr2 = ' : wave spectrum data file provided, sea surface height generated using random number (multiple iterations of wave fracture to convergence)' else tmpstr2 = ' : unknown value' endif @@ -2091,14 +2049,7 @@ subroutine input_data write(nu_diag,*) ' ' write(nu_diag,*) ' Melt ponds' write(nu_diag,*) '--------------------------------' -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - write(nu_diag,1010) ' tr_pond_cesm = ', tr_pond_cesm,' : CESM pond formulation' - write(nu_diag,1002) ' pndaspect = ', pndaspect,' : ratio of pond depth to area fraction' - elseif (tr_pond_lvl) then -#else if (tr_pond_lvl) then -#endif write(nu_diag,1010) ' tr_pond_lvl = ', tr_pond_lvl,' : level-ice pond formulation' write(nu_diag,1002) ' pndaspect = ', pndaspect,' : ratio of pond depth to area fraction' write(nu_diag,1000) ' dpscale = ', dpscale,' : time scale for flushing in permeable ice' @@ -2211,9 +2162,6 @@ subroutine input_data if (tr_lvl) write(nu_diag,1010) ' tr_lvl = ', tr_lvl,' : ridging related tracers' if (tr_pond_lvl) write(nu_diag,1010) ' tr_pond_lvl = ', tr_pond_lvl,' : level-ice pond formulation' if (tr_pond_topo) write(nu_diag,1010) ' tr_pond_topo = ', tr_pond_topo,' : topo pond formulation' -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) write(nu_diag,1010) ' tr_pond_cesm = ', tr_pond_cesm,' : CESM pond formulation' -#endif if (tr_snow) write(nu_diag,1010) ' tr_snow = ', tr_snow,' : advanced snow physics' if (tr_iage) write(nu_diag,1010) ' tr_iage = ', tr_iage,' : chronological ice age' if (tr_FY) write(nu_diag,1010) ' tr_FY = ', tr_FY,' : first-year ice area' @@ -2338,9 +2286,6 @@ subroutine input_data write(nu_diag,1011) ' restart_age = ', restart_age write(nu_diag,1011) ' restart_FY = ', restart_FY write(nu_diag,1011) ' restart_lvl = ', restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - write(nu_diag,1011) ' restart_pond_cesm= ', restart_pond_cesm -#endif 
write(nu_diag,1011) ' restart_pond_lvl = ', restart_pond_lvl write(nu_diag,1011) ' restart_pond_topo= ', restart_pond_topo write(nu_diag,1011) ' restart_snow = ', restart_snow @@ -2439,11 +2384,7 @@ subroutine input_data call icepack_init_tracer_flags(tr_iage_in=tr_iage, tr_FY_in=tr_FY, & tr_lvl_in=tr_lvl, tr_iso_in=tr_iso, tr_aero_in=tr_aero, & tr_fsd_in=tr_fsd, tr_snow_in=tr_snow, tr_pond_in=tr_pond, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm_in=tr_pond_cesm, tr_pond_lvl_in=tr_pond_lvl, tr_pond_topo_in=tr_pond_topo) -#else tr_pond_lvl_in=tr_pond_lvl, tr_pond_topo_in=tr_pond_topo) -#endif call icepack_init_tracer_sizes(ncat_in=ncat, nilyr_in=nilyr, nslyr_in=nslyr, nblyr_in=nblyr, & nfsd_in=nfsd, n_algae_in=n_algae, n_iso_in=n_iso, n_aero_in=n_aero, & n_DOC_in=n_DOC, n_DON_in=n_DON, & @@ -2500,18 +2441,10 @@ subroutine init_state it , & ! tracer index iblk ! block index -#ifdef UNDEPRECATE_0LAYER - logical (kind=log_kind) :: & - heat_capacity ! from icepack -#endif integer (kind=int_kind) :: ntrcr logical (kind=log_kind) :: tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero -#ifdef UNDEPRECATE_CESMPONDS - logical (kind=log_kind) :: tr_pond_cesm, tr_pond_lvl, tr_pond_topo -#else logical (kind=log_kind) :: tr_pond_lvl, tr_pond_topo -#endif logical (kind=log_kind) :: tr_snow, tr_fsd integer (kind=int_kind) :: nt_Tsfc, nt_sice, nt_qice, nt_qsno, nt_iage, nt_FY integer (kind=int_kind) :: nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd @@ -2525,17 +2458,10 @@ subroutine init_state !----------------------------------------------------------------- -#ifdef UNDEPRECATE_0LAYER - call icepack_query_parameters(heat_capacity_out=heat_capacity) -#endif call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & tr_lvl_out=tr_lvl, tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo, & -#else tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo, & -#endif tr_snow_out=tr_snow, tr_fsd_out=tr_fsd) call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc, nt_sice_out=nt_sice, & nt_qice_out=nt_qice, nt_qsno_out=nt_qsno, nt_iage_out=nt_iage, nt_fy_out=nt_fy, & @@ -2569,25 +2495,6 @@ subroutine init_state file=__FILE__, line=__LINE__) endif -#ifdef UNDEPRECATE_0LAYER - if (.not.heat_capacity) then - - if (nilyr > 1) then - write(nu_diag,*) subname//' ERROR: Must have nilyr = 1 if heat_capacity=F' - write(nu_diag,*) subname//' ERROR: nilyr =', nilyr - call abort_ice(error_message=subname//' Too many ice layers', & - file=__FILE__, line=__LINE__) - endif - - if (nslyr > 1) then - write(nu_diag,*) subname//' ERROR: Must have nslyr = 1 if heat_capacity=F' - write(nu_diag,*) subname//' ERROR: nslyr =', nslyr - call abort_ice(error_message=subname//' Too many snow layers', & - file=__FILE__, line=__LINE__) - endif - - endif ! heat_capacity = F -#endif endif ! my_task !----------------------------------------------------------------- @@ -2606,12 +2513,6 @@ subroutine init_state if (tr_FY) trcr_depend(nt_FY) = 0 ! area-weighted first-year ice area if (tr_lvl) trcr_depend(nt_alvl) = 0 ! level ice area if (tr_lvl) trcr_depend(nt_vlvl) = 1 ! level ice volume -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - trcr_depend(nt_apnd) = 0 ! melt pond area - trcr_depend(nt_hpnd) = 2+nt_apnd ! melt pond depth - endif -#endif if (tr_pond_lvl) then trcr_depend(nt_apnd) = 2+nt_alvl ! melt pond area trcr_depend(nt_hpnd) = 2+nt_apnd ! 
melt pond depth @@ -2673,12 +2574,6 @@ subroutine init_state nt_strata (it,2) = 0 enddo -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - n_trcr_strata(nt_hpnd) = 1 ! melt pond depth - nt_strata (nt_hpnd,1) = nt_apnd ! on melt pond area - endif -#endif if (tr_pond_lvl) then n_trcr_strata(nt_apnd) = 1 ! melt pond area nt_strata (nt_apnd,1) = nt_alvl ! on level ice area diff --git a/cicecore/cicedynB/general/ice_state.F90 b/cicecore/cicedynB/general/ice_state.F90 index 4a3f83ce2..862f0a8bc 100644 --- a/cicecore/cicedynB/general/ice_state.F90 +++ b/cicecore/cicedynB/general/ice_state.F90 @@ -54,7 +54,8 @@ module ice_state real (kind=dbl_kind), dimension(:,:,:), allocatable, & public :: & - aice , & ! concentration of ice + aice , & ! concentration of ice on T grid + aiU , & ! concentration of ice on U grid vice , & ! volume per unit area of ice (m) vsno ! volume per unit area of snow (m) @@ -149,7 +150,8 @@ subroutine alloc_state file=__FILE__, line=__LINE__) allocate ( & - aice (nx_block,ny_block,max_blocks) , & ! concentration of ice + aice (nx_block,ny_block,max_blocks) , & ! concentration of ice T grid + aiU (nx_block,ny_block,max_blocks) , & ! concentration of ice U grid vice (nx_block,ny_block,max_blocks) , & ! volume per unit area of ice (m) vsno (nx_block,ny_block,max_blocks) , & ! volume per unit area of snow (m) aice0 (nx_block,ny_block,max_blocks) , & ! concentration of open water @@ -188,6 +190,10 @@ subroutine alloc_state trcr_base = c0 aicen = c0 aicen_init = c0 + vicen = c0 + vicen_init = c0 + vsnon = c0 + vsnon_init = c0 end subroutine alloc_state diff --git a/cicecore/cicedynB/general/ice_step_mod.F90 b/cicecore/cicedynB/general/ice_step_mod.F90 index b6f8741c0..8dd6fe49a 100644 --- a/cicecore/cicedynB/general/ice_step_mod.F90 +++ b/cicecore/cicedynB/general/ice_step_mod.F90 @@ -12,7 +12,14 @@ module ice_step_mod use ice_kinds_mod + use ice_blocks, only: block, get_block + use ice_blocks, only: nx_block, ny_block use ice_constants, only: c0, c1, c1000, c4, p25 + use ice_constants, only: field_loc_center, field_loc_NEcorner, & + field_loc_Nface, field_loc_Eface, & + field_type_scalar, field_type_vector + use ice_domain, only: halo_info, nblocks, blocks_ice + use ice_domain_size, only: max_blocks use ice_exit, only: abort_ice use ice_fileunits, only: nu_diag use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted @@ -39,6 +46,10 @@ module ice_step_mod step_snow, prep_radiation, step_radiation, ocean_mixed_layer, & update_state, biogeochemistry, step_dyn_wave, step_prep + real (kind=dbl_kind), dimension (:,:,:), allocatable :: & + uvelT_icep, & ! uvel for wind stress computation in icepack + vvelT_icep ! vvel for wind stress computation in icepack + !======================================================================= contains @@ -73,6 +84,13 @@ subroutine step_prep use ice_flux, only: uatm, vatm, uatmT, vatmT use ice_grid, only: grid_atm_dynu, grid_atm_dynv, grid_average_X2Y + use ice_state, only: uvel, vvel + + logical (kind=log_kind) :: & + highfreq ! highfreq flag + + logical (kind=log_kind), save :: & + first_call = .true. ! first call flag character(len=*), parameter :: subname = '(step_prep)' @@ -85,6 +103,26 @@ subroutine step_prep call grid_average_X2Y('S',uatm,grid_atm_dynu,uatmT,'T') call grid_average_X2Y('S',vatm,grid_atm_dynv,vatmT,'T') + !----------------------------------------------------------------- + ! 
Compute uvelT_icep, vvelT_icep + !----------------------------------------------------------------- + + if (first_call) then + allocate(uvelT_icep(nx_block,ny_block,max_blocks)) + allocate(vvelT_icep(nx_block,ny_block,max_blocks)) + uvelT_icep = c0 + vvelT_icep = c0 + endif + + call icepack_query_parameters(highfreq_out=highfreq) + + if (highfreq) then + call grid_average_X2Y('A', uvel, 'U', uvelT_icep, 'T') + call grid_average_X2Y('A', vvel, 'U', vvelT_icep, 'T') + endif + + first_call = .false. + end subroutine step_prep !======================================================================= @@ -95,8 +133,6 @@ end subroutine step_prep subroutine prep_radiation (iblk) - use ice_blocks, only: block, get_block - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & alvdr_ai, alvdf_ai, alidr_ai, alidf_ai, & @@ -188,14 +224,9 @@ subroutine step_therm1 (dt, iblk) hfreebd, hdraft, hridge, distrdg, hkeel, dkeel, lfloe, dfloe, & fswsfcn, fswintn, Sswabsn, Iswabsn, meltsliqn, meltsliq, & fswthrun, fswthrun_vdr, fswthrun_vdf, fswthrun_idr, fswthrun_idf - use ice_blocks, only: block, get_block -#ifdef CICE_IN_NEMO - use ice_blocks, only: nx_block, ny_block -#endif use ice_calendar, only: yday - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, n_iso, n_aero - use ice_flux, only: frzmlt, sst, Tf, strocnxT, strocnyT, rside, fbot, Tbot, Tsnice, & + use ice_flux, only: frzmlt, sst, Tf, strocnxT_iavg, strocnyT_iavg, rside, fbot, Tbot, Tsnice, & meltsn, melttn, meltbn, congeln, snoicen, uatmT, vatmT, fside, & wind, rhoa, potT, Qa, zlvl, zlvs, strax, stray, flatn, fsensn, fsurfn, fcondtopn, & flw, fsnow, fpond, sss, mlt_onset, frz_onset, fcondbotn, fcondbot, fsloss, & @@ -209,7 +240,7 @@ subroutine step_therm1 (dt, iblk) Qa_iso, Qref_iso, fiso_evap, HDO_ocn, H2_16O_ocn, H2_18O_ocn use ice_grid, only: lmask_n, lmask_s, tmask use ice_state, only: aice, aicen, aicen_init, vicen_init, & - vice, vicen, vsno, vsnon, trcrn, uvel, vvel, vsnon_init + vice, vicen, vsno, vsnon, trcrn, vsnon_init #ifdef CICE_IN_NEMO use ice_state, only: aice_init #endif @@ -243,16 +274,10 @@ subroutine step_therm1 (dt, iblk) nt_isosno, nt_isoice, nt_rsnw, nt_smice, nt_smliq logical (kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_iso, tr_aero, tr_pond, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_iso, tr_aero, tr_pond, & -#endif tr_pond_lvl, tr_pond_topo, calc_Tsfc, highfreq, tr_snow real (kind=dbl_kind) :: & - uvel_center, & ! cell-centered velocity, x component (m/s) - vvel_center, & ! cell-centered velocity, y component (m/s) puny ! a very small number real (kind=dbl_kind), dimension(n_aero,2,ncat) :: & @@ -275,11 +300,7 @@ subroutine step_therm1 (dt, iblk) call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_iso_out=tr_iso, & -#ifdef UNDEPRECATE_CESMPONDS - tr_aero_out=tr_aero, tr_pond_out=tr_pond, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_aero_out=tr_aero, tr_pond_out=tr_pond, & -#endif tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo, & tr_snow_out=tr_snow) call icepack_query_tracer_indices( & @@ -336,16 +357,6 @@ subroutine step_therm1 (dt, iblk) do j = jlo, jhi do i = ilo, ihi - if (highfreq) then ! include ice velocity in calculation of wind stress - uvel_center = p25*(uvel(i,j ,iblk) + uvel(i-1,j ,iblk) & ! cell-centered velocity - + uvel(i,j-1,iblk) + uvel(i-1,j-1,iblk)) ! 
assumes wind components - vvel_center = p25*(vvel(i,j ,iblk) + vvel(i-1,j ,iblk) & ! are also cell-centered - + vvel(i,j-1,iblk) + vvel(i-1,j-1,iblk)) - else - uvel_center = c0 ! not used - vvel_center = c0 - endif ! highfreq - if (tr_snow) then do n = 1, ncat do k = 1, nslyr @@ -391,8 +402,8 @@ subroutine step_therm1 (dt, iblk) vicen = vicen (i,j,:,iblk), & vsno = vsno (i,j, iblk), & vsnon = vsnon (i,j,:,iblk), & - uvel = uvel_center , & - vvel = vvel_center , & + uvel = uvelT_icep (i,j, iblk), & + vvel = vvelT_icep (i,j, iblk), & Tsfc = trcrn (i,j,nt_Tsfc,:,iblk), & zqsn = trcrn (i,j,nt_qsno:nt_qsno+nslyr-1,:,iblk), & zqin = trcrn (i,j,nt_qice:nt_qice+nilyr-1,:,iblk), & @@ -450,8 +461,8 @@ subroutine step_therm1 (dt, iblk) sst = sst (i,j, iblk), & sss = sss (i,j, iblk), & Tf = Tf (i,j, iblk), & - strocnxT = strocnxT (i,j, iblk), & - strocnyT = strocnyT (i,j, iblk), & + strocnxT = strocnxT_iavg(i,j, iblk), & + strocnyT = strocnyT_iavg(i,j, iblk), & fbot = fbot (i,j, iblk), & Tbot = Tbot (i,j, iblk), & Tsnice = Tsnice (i,j, iblk), & @@ -604,9 +615,7 @@ subroutine step_therm2 (dt, iblk) wave_spectrum, wavefreq, dwavefreq, & first_ice, bgrid, cgrid, igrid, floe_rad_c, floe_binwidth, & d_afsd_latg, d_afsd_newi, d_afsd_latm, d_afsd_weld - use ice_blocks, only: block, get_block use ice_calendar, only: yday - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, nblyr, nfsd use ice_flux, only: fresh, frain, fpond, frzmlt, frazil, frz_onset, & update_ocn_f, fsalt, Tf, sss, salinz, fhocn, rside, fside, & @@ -746,8 +755,6 @@ end subroutine step_therm2 subroutine update_state (dt, daidt, dvidt, dagedt, offset) - use ice_blocks, only: nx_block, ny_block - use ice_domain, only: nblocks use ice_domain_size, only: ncat ! use ice_grid, only: tmask use ice_state, only: aicen, trcrn, vicen, vsnon, & @@ -864,8 +871,6 @@ subroutine step_dyn_wave (dt) use ice_arrays_column, only: wave_spectrum, & d_afsd_wave, floe_rad_l, floe_rad_c, wavefreq, dwavefreq - use ice_blocks, only: block, get_block - use ice_domain, only: blocks_ice, nblocks use ice_domain_size, only: ncat, nfsd, nfreq use ice_state, only: trcrn, aicen, aice, vice use ice_timers, only: ice_timer_start, ice_timer_stop, timer_column, & @@ -939,16 +944,34 @@ end subroutine step_dyn_wave subroutine step_dyn_horiz (dt) + use ice_boundary, only: ice_HaloUpdate use ice_dyn_evp, only: evp use ice_dyn_eap, only: eap use ice_dyn_vp, only: implicit_solver use ice_dyn_shared, only: kdyn + use ice_flux, only: strocnxU, strocnyU, strocnxT_iavg, strocnyT_iavg use ice_flux, only: init_history_dyn + use ice_grid, only: grid_average_X2Y + use ice_state, only: aiU use ice_transport_driver, only: advection, transport_upwind, transport_remap real (kind=dbl_kind), intent(in) :: & dt ! dynamics time step + ! local variables + + type (block) :: & + this_block ! block information for current block + + integer (kind=int_kind) :: & + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + iblk, & ! block index + i, j ! horizontal indices + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1, & ! temporary + work2 ! temporary + character(len=*), parameter :: subname = '(step_dyn_horiz)' call init_history_dyn ! initialize dynamic history variables @@ -961,6 +984,38 @@ subroutine step_dyn_horiz (dt) if (kdyn == 2) call eap (dt) if (kdyn == 3) call implicit_solver (dt) + !----------------------------------------------------------------- + ! 
Compute strocnxT_iavg, strocnyT_iavg for thermo and coupling + !----------------------------------------------------------------- + + ! strocn computed on U, N, E as needed. Map strocn U divided by aiU to T + ! conservation requires aiU be divided before averaging + work1 = c0 + work2 = c0 + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk), iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + if (aiU(i,j,iblk) /= c0) then + work1(i,j,iblk) = strocnxU(i,j,iblk)/aiU(i,j,iblk) + work2(i,j,iblk) = strocnyU(i,j,iblk)/aiU(i,j,iblk) + endif + enddo + enddo + enddo + !$OMP END PARALLEL DO + call ice_HaloUpdate (work1, halo_info, & + field_loc_NEcorner, field_type_vector) + call ice_HaloUpdate (work2, halo_info, & + field_loc_NEcorner, field_type_vector) + call grid_average_X2Y('F', work1, 'U', strocnxT_iavg, 'T') ! shift + call grid_average_X2Y('F', work2, 'U', strocnyT_iavg, 'T') + !----------------------------------------------------------------- ! Horizontal ice transport !----------------------------------------------------------------- @@ -983,8 +1038,6 @@ end subroutine step_dyn_horiz subroutine step_dyn_ridge (dt, ndtd, iblk) use ice_arrays_column, only: hin_max, fzsal, first_ice - use ice_blocks, only: block, get_block - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, n_aero, nblyr use ice_flux, only: & rdg_conv, rdg_shear, dardg1dt, dardg2dt, & @@ -1107,9 +1160,7 @@ end subroutine step_dyn_ridge subroutine step_snow (dt, iblk) - use ice_blocks, only: block, get_block use ice_calendar, only: nstreams - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nslyr, nilyr use ice_flux, only: snwcnt, wind, fresh, fhocn, fsloss, fsnow use ice_state, only: trcrn, vsno, vsnon, vicen, aicen, aice @@ -1225,9 +1276,7 @@ subroutine step_radiation (dt, iblk) alvdrn, alidrn, alvdfn, alidfn, apeffn, trcrn_sw, snowfracn, & kaer_tab, waer_tab, gaer_tab, kaer_bc_tab, waer_bc_tab, & gaer_bc_tab, bcenh, swgrid, igrid - use ice_blocks, only: block, get_block use ice_calendar, only: calendar_type, days_per_year, nextsw_cday, yday, msec - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, n_aero, nilyr, nslyr, n_zaero, n_algae, nblyr use ice_flux, only: swvdr, swvdf, swidr, swidf, coszen, fsnow use ice_grid, only: TLAT, TLON, tmask @@ -1415,7 +1464,6 @@ end subroutine step_radiation subroutine ocean_mixed_layer (dt, iblk) use ice_arrays_column, only: Cdn_atm, Cdn_atm_ratio - use ice_blocks, only: nx_block, ny_block use ice_flux, only: sst, Tf, Qa, uatmT, vatmT, wind, potT, rhoa, zlvl, & frzmlt, fhocn, fswthru, flw, flwout_ocn, fsens_ocn, flat_ocn, evap_ocn, & alvdr_ocn, alidr_ocn, alvdf_ocn, alidf_ocn, swidf, swvdf, swidr, swvdr, & @@ -1572,8 +1620,6 @@ subroutine biogeochemistry (dt, iblk) snow_bio_net, fswthrun, Rayleigh_criteria, & ocean_bio_all, sice_rho, fzsal, fzsal_g, & bgrid, igrid, icgrid, cgrid - use ice_blocks, only: block, get_block - use ice_domain, only: blocks_ice use ice_domain_size, only: nblyr, nilyr, nslyr, n_algae, n_zaero, ncat, & n_doc, n_dic, n_don, n_fed, n_fep use ice_flux, only: meltbn, melttn, congeln, snoicen, & diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 index 2b64f8932..9fda67dad 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 +++ 
b/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 @@ -81,6 +81,7 @@ module ice_boundary module procedure ice_HaloUpdate2DR8, & ice_HaloUpdate2DR4, & ice_HaloUpdate2DI4, & + ice_HaloUpdate2DL1, & ice_HaloUpdate3DR8, & ice_HaloUpdate3DR4, & ice_HaloUpdate3DI4, & @@ -2384,6 +2385,69 @@ subroutine ice_HaloUpdate2DI4(array, halo, & end subroutine ice_HaloUpdate2DI4 +!*********************************************************************** + + subroutine ice_HaloUpdate2DL1(array, halo, & + fieldLoc, fieldKind, & + fillValue) + +! This routine updates ghost cells for an input array and is a +! member of a group of routines under the generic interface +! ice\_HaloUpdate. This routine is the specific interface +! for 2d horizontal logical arrays. + + type (ice_halo), intent(in) :: & + halo ! precomputed halo structure containing all + ! information needed for halo update + + integer (int_kind), intent(in) :: & + fieldKind, &! id for type of field (scalar, vector, angle) + fieldLoc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + integer (int_kind), intent(in), optional :: & + fillValue ! optional value to put in ghost cells + ! where neighbor points are unknown + ! (e.g. eliminated land blocks or + ! closed boundaries) + + logical (log_kind), dimension(:,:,:), intent(inout) :: & + array ! array containing field for which halo + ! needs to be updated + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind), dimension(:,:,:), allocatable :: & + iarray ! integer array for logical + + character(len=*), parameter :: subname = '(ice_HaloUpdate2DL1)' + +!----------------------------------------------------------------------- +! +! copy logical into integer array and call haloupdate on integer array +! +!----------------------------------------------------------------------- + + allocate(iarray(size(array,dim=1),size(array,dim=2),size(array,dim=3))) + iarray(:,:,:) = 0 + where (array) iarray = 1 + + call ice_HaloUpdate(iarray, halo, & + fieldLoc, fieldKind, & + fillValue) + + array = .false. + where (iarray /= 0) array = .true. + deallocate(iarray) + +!----------------------------------------------------------------------- + + end subroutine ice_HaloUpdate2DL1 + !*********************************************************************** subroutine ice_HaloUpdate3DR8(array, halo, & diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 index a5fed760b..4b94389f7 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 @@ -36,7 +36,6 @@ module ice_global_reductions private public :: global_sum, & - global_allreduce_sum, & global_sum_prod, & global_maxval, & global_minval @@ -56,12 +55,6 @@ module ice_global_reductions global_sum_scalar_int end interface - interface global_allreduce_sum - module procedure global_allreduce_sum_vector_dbl!, & - ! module procedure global_allreduce_sum_vector_real, & ! not yet implemented - ! module procedure global_allreduce_sum_vector_int ! 
not yet implemented - end interface - interface global_sum_prod module procedure global_sum_prod_dbl, & global_sum_prod_real, & @@ -707,68 +700,6 @@ function global_sum_scalar_int(scalar, dist) & end function global_sum_scalar_int -!*********************************************************************** - - function global_allreduce_sum_vector_dbl(vector, dist) & - result(globalSums) - -! Computes the global sums of sets of scalars (elements of 'vector') -! distributed across a parallel machine. -! -! This is actually the specific interface for the generic global_allreduce_sum -! function corresponding to double precision vectors. The generic -! interface is identical but will handle real and integer vectors. - - real (dbl_kind), dimension(:), intent(in) :: & - vector ! vector whose components are to be summed - - type (distrb), intent(in) :: & - dist ! block distribution - - real (dbl_kind), dimension(size(vector)) :: & - globalSums ! resulting array of global sums - -!----------------------------------------------------------------------- -! -! local variables -! -!----------------------------------------------------------------------- - - integer (int_kind) :: & - numProcs, &! number of processor participating - numBlocks, &! number of local blocks - communicator, &! communicator for this distribution - numElem ! number of elements in vector - - real (dbl_kind), dimension(:,:), allocatable :: & - work ! temporary local array - - character(len=*), parameter :: subname = '(global_allreduce_sum_vector_dbl)' - -!----------------------------------------------------------------------- -! -! get communicator for MPI calls -! -!----------------------------------------------------------------------- - - call ice_distributionGet(dist, & - numLocalBlocks = numBlocks, & - nprocs = numProcs, & - communicator = communicator) - - numElem = size(vector) - allocate(work(1,numElem)) - work(1,:) = vector - globalSums = c0 - - call compute_sums_dbl(work,globalSums,communicator,numProcs) - - deallocate(work) - -!----------------------------------------------------------------------- - - end function global_allreduce_sum_vector_dbl - !*********************************************************************** function global_sum_prod_dbl (array1, array2, dist, field_loc, & diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 index cafe4dc05..f10a9f432 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 @@ -68,6 +68,7 @@ module ice_boundary module procedure ice_HaloUpdate2DR8, & ice_HaloUpdate2DR4, & ice_HaloUpdate2DI4, & + ice_HaloUpdate2DL1, & ice_HaloUpdate3DR8, & ice_HaloUpdate3DR4, & ice_HaloUpdate3DI4, & @@ -1541,6 +1542,69 @@ subroutine ice_HaloUpdate2DI4(array, halo, & end subroutine ice_HaloUpdate2DI4 +!*********************************************************************** + + subroutine ice_HaloUpdate2DL1(array, halo, & + fieldLoc, fieldKind, & + fillValue) + +! This routine updates ghost cells for an input array and is a +! member of a group of routines under the generic interface +! ice\_HaloUpdate. This routine is the specific interface +! for 2d horizontal logical arrays. + + type (ice_halo), intent(in) :: & + halo ! precomputed halo structure containing all + ! information needed for halo update + + integer (int_kind), intent(in) :: & + fieldKind, &! id for type of field (scalar, vector, angle) + fieldLoc ! 
id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + integer (int_kind), intent(in), optional :: & + fillValue ! optional value to put in ghost cells + ! where neighbor points are unknown + ! (e.g. eliminated land blocks or + ! closed boundaries) + + logical (log_kind), dimension(:,:,:), intent(inout) :: & + array ! array containing field for which halo + ! needs to be updated + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind), dimension(:,:,:), allocatable :: & + iarray ! integer array for logical + + character(len=*), parameter :: subname = '(ice_HaloUpdate2DL1)' + +!----------------------------------------------------------------------- +! +! copy logical into integer array and call haloupdate on integer array +! +!----------------------------------------------------------------------- + + allocate(iarray(size(array,dim=1),size(array,dim=2),size(array,dim=3))) + iarray(:,:,:) = 0 + where (array) iarray = 1 + + call ice_HaloUpdate(iarray, halo, & + fieldLoc, fieldKind, & + fillValue) + + array = .false. + where (iarray /= 0) array = .true. + deallocate(iarray) + +!----------------------------------------------------------------------- + + end subroutine ice_HaloUpdate2DL1 + !*********************************************************************** subroutine ice_HaloUpdate3DR8(array, halo, & diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 index e859ea2bd..5fcd45876 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 @@ -37,7 +37,6 @@ module ice_global_reductions private public :: global_sum, & - global_allreduce_sum, & global_sum_prod, & global_maxval, & global_minval @@ -57,12 +56,6 @@ module ice_global_reductions global_sum_scalar_int end interface - interface global_allreduce_sum - module procedure global_allreduce_sum_vector_dbl!, & - ! module procedure global_allreduce_sum_vector_real, & ! not yet implemented - ! module procedure global_allreduce_sum_vector_int ! not yet implemented - end interface - interface global_sum_prod module procedure global_sum_prod_dbl, & global_sum_prod_real, & @@ -708,68 +701,6 @@ function global_sum_scalar_int(scalar, dist) & end function global_sum_scalar_int -!*********************************************************************** - - function global_allreduce_sum_vector_dbl(vector, dist) & - result(globalSums) - -! Computes the global sums of sets of scalars (elements of 'vector') -! distributed across a parallel machine. -! -! This is actually the specific interface for the generic global_allreduce_sum -! function corresponding to double precision vectors. The generic -! interface is identical but will handle real and integer vectors. - - real (dbl_kind), dimension(:), intent(in) :: & - vector ! vector whose components are to be summed - - type (distrb), intent(in) :: & - dist ! block distribution - - real (dbl_kind), dimension(size(vector)) :: & - globalSums ! resulting array of global sums - -!----------------------------------------------------------------------- -! -! local variables -! -!----------------------------------------------------------------------- - - integer (int_kind) :: & - numProcs, &! number of processor participating - numBlocks, &! 
number of local blocks - communicator, &! communicator for this distribution - numElem ! number of elements in vector - - real (dbl_kind), dimension(:,:), allocatable :: & - work ! temporary local array - - character(len=*), parameter :: subname = '(global_allreduce_sum_vector_dbl)' - -!----------------------------------------------------------------------- -! -! get communicator for MPI calls -! -!----------------------------------------------------------------------- - - call ice_distributionGet(dist, & - numLocalBlocks = numBlocks, & - nprocs = numProcs, & - communicator = communicator) - - numElem = size(vector) - allocate(work(1,numElem)) - work(1,:) = vector - globalSums = c0 - - call compute_sums_dbl(work,globalSums,communicator,numProcs) - - deallocate(work) - -!----------------------------------------------------------------------- - - end function global_allreduce_sum_vector_dbl - !*********************************************************************** function global_sum_prod_dbl (array1, array2, dist, field_loc, & diff --git a/cicecore/cicedynB/infrastructure/ice_domain.F90 b/cicecore/cicedynB/infrastructure/ice_domain.F90 index ac56356e5..10254aa93 100644 --- a/cicecore/cicedynB/infrastructure/ice_domain.F90 +++ b/cicecore/cicedynB/infrastructure/ice_domain.F90 @@ -234,6 +234,7 @@ subroutine init_domain_blocks #if (defined CESMCOUPLED) nprocs = get_num_procs() #else + write(nu_diag,*) subname,'ERROR: nprocs, get_num_procs = ',nprocs,get_num_procs() call abort_ice(subname//'ERROR: Input nprocs not same as system request') #endif else if (nghost < 1) then diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 523c7ea2c..dfccdd413 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -129,6 +129,15 @@ module ice_grid dxrect, & ! user_specified spacing (cm) in x-direction (uniform HTN) dyrect ! user_specified spacing (cm) in y-direction (uniform HTE) + ! growth factor for variable spaced grid + real (kind=dbl_kind), public :: & + dxscale, & ! scale factor for grid spacing in x direction (e.g., 1.02) + dyscale ! scale factor for grid spacing in y direction (e.g., 1.02) + + real (kind=dbl_kind), public :: & + lonrefrect, & ! lower left lon for rectgrid + latrefrect ! lower left lat for rectgrid + ! Corners of grid boxes for history output real (kind=dbl_kind), dimension (:,:,:,:), allocatable, public :: & lont_bounds, & ! longitude of gridbox corners for T point @@ -171,7 +180,8 @@ module ice_grid logical (kind=log_kind), public :: & use_bathymetry, & ! flag for reading in bathymetry_file - pgl_global_ext ! flag for init primary grid lengths (global ext.) + pgl_global_ext, & ! flag for init primary grid lengths (global ext.) + scale_dxdy ! flag to apply scale factor to vary dx/dy in rectgrid logical (kind=log_kind), dimension (:,:,:), allocatable, public :: & tmask , & ! land/boundary mask, thickness (T-cell) @@ -182,11 +192,6 @@ module ice_grid lmask_n, & ! northern hemisphere mask lmask_s ! southern hemisphere mask - logical (kind=log_kind), dimension (:,:,:), allocatable, public :: & - iceumask, & ! ice extent mask (U-cell) - icenmask, & ! ice extent mask (N-cell) - iceemask ! ice extent mask (E-cell) - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & rndex_global ! global index for local subdomain (dbl) @@ -267,7 +272,6 @@ subroutine alloc_grid umaskCD (nx_block,ny_block,max_blocks), & ! 
land/boundary mask, velocity (U-cell) nmask (nx_block,ny_block,max_blocks), & ! land/boundary mask (N-cell) emask (nx_block,ny_block,max_blocks), & ! land/boundary mask (E-cell) - iceumask (nx_block,ny_block,max_blocks), & ! u mask for dynamics lmask_n (nx_block,ny_block,max_blocks), & ! northern hemisphere mask lmask_s (nx_block,ny_block,max_blocks), & ! southern hemisphere mask rndex_global(nx_block,ny_block,max_blocks), & ! global index for local subdomain (dbl) @@ -288,8 +292,6 @@ subroutine alloc_grid if (grid_ice == 'CD' .or. grid_ice == 'C') then allocate( & - iceemask (nx_block,ny_block,max_blocks), & ! e mask for dynamics - icenmask (nx_block,ny_block,max_blocks), & ! n mask for dynamics ratiodxN (nx_block,ny_block,max_blocks), & ratiodyE (nx_block,ny_block,max_blocks), & ratiodxNr(nx_block,ny_block,max_blocks), & @@ -475,6 +477,8 @@ subroutine init_grid2 ! lat, lon, cell widths, angle, land mask !----------------------------------------------------------------- + l_readCenter = .false. + call icepack_query_parameters(pi_out=pi, pi2_out=pi2, puny_out=puny) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & @@ -945,7 +949,6 @@ subroutine popgrid_nc call ice_open_nc(kmt_file,fid_kmt) diag = .true. ! write diagnostic info - l_readCenter = .false. !----------------------------------------------------------------- ! topography !----------------------------------------------------------------- @@ -1360,7 +1363,7 @@ subroutine rectgrid imid, jmid real (kind=dbl_kind) :: & - length, & + length, & rad_to_deg real (kind=dbl_kind), dimension(:,:), allocatable :: & work_g1 @@ -1383,69 +1386,71 @@ subroutine rectgrid allocate(work_g1(nx_global,ny_global)) - ! Weddell Sea - ! lower left corner of grid is 55W, 75S - - ! Barrow AK - ! lower left corner of grid is 156.5W, 71.35N - - if (my_task == master_task) then - work_g1 = c0 - length = dxrect*cm_to_m/radius*rad_to_deg - -! work_g1(1,:) = -55._dbl_kind ! Weddell Sea - work_g1(1,:) = -156.5_dbl_kind ! Barrow AK - - do j = 1, ny_global - do i = 2, nx_global - work_g1(i,j) = work_g1(i-1,j) + length ! ULON - enddo - enddo - work_g1(:,:) = work_g1(:,:) / rad_to_deg - endif - call gridbox_verts(work_g1,lont_bounds) - call scatter_global(ULON, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloExtrapolate(ULON, distrb_info, & - ew_boundary_type, ns_boundary_type) - - if (my_task == master_task) then - work_g1 = c0 - length = dyrect*cm_to_m/radius*rad_to_deg - -! work_g1(:,1) = -75._dbl_kind ! Weddell Sea - work_g1(:,1) = 71.35_dbl_kind ! Barrow AK + if (scale_dxdy) then + ! scale grid spacing from center outward. + ! this differs from the original method in that it + ! needs to define grid spacing before lat/lon; + ! the original rectgrid defines lat/lon first + call rectgrid_scale_dxdy + else + ! rectgrid with no grid spacing scaling: + ! the original method, extended to use the namelist lat/lon reference + + if (my_task == master_task) then + work_g1 = c0 + length = dxrect*cm_to_m/radius*rad_to_deg + + work_g1(1,:) = lonrefrect ! reference lon from namelist + + do j = 1, ny_global + do i = 2, nx_global + work_g1(i,j) = work_g1(i-1,j) + length ! 
ULON + enddo + enddo + work_g1(:,:) = work_g1(:,:) / rad_to_deg + endif + call scatter_global(ULON, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULON, distrb_info, & + ew_boundary_type, ns_boundary_type) + + if (my_task == master_task) then + work_g1 = c0 + length = dyrect*cm_to_m/radius*rad_to_deg + + work_g1(:,1) = latrefrect ! reference latitude from namelist + + do i = 1, nx_global + do j = 2, ny_global + work_g1(i,j) = work_g1(i,j-1) + length ! ULAT + enddo + enddo + work_g1(:,:) = work_g1(:,:) / rad_to_deg + endif + call scatter_global(ULAT, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULAT, distrb_info, & + ew_boundary_type, ns_boundary_type) - do i = 1, nx_global - do j = 2, ny_global - work_g1(i,j) = work_g1(i,j-1) + length ! ULAT - enddo - enddo - work_g1(:,:) = work_g1(:,:) / rad_to_deg - endif - call gridbox_verts(work_g1,latt_bounds) - call scatter_global(ULAT, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloExtrapolate(ULAT, distrb_info, & - ew_boundary_type, ns_boundary_type) + if (my_task == master_task) then + do j = 1, ny_global + do i = 1, nx_global + work_g1(i,j) = dxrect ! HTN + enddo + enddo + endif + call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE - if (my_task == master_task) then - do j = 1, ny_global - do i = 1, nx_global - work_g1(i,j) = dxrect ! HTN - enddo - enddo - endif - call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE + if (my_task == master_task) then + do j = 1, ny_global + do i = 1, nx_global + work_g1(i,j) = dyrect ! HTE + enddo + enddo + endif + call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE - if (my_task == master_task) then - do j = 1, ny_global - do i = 1, nx_global - work_g1(i,j) = dyrect ! HTE - enddo - enddo - endif - call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE + endif ! scale_dxdy !----------------------------------------------------------------- ! Construct T-cell land mask @@ -1526,6 +1531,168 @@ subroutine rectgrid end subroutine rectgrid +!======================================================================= + + subroutine rectgrid_scale_dxdy + + ! generate a variable spaced rectangular grid. + ! extend spacing from center of grid outward. use ice_constants, only: c0, c1, c2, radius, cm_to_m, & field_loc_center, field_loc_NEcorner, field_type_scalar + integer (kind=int_kind) :: & + i, j, iblk, & + imid, jmid, & + center1, center2 ! array centers for expanding dx, dy + + real (kind=dbl_kind) :: & + length, & + rad_to_deg + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1 + + character(len=*), parameter :: subname = '(rectgrid_scale_dxdy)' + + call icepack_query_parameters(rad_to_deg_out=rad_to_deg) + + allocate(work_g1(nx_global,ny_global)) + + ! determine dx spacing + ! strategy: initialize with dxrect. + ! to scale the grid, work from the center outward, + ! multiplying the neighbor cell by the scale factor. + ! this assumes dx varies in x direction only. + ! (i.e., for a given x, dx is the same at every y) + if (my_task == master_task) then + + ! initialize with initial dxrect + work_g1(:,:) = dxrect + + ! check if nx is even or odd + ! if even, middle 2 columns are center + ! if odd, middle 1 column is center + if (mod(nx_global,2) == 0) then ! nx_global is even + + ! with an even number of x locations, + ! the middle two columns are the center + center1 = nx_global/2 ! integer math + center2 = center1 + 1 ! 
integer math + else ! nx_global = odd + ! only one center index. set center2=center1 + center1 = (nx_global+1)/2 ! integer math + center2 = center1 + endif + + ! note loop over only half the x grid points (center1)-1 + ! working from the center outward. + do j = 1, ny_global + do i = 1, center1-1 + ! work from center1 to left + work_g1(center1-i,j) = dxscale*work_g1(center1-i+1,j) + + ! work from center2 to right + work_g1(center2+i,j) = dxscale*work_g1(center2+i-1,j) + enddo ! i + enddo ! j + + endif ! my_task == master_task + + + ! note work_g1 is converted to meters in primary_grid_lengths_HTN + call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE + + ! make ULON array + if (my_task == master_task) then + + ! make first column reference lon in radians. + ! the remaining work_g1 is still dx in meters + work_g1(1,:) = lonrefrect/rad_to_deg ! radians + + ! loop over remaining points and add spacing to successive + ! x locations + do j = 1, ny_global + do i = 2, nx_global ! start from i=2. i=1 is lonrefrect + length = work_g1(i,j)/radius ! grid spacing in radians + work_g1(i,j) = work_g1(i-1,j) + length ! ULON + enddo ! i + enddo ! j + endif ! mytask == master_task + call scatter_global(ULON, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULON, distrb_info, & + ew_boundary_type, ns_boundary_type) + + ! determine dy spacing + ! strategy: initialize with dyrect. + ! to scale the grid, work from the center outward, + ! multiplying the neighbor cell by the scale factor. + ! this assumes dy varies in y direction only. + ! (i.e., for a given y, dy is the same at every x) + if (my_task == master_task) then + + ! initialize with initial dyrect + work_g1(:,:) = dyrect + + ! check if ny is even or odd + ! if even, middle 2 rows are center + ! if odd, middle 1 row is center + if (mod(ny_global,2) == 0) then ! ny_global is even + + ! with an even number of y locations, + ! the middle two rows are the center + center1 = ny_global/2 ! integer math + center2 = center1 + 1 ! integer math + + else ! ny_global = odd + ! only one center index. set center2=center1 + center1 = (ny_global+1)/2 ! integer math + center2 = center1 + endif + + ! note loop over only half the y grid points (center1)-1 + ! working from the center outward. + do i = 1, nx_global + do j = 1, center1-1 + ! work from center1 to bottom + work_g1(i,center1-j) = dyscale*work_g1(i,center1-j+1) + + ! work from center2 to top + work_g1(i,center2+j) = dyscale*work_g1(i,center2+j-1) + enddo ! j + enddo ! i + endif ! mytask == master_task + ! note work_g1 is converted to meters in primary_grid_lengths_HTE + call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE + + ! make ULAT array + if (my_task == master_task) then + + ! make first row reference lat in radians. + ! the remaining work_g1 is still dy in meters + work_g1(:,1) = latrefrect/rad_to_deg ! radians + + + ! loop over remaining points and add spacing to successive + ! y locations + do j = 2, ny_global ! start from j=2. j=1 is latrefrect + do i = 1, nx_global + length = work_g1(i,j)/radius ! grid spacing in radians + work_g1(i,j) = work_g1(i,j-1) + length ! ULAT + enddo ! i + enddo ! j + endif ! 
mytask == master_task + call scatter_global(ULAT, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULAT, distrb_info, & + ew_boundary_type, ns_boundary_type) + + + deallocate(work_g1) + + end subroutine rectgrid_scale_dxdy + !======================================================================= ! Complex land mask for testing box cases @@ -2513,13 +2680,23 @@ subroutine grid_average_X2Y_NEversion(type,work1a,grid1a,work1b,grid1b,work2,gri ! state masked case('NE2US') - call grid_average_X2YS_2('NE2US',work1a,narea,npm,work1b,earea,epm,work2) + call grid_average_X2Y_2('NE2US',work1a,narea,npm,work1b,earea,epm,work2) case('EN2US') - call grid_average_X2YS_2('NE2US',work1b,narea,npm,work1a,earea,epm,work2) + call grid_average_X2Y_2('NE2US',work1b,narea,npm,work1a,earea,epm,work2) case('NE2TS') - call grid_average_X2YS_2('NE2TS',work1a,narea,npm,work1b,earea,epm,work2) + call grid_average_X2Y_2('NE2TS',work1a,narea,npm,work1b,earea,epm,work2) case('EN2TS') - call grid_average_X2YS_2('NE2TS',work1b,narea,npm,work1a,earea,epm,work2) + call grid_average_X2Y_2('NE2TS',work1b,narea,npm,work1a,earea,epm,work2) + + ! state unmasked + case('NE2UA') + call grid_average_X2Y_2('NE2UA',work1a,narea,npm,work1b,earea,epm,work2) + case('EN2UA') + call grid_average_X2Y_2('NE2UA',work1b,narea,npm,work1a,earea,epm,work2) + case('NE2TA') + call grid_average_X2Y_2('NE2TA',work1a,narea,npm,work1b,earea,epm,work2) + case('EN2TA') + call grid_average_X2Y_2('NE2TA',work1b,narea,npm,work1a,earea,epm,work2) case default call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) @@ -3413,36 +3590,6 @@ subroutine grid_average_X2YF(dir,work1,wght1,work2,wght2) end subroutine grid_average_X2YF -!======================================================================= -! Compute the minimum of adjacent values of a field at specific indices, -! depending on the grid location (U, E, N) -! - real(kind=dbl_kind) function grid_neighbor_min(field, i, j, grid_location) result(mini) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - field ! field defined at T point - - integer (kind=int_kind), intent(in) :: & - i, j - - character(len=*), intent(in) :: & - grid_location ! grid location at which to compute the minumum (U, E, N) - - character(len=*), parameter :: subname = '(grid_neighbor_min)' - - select case (trim(grid_location)) - case('U') - mini = min(field(i,j), field(i+1,j), field(i,j+1), field(i+1,j+1)) - case('E') - mini = min(field(i,j), field(i+1,j)) - case('N') - mini = min(field(i,j), field(i,j+1)) - case default - call abort_ice(subname // ' unknown grid_location: ' // grid_location) - end select - - end function grid_neighbor_min - !======================================================================= ! Shifts quantities from one grid to another ! State masked version, simple weighted averager @@ -3451,7 +3598,7 @@ end function grid_neighbor_min ! ! author: T. Craig - subroutine grid_average_X2YS_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,work2) + subroutine grid_average_X2Y_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,work2) use ice_constants, only: c0 @@ -3478,7 +3625,7 @@ subroutine grid_average_X2YS_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,wor type (block) :: & this_block ! 
block information for current block - character(len=*), parameter :: subname = '(grid_average_X2YS_2)' + character(len=*), parameter :: subname = '(grid_average_X2Y_2)' work2(:,:,:) = c0 @@ -3534,11 +3681,91 @@ subroutine grid_average_X2YS_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,wor enddo !$OMP END PARALLEL DO + case('NE2UA') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1a(i ,j ,iblk) & + + wght1a(i+1,j ,iblk) & + + wght1b(i ,j ,iblk) & + + wght1b(i ,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + work1a(i+1,j ,iblk)*wght1a(i+1,j ,iblk) & + + work1b(i ,j ,iblk)*wght1b(i ,j ,iblk) & + + work1b(i ,j+1,iblk)*wght1b(i ,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('NE2TA') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1a(i ,j-1,iblk) & + + wght1a(i ,j ,iblk) & + + wght1b(i-1,j ,iblk) & + + wght1b(i ,j ,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1a(i ,j-1,iblk)*wght1a(i ,j-1,iblk) & + + work1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + work1b(i-1,j ,iblk)*wght1b(i-1,j ,iblk) & + + work1b(i ,j ,iblk)*wght1b(i ,j ,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + case default call abort_ice(subname//'ERROR: unknown option '//trim(dir)) end select - end subroutine grid_average_X2YS_2 + end subroutine grid_average_X2Y_2 + +!======================================================================= +! Compute the minimum of adjacent values of a field at specific indices, +! depending on the grid location (U, E, N) +! + real(kind=dbl_kind) function grid_neighbor_min(field, i, j, grid_location) result(mini) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + field ! field defined at T point + + integer (kind=int_kind), intent(in) :: & + i, j + + character(len=*), intent(in) :: & + grid_location ! grid location at which to compute the minimum (U, E, N) + + character(len=*), parameter :: subname = '(grid_neighbor_min)' + + select case (trim(grid_location)) + case('U') + mini = min(field(i,j), field(i+1,j), field(i,j+1), field(i+1,j+1)) + case('E') + mini = min(field(i,j), field(i+1,j)) + case('N') + mini = min(field(i,j), field(i,j+1)) + case default + call abort_ice(subname // ' unknown grid_location: ' // grid_location) + end select + + end function grid_neighbor_min !======================================================================= ! 
Compute the maximum of adjacent values of a field at specific indices, diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index 64b8d2101..bd5a49eaf 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -55,15 +55,16 @@ subroutine dumpfile(filename_spec) use ice_blocks, only: nx_block, ny_block use ice_domain, only: nblocks use ice_domain_size, only: nilyr, nslyr, ncat, max_blocks + use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & - strocnxT, strocnyT, sst, frzmlt, & + strocnxT_iavg, strocnyT_iavg, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4, & stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_flux, only: coszen - use ice_grid, only: grid_ice, tmask, iceumask, iceemask, icenmask + use ice_grid, only: grid_ice, tmask use ice_state, only: aicen, vicen, vsnon, trcrn, uvel, vvel, & uvelE, vvelE, uvelN, vvelN @@ -175,8 +176,8 @@ subroutine dumpfile(filename_spec) !----------------------------------------------------------------- ! ocean stress (for bottom heat flux in thermo) !----------------------------------------------------------------- - call write_restart_field(nu_dump,0,strocnxT,'ruf8','strocnxT',1,diag) - call write_restart_field(nu_dump,0,strocnyT,'ruf8','strocnyT',1,diag) + call write_restart_field(nu_dump,0,strocnxT_iavg,'ruf8','strocnxT',1,diag) + call write_restart_field(nu_dump,0,strocnyT_iavg,'ruf8','strocnyT',1,diag) !----------------------------------------------------------------- ! internal stress @@ -220,7 +221,7 @@ subroutine dumpfile(filename_spec) do j = 1, ny_block do i = 1, nx_block work1(i,j,iblk) = c0 - if (iceumask(i,j,iblk)) work1(i,j,iblk) = c1 + if (iceUmask(i,j,iblk)) work1(i,j,iblk) = c1 enddo enddo enddo @@ -234,7 +235,7 @@ subroutine dumpfile(filename_spec) do j = 1, ny_block do i = 1, nx_block work1(i,j,iblk) = c0 - if (icenmask(i,j,iblk)) work1(i,j,iblk) = c1 + if (iceNmask(i,j,iblk)) work1(i,j,iblk) = c1 enddo enddo enddo @@ -246,7 +247,7 @@ subroutine dumpfile(filename_spec) do j = 1, ny_block do i = 1, nx_block work1(i,j,iblk) = c0 - if (iceemask(i,j,iblk)) work1(i,j,iblk) = c1 + if (iceEmask(i,j,iblk)) work1(i,j,iblk) = c1 enddo enddo enddo @@ -276,16 +277,16 @@ subroutine restartfile (ice_ic) use ice_domain, only: nblocks, halo_info use ice_domain_size, only: nilyr, nslyr, ncat, & max_blocks + use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & - strocnxT, strocnyT, sst, frzmlt, & + strocnxT_iavg, strocnyT_iavg, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4, & stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_flux, only: coszen - use ice_grid, only: tmask, grid_type, grid_ice, & - iceumask, iceemask, icenmask + use ice_grid, only: tmask, grid_type, grid_ice, grid_average_X2Y use ice_state, only: trcr_depend, aice, vice, vsno, trcr, & aice0, aicen, vicen, vsnon, trcrn, aice_init, uvel, vvel, & uvelE, vvelE, uvelN, vvelN, & @@ -428,9 +429,9 @@ subroutine restartfile (ice_ic) if (my_task == master_task) & write(nu_diag,*) 'min/max ocean stress components' - call read_restart_field(nu_restart,0,strocnxT,'ruf8', & 
+ call read_restart_field(nu_restart,0,strocnxT_iavg,'ruf8', & 'strocnxT',1,diag,field_loc_center, field_type_vector) - call read_restart_field(nu_restart,0,strocnyT,'ruf8', & + call read_restart_field(nu_restart,0,strocnyT_iavg,'ruf8', & 'strocnyT',1,diag,field_loc_center, field_type_vector) !----------------------------------------------------------------- @@ -529,12 +530,12 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,work1,'ruf8', & 'iceumask',1,diag,field_loc_center, field_type_scalar) - iceumask(:,:,:) = .false. + iceUmask(:,:,:) = .false. !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - if (work1(i,j,iblk) > p5) iceumask(i,j,iblk) = .true. + if (work1(i,j,iblk) > p5) iceUmask(i,j,iblk) = .true. enddo enddo enddo @@ -546,12 +547,12 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,work1,'ruf8', & 'icenmask',1,diag,field_loc_center, field_type_scalar) - icenmask(:,:,:) = .false. + iceNmask(:,:,:) = .false. !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - if (work1(i,j,iblk) > p5) icenmask(i,j,iblk) = .true. + if (work1(i,j,iblk) > p5) iceNmask(i,j,iblk) = .true. enddo enddo enddo @@ -562,12 +563,12 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,work1,'ruf8', & 'iceemask',1,diag,field_loc_center, field_type_scalar) - iceemask(:,:,:) = .false. + iceEmask(:,:,:) = .false. !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - if (work1(i,j,iblk) > p5) iceemask(i,j,iblk) = .true. + if (work1(i,j,iblk) > p5) iceEmask(i,j,iblk) = .true. enddo enddo enddo @@ -707,13 +708,14 @@ subroutine restartfile_v4 (ice_ic) use ice_domain, only: nblocks, distrb_info use ice_domain_size, only: nilyr, nslyr, ncat, nx_global, ny_global, & max_blocks + use ice_dyn_shared, only: iceUmask use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & - strocnxT, strocnyT, sst, frzmlt, & + strocnxT_iavg, strocnyT_iavg, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_gather_scatter, only: scatter_global_stress - use ice_grid, only: tmask, iceumask + use ice_grid, only: tmask use ice_read_write, only: ice_open, ice_read, ice_read_global use ice_state, only: trcr_depend, aice, vice, vsno, trcr, & aice0, aicen, vicen, vsnon, trcrn, aice_init, uvel, vvel, & @@ -873,9 +875,9 @@ subroutine restartfile_v4 (ice_ic) if (my_task == master_task) & write(nu_diag,*) 'min/max ocean stress components' - call ice_read(nu_restart,0,strocnxT,'ruf8',diag, & + call ice_read(nu_restart,0,strocnxT_iavg,'ruf8',diag, & field_loc_center, field_type_vector) - call ice_read(nu_restart,0,strocnyT,'ruf8',diag, & + call ice_read(nu_restart,0,strocnyT_iavg,'ruf8',diag, & field_loc_center, field_type_vector) !----------------------------------------------------------------- @@ -942,12 +944,12 @@ subroutine restartfile_v4 (ice_ic) call ice_read(nu_restart,0,work1,'ruf8',diag, & field_loc_center, field_type_scalar) - iceumask(:,:,:) = .false. + iceUmask(:,:,:) = .false. !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - if (work1(i,j,iblk) > p5) iceumask(i,j,iblk) = .true. + if (work1(i,j,iblk) > p5) iceUmask(i,j,iblk) = .true. 
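+ ! the ice extent masks are stored in the restart file as 0/1 real
+ ! fields (see the corresponding loops in dumpfile), so any value
+ ! above p5 (0.5) maps back to a .true. logical entry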
enddo enddo enddo diff --git a/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 index b2b438ebe..3e7abe3a3 100644 --- a/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 @@ -58,11 +58,7 @@ subroutine init_restart_read(ice_ic) logical (kind=log_kind) :: & solve_zsal, tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & -#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow character(len=char_len_long) :: & @@ -87,11 +83,7 @@ subroutine init_restart_read(ice_ic) nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, tr_fsd_out=tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine) call icepack_warnings_flush(nu_diag) @@ -236,28 +228,6 @@ subroutine init_restart_read(ice_ic) endif endif -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - if (my_task == master_task) then - n = index(filename0,trim(restart_file)) - if (n == 0) call abort_ice(subname//'ERROR: pond_cesm restart: filename discrepancy') - string1 = trim(filename0(1:n-1)) - string2 = trim(filename0(n+lenstr(restart_file):lenstr(filename0))) - write(filename,'(a,a,a,a)') & - string1(1:lenstr(string1)), & - restart_file(1:lenstr(restart_file)),'.pond_cesm', & - string2(1:lenstr(string2)) - if (restart_ext) then - call ice_open_ext(nu_restart_pond,filename,0) - else - call ice_open(nu_restart_pond,filename,0) - endif - read (nu_restart_pond) iignore,rignore,rignore - write(nu_diag,*) 'Reading ',filename(1:lenstr(filename)) - endif - endif -#endif - if (tr_pond_lvl) then if (my_task == master_task) then n = index(filename0,trim(restart_file)) @@ -424,11 +394,7 @@ subroutine init_restart_write(filename_spec) logical (kind=log_kind) :: & solve_zsal, tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & -#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow integer (kind=int_kind) :: & @@ -444,11 +410,7 @@ subroutine init_restart_write(filename_spec) nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, tr_fsd_out=tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine) call icepack_warnings_flush(nu_diag) @@ -581,28 +543,6 @@ subroutine init_restart_write(filename_spec) endif -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - - write(filename,'(a,a,a,i4.4,a,i2.2,a,i2.2,a,i5.5)') & - restart_dir(1:lenstr(restart_dir)), & - restart_file(1:lenstr(restart_file)),'.pond_cesm.', & - myear,'-',mmonth,'-',mday,'-',msec - - if (restart_ext) then - call ice_open_ext(nu_dump_pond,filename,0) - else - call ice_open(nu_dump_pond,filename,0) - endif - - if (my_task == master_task) then - write(nu_dump_pond) istep1,timesecs,time_forc - write(nu_diag,*) 'Writing ',filename(1:lenstr(filename)) - endif - - endif -#endif - if (tr_pond_lvl) then 
write(filename,'(a,a,a,i4.4,a,i2.2,a,i2.2,a,i5.5)') & @@ -871,11 +811,7 @@ subroutine final_restart() logical (kind=log_kind) :: & solve_zsal, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & -#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow integer (kind=int_kind) :: & @@ -889,11 +825,7 @@ subroutine final_restart() nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine) call icepack_warnings_flush(nu_diag) @@ -908,9 +840,6 @@ subroutine final_restart() if (tr_iage) close(nu_dump_age) if (tr_FY) close(nu_dump_FY) if (tr_lvl) close(nu_dump_lvl) -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) close(nu_dump_pond) -#endif if (tr_pond_lvl) close(nu_dump_pond) if (tr_pond_topo) close(nu_dump_pond) if (tr_snow) close(nu_dump_snow) diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 index f647bd96b..ed49a48f5 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 @@ -145,11 +145,7 @@ subroutine init_restart_write(filename_spec) logical (kind=log_kind) :: & solve_zsal, skl_bgc, z_tracers, tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & -#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow, & tr_bgc_N, tr_bgc_C, tr_bgc_Nit, & tr_bgc_Sil, tr_bgc_DMS, & @@ -185,11 +181,7 @@ subroutine init_restart_write(filename_spec) nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, tr_fsd_out=tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine, & tr_bgc_N_out=tr_bgc_N, tr_bgc_C_out=tr_bgc_C, tr_bgc_Nit_out=tr_bgc_Nit, & @@ -416,13 +408,6 @@ subroutine init_restart_write(filename_spec) call define_rest_field(ncid,'vlvl',dims) end if -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - call define_rest_field(ncid,'apnd',dims) - call define_rest_field(ncid,'hpnd',dims) - end if -#endif - if (tr_pond_topo) then call define_rest_field(ncid,'apnd',dims) call define_rest_field(ncid,'hpnd',dims) diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index 1124cc048..679a2b6e6 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -151,11 +151,7 @@ subroutine init_restart_write(filename_spec) solve_zsal, skl_bgc, z_tracers logical (kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & -#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow, & tr_bgc_N, tr_bgc_C, tr_bgc_Nit, & tr_bgc_Sil, tr_bgc_DMS, & @@ -191,11 +187,7 @@ subroutine init_restart_write(filename_spec) call 
icepack_query_tracer_sizes(nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine, & tr_bgc_N_out=tr_bgc_N, tr_bgc_C_out=tr_bgc_C, tr_bgc_Nit_out=tr_bgc_Nit, & @@ -420,13 +412,6 @@ subroutine init_restart_write(filename_spec) call define_rest_field(File,'vlvl',dims) end if -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - call define_rest_field(File,'apnd',dims) - call define_rest_field(File,'hpnd',dims) - end if -#endif - if (tr_pond_topo) then call define_rest_field(File,'apnd',dims) call define_rest_field(File,'hpnd',dims) diff --git a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 index 87dc8d9a1..0b8ed689e 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 @@ -235,17 +235,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_fsd, read_restart_fsd, & @@ -260,11 +253,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_fsd, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -283,11 +272,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_fsd_out=tr_fsd) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -346,21 +331,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! 
level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 index 91f7985bd..b67e1a223 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 @@ -145,11 +145,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_bgc, write_restart_hbrine use ice_restart_driver, only: dumpfile @@ -172,11 +168,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -185,11 +177,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_fsd_out=tr_fsd) call icepack_warnings_flush(nu_diag) @@ -333,9 +321,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_fsd) call write_restart_fsd diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 index 87dc8d9a1..0b8ed689e 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 @@ -235,17 +235,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_fsd, read_restart_fsd, & @@ -260,11 +253,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! 
block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_fsd, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -283,11 +272,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_fsd_out=tr_fsd) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -346,21 +331,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 index ea6a65165..c9875d769 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 @@ -145,11 +145,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_bgc, write_restart_hbrine use ice_restart_driver, only: dumpfile @@ -172,11 +168,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -185,11 +177,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_fsd_out=tr_fsd) call icepack_warnings_flush(nu_diag) @@ -333,9 +321,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_fsd) call write_restart_fsd diff --git a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 index cfc5bece9..a8bf96ad2 100644 --- 
a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 @@ -264,17 +264,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, init_snowtracers, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_snow, read_restart_snow, & @@ -291,11 +284,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_snow, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -315,11 +304,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_snow_out=tr_snow, tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -381,21 +366,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! 
level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 index b96086c6d..5836479b4 100644 --- a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 @@ -137,11 +137,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_iso, write_restart_bgc, write_restart_hbrine, & write_restart_snow @@ -166,11 +162,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, tr_snow, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -188,11 +180,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_iso_out=tr_iso, tr_fsd_out=tr_fsd, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) @@ -403,9 +391,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_snow) call write_restart_snow diff --git a/cicecore/drivers/mct/cesm1/ice_import_export.F90 b/cicecore/drivers/mct/cesm1/ice_import_export.F90 index 7ac4f0bb7..868ed42b4 100644 --- a/cicecore/drivers/mct/cesm1/ice_import_export.F90 +++ b/cicecore/drivers/mct/cesm1/ice_import_export.F90 @@ -9,7 +9,7 @@ module ice_import_export use ice_constants , only: field_type_vector, c100 use ice_constants , only: p001, p5 use ice_blocks , only: block, get_block, nx_block, ny_block - use ice_flux , only: strairxT, strairyT, strocnxT, strocnyT + use ice_flux , only: strairxT, strairyT, strocnxT_iavg, strocnyT_iavg use ice_flux , only: alvdr, alidr, alvdf, alidf, Tref, Qref, Uref use ice_flux , only: flat, fsens, flwout, evap, fswabs, fhocn, fswthru use ice_flux , only: fresh, fsalt, zlvl, uatm, vatm, potT, Tair, Qa @@ -571,8 +571,8 @@ subroutine ice_export( i2x ) + workx*sin(ANGLET(i,j,iblk)) ! ice/ocean stress (on POP T-grid: convert to lat-lon) - workx = -strocnxT(i,j,iblk) ! N/m^2 - worky = -strocnyT(i,j,iblk) ! N/m^2 + workx = -strocnxT_iavg(i,j,iblk) ! N/m^2 + worky = -strocnyT_iavg(i,j,iblk) ! 
N/m^2 tauxo(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & - worky*sin(ANGLET(i,j,iblk)) tauyo(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) & diff --git a/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 b/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 index 78b7d15c4..599249083 100644 --- a/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 +++ b/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 @@ -585,11 +585,11 @@ subroutine ice_prescribed_phys ! set non-computed fluxes, ice velocities, ice-ocn stresses to zero !-------------------------------------------------------------------- - frzmlt (:,:,:) = c0 - uvel (:,:,:) = c0 - vvel (:,:,:) = c0 - strocnxT (:,:,:) = c0 - strocnyT (:,:,:) = c0 + frzmlt (:,:,:) = c0 + uvel (:,:,:) = c0 + vvel (:,:,:) = c0 + strocnxT_iavg(:,:,:) = c0 + strocnyT_iavg(:,:,:) = c0 !----------------------------------------------------------------- ! other atm and ocn fluxes diff --git a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 index 3d5e5cc2a..5fbde9cce 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 @@ -222,17 +222,10 @@ subroutine init_restart() use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, init_snowtracers, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_snow, read_restart_snow, & @@ -249,11 +242,7 @@ subroutine init_restart() i, j , & ! horizontal indices iblk ! block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, tr_snow, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -274,11 +263,7 @@ subroutine init_restart() call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_snow_out=tr_snow, tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -340,21 +325,6 @@ subroutine init_restart() enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! 
level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 index c68583648..e908f509f 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 @@ -121,11 +121,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_iso, write_restart_bgc, write_restart_hbrine, & write_restart_snow @@ -150,11 +146,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, tr_snow, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -172,11 +164,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_iso_out=tr_iso, tr_fsd_out=tr_fsd, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) @@ -382,9 +370,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_snow) call write_restart_snow diff --git a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 index 182308973..afdee5590 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 @@ -17,12 +17,13 @@ module ice_comp_nuopc use NUOPC_Model , only : NUOPC_ModelGet, SetVM use ice_constants , only : ice_init_constants, c0 use ice_shr_methods , only : chkerr, state_setscalar, state_getscalar, state_diagnose, alarmInit - use ice_shr_methods , only : set_component_logging, get_component_instance, state_flddebug + use ice_shr_methods , only : get_component_instance, state_flddebug + use ice_import_export , only : ice_import, ice_export, ice_advertise_fields, ice_realize_fields use ice_domain_size , only : nx_global, ny_global use ice_grid , only : grid_type, init_grid2 use ice_communicate , only : init_communicate, my_task, master_task, mpi_comm_ice - use ice_calendar , only : force_restart_now, write_ic + use ice_calendar , only : force_restart_now, write_ic, init_calendar use ice_calendar , only : idate, mday, mmonth, myear, year_init use ice_calendar , only : msec, dt, calendar, calendar_type, nextsw_cday, istep use ice_calendar , only : ice_calendar_noleap, ice_calendar_gregorian @@ -40,7 +41,10 @@ module ice_comp_nuopc #ifdef CESMCOUPLED use shr_const_mod use shr_orb_mod , only : shr_orb_decl, shr_orb_params, SHR_ORB_UNDEF_REAL, 
SHR_ORB_UNDEF_INT - use ice_scam , only : scmlat, scmlon, scol_mask, scol_frac, scol_ni, scol_nj + use ice_scam , only : scmlat, scmlon, scol_mask, scol_frac, scol_ni, scol_nj, scol_area + use nuopc_shr_methods , only : set_component_logging +#else + use ice_shr_methods , only : set_component_logging #endif use ice_timers use CICE_InitMod , only : cice_init1, cice_init2 @@ -87,6 +91,15 @@ module ice_comp_nuopc integer :: dbug = 0 logical :: profile_memory = .false. + logical :: mastertask + integer :: start_ymd ! Start date (YYYYMMDD) + integer :: start_tod ! start time of day (s) + integer :: curr_ymd ! Current date (YYYYMMDD) + integer :: curr_tod ! Current time of day (s) + integer :: stop_ymd ! stop date (YYYYMMDD) + integer :: stop_tod ! stop time of day (sec) + integer :: ref_ymd ! Reference date (YYYYMMDD) + integer :: ref_tod ! reference time of day (s) integer , parameter :: debug_import = 0 ! internal debug level integer , parameter :: debug_export = 0 ! internal debug level character(*), parameter :: modName = "(ice_comp_nuopc)" @@ -201,12 +214,8 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) real(kind=dbl_kind) :: atmiter_conv_driver integer (kind=int_kind) :: natmiter integer (kind=int_kind) :: natmiter_driver - character(len=char_len) :: tfrz_option_driver ! tfrz_option from driver attributes - character(len=char_len) :: tfrz_option ! tfrz_option from cice namelist - integer(int_kind) :: ktherm integer :: localPet integer :: npes - logical :: mastertask type(ESMF_VM) :: vm integer :: lmpicom ! local communicator type(ESMF_Time) :: currTime ! Current time @@ -215,14 +224,6 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) type(ESMF_Time) :: refTime ! Ref time type(ESMF_TimeInterval) :: timeStep ! Model timestep type(ESMF_CalKind_Flag) :: esmf_caltype ! esmf calendar type - integer :: start_ymd ! Start date (YYYYMMDD) - integer :: start_tod ! start time of day (s) - integer :: curr_ymd ! Current date (YYYYMMDD) - integer :: curr_tod ! Current time of day (s) - integer :: stop_ymd ! stop date (YYYYMMDD) - integer :: stop_tod ! stop time of day (sec) - integer :: ref_ymd ! Reference date (YYYYMMDD) - integer :: ref_tod ! reference time of day (s) integer :: yy,mm,dd ! Temporaries for time query integer :: dtime ! time step integer :: shrlogunit ! original log unit @@ -232,12 +233,15 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) integer :: iblk, jblk ! indices integer :: ig, jg ! indices integer :: ilo, ihi, jlo, jhi ! beginning and end of physical domain + character(len=char_len_long) :: diag_filename = 'unset' character(len=char_len_long) :: logmsg character(len=char_len_long) :: single_column_lnd_domainfile real(dbl_kind) :: scol_lon real(dbl_kind) :: scol_lat real(dbl_kind) :: scol_spval + character(len=char_len) :: tfrz_option ! tfrz_option from cice namelist + character(len=char_len) :: tfrz_option_driver ! 
tfrz_option from driver attributes character(len=*), parameter :: subname=trim(modName)//':(InitializeAdvertise) ' !-------------------------------- @@ -541,12 +545,20 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) call NUOPC_CompAttributeGet(gcomp, name='scol_nj', value=cvalue, rc=rc) if (chkerr(rc,__LINE__,u_FILE_u)) return read(cvalue,*) scol_nj + call NUOPC_CompAttributeGet(gcomp, name='scol_area', value=cvalue, rc=rc) + if (chkerr(rc,__LINE__,u_FILE_u)) return + read(cvalue,*) scol_area call ice_mesh_create_scolumn(scmlon, scmlat, ice_mesh, rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return scol_valid = (scol_mask == 1) if (.not. scol_valid) then + ! Read the cice namelist as part of the call to cice_init1 + ! Note that if single_column is true and scol_valid is not - will never get here + call t_startf ('cice_init1') + call cice_init1 + call t_stopf ('cice_init1') ! Advertise fields call ice_advertise_fields(gcomp, importState, exportState, flds_scalar_name, rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return @@ -559,13 +571,18 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) RETURN end if end if - ! Read the cice namelist as part of the call to cice_init1 ! Note that if single_column is true and scol_valid is not - will never get here - call t_startf ('cice_init1') call cice_init1 call t_stopf ('cice_init1') + + !----------------------------------------------------------------- + ! Advertise fields + !----------------------------------------------------------------- + call ice_advertise_fields(gcomp, importState, exportState, flds_scalar_name, rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + ! Form of ocean freezing temperature ! 'minus1p8' = -1.8 C @@ -624,6 +641,12 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) call cice_init1 call t_stopf ('cice_init1') + !----------------------------------------------------------------- + ! Advertise fields + !----------------------------------------------------------------- + call ice_advertise_fields(gcomp, importState, exportState, flds_scalar_name, rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + #endif !---------------------------------------------------------------------------- @@ -675,6 +698,43 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) end if end if + call t_stopf ('cice_init_total') + + end subroutine InitializeAdvertise + + !=============================================================================== + + subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) + + ! Arguments + type(ESMF_GridComp) :: gcomp + type(ESMF_State) :: importState + type(ESMF_State) :: exportState + type(ESMF_Clock) :: clock + integer, intent(out) :: rc + + ! Local variables + integer :: n + integer :: fieldcount + type(ESMF_Field) :: lfield + character(len=char_len_long) :: cvalue + real(dbl_kind) :: scol_lon + real(dbl_kind) :: scol_lat + real(dbl_kind) :: scol_spval + real(dbl_kind), pointer :: fldptr1d(:) + real(dbl_kind), pointer :: fldptr2d(:,:) + integer :: rank + character(len=char_len) :: tfrz_option ! 
tfrz_option from cice namelist + integer(int_kind) :: ktherm + + character(len=char_len_long) :: single_column_lnd_domainfile + character(len=char_len_long) , pointer :: lfieldnamelist(:) => null() + character(len=*), parameter :: subname=trim(modName)//':(InitializeRealize) ' + !-------------------------------- + + rc = ESMF_SUCCESS + if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) + !---------------------------------------------------------------------------- ! Second cice initialization phase -after initializing grid info !---------------------------------------------------------------------------- @@ -685,29 +745,6 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) call t_startf ('cice_init2') call cice_init2() call t_stopf ('cice_init2') - - !---------------------------------------------------------------------------- - ! reset shr logging to my log file - !---------------------------------------------------------------------------- - - call icepack_query_parameters(ktherm_out=ktherm) - call icepack_query_parameters(tfrz_option_out=tfrz_option) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - ! Now write output to nu_diag - this must happen AFTER call to cice_init - if (mastertask) then - write(nu_diag,'(a,d21.14)') trim(subname)//' cice init nextsw_cday = ',nextsw_cday - write(nu_diag,'(a)') trim(subname)//' tfrz_option = '//trim(tfrz_option) - if (ktherm == 2 .and. trim(tfrz_option) /= 'mushy') then - write(nu_diag,*) trim(subname),' Warning: Using ktherm = 2 and tfrz_option = ', trim(tfrz_option) - endif - write(nu_diag,'(a )') trim(subname)//' inst_name = '//trim(inst_name) - write(nu_diag,'(a,i8 )') trim(subname)//' inst_index = ',inst_index - write(nu_diag,'(a )') trim(subname)//' inst_suffix = ',trim(inst_suffix) - endif - !--------------------------------------------------------------------------- ! use EClock to reset calendar information on initial start !--------------------------------------------------------------------------- @@ -758,6 +795,30 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) end if call calendar() ! update calendar info + + !---------------------------------------------------------------------------- + ! reset shr logging to my log file + !---------------------------------------------------------------------------- + + call icepack_query_parameters(ktherm_out=ktherm) + call icepack_query_parameters(tfrz_option_out=tfrz_option) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + ! Now write output to nu_diag - this must happen AFTER call to cice_init + if (mastertask) then + write(nu_diag,'(a,d21.14)') trim(subname)//' cice init nextsw_cday = ',nextsw_cday + write(nu_diag,'(a)') trim(subname)//' tfrz_option = '//trim(tfrz_option) + if (ktherm == 2 .and. trim(tfrz_option) /= 'mushy') then + write(nu_diag,*) trim(subname),' Warning: Using ktherm = 2 and tfrz_option = ', trim(tfrz_option) + endif + write(nu_diag,'(a )') trim(subname)//' inst_name = '//trim(inst_name) + write(nu_diag,'(a,i8 )') trim(subname)//' inst_index = ',inst_index + write(nu_diag,'(a )') trim(subname)//' inst_suffix = ',trim(inst_suffix) + endif + + if (write_ic) then call accum_hist(dt) ! 
write initial conditions end if @@ -769,50 +830,6 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) call ice_prescribed_init(clock, ice_mesh, rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return - !----------------------------------------------------------------- - ! Advertise fields - !----------------------------------------------------------------- - - ! NOTE: the advertise phase needs to be called after the ice - ! initialization since the number of ice categories is needed for - ! ice_fraction_n and mean_sw_pen_to_ocn_ifrac_n - call ice_advertise_fields(gcomp, importState, exportState, flds_scalar_name, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call t_stopf ('cice_init_total') - - end subroutine InitializeAdvertise - - !=============================================================================== - - subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) - - ! Arguments - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState - type(ESMF_State) :: exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! Local variables - integer :: n - integer :: fieldcount - type(ESMF_Field) :: lfield - character(len=char_len_long) :: cvalue - real(dbl_kind) :: scol_lon - real(dbl_kind) :: scol_lat - real(dbl_kind) :: scol_spval - real(dbl_kind), pointer :: fldptr1d(:) - real(dbl_kind), pointer :: fldptr2d(:,:) - integer :: rank - character(len=char_len_long) :: single_column_lnd_domainfile - character(len=char_len_long) , pointer :: lfieldnamelist(:) => null() - character(len=*), parameter :: subname=trim(modName)//':(InitializeRealize) ' - !-------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - #ifdef CESMCOUPLED ! if single column is not valid - set all export state fields to zero and return if (single_column .and. .not. scol_valid) then @@ -848,7 +865,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) ! *** RETURN HERE *** ! 
******************* RETURN - else + else if(single_column) then write(nu_diag,'(a,3(f10.5,2x))')' (ice_comp_nuopc) single column mode lon/lat/frac is ',& scmlon,scmlat,scol_frac end if diff --git a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 index f5a00cdf8..e4db010de 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 @@ -11,7 +11,7 @@ module ice_import_export use ice_domain_size , only : nx_global, ny_global, block_size_x, block_size_y, max_blocks, ncat use ice_domain_size , only : nfreq, nfsd use ice_exit , only : abort_ice - use ice_flux , only : strairxT, strairyT, strocnxT, strocnyT + use ice_flux , only : strairxT, strairyT, strocnxT_iavg, strocnyT_iavg use ice_flux , only : alvdr, alidr, alvdf, alidf, Tref, Qref, Uref use ice_flux , only : flat, fsens, flwout, evap, fswabs, fhocn, fswthru use ice_flux , only : fswthru_vdr, fswthru_vdf, fswthru_idr, fswthru_idf @@ -36,9 +36,9 @@ module ice_import_export use ice_shr_methods , only : chkerr, state_reset use icepack_intfc , only : icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc , only : icepack_query_parameters, icepack_query_tracer_flags - use icepack_intfc , only : icepack_query_tracer_indices use icepack_intfc , only : icepack_liquidus_temperature use icepack_intfc , only : icepack_sea_freezing_temperature + use icepack_intfc , only : icepack_query_tracer_indices use icepack_parameters , only : puny, c2 use cice_wrapper_mod , only : t_startf, t_stopf, t_barrierf #ifdef CESMCOUPLED @@ -307,6 +307,7 @@ end subroutine ice_advertise_fields !============================================================================== subroutine ice_realize_fields(gcomp, mesh, flds_scalar_name, flds_scalar_num, rc) + use ice_scam, only : single_column ! input/output variables type(ESMF_GridComp) :: gcomp @@ -320,7 +321,7 @@ subroutine ice_realize_fields(gcomp, mesh, flds_scalar_name, flds_scalar_num, rc type(ESMF_State) :: exportState type(ESMF_Field) :: lfield integer :: numOwnedElements - integer :: i, j, iblk, n, k + integer :: i, j, iblk, n integer :: ilo, ihi, jlo, jhi ! beginning and end of physical domain type(block) :: this_block ! block information for current block real(dbl_kind), allocatable :: mesh_areas(:) @@ -361,10 +362,10 @@ subroutine ice_realize_fields(gcomp, mesh, flds_scalar_name, flds_scalar_num, rc tag=subname//':CICE_Import',& mesh=mesh, rc=rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return - #ifdef CESMCOUPLED ! Get mesh areas from second field - using second field since the ! first field is the scalar field + if (single_column) return call ESMF_MeshGet(mesh, numOwnedElements=numOwnedElements, rc=rc) if (chkerr(rc,__LINE__,u_FILE_u)) return @@ -889,6 +890,8 @@ end subroutine ice_import !=============================================================================== subroutine ice_export( exportState, rc ) + use ice_scam, only : single_column + ! input/output variables type(ESMF_State), intent(inout) :: exportState integer , intent(out) :: rc @@ -911,12 +914,13 @@ subroutine ice_export( exportState, rc ) real (kind=dbl_kind) :: ailohi(nx_block,ny_block,max_blocks) ! fractional ice area real (kind=dbl_kind) :: floediam(nx_block,ny_block,max_blocks) real (kind=dbl_kind) :: floethick(nx_block,ny_block,max_blocks) ! 
ice thickness - real (kind=dbl_kind) :: Tffresh logical (kind=log_kind) :: tr_fsd integer (kind=int_kind) :: nt_fsd + real (kind=dbl_kind) :: Tffresh real (kind=dbl_kind), allocatable :: tempfld(:,:,:) real (kind=dbl_kind), pointer :: dataptr_ifrac_n(:,:) real (kind=dbl_kind), pointer :: dataptr_swpen_n(:,:) + logical (kind=log_kind), save :: first_call = .true. character(len=*),parameter :: subname = 'ice_export' !----------------------------------------------------- @@ -963,6 +967,9 @@ subroutine ice_export( exportState, rc ) ! ice fraction ailohi(i,j,iblk) = min(aice(i,j,iblk), c1) + ! surface temperature + Tsrf(i,j,iblk) = Tffresh + trcr(i,j,1,iblk) !Kelvin (original ???) + if (tr_fsd) then ! floe thickness (m) if (aice(i,j,iblk) > puny) then @@ -984,9 +991,6 @@ subroutine ice_export( exportState, rc ) floediam(i,j,iblk) = MAX(c2*floe_rad_c(1),workx) endif - ! surface temperature - Tsrf(i,j,iblk) = Tffresh + trcr(i,j,1,iblk) !Kelvin (original ???) - ! wind stress (on POP T-grid: convert to lat-lon) workx = strairxT(i,j,iblk) ! N/m^2 worky = strairyT(i,j,iblk) ! N/m^2 @@ -994,8 +998,8 @@ subroutine ice_export( exportState, rc ) tauya(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) + workx*sin(ANGLET(i,j,iblk)) ! ice/ocean stress (on POP T-grid: convert to lat-lon) - workx = -strocnxT(i,j,iblk) ! N/m^2 - worky = -strocnyT(i,j,iblk) ! N/m^2 + workx = -strocnxT_iavg(i,j,iblk) ! N/m^2 + worky = -strocnyT_iavg(i,j,iblk) ! N/m^2 tauxo(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) - worky*sin(ANGLET(i,j,iblk)) tauyo(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) + workx*sin(ANGLET(i,j,iblk)) enddo @@ -1042,8 +1046,11 @@ subroutine ice_export( exportState, rc ) !--------------------------------- ! Zero out fields with tmask for proper coupler accumulation in ice free areas - call state_reset(exportState, c0, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (first_call .or. .not.single_column) then + call state_reset(exportState, c0, rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + first_call = .false. + endif ! Create a temporary field allocate(tempfld(nx_block,ny_block,nblocks)) diff --git a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 index 0b1b9349c..a9b19df6b 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 @@ -427,7 +427,7 @@ subroutine ice_mesh_create_scolumn(scol_lon, scol_lat, ice_mesh, rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return ! Allocate module variable ocn_gridcell_frac - allocate(ocn_gridcell_frac(nx_block,ny_block,max_blocks)) + allocate(ocn_gridcell_frac(2,2,1)) ocn_gridcell_frac(:,:,:) = scol_frac end subroutine ice_mesh_create_scolumn @@ -560,7 +560,7 @@ subroutine ice_mesh_check(gcomp, ice_mesh, rc) ! Check CICE mesh use ice_constants, only : c1,c0,c360 - use ice_grid , only : tlon, tlat + use ice_grid , only : tlon, tlat, hm ! input/output parameters type(ESMF_GridComp) , intent(inout) :: gcomp @@ -569,7 +569,8 @@ subroutine ice_mesh_check(gcomp, ice_mesh, rc) ! local variables type(ESMF_DistGrid) :: distGrid - integer :: n,c,g,i,j,m ! indices + type(ESMF_Array) :: elemMaskArray + integer :: n,i,j ! indices integer :: iblk, jblk ! indices integer :: ilo, ihi, jlo, jhi ! beginning and end of physical domain type(block) :: this_block ! 
block information for current block @@ -578,11 +579,15 @@ real(dbl_kind), pointer :: ownedElemCoords(:) real(dbl_kind), pointer :: lat(:), latMesh(:) real(dbl_kind), pointer :: lon(:), lonMesh(:) + integer , pointer :: model_mask(:) real(dbl_kind) :: diff_lon real(dbl_kind) :: diff_lat real(dbl_kind) :: rad_to_deg real(dbl_kind) :: tmplon, eps_imesh logical :: isPresent, isSet + logical :: mask_error + integer :: mask_internal + integer :: mask_file character(len=char_len_long) :: cvalue character(len=char_len_long) :: logmsg character(len=*), parameter :: subname = ' ice_mesh_check: ' @@ -606,7 +611,7 @@ subroutine ice_mesh_check(gcomp, ice_mesh, rc) allocate(ownedElemCoords(spatialDim*numownedelements)) allocate(lonmesh(numOwnedElements)) allocate(latmesh(numOwnedElements)) - call ESMF_MeshGet(ice_mesh, ownedElemCoords=ownedElemCoords) + call ESMF_MeshGet(ice_mesh, ownedElemCoords=ownedElemCoords, rc=rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return do n = 1,numOwnedElements lonMesh(n) = ownedElemCoords(2*n-1) latMesh(n) = ownedElemCoords(2*n) enddo @@ -650,8 +655,45 @@ subroutine ice_mesh_check(gcomp, ice_mesh, rc) enddo enddo -100 format('ERROR: CICE n, lonmesh, lon, diff_lon = ',i6,2(f21.13,3x),d21.5) -101 format('ERROR: CICE n, latmesh, lat, diff_lat = ',i6,2(f21.13,3x),d21.5) + ! obtain internally generated ice mask for error checks + allocate(model_mask(numOwnedElements)) + call ESMF_MeshGet(ice_mesh, elementdistGrid=distGrid, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + elemMaskArray = ESMF_ArrayCreate(distGrid, model_mask, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + call ESMF_MeshGet(ice_mesh, elemMaskArray=elemMaskArray, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + + mask_error = .false. + n=0 + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + jlo = this_block%jlo + jhi = this_block%jhi + ilo = this_block%ilo + ihi = this_block%ihi + do j = jlo, jhi + do i = ilo, ihi + n = n+1 + mask_internal = nint(hm(i,j,iblk),kind=dbl_kind) + mask_file = model_mask(n) + if (mask_internal /= mask_file) then + write(6,102) n,mask_internal,mask_file + mask_error = .true. + end if + enddo !i + enddo !j + enddo !iblk + if (mask_error) then + call abort_ice(error_message=subname, file=__FILE__, line=__LINE__) + end if + + call ESMF_ArrayDestroy(elemMaskArray, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + +100 format('ERROR: CICE n, mesh_lon , lon, diff_lon = ',i8,2(f21.13,3x),d21.5) +101 format('ERROR: CICE n, mesh_lat , lat, diff_lat = ',i8,2(f21.13,3x),d21.5) +102 format('ERROR: CICE n, mask_internal, mask_file = ',i8,2(i2,2x)) ! deallocate memory deallocate(ownedElemCoords) diff --git a/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90 b/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90 index 84973e9dd..06b090ece 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90 @@ -472,11 +472,11 @@ subroutine ice_prescribed_phys() ! set non-computed fluxes, ice velocities, ice-ocn stresses to zero !-------------------------------------------------------------------- - frzmlt (:,:,:) = c0 - uvel (:,:,:) = c0 - vvel (:,:,:) = c0 - strocnxT (:,:,:) = c0 - strocnyT (:,:,:) = c0 + frzmlt (:,:,:) = c0 + uvel (:,:,:) = c0 + vvel (:,:,:) = c0 + strocnxT_iavg(:,:,:) = c0 + strocnyT_iavg(:,:,:) = c0 !----------------------------------------------------------------- ! 
other atm and ocn fluxes diff --git a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 index 78d462d4c..22596429d 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 @@ -271,17 +271,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, init_snowtracers, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_snow, read_restart_snow, & @@ -298,11 +291,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_snow, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -322,11 +311,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_snow_out=tr_snow, tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -388,21 +373,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! 
level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 index 6e799723e..77bb7738e 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 @@ -157,11 +157,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_iso, write_restart_bgc, write_restart_hbrine, & write_restart_snow @@ -184,11 +180,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, tr_snow, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -206,11 +198,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_iso_out=tr_iso, tr_fsd_out=tr_fsd, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) @@ -412,9 +400,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_snow) call write_restart_snow diff --git a/cicecore/drivers/nuopc/dmi/cice_cap.info b/cicecore/drivers/nuopc/dmi/cice_cap.info index 0ec1dea5a..c4c6bea55 100644 --- a/cicecore/drivers/nuopc/dmi/cice_cap.info +++ b/cicecore/drivers/nuopc/dmi/cice_cap.info @@ -1023,8 +1023,8 @@ module cice_cap dataPtr_vice (i1,j1,iblk) = vice(i,j,iblk) ! sea ice volume dataPtr_vsno (i1,j1,iblk) = vsno(i,j,iblk) ! snow volume dataPtr_fswthru (i1,j1,iblk) = fswthru(i,j,iblk) ! short wave penetration through ice - ui = strocnxT(i,j,iblk) - vj = strocnyT(i,j,iblk) + ui = strocnxT_iavg(i,j,iblk) + vj = strocnyT_iavg(i,j,iblk) angT = ANGLET(i,j,iblk) dataPtr_strocnxT(i1,j1,iblk) = ui*cos(-angT) + vj*sin(angT) ! ice ocean stress dataPtr_strocnyT(i1,j1,iblk) = -ui*sin(angT) + vj*cos(-angT) ! 
ice ocean stress diff --git a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 index 07a151a01..9c30b15a3 100644 --- a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 @@ -266,17 +266,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, init_snowtracers, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_snow, read_restart_snow, & @@ -293,11 +286,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_snow, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -317,11 +306,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_snow_out=tr_snow, tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -383,21 +368,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! 
level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 index 00c7921d1..ae7f7ab1f 100644 --- a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 @@ -151,11 +151,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_iso, write_restart_bgc, write_restart_hbrine, & write_restart_snow @@ -178,11 +174,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, tr_snow, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -200,11 +192,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_iso_out=tr_iso, tr_fsd_out=tr_fsd, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) @@ -408,9 +396,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_snow) call write_restart_snow diff --git a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 index ad355d783..84d1a3a60 100644 --- a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 @@ -241,17 +241,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_fsd, read_restart_fsd, & @@ -267,11 +260,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! 
block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -290,11 +279,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -354,21 +339,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 index ad355d783..84d1a3a60 100644 --- a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 @@ -241,17 +241,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_fsd, read_restart_fsd, & @@ -267,11 +260,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -290,11 +279,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -354,21 +339,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. 
- if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/unittest/sumchk/sumchk.F90 b/cicecore/drivers/unittest/sumchk/sumchk.F90 index aba435b0e..d9ea72d8c 100644 --- a/cicecore/drivers/unittest/sumchk/sumchk.F90 +++ b/cicecore/drivers/unittest/sumchk/sumchk.F90 @@ -58,9 +58,6 @@ program sumchk integer(int_kind),parameter :: ntests3 = 3 character(len=8) :: errorflag3(ntests3) character(len=32) :: stringflag3(ntests3) - integer(int_kind),parameter :: ntests4 = 1 - character(len=8) :: errorflag4(ntests4) - character(len=32) :: stringflag4(ntests4) integer(int_kind) :: npes, ierr, ntask @@ -100,7 +97,6 @@ program sumchk errorflag1 = passflag errorflag2 = passflag errorflag3 = passflag - errorflag4 = passflag npes = get_num_procs() if (my_task == master_task) then @@ -600,63 +596,6 @@ program sumchk endif enddo - ! --------------------------- - ! Test Vector Reductions - ! --------------------------- - - if (my_task == master_task) write(6,*) ' ' - - n = 1 ; stringflag4(n) = 'dble sum vector' - allocate(vec8(3)) - allocate(sum8(3)) - - minval = -5. - maxval = 8. - - vec8(1) = 1. - - ! fill one gridcell with a min and max value - ntask = max(npes-1,1)-1 - if (my_task == ntask) then - vec8(1) = minval - endif - ntask = min(npes,2)-1 - if (my_task == ntask) then - vec8(1) = maxval - endif - vec8(2) = 2. * vec8(1) - vec8(3) = 3. * vec8(1) - - ! compute correct results - if (npes == 1) then - minval = maxval - corval = maxval - else - corval = (npes - 2) * 1.0 + minval + maxval - endif - - do k = 1,ntests4 - string = stringflag4(k) - sum8 = -888e12 - if (k == 1) then - sum8 = global_allreduce_sum(vec8, distrb_info) - else - call abort_ice(subname//' illegal k vector',file=__FILE__,line=__LINE__) - endif - - if (my_task == master_task) then - write(6,'(1x,a,3g16.8)') string, sum8(1),sum8(2),sum8(3) - endif - - if (sum8(1) /= corval .or. sum8(2) /= 2.*corval .or. sum8(3) /= 3.*corval) then - errorflag4(k) = failflag - errorflag0 = failflag - if (my_task == master_task) then - write(6,*) '**** ERROR ', sum8(1),sum8(2),sum8(3),corval - endif - endif - enddo - ! --------------------------- if (my_task == master_task) then @@ -670,9 +609,6 @@ program sumchk do k = 1,ntests3 write(6,*) errorflag3(k),stringflag3(k) enddo - do k = 1,ntests4 - write(6,*) errorflag4(k),stringflag4(k) - enddo write(6,*) ' ' write(6,*) 'SUMCHK COMPLETED SUCCESSFULLY' if (errorflag0 == passflag) then diff --git a/cicecore/shared/ice_arrays_column.F90 b/cicecore/shared/ice_arrays_column.F90 index c9e8be8db..b4727d3fd 100644 --- a/cicecore/shared/ice_arrays_column.F90 +++ b/cicecore/shared/ice_arrays_column.F90 @@ -24,6 +24,7 @@ module ice_arrays_column public :: alloc_arrays_column ! icepack_atmo.F90 + ! Cdn variables on the T-grid real (kind=dbl_kind), public, & dimension (:,:,:), allocatable :: & Cdn_atm , & ! 
atm drag coefficient diff --git a/cicecore/shared/ice_init_column.F90 b/cicecore/shared/ice_init_column.F90 index 5339aa6ec..06ab79cdb 100644 --- a/cicecore/shared/ice_init_column.F90 +++ b/cicecore/shared/ice_init_column.F90 @@ -44,11 +44,7 @@ module ice_init_column private public :: init_thermo_vertical, init_shortwave, & init_age, init_FY, init_lvl, init_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_aerosol, init_bgc, init_hbrine, init_zbgc, input_zbgc, & count_tracers, init_isotope, init_snowtracers @@ -547,23 +543,6 @@ subroutine init_lvl(iblk, alvl, vlvl) end subroutine init_lvl -#ifdef UNDEPRECATE_CESMPONDS -!======================================================================= - -! Initialize melt ponds. - - subroutine init_meltponds_cesm(apnd, hpnd) - - real(kind=dbl_kind), dimension(:,:,:), intent(out) :: & - apnd , & ! melt pond area fraction - hpnd ! melt pond depth - character(len=*),parameter :: subname='(init_meltponds_cesm)' - - apnd(:,:,:) = c0 - hpnd(:,:,:) = c0 - - end subroutine init_meltponds_cesm -#endif !======================================================================= ! Initialize melt ponds. @@ -1818,11 +1797,7 @@ subroutine count_tracers integer (kind=int_kind) :: ntrcr logical (kind=log_kind) :: tr_iage, tr_FY, tr_lvl, tr_pond, tr_aero, tr_fsd logical (kind=log_kind) :: tr_snow -#ifdef UNDEPRECATE_CESMPONDS - logical (kind=log_kind) :: tr_iso, tr_pond_cesm, tr_pond_lvl, tr_pond_topo -#else logical (kind=log_kind) :: tr_iso, tr_pond_lvl, tr_pond_topo -#endif integer (kind=int_kind) :: nt_Tsfc, nt_sice, nt_qice, nt_qsno, nt_iage, nt_FY integer (kind=int_kind) :: nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd, nt_aero integer (kind=int_kind) :: nt_fsd, nt_isosno, nt_isoice @@ -1907,11 +1882,7 @@ subroutine count_tracers call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & tr_lvl_out=tr_lvl, tr_aero_out=tr_aero, tr_pond_out=tr_pond, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_fsd_out=tr_fsd, & tr_snow_out=tr_snow, tr_iso_out=tr_iso, & tr_bgc_Nit_out=tr_bgc_Nit, tr_bgc_Am_out =tr_bgc_Am, tr_bgc_Sil_out=tr_bgc_Sil, & diff --git a/cicecore/shared/ice_restart_column.F90 b/cicecore/shared/ice_restart_column.F90 index a15f9d2c1..86ff170c7 100644 --- a/cicecore/shared/ice_restart_column.F90 +++ b/cicecore/shared/ice_restart_column.F90 @@ -29,9 +29,6 @@ module ice_restart_column public :: write_restart_age, read_restart_age, & write_restart_FY, read_restart_FY, & write_restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_pond_cesm, read_restart_pond_cesm, & -#endif write_restart_pond_lvl, read_restart_pond_lvl, & write_restart_pond_topo, read_restart_pond_topo, & write_restart_snow, read_restart_snow, & @@ -45,9 +42,6 @@ module ice_restart_column restart_age , & ! if .true., read age tracer restart file restart_FY , & ! if .true., read FY tracer restart file restart_lvl , & ! if .true., read lvl tracer restart file -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, & ! if .true., read meltponds restart file -#endif restart_pond_lvl , & ! if .true., read meltponds restart file restart_pond_topo, & ! if .true., read meltponds restart file restart_snow , & ! 
if .true., read snow tracer restart file @@ -260,74 +254,6 @@ subroutine read_restart_lvl() end subroutine read_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS -!======================================================================= -! -! Dumps all values needed for restarting -! -! authors Elizabeth C. Hunke, LANL -! David A. Bailey, NCAR - - subroutine write_restart_pond_cesm() - - use ice_fileunits, only: nu_dump_pond - use ice_state, only: trcrn - - ! local variables - - logical (kind=log_kind) :: diag - integer (kind=int_kind) :: nt_apnd, nt_hpnd - character(len=*),parameter :: subname='(write_restart_pond_cesm)' - - call icepack_query_tracer_indices(nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - diag = .true. - - call write_restart_field(nu_dump_pond,0,trcrn(:,:,nt_apnd,:,:),'ruf8', & - 'apnd',ncat,diag) - call write_restart_field(nu_dump_pond,0,trcrn(:,:,nt_hpnd,:,:),'ruf8', & - 'hpnd',ncat,diag) - - end subroutine write_restart_pond_cesm - -!======================================================================= - -! Reads all values needed for a meltpond volume restart -! -! authors Elizabeth C. Hunke, LANL -! David A. Bailey, NCAR - - subroutine read_restart_pond_cesm() - - use ice_fileunits, only: nu_restart_pond - use ice_state, only: trcrn - - ! local variables - - logical (kind=log_kind) :: & - diag - integer (kind=int_kind) :: nt_apnd, nt_hpnd - character(len=*),parameter :: subname='(read_restart_pond_cesm)' - - call icepack_query_tracer_indices(nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - diag = .true. - - if (my_task == master_task) write(nu_diag,*) subname,'min/max cesm ponds' - - call read_restart_field(nu_restart_pond,0,trcrn(:,:,nt_apnd,:,:),'ruf8', & - 'apnd',ncat,diag,field_loc_center,field_type_scalar) - call read_restart_field(nu_restart_pond,0,trcrn(:,:,nt_hpnd,:,:),'ruf8', & - 'hpnd',ncat,diag,field_loc_center,field_type_scalar) - - end subroutine read_restart_pond_cesm -#endif !======================================================================= ! ! Dumps all values needed for restarting diff --git a/configuration/scripts/cice.batch.csh b/configuration/scripts/cice.batch.csh index fbe172f51..1cf23da45 100755 --- a/configuration/scripts/cice.batch.csh +++ b/configuration/scripts/cice.batch.csh @@ -33,6 +33,19 @@ cat >> ${jobfile} << EOFB #PBS -l walltime=${batchtime} EOFB +else if (${ICE_MACHINE} =~ gust*) then +cat >> ${jobfile} << EOFB +#PBS -q ${queue} +#PBS -l job_priority=regular +#PBS -N ${ICE_CASENAME} +#PBS -A ${acct} +#PBS -l select=${nnodes}:ncpus=${corespernode}:mpiprocs=${taskpernodelimit}:ompthreads=${nthrds} +#PBS -l walltime=${batchtime} +#PBS -j oe +#PBS -W umask=022 +#PBS -o ${ICE_CASEDIR} +EOFB + else if (${ICE_MACHINE} =~ hobart*) then cat >> ${jobfile} << EOFB #PBS -j oe @@ -90,8 +103,8 @@ else if (${ICE_MACHINE} =~ nrlssc*) then # nrlssc queue system has nodes with different task per node if (${taskpernode} <= 12) set tpnstr = 'twelve' if (${taskpernode} == 20) set tpnstr = 'twenty' -if (${taskpernode} == 24) set tpnstr = 'twentyfour' -if (${taskpernode} == 28) set tpnstr = 'twentyeight' +if (${taskpernode} >= 24) set tpnstr = 'twentyfour' +#if (${taskpernode} == 28) set tpnstr = 'twentyeight' cat >> ${jobfile} << EOFB ... EOFB diff --git a/configuration/scripts/cice.launch.csh b/configuration/scripts/cice.launch.csh --- a/configuration/scripts/cice.launch.csh +++ b/configuration/scripts/cice.launch.csh cat >> ${jobfile} << EOFR ./cice >&!
\$ICE_RUNLOG_FILE @@ -23,7 +23,19 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ hobart* || ${ICE_MACHINE} =~ izumi*) then +else if (${ICE_MACHCOMP} =~ gust*) then +if (${ICE_COMMDIR} =~ serial*) then +cat >> ${jobfile} << EOFR +./cice >&! \$ICE_RUNLOG_FILE +EOFR +else +cat >> ${jobfile} << EOFR +mpiexec --cpu-bind depth -n ${ntasks} -ppn ${taskpernodelimit} -d ${nthrds} ./cice >&! \$ICE_RUNLOG_FILE +EOFR +endif + +#======= +else if (${ICE_MACHCOMP} =~ hobart* || ${ICE_MACHCOMP} =~ izumi*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -35,7 +47,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ gaffney* || ${ICE_MACHINE} =~ koehr* || ${ICE_MACHINE} =~ mustang*) then +else if (${ICE_MACHCOMP} =~ gaffney* || ${ICE_MACHCOMP} =~ koehr* || ${ICE_MACHCOMP} =~ mustang*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -47,7 +59,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ nrlssc*) then +else if (${ICE_MACHCOMP} =~ nrlssc*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -59,13 +71,19 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ onyx* || ${ICE_MACHINE} =~ narwhal) then +else if (${ICE_MACHCOMP} =~ narwhal_*hpcx*) then cat >> ${jobfile} << EOFR -aprun -n ${ntasks} -N ${taskpernodelimit} -d ${nthrds} ./cice >&! \$ICE_RUNLOG_FILE +mpirun -np ${ntasks} -hostfile \$PBS_NODEFILE \${EXTRA_OMPI_SETTINGS} ./cice >&! \$ICE_RUNLOG_FILE +EOFR + +#======= +else if (${ICE_MACHCOMP} =~ onyx* || ${ICE_MACHCOMP} =~ narwhal*) then +cat >> ${jobfile} << EOFR +aprun -q -n ${ntasks} -N ${taskpernodelimit} -d ${nthrds} ./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ cori*) then +else if (${ICE_MACHCOMP} =~ cori*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -77,7 +95,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ compy*) then +else if (${ICE_MACHCOMP} =~ compy*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -89,7 +107,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ badger*) then +else if (${ICE_MACHCOMP} =~ badger*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -101,7 +119,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ fram*) then +else if (${ICE_MACHCOMP} =~ fram*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -113,7 +131,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ cesium*) then +else if (${ICE_MACHCOMP} =~ cesium*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -125,7 +143,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ millikan*) then +else if (${ICE_MACHCOMP} =~ millikan*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -137,7 +155,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ daley* || ${ICE_MACHINE} =~ banting*) then +else if (${ICE_MACHCOMP} =~ daley* || ${ICE_MACHCOMP} =~ banting*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! 
\$ICE_RUNLOG_FILE @@ -149,7 +167,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ ppp5* || ${ICE_MACHINE} =~ ppp6* || ${ICE_MACHINE} =~ robert* || ${ICE_MACHINE} =~ underhill*) then +else if (${ICE_MACHCOMP} =~ ppp5* || ${ICE_MACHCOMP} =~ ppp6* || ${ICE_MACHCOMP} =~ robert* || ${ICE_MACHCOMP} =~ underhill*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -161,7 +179,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ ppp3*) then +else if (${ICE_MACHCOMP} =~ ppp3*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -173,7 +191,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ gpsc3*) then +else if (${ICE_MACHCOMP} =~ gpsc3*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -186,7 +204,7 @@ endif #======= -else if (${ICE_MACHINE} =~ freya*) then +else if (${ICE_MACHCOMP} =~ freya*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR aprun -n 1 -N 1 -d 1 ./cice >&! \$ICE_RUNLOG_FILE @@ -198,45 +216,45 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ gaea*) then +else if (${ICE_MACHCOMP} =~ gaea*) then cat >> ${jobfile} << EOFR srun -n ${ntasks} -c ${nthrds} ./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ hera*) then +else if (${ICE_MACHCOMP} =~ hera*) then cat >> ${jobfile} << EOFR srun -n ${ntasks} -c ${nthrds} ./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ orion*) then +else if (${ICE_MACHCOMP} =~ orion*) then cat >> ${jobfile} << EOFR srun -n ${ntasks} -c ${nthrds} ./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ high_Sierra*) then +else if (${ICE_MACHCOMP} =~ high_Sierra*) then cat >> ${jobfile} << EOFR mpirun -np ${ntasks} ./cice >&! \$ICE_RUNLOG_FILE #./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ phase3*) then +else if (${ICE_MACHCOMP} =~ phase3*) then cat >> ${jobfile} << EOFR mpirun -np ${ntasks} ./cice >&! \$ICE_RUNLOG_FILE #./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ testmachine*) then +else if (${ICE_MACHCOMP} =~ testmachine*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ travisCI*) then +else if (${ICE_MACHCOMP} =~ travisCI*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -251,7 +269,7 @@ endif #EOFR #======= -else if (${ICE_MACHINE} =~ conda*) then +else if (${ICE_MACHCOMP} =~ conda*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -265,7 +283,7 @@ endif #======= else - echo "${0} ERROR ${ICE_MACHINE} unknown" + echo "${0} ERROR ${ICE_MACHCOMP} unknown" exit -1 endif #======= diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index ec582873a..8262f34ec 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -75,6 +75,11 @@ kcatbound = 0 dxrect = 30.e5 dyrect = 30.e5 + lonrefrect = -156.50d0 + latrefrect = 71.35d0 + scale_dxdy = .false. + dxscale = 1.d0 + dyscale = 1.d0 close_boundaries = .false. ncat = 5 nfsd = 1 @@ -162,7 +167,7 @@ kridge = 1 ktransport = 1 ssh_stress = 'geostrophic' - maxits_nonlin = 4 + maxits_nonlin = 10 precond = 'pgmres' dim_fgmres = 50 dim_pgmres = 5 @@ -173,7 +178,7 @@ monitor_pgmres = .false. ortho_type = 'mgs' reltol_nonlin = 1e-8 - reltol_fgmres = 1e-2 + reltol_fgmres = 1e-1 reltol_pgmres = 1e-6 algo_nonlin = 'picard' use_mean_vrel = .true. 
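The ice_in changes above add reference coordinates (lonrefrect, latrefrect) and optional grid-spacing scaling for rectangular grids, with defaults (scale_dxdy = .false., unit scale factors) that preserve the previous uniform dxrect/dyrect spacing; they also retune the implicit VP solver defaults, pairing a looser FGMRES tolerance (reltol_fgmres = 1e-1) with more nonlinear iterations (maxits_nonlin = 10). A minimal sketch of a variable-spacing rectangular grid under the new options (values illustrative, borrowed from the vargrid option set added later in this diff):

    grid_type = 'rectangular'
    dxrect = 16.e5
    dyrect = 16.e5
    scale_dxdy = .true.
    dxscale = 1.02d0
    dyscale = 1.01d0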
diff --git a/configuration/scripts/machines/Macros.gust_cray b/configuration/scripts/machines/Macros.gust_cray new file mode 100644 index 000000000..eadc14884 --- /dev/null +++ b/configuration/scripts/machines/Macros.gust_cray @@ -0,0 +1,71 @@ +#============================================================================== +# Makefile macros for NCAR gust, cray compiler +#============================================================================== + +CPP := ftn -e P +CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CFLAGS := -c -O2 + +FIXEDFLAGS := -132 +FREEFLAGS := +FFLAGS := -hbyteswapio +FFLAGS_NOOPT:= -O0 +LDFLAGS := -hbyteswapio + +ifeq ($(ICE_BLDDEBUG), true) + FFLAGS += -O0 -hfp0 -g -Rbcdps -Ktrap=fp +else + FFLAGS += -O2 -hfp0 +endif + +SCC := cc +SFC := ftn +MPICC := cc +MPIFC := ftn + +ifeq ($(ICE_COMMDIR), mpi) + FC := $(MPIFC) + CC := $(MPICC) +else + FC := $(SFC) + CC := $(SCC) +endif +LD:= $(FC) + +NETCDF_PATH := $(NETCDF) + +PIO_CONFIG_OPTS:= --enable-filesystem-hints=gpfs + +PNETCDF_PATH := $(PNETCDF) +#PNETCDF_PATH := /glade/u/apps/ch/opt/pio/2.2/mpt/2.15f/intel/17.0.1/lib + +INCLDIR := $(INCLDIR) + +LIB_NETCDF := $(NETCDF)/lib +#LIB_PNETCDF := $(PNETCDF_PATH)/lib +#LIB_MPI := $(IMPILIBDIR) + +#SLIBS := -L$(LIB_NETCDF) -lnetcdf -lnetcdff -L$(LIB_PNETCDF) -lpnetcdf -lgptl +SLIBS := -L$(LIB_NETCDF) -lnetcdf -lnetcdff + +ifeq ($(ICE_THREADED), true) + LDFLAGS += -fopenmp + CFLAGS += -fopenmp + FFLAGS += -fopenmp +else + LDFLAGS += -hnoomp +# CFLAGS += -hnoomp + FFLAGS += -hnoomp +endif + +#ifeq ($(ICE_IOTYPE), pio1) +# LIB_PIO := $(PIO_LIBDIR) +# SLIBS := $(SLIBS) -L$(LIB_PIO) -lpio +#endif + +ifeq ($(ICE_IOTYPE), pio2) + CPPDEFS := $(CPPDEFS) -DGPTL +# LIB_PIO := $(PIO_LIBDIR) +# SLIBS := $(SLIBS) -L$(LIB_PIO) -lpiof -lpioc -lgptl +endif + diff --git a/configuration/scripts/machines/Macros.gust_intel b/configuration/scripts/machines/Macros.gust_intel new file mode 100644 index 000000000..18d5d10d0 --- /dev/null +++ b/configuration/scripts/machines/Macros.gust_intel @@ -0,0 +1,68 @@ +#============================================================================== +# Makefile macros for NCAR gust, intel compiler +#============================================================================== + +CPP := fpp +CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CFLAGS := -c -O2 -fp-model precise + +FIXEDFLAGS := -132 +FREEFLAGS := -FR +FFLAGS := -fp-model precise -convert big_endian -assume byterecl -ftz -traceback +FFLAGS_NOOPT:= -O0 + +ifeq ($(ICE_BLDDEBUG), true) + FFLAGS += -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created -link_mpi=dbg +# FFLAGS += -O0 -g -check all -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -link_mpi=dbg -stand f08 +# FFLAGS += -O0 -g -check all -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -init=snan,arrays -link_mpi=dbg +else + FFLAGS += -O2 +endif + +SCC := cc +SFC := ftn +MPICC := cc +MPIFC := ftn + +ifeq ($(ICE_COMMDIR), mpi) + FC := $(MPIFC) + CC := $(MPICC) +else + FC := $(SFC) + CC := $(SCC) +endif +LD:= $(FC) + +NETCDF_PATH := $(NETCDF) + +PIO_CONFIG_OPTS:= --enable-filesystem-hints=gpfs + +PNETCDF_PATH := $(PNETCDF) +#PNETCDF_PATH := /glade/u/apps/ch/opt/pio/2.2/mpt/2.15f/intel/17.0.1/lib + +INCLDIR := $(INCLDIR) + +LIB_NETCDF := $(NETCDF)/lib +#LIB_PNETCDF := $(PNETCDF_PATH)/lib +#LIB_MPI := $(IMPILIBDIR) + +#SLIBS := -L$(LIB_NETCDF) -lnetcdf -lnetcdff -L$(LIB_PNETCDF) -lpnetcdf -lgptl +SLIBS := -L$(LIB_NETCDF) -lnetcdf -lnetcdff + +ifeq ($(ICE_THREADED), true) + LDFLAGS += -qopenmp + 
CFLAGS += -qopenmp + FFLAGS += -qopenmp +endif + +#ifeq ($(ICE_IOTYPE), pio1) +# LIB_PIO := $(PIO_LIBDIR) +# SLIBS := $(SLIBS) -L$(LIB_PIO) -lpio +#endif + +ifeq ($(ICE_IOTYPE), pio2) + CPPDEFS := $(CPPDEFS) -DGPTL +# LIB_PIO := $(PIO_LIBDIR) +# SLIBS := $(SLIBS) -L$(LIB_PIO) -lpiof -lpioc -lgptl +endif + diff --git a/configuration/scripts/machines/Macros.narwhal_aocc b/configuration/scripts/machines/Macros.narwhal_aocc index 95f301e85..b22aeda64 100644 --- a/configuration/scripts/machines/Macros.narwhal_aocc +++ b/configuration/scripts/machines/Macros.narwhal_aocc @@ -49,7 +49,7 @@ INCLDIR := $(INCLDIR) ifeq ($(ICE_THREADED), true) LDFLAGS += -mp - CFLAGS += -mp +# CFLAGS += -mp FFLAGS += -mp else LDFLAGS += -nomp diff --git a/configuration/scripts/machines/Macros.narwhal_intelhpcx b/configuration/scripts/machines/Macros.narwhal_intelhpcx new file mode 100644 index 000000000..0e2a94b20 --- /dev/null +++ b/configuration/scripts/machines/Macros.narwhal_intelhpcx @@ -0,0 +1,57 @@ +#============================================================================== +# Macros file for NAVYDSRC narwhal, intel compiler with hpcx hcoll openmpi +#============================================================================== + +CPP := fpp +CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CFLAGS := -c -O2 -fp-model precise -fcommon + +FIXEDFLAGS := -132 +FREEFLAGS := -FR +FFLAGS := -fp-model precise -convert big_endian -assume byterecl -ftz -traceback +# -mcmodel medium -shared-intel +FFLAGS_NOOPT:= -O0 + +ifeq ($(ICE_BLDDEBUG), true) + FFLAGS += -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created +# FFLAGS += -O0 -g -check all -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -init=snan,arrays +else + FFLAGS += -O2 +endif + +SCC := cc +SFC := ftn +MPICC := mpicc +MPIFC := mpif90 + +ifeq ($(ICE_COMMDIR), mpi) + FC := $(MPIFC) + CC := $(MPICC) +else + FC := $(SFC) + CC := $(SCC) +endif +LD:= $(FC) + +# defined by module +NETCDF_PATH := $(NETCDF_DIR) +#PNETCDF_PATH := $(PNETCDF) +#PNETCDF_PATH := /glade/apps/opt/pnetcdf/1.3.0/intel/default +#LAPACK_LIBDIR := /glade/apps/opt/lapack/3.4.2/intel/12.1.5/lib + +#PIO_CONFIG_OPTS:= --enable-filesystem-hints=gpfs + +INCLDIR := $(INCLDIR) +INCLDIR += -I$(NETCDF_PATH)/include + +LIB_NETCDF := $(NETCDF_PATH)/lib +#LIB_PNETCDF := $(PNETCDF_PATH)/lib +#LIB_MPI := $(IMPILIBDIR) +SLIBS := $(HCOLL_LIBS) -L$(NETCDF_PATH)/lib -lnetcdf -lnetcdff + +ifeq ($(ICE_THREADED), true) + LDFLAGS += -qopenmp + CFLAGS += -qopenmp + FFLAGS += -qopenmp +endif + diff --git a/configuration/scripts/machines/env.badger_intel b/configuration/scripts/machines/env.badger_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.banting_gnu b/configuration/scripts/machines/env.banting_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.banting_intel b/configuration/scripts/machines/env.banting_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.cheyenne_gnu b/configuration/scripts/machines/env.cheyenne_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.cheyenne_intel b/configuration/scripts/machines/env.cheyenne_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.cheyenne_pgi b/configuration/scripts/machines/env.cheyenne_pgi old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.compy_intel b/configuration/scripts/machines/env.compy_intel old mode 100755 new mode 100644 diff 
--git a/configuration/scripts/machines/env.conda_linux b/configuration/scripts/machines/env.conda_linux old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.conda_macos b/configuration/scripts/machines/env.conda_macos old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.cori_intel b/configuration/scripts/machines/env.cori_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.daley_gnu b/configuration/scripts/machines/env.daley_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.daley_intel b/configuration/scripts/machines/env.daley_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.fram_intel b/configuration/scripts/machines/env.fram_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.freya_gnu b/configuration/scripts/machines/env.freya_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.freya_intel b/configuration/scripts/machines/env.freya_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.gaea_intel b/configuration/scripts/machines/env.gaea_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.gaffney_gnu b/configuration/scripts/machines/env.gaffney_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.gaffney_intel b/configuration/scripts/machines/env.gaffney_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.gust_cray b/configuration/scripts/machines/env.gust_cray new file mode 100644 index 000000000..8246cd9b9 --- /dev/null +++ b/configuration/scripts/machines/env.gust_cray @@ -0,0 +1,72 @@ +#!/bin/csh -f + +set inp = "undefined" +if ($#argv == 1) then + set inp = $1 +endif + +if ("$inp" != "-nomodules") then + +source ${MODULESHOME}/init/csh + +module --force purge +module load ncarenv/22.10 +module load craype/2.7.17 +module load cce/14.0.3 +module load ncarcompilers/0.7.1 +module load cray-mpich/8.1.19 +module load hdf5/1.12.2 +module load netcdf/4.8.1 +module load cray-libsci/22.08.1.1 + +if ($?ICE_IOTYPE) then +if ($ICE_IOTYPE =~ pio*) then + module unload netcdf + module load netcdf-mpi/4.8.1 + module load pnetcdf/1.12.2 + if ($ICE_IOTYPE == "pio1") then + module load pio/1.10.1 + else + module load pio/2.5.9 + endif +endif +endif + +if ($?ICE_BFBTYPE) then +if ($ICE_BFBTYPE =~ qcchk*) then + module load conda +# conda env create -f ../../configuration/scripts/tests/qctest.yml + conda activate qctest +endif +endif + +# For perftools with mpiexec +# module load perftools-base +# module load perftools +#setenv PALS_TRANSFER FALSE + +endif + +limit coredumpsize unlimited +limit stacksize unlimited +setenv PALS_QUIET TRUE + +# May be needed for OpenMP memory +setenv OMP_STACKSIZE 64M +# OMP runtime diagnostics +#setenv OMP_DISPLAY_ENV TRUE + +setenv ICE_MACHINE_MACHNAME gust +setenv ICE_MACHINE_MACHINFO "Cray EX Milan Slingshot 11" +setenv ICE_MACHINE_ENVNAME cray +setenv ICE_MACHINE_ENVINFO "cce 14.0.3, cray-mpich 8.1.19, netcdf4.8.1, pnetcdf1.12.2, pio2.5.9" +setenv ICE_MACHINE_MAKE gmake +setenv ICE_MACHINE_WKDIR /glade/gust/scratch/$user/CICE_RUNS +setenv ICE_MACHINE_INPUTDATA /glade/work/tcraig/cice-consortium-data +setenv ICE_MACHINE_BASELINE /glade/gust/scratch/$user/CICE_BASELINE +setenv ICE_MACHINE_SUBMIT "qsub" +setenv ICE_MACHINE_ACCT P00000000 +setenv ICE_MACHINE_QUEUE "main" +setenv ICE_MACHINE_TPNODE 128 +setenv 
ICE_MACHINE_BLDTHRDS 8 +setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/machines/env.gust_intel b/configuration/scripts/machines/env.gust_intel new file mode 100644 index 000000000..08a99b940 --- /dev/null +++ b/configuration/scripts/machines/env.gust_intel @@ -0,0 +1,72 @@ +#!/bin/csh -f + +set inp = "undefined" +if ($#argv == 1) then + set inp = $1 +endif + +if ("$inp" != "-nomodules") then + +source ${MODULESHOME}/init/csh + +module --force purge +module load ncarenv/22.10 +module load craype/2.7.17 +module load intel/2021.6.0 +module load ncarcompilers/0.7.1 +module load cray-mpich/8.1.19 +module load hdf5/1.12.2 +module load netcdf/4.8.1 +module load cray-libsci/22.08.1.1 + +if ($?ICE_IOTYPE) then +if ($ICE_IOTYPE =~ pio*) then + module unload netcdf + module load netcdf-mpi/4.8.1 + module load pnetcdf/1.12.2 + if ($ICE_IOTYPE == "pio1") then + module load pio/1.10.1 + else + module load pio/2.5.9 + endif +endif +endif + +if ($?ICE_BFBTYPE) then +if ($ICE_BFBTYPE =~ qcchk*) then + module load conda +# conda env create -f ../../configuration/scripts/tests/qctest.yml + conda activate qctest +endif +endif + +# For perftools with mpiexec +# module load perftools-base +# module load perftools +#setenv PALS_TRANSFER FALSE + +endif + +limit coredumpsize unlimited +limit stacksize unlimited +setenv PALS_QUIET TRUE + +# May be needed for OpenMP memory +setenv OMP_STACKSIZE 64M +# OMP runtime diagnostics +#setenv OMP_DISPLAY_ENV TRUE + +setenv ICE_MACHINE_MACHNAME gust +setenv ICE_MACHINE_MACHINFO "Cray EX Milan Slingshot 11" +setenv ICE_MACHINE_ENVNAME intel +setenv ICE_MACHINE_ENVINFO "ifort 2021.6.0 20220226, cray-mpich 8.1.19, netcdf4.8.1, pnetcdf1.12.2, pio2.5.9" +setenv ICE_MACHINE_MAKE gmake +setenv ICE_MACHINE_WKDIR /glade/gust/scratch/$user/CICE_RUNS +setenv ICE_MACHINE_INPUTDATA /glade/work/tcraig/cice-consortium-data +setenv ICE_MACHINE_BASELINE /glade/gust/scratch/$user/CICE_BASELINE +setenv ICE_MACHINE_SUBMIT "qsub" +setenv ICE_MACHINE_ACCT P00000000 +setenv ICE_MACHINE_QUEUE "main" +setenv ICE_MACHINE_TPNODE 128 +setenv ICE_MACHINE_BLDTHRDS 8 +setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/machines/env.hera_intel b/configuration/scripts/machines/env.hera_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.high_Sierra_gnu b/configuration/scripts/machines/env.high_Sierra_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.hobart_intel b/configuration/scripts/machines/env.hobart_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.hobart_nag b/configuration/scripts/machines/env.hobart_nag old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.izumi_gnu b/configuration/scripts/machines/env.izumi_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.izumi_intel b/configuration/scripts/machines/env.izumi_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.izumi_nag b/configuration/scripts/machines/env.izumi_nag old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.izumi_pgi b/configuration/scripts/machines/env.izumi_pgi old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.koehr_intel b/configuration/scripts/machines/env.koehr_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.mustang_intel18 b/configuration/scripts/machines/env.mustang_intel18 old mode 100755 new mode 100644 
diff --git a/configuration/scripts/machines/env.mustang_intel19 b/configuration/scripts/machines/env.mustang_intel19 old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.mustang_intel20 b/configuration/scripts/machines/env.mustang_intel20 old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.narwhal_aocc b/configuration/scripts/machines/env.narwhal_aocc old mode 100755 new mode 100644 index 4016a1d7d..875296520 --- a/configuration/scripts/machines/env.narwhal_aocc +++ b/configuration/scripts/machines/env.narwhal_aocc @@ -14,21 +14,18 @@ module unload PrgEnv-cray module unload PrgEnv-gnu module unload PrgEnv-intel module unload PrgEnv-nvidia -module load PrgEnv-aocc/8.1.0 -module load cray-pals/1.0.17 -module load bct-env/0.1 -module unload aocc -module load aocc/2.2.0.1 -module unload cray-mpich -module load cray-mpich/8.1.5 - +module load PrgEnv-aocc +module load cpe/22.03 +module load aocc/3.0.0 +module unload cray-pals +module load cray-pals/1.2.2 module unload cray-hdf5 module unload cray-hdf5-parallel module unload cray-netcdf-hdf5parallel module unload cray-parallel-netcdf module unload netcdf -module load cray-netcdf/4.7.4.4 -module load cray-hdf5/1.12.0.4 +module load cray-netcdf/4.8.1.1 +module load cray-hdf5/1.12.1.1 setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited @@ -41,7 +38,7 @@ endif setenv ICE_MACHINE_MACHNAME narwhal setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12 2.6GHz, Slingshot-10 Interconnect" setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "aocc_3.0.0-Build#78 2020_12_10 clang/flang 12.0.0, cray-mpich/8.1.5, netcdf/4.7.4.4" +setenv ICE_MACHINE_ENVINFO "aocc_3.0.0-Build#78 2020_12_10 clang/flang 12.0.0, cray-mpich/8.1.14, netcdf/4.8.1.1" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium diff --git a/configuration/scripts/machines/env.narwhal_cray b/configuration/scripts/machines/env.narwhal_cray old mode 100755 new mode 100644 index d0fcc9ba7..a9e5bd14a --- a/configuration/scripts/machines/env.narwhal_cray +++ b/configuration/scripts/machines/env.narwhal_cray @@ -14,21 +14,17 @@ module unload PrgEnv-cray module unload PrgEnv-gnu module unload PrgEnv-intel module unload PrgEnv-nvidia -module load PrgEnv-cray/8.1.0 -module load cray-pals/1.0.17 -module load bct-env/0.1 -module unload cce -module load cce/12.0.3 -module unload cray-mpich -module load cray-mpich/8.1.9 - +module load PrgEnv-cray +module load cpe/22.03 +module unload cray-pals +module load cray-pals/1.2.2 module unload cray-hdf5 module unload cray-hdf5-parallel module unload cray-netcdf-hdf5parallel module unload cray-parallel-netcdf module unload netcdf -module load cray-netcdf/4.7.4.4 -module load cray-hdf5/1.12.0.4 +module load cray-netcdf/4.8.1.1 +module load cray-hdf5/1.12.1.1 setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited @@ -39,9 +35,9 @@ setenv OMP_WAIT_POLICY PASSIVE endif setenv ICE_MACHINE_MACHNAME narwhal -setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12" +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12 2.6GHz, Slingshot-10 Interconnect" setenv ICE_MACHINE_ENVNAME cray -setenv ICE_MACHINE_ENVINFO "cce 12.0.3, cray-mpich/8.1.9, netcdf/4.7.4.4" +setenv ICE_MACHINE_ENVINFO "cce 13.0.2, cray-mpich/8.1.14, netcdf/4.8.1.1" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium diff --git 
a/configuration/scripts/machines/env.narwhal_gnu b/configuration/scripts/machines/env.narwhal_gnu old mode 100755 new mode 100644 index 51a272f4e..701920161 --- a/configuration/scripts/machines/env.narwhal_gnu +++ b/configuration/scripts/machines/env.narwhal_gnu @@ -14,21 +14,17 @@ module unload PrgEnv-cray module unload PrgEnv-gnu module unload PrgEnv-intel module unload PrgEnv-nvidia -module load PrgEnv-gnu/8.1.0 -module load cray-pals/1.0.17 -module load bct-env/0.1 -module unload gcc -module load gcc/11.2.0 -module unload cray-mpich -module load cray-mpich/8.1.9 - +module load PrgEnv-gnu +module load cpe/22.03 +module unload cray-pals +module load cray-pals/1.2.2 module unload cray-hdf5 module unload cray-hdf5-parallel module unload cray-netcdf-hdf5parallel module unload cray-parallel-netcdf module unload netcdf -module load cray-netcdf/4.7.4.4 -module load cray-hdf5/1.12.0.4 +module load cray-netcdf/4.8.1.1 +module load cray-hdf5/1.12.1.1 setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited @@ -39,9 +35,9 @@ setenv OMP_WAIT_POLICY PASSIVE endif setenv ICE_MACHINE_MACHNAME narwhal -setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12" -setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "gnu fortran/c 11.2.0, cray-mpich/8.1.9, netcdf/4.7.4.4" +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12 2.6GHz, Slingshot-10 Interconnect" +setenv ICE_MACHINE_ENVNAME gnu +setenv ICE_MACHINE_ENVINFO "gnu c/fortran 11.2.0 20210728, cray-mpich/8.1.14, netcdf/4.8.1.1" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium diff --git a/configuration/scripts/machines/env.narwhal_intel b/configuration/scripts/machines/env.narwhal_intel old mode 100755 new mode 100644 index f79d962ff..4cc60acac --- a/configuration/scripts/machines/env.narwhal_intel +++ b/configuration/scripts/machines/env.narwhal_intel @@ -14,21 +14,17 @@ module unload PrgEnv-cray module unload PrgEnv-gnu module unload PrgEnv-intel module unload PrgEnv-nvidia -module load PrgEnv-intel/8.0.0 -module load cray-pals/1.0.17 -module load bct-env/0.1 -module unload intel -module load intel/2021.1 -module unload cray-mpich -module load cray-mpich/8.1.9 - +module load PrgEnv-intel +module load cpe/22.03 +module unload cray-pals +module load cray-pals/1.2.2 module unload cray-hdf5 module unload cray-hdf5-parallel module unload cray-netcdf-hdf5parallel module unload cray-parallel-netcdf module unload netcdf -module load cray-netcdf/4.7.4.4 -module load cray-hdf5/1.12.0.4 +module load cray-netcdf/4.8.1.1 +module load cray-hdf5/1.12.1.1 setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited @@ -39,9 +35,9 @@ setenv OMP_WAIT_POLICY PASSIVE endif setenv ICE_MACHINE_MACHNAME narwhal -setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12" +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12 2.6GHz, Slingshot-10 Interconnect" setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "ifort 2021.1 Beta 20201112, cray-mpich/8.1.9, netcdf/4.7.4.4" +setenv ICE_MACHINE_ENVINFO "ifort 2021.4.0 20210910, cray-mpich/8.1.14, netcdf/4.8.1.1" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium diff --git a/configuration/scripts/machines/env.narwhal_intelhpcx b/configuration/scripts/machines/env.narwhal_intelhpcx new file mode 100644 index 000000000..5b7a33ece --- /dev/null +++ b/configuration/scripts/machines/env.narwhal_intelhpcx @@ -0,0 +1,52 @@ +#!/bin/csh -f + +set 
inp = "undefined" +if ($#argv == 1) then + set inp = $1 +endif + +if ("$inp" != "-nomodules") then + +source ${MODULESHOME}/init/csh + +module unload hpcx-ompi-intel +module unload libfabric +module unload PrgEnv-aocc +module unload PrgEnv-cray +module unload PrgEnv-gnu +module unload PrgEnv-intel +module unload PrgEnv-nvidia +module unload craype-network-ofi +module unload craype-network-ucx +module load hpcx-ompi-intel +module load libfabric + +module unload cray-hdf5 +module unload cray-hdf5-parallel +module unload cray-netcdf-hdf5parallel +module unload cray-parallel-netcdf +module unload netcdf +module load cray-netcdf/4.8.1.1 +module load cray-hdf5/1.12.1.1 + +limit coredumpsize unlimited +limit stacksize unlimited +setenv OMP_STACKSIZE 128M +setenv OMP_WAIT_POLICY PASSIVE + +endif + +setenv ICE_MACHINE_MACHNAME narwhal +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12 2.6GHz, Slingshot-10 Interconnect" +setenv ICE_MACHINE_ENVNAME intel +setenv ICE_MACHINE_ENVINFO "ifort 2021.4.0 20210910, hpcx2.9.0 openmpi, netcdf/4.8.1.1" +setenv ICE_MACHINE_MAKE gmake +setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS +setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium +setenv ICE_MACHINE_BASELINE $WORKDIR/CICE_BASELINE +setenv ICE_MACHINE_SUBMIT "qsub " +setenv ICE_MACHINE_ACCT P00000000 +setenv ICE_MACHINE_QUEUE "debug" +setenv ICE_MACHINE_TPNODE 128 # tasks per node +setenv ICE_MACHINE_BLDTHRDS 12 +setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/machines/env.nrlssc_gnu b/configuration/scripts/machines/env.nrlssc_gnu old mode 100755 new mode 100644 index f050a61b8..1f8dd4441 --- a/configuration/scripts/machines/env.nrlssc_gnu +++ b/configuration/scripts/machines/env.nrlssc_gnu @@ -1,7 +1,7 @@ #!/bin/csh -f -setenv ICE_MACHINE_MACHNAME NRLSSC -setenv ICE_MACHINE_MACHINFO "NRLSSC" +setenv ICE_MACHINE_MACHNAME nrlssc +setenv ICE_MACHINE_MACHINFO "nrlssc" setenv ICE_MACHINE_ENVNAME gnu setenv ICE_MACHINE_ENVINFO "gnu" setenv ICE_MACHINE_MAKE gmake diff --git a/configuration/scripts/machines/env.onyx_cray b/configuration/scripts/machines/env.onyx_cray old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.onyx_gnu b/configuration/scripts/machines/env.onyx_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.onyx_intel b/configuration/scripts/machines/env.onyx_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.orion_intel b/configuration/scripts/machines/env.orion_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.phase3_intel b/configuration/scripts/machines/env.phase3_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.ppp5_intel b/configuration/scripts/machines/env.ppp5_intel index c4987124a..c6e272692 100644 --- a/configuration/scripts/machines/env.ppp5_intel +++ b/configuration/scripts/machines/env.ppp5_intel @@ -18,6 +18,12 @@ source $ssmuse -d /fs/ssm/main/opt/intelcomp/inteloneapi-2022.1.2/intelcomp+mpi+ # module load -s icc mpi setenv FOR_DUMP_CORE_FILE 1 setenv I_MPI_DEBUG_COREDUMP 1 +# Reproducible collectives +if (${ICE_BASEGEN} != ${ICE_SPVAL} || ${ICE_BASECOM} != ${ICE_SPVAL}) then + setenv I_MPI_CBWR 1 +endif +# Stop being buggy +setenv I_MPI_FABRICS ofi # NetCDF source $ssmuse -d main/opt/hdf5-netcdf4/serial/shared/inteloneapi-2022.1.2/01 @@ -32,6 +38,7 @@ setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/ppp5/cice/runs/ setenv ICE_MACHINE_INPUTDATA 
/space/hall5/sitestore/eccc/cmd/e/sice500/ setenv ICE_MACHINE_BASELINE ~/data/ppp5/cice/baselines/ +setenv ICE_MACHINE_MAXRUNLENGTH 6 setenv ICE_MACHINE_SUBMIT qsub setenv ICE_MACHINE_TPNODE 80 setenv ICE_MACHINE_ACCT unused diff --git a/configuration/scripts/machines/env.ppp6_gnu b/configuration/scripts/machines/env.ppp6_gnu index 69ed6ff8b..94f478774 100644 --- a/configuration/scripts/machines/env.ppp6_gnu +++ b/configuration/scripts/machines/env.ppp6_gnu @@ -8,7 +8,7 @@ endif if ("$inp" != "-nomodules") then # OpenMPI -source /usr/mpi/gcc/openmpi-4.1.2a1/bin/mpivars.csh +setenv PATH "/home/phb001/.local_rhel-8-icelake-64_gcc/bin:$PATH" # OpenMP setenv OMP_STACKSIZE 64M @@ -21,6 +21,7 @@ setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/site6/cice/runs/ setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/ setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baselines/ +setenv ICE_MACHINE_MAXRUNLENGTH 6 setenv ICE_MACHINE_SUBMIT qsub setenv ICE_MACHINE_TPNODE 80 setenv ICE_MACHINE_ACCT unused diff --git a/configuration/scripts/machines/env.ppp6_gnu-impi b/configuration/scripts/machines/env.ppp6_gnu-impi index 461e09a43..61cd46ed9 100644 --- a/configuration/scripts/machines/env.ppp6_gnu-impi +++ b/configuration/scripts/machines/env.ppp6_gnu-impi @@ -18,6 +18,12 @@ setenv I_MPI_F90 gfortran setenv I_MPI_FC gfortran setenv I_MPI_CC gcc setenv I_MPI_CXX g++ +# Reproducible collectives +if (${ICE_BASEGEN} != ${ICE_SPVAL} || ${ICE_BASECOM} != ${ICE_SPVAL}) then + setenv I_MPI_CBWR 1 +endif +# Work around buggy fabric selection +setenv I_MPI_FABRICS ofi # OpenMP setenv OMP_STACKSIZE 64M @@ -30,6 +36,7 @@ setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/site6/cice/runs/ setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/ setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baselines/ +setenv ICE_MACHINE_MAXRUNLENGTH 6 setenv ICE_MACHINE_SUBMIT qsub setenv ICE_MACHINE_TPNODE 80 setenv ICE_MACHINE_ACCT unused diff --git a/configuration/scripts/machines/env.ppp6_intel b/configuration/scripts/machines/env.ppp6_intel index ef9396575..faecb9b5a 100644 --- a/configuration/scripts/machines/env.ppp6_intel +++ b/configuration/scripts/machines/env.ppp6_intel @@ -18,6 +18,12 @@ source $ssmuse -d /fs/ssm/main/opt/intelcomp/inteloneapi-2022.1.2/intelcomp+mpi+ # module load -s icc mpi setenv FOR_DUMP_CORE_FILE 1 setenv I_MPI_DEBUG_COREDUMP 1 +# Reproducible collectives +if (${ICE_BASEGEN} != ${ICE_SPVAL} || ${ICE_BASECOM} != ${ICE_SPVAL}) then + setenv I_MPI_CBWR 1 +endif +# Work around buggy fabric selection +setenv I_MPI_FABRICS ofi # NetCDF source $ssmuse -d main/opt/hdf5-netcdf4/serial/shared/inteloneapi-2022.1.2/01 @@ -32,6 +38,7 @@ setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/ppp6/cice/runs/ setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/ setenv ICE_MACHINE_BASELINE ~/data/ppp6/cice/baselines/ +setenv ICE_MACHINE_MAXRUNLENGTH 6 setenv ICE_MACHINE_SUBMIT qsub setenv ICE_MACHINE_TPNODE 80 setenv ICE_MACHINE_ACCT unused diff --git a/configuration/scripts/machines/env.ppp6_intel19 b/configuration/scripts/machines/env.ppp6_intel19 index 6cdd9a036..554b31e42 100644 --- a/configuration/scripts/machines/env.ppp6_intel19 +++ b/configuration/scripts/machines/env.ppp6_intel19 @@ -17,6 +17,12 @@ setenv FOR_DUMP_CORE_FILE 1 source $ssmuse -d /fs/ssm/hpco/exp/intelpsxe-impi-19.0.3.199 setenv FI_PROVIDER verbs setenv I_MPI_DEBUG_COREDUMP 1 +# Reproducible collectives +if (${ICE_BASEGEN} != ${ICE_SPVAL} || ${ICE_BASECOM} != ${ICE_SPVAL}) then + setenv I_MPI_CBWR 1 
+endif +# Work around buggy fabric selection +setenv I_MPI_FABRICS ofi # NetCDF source $ssmuse -d hpco/exp/hdf5-netcdf4/serial/static/intel-19.0.3.199/02 @@ -31,6 +37,7 @@ setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/site6/cice/runs/ setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/ setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baselines/ +setenv ICE_MACHINE_MAXRUNLENGTH 6 setenv ICE_MACHINE_SUBMIT "qsub" setenv ICE_MACHINE_TPNODE 80 setenv ICE_MACHINE_ACCT P0000000 diff --git a/configuration/scripts/machines/env.robert_intel b/configuration/scripts/machines/env.robert_intel index d3d9c1eae..592d765fe 100644 --- a/configuration/scripts/machines/env.robert_intel +++ b/configuration/scripts/machines/env.robert_intel @@ -18,6 +18,8 @@ source $ssmuse -d /fs/ssm/main/opt/intelcomp/inteloneapi-2022.1.2/intelcomp+mpi+ # module load -s icc mpi setenv FOR_DUMP_CORE_FILE 1 setenv I_MPI_DEBUG_COREDUMP 1 +# Work around buggy fabric selection +setenv I_MPI_FABRICS ofi # NetCDF source $ssmuse -d main/opt/hdf5-netcdf4/serial/shared/inteloneapi-2022.1.2/01 diff --git a/configuration/scripts/machines/env.testmachine_intel b/configuration/scripts/machines/env.testmachine_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.travisCI_gnu b/configuration/scripts/machines/env.travisCI_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.underhill_intel b/configuration/scripts/machines/env.underhill_intel index bc3eec857..dee0ab92b 100644 --- a/configuration/scripts/machines/env.underhill_intel +++ b/configuration/scripts/machines/env.underhill_intel @@ -18,6 +18,8 @@ source $ssmuse -d /fs/ssm/main/opt/intelcomp/inteloneapi-2022.1.2/intelcomp+mpi+ # module load -s icc mpi setenv FOR_DUMP_CORE_FILE 1 setenv I_MPI_DEBUG_COREDUMP 1 +# Work around buggy fabric selection +setenv I_MPI_FABRICS ofi # NetCDF source $ssmuse -d main/opt/hdf5-netcdf4/serial/shared/inteloneapi-2022.1.2/01 diff --git a/configuration/scripts/options/set_nml.bgczm b/configuration/scripts/options/set_nml.bgczm new file mode 100644 index 000000000..2644576cd --- /dev/null +++ b/configuration/scripts/options/set_nml.bgczm @@ -0,0 +1,30 @@ +nilyr = 7 +nslyr = 1 +ncat = 5 +nblyr = 7 +n_aero = 0 +n_zaero = 3 +n_algae = 3 +n_doc = 2 +n_dic = 0 +n_don = 1 +n_fed = 1 +n_fep = 1 +ice_ic = 'none' +tr_brine = .true. +tr_zaero = .true. +z_tracers = .true. +solve_zbgc = .true. +scale_bgc = .true. +bgc_data_type = 'default' +tr_bgc_Nit = .true. +tr_bgc_C = .true. +tr_bgc_Am = .true. +tr_bgc_Sil = .true. +tr_bgc_DMS = .true. +tr_bgc_PON = .true. +tr_bgc_hum = .true. +tr_bgc_DON = .true. +tr_bgc_Fe = .true. +modal_aero = .true. +# dEdd_algae = .true. diff --git a/configuration/scripts/options/set_nml.boxsymw b/configuration/scripts/options/set_nml.boxclosed similarity index 89% rename from configuration/scripts/options/set_nml.boxsymw rename to configuration/scripts/options/set_nml.boxclosed index 4be1f5f95..d55faa302 100644 --- a/configuration/scripts/options/set_nml.boxsymw +++ b/configuration/scripts/options/set_nml.boxclosed @@ -23,11 +23,6 @@ kridge = -1 ktransport = -1 coriolis = 'zero' atmbndy = 'constant' -atm_data_type = 'uniform_west' -ocn_data_type = 'calm' -ice_data_type = 'uniform' -ice_data_conc = 'parabolic' -ice_data_dist = 'uniform' calc_strair = .false. rotate_wind = .false. restore_ice = .false. 
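The box-test option files are refactored from here on: set_nml.boxsymw becomes set_nml.boxclosed with its directional forcing removed, and the forcing reappears in small single-purpose files (set_nml.boxforcee, set_nml.boxforcen, set_nml.boxforcene, next), so a case now composes one domain option with one forcing option rather than using a monolithic boxsym*/boxislands* file. For example, pairing boxclosed with boxforcee (presumably via cice.setup's -s option list) recovers an east-forced closed box; the forcing half contributes only:

    atm_data_type = 'uniform_east'
    ocn_data_type = 'calm'
    ice_data_type = 'uniform'
    ice_data_conc = 'parabolic'
    ice_data_dist = 'uniform'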
diff --git a/configuration/scripts/options/set_nml.boxforcee b/configuration/scripts/options/set_nml.boxforcee new file mode 100644 index 000000000..357fa69d5 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxforcee @@ -0,0 +1,5 @@ +atm_data_type = 'uniform_east' +ocn_data_type = 'calm' +ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' diff --git a/configuration/scripts/options/set_nml.boxforcen b/configuration/scripts/options/set_nml.boxforcen new file mode 100644 index 000000000..87cbaea40 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxforcen @@ -0,0 +1,5 @@ +atm_data_type = 'uniform_north' +ocn_data_type = 'calm' +ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' diff --git a/configuration/scripts/options/set_nml.boxforcene b/configuration/scripts/options/set_nml.boxforcene new file mode 100644 index 000000000..396fabde0 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxforcene @@ -0,0 +1,5 @@ +atm_data_type = 'uniform_northeast' +ocn_data_type = 'calm' +ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' diff --git a/configuration/scripts/options/set_nml.boxislandsn b/configuration/scripts/options/set_nml.boxislandsn deleted file mode 100644 index dd386ce5a..000000000 --- a/configuration/scripts/options/set_nml.boxislandsn +++ /dev/null @@ -1,46 +0,0 @@ -npt = 48 -kmt_type = 'boxislands' -ice_ic = 'internal' -use_leap_years = .false. -histfreq = 'd','x','x','x','x' -grid_type = 'rectangular' -dxrect = 16.e5 -dyrect = 16.e5 -close_boundaries = .false. -ew_boundary_type = 'cyclic' -ns_boundary_type = 'open' -ktherm = -1 -kstrength = 0 -kdyn = 1 -kridge = -1 -ktransport = -1 -coriolis = 'constant' -atmbndy = 'constant' -atm_data_type = 'uniform_north' -ocn_data_type = 'calm' -ice_data_type = 'uniform' -ice_data_conc = 'parabolic' -ice_data_dist = 'uniform' -rotate_wind = .false. -calc_strair = .false. -restore_ice = .false. -tr_iage = .false. -tr_FY = .false. -tr_pond_lvl = .false. -f_aice = 'd' -f_hi = 'd' -f_hs = 'd' -f_Tsfc = 'd' -f_uvel = 'd' -f_vvel = 'd' -f_uatm = 'd' -f_vatm = 'd' -f_uocn = 'd' -f_vocn = 'd' -f_strairx = 'd' -f_strairy = 'd' -f_strocnx = 'd' -f_strocny = 'd' -f_divu = 'd' -f_sig1 = 'd' -f_sig2 = 'd' diff --git a/configuration/scripts/options/set_nml.boxislandsne b/configuration/scripts/options/set_nml.boxislandsne deleted file mode 100644 index 75db55722..000000000 --- a/configuration/scripts/options/set_nml.boxislandsne +++ /dev/null @@ -1,46 +0,0 @@ -npt = 48 -kmt_type = 'boxislands' -ice_ic = 'internal' -use_leap_years = .false. -histfreq = 'd','x','x','x','x' -grid_type = 'rectangular' -dxrect = 16.e5 -dyrect = 16.e5 -close_boundaries = .false. -ew_boundary_type = 'cyclic' -ns_boundary_type = 'open' -ktherm = -1 -kstrength = 0 -kdyn = 1 -kridge = -1 -ktransport = -1 -coriolis = 'constant' -atmbndy = 'constant' -atm_data_type = 'uniform_northeast' -ocn_data_type = 'calm' -ice_data_type = 'uniform' -ice_data_conc = 'parabolic' -ice_data_dist = 'uniform' -rotate_wind = .false. -calc_strair = .false. -restore_ice = .false. -tr_iage = .false. -tr_FY = .false. -tr_pond_lvl = .false. 
-f_aice = 'd' -f_hi = 'd' -f_hs = 'd' -f_Tsfc = 'd' -f_uvel = 'd' -f_vvel = 'd' -f_uatm = 'd' -f_vatm = 'd' -f_uocn = 'd' -f_vocn = 'd' -f_strairx = 'd' -f_strairy = 'd' -f_strocnx = 'd' -f_strocny = 'd' -f_divu = 'd' -f_sig1 = 'd' -f_sig2 = 'd' diff --git a/configuration/scripts/options/set_nml.boxislandse b/configuration/scripts/options/set_nml.boxopen similarity index 84% rename from configuration/scripts/options/set_nml.boxislandse rename to configuration/scripts/options/set_nml.boxopen index 4a8a47705..bdc832f7d 100644 --- a/configuration/scripts/options/set_nml.boxislandse +++ b/configuration/scripts/options/set_nml.boxopen @@ -1,5 +1,4 @@ npt = 48 -kmt_type = 'boxislands' ice_ic = 'internal' use_leap_years = .false. histfreq = 'd','x','x','x','x' @@ -16,11 +15,6 @@ kridge = -1 ktransport = -1 coriolis = 'constant' atmbndy = 'constant' -atm_data_type = 'uniform_east' -ocn_data_type = 'calm' -ice_data_type = 'uniform' -ice_data_conc = 'parabolic' -ice_data_dist = 'uniform' rotate_wind = .false. calc_strair = .false. restore_ice = .false. diff --git a/configuration/scripts/options/set_nml.boxsyme b/configuration/scripts/options/set_nml.boxsyme deleted file mode 100644 index 3ff31d2c4..000000000 --- a/configuration/scripts/options/set_nml.boxsyme +++ /dev/null @@ -1,58 +0,0 @@ -days_per_year = 360 -use_leap_years = .false. -npt_unit = 'd' -npt = 5 -ice_ic = 'internal' -restart_ext = .true. -histfreq = 'd','1','x','x','x' -grid_type = 'rectangular' -kmt_type = 'default' -dxrect = 16.e5 -dyrect = 16.e5 -close_boundaries = .true. -ew_boundary_type = 'open' -ns_boundary_type = 'open' -tr_iage = .false. -tr_FY = .false. -tr_lvl = .false. -tr_pond_lvl = .false. -ktherm = -1 -kstrength = 0 -kdyn = 1 -kridge = -1 -ktransport = -1 -coriolis = 'zero' -atmbndy = 'constant' -atm_data_type = 'uniform_east' -ocn_data_type = 'calm' -ice_data_type = 'uniform' -ice_data_conc = 'parabolic' -ice_data_dist = 'uniform' -calc_strair = .false. -rotate_wind = .false. -restore_ice = .false. -f_aice = 'd1' -f_hi = 'd1' -f_hs = 'd' -f_Tsfc = 'd' -f_uvel = 'd1' -f_vvel = 'd1' -f_uatm = 'd' -f_vatm = 'd' -f_uocn = 'd' -f_vocn = 'd' -f_strairx = 'd1' -f_strairy = 'd1' -f_strtltx = 'd1' -f_strtlty = 'd1' -f_strcorx = 'd1' -f_strcory = 'd1' -f_strocnx = 'd1' -f_strocny = 'd1' -f_strintx = 'd1' -f_strinty = 'd1' -f_taubx = 'd1' -f_tauby = 'd1' -f_divu = 'd1' -f_sig1 = 'd1' -f_sig2 = 'd1' diff --git a/configuration/scripts/options/set_nml.boxsymn b/configuration/scripts/options/set_nml.boxsymn deleted file mode 100644 index 90ef74813..000000000 --- a/configuration/scripts/options/set_nml.boxsymn +++ /dev/null @@ -1,58 +0,0 @@ -days_per_year = 360 -use_leap_years = .false. -npt_unit = 'd' -npt = 5 -ice_ic = 'internal' -restart_ext = .true. -histfreq = 'd','1','x','x','x' -grid_type = 'rectangular' -kmt_type = 'default' -dxrect = 16.e5 -dyrect = 16.e5 -close_boundaries = .true. -ew_boundary_type = 'open' -ns_boundary_type = 'open' -tr_iage = .false. -tr_FY = .false. -tr_lvl = .false. -tr_pond_lvl = .false. -ktherm = -1 -kstrength = 0 -kdyn = 1 -kridge = -1 -ktransport = -1 -coriolis = 'zero' -atmbndy = 'constant' -atm_data_type = 'uniform_north' -ocn_data_type = 'calm' -ice_data_type = 'uniform' -ice_data_conc = 'parabolic' -ice_data_dist = 'uniform' -calc_strair = .false. -rotate_wind = .false. -restore_ice = .false. 
-f_aice = 'd1' -f_hi = 'd1' -f_hs = 'd' -f_Tsfc = 'd' -f_uvel = 'd1' -f_vvel = 'd1' -f_uatm = 'd' -f_vatm = 'd' -f_uocn = 'd' -f_vocn = 'd' -f_strairx = 'd1' -f_strairy = 'd1' -f_strtltx = 'd1' -f_strtlty = 'd1' -f_strcorx = 'd1' -f_strcory = 'd1' -f_strocnx = 'd1' -f_strocny = 'd1' -f_strintx = 'd1' -f_strinty = 'd1' -f_taubx = 'd1' -f_tauby = 'd1' -f_divu = 'd1' -f_sig1 = 'd1' -f_sig2 = 'd1' diff --git a/configuration/scripts/options/set_nml.boxsymne b/configuration/scripts/options/set_nml.boxsymne deleted file mode 100644 index 5c7374976..000000000 --- a/configuration/scripts/options/set_nml.boxsymne +++ /dev/null @@ -1,58 +0,0 @@ -days_per_year = 360 -use_leap_years = .false. -npt_unit = 'd' -npt = 5 -ice_ic = 'internal' -restart_ext = .true. -histfreq = 'd','1','x','x','x' -grid_type = 'rectangular' -kmt_type = 'default' -dxrect = 16.e5 -dyrect = 16.e5 -close_boundaries = .true. -ew_boundary_type = 'open' -ns_boundary_type = 'open' -tr_iage = .false. -tr_FY = .false. -tr_lvl = .false. -tr_pond_lvl = .false. -ktherm = -1 -kstrength = 0 -kdyn = 1 -kridge = -1 -ktransport = -1 -coriolis = 'constant' -atmbndy = 'constant' -atm_data_type = 'uniform_northeast' -ocn_data_type = 'calm' -ice_data_type = 'uniform' -ice_data_conc = 'parabolic' -ice_data_dist = 'uniform' -calc_strair = .false. -rotate_wind = .false. -restore_ice = .false. -f_aice = 'd1' -f_hi = 'd1' -f_hs = 'd' -f_Tsfc = 'd' -f_uvel = 'd1' -f_vvel = 'd1' -f_uatm = 'd' -f_vatm = 'd' -f_uocn = 'd' -f_vocn = 'd' -f_strairx = 'd1' -f_strairy = 'd1' -f_strtltx = 'd1' -f_strtlty = 'd1' -f_strcorx = 'd1' -f_strcory = 'd1' -f_strocnx = 'd1' -f_strocny = 'd1' -f_strintx = 'd1' -f_strinty = 'd1' -f_taubx = 'd1' -f_tauby = 'd1' -f_divu = 'd1' -f_sig1 = 'd1' -f_sig2 = 'd1' diff --git a/configuration/scripts/options/set_nml.boxsyms b/configuration/scripts/options/set_nml.boxsyms deleted file mode 100644 index 7fc0fc5a0..000000000 --- a/configuration/scripts/options/set_nml.boxsyms +++ /dev/null @@ -1,58 +0,0 @@ -days_per_year = 360 -use_leap_years = .false. -npt_unit = 'd' -npt = 5 -ice_ic = 'internal' -restart_ext = .true. -histfreq = 'd','1','x','x','x' -grid_type = 'rectangular' -kmt_type = 'default' -dxrect = 16.e5 -dyrect = 16.e5 -close_boundaries = .true. -ew_boundary_type = 'open' -ns_boundary_type = 'open' -tr_iage = .false. -tr_FY = .false. -tr_lvl = .false. -tr_pond_lvl = .false. -ktherm = -1 -kstrength = 0 -kdyn = 1 -kridge = -1 -ktransport = -1 -coriolis = 'zero' -atmbndy = 'constant' -atm_data_type = 'uniform_south' -ocn_data_type = 'calm' -ice_data_type = 'uniform' -ice_data_conc = 'parabolic' -ice_data_dist = 'uniform' -calc_strair = .false. -rotate_wind = .false. -restore_ice = .false. 
-f_aice = 'd1' -f_hi = 'd1' -f_hs = 'd' -f_Tsfc = 'd' -f_uvel = 'd1' -f_vvel = 'd1' -f_uatm = 'd' -f_vatm = 'd' -f_uocn = 'd' -f_vocn = 'd' -f_strairx = 'd1' -f_strairy = 'd1' -f_strtltx = 'd1' -f_strtlty = 'd1' -f_strcorx = 'd1' -f_strcory = 'd1' -f_strocnx = 'd1' -f_strocny = 'd1' -f_strintx = 'd1' -f_strinty = 'd1' -f_taubx = 'd1' -f_tauby = 'd1' -f_divu = 'd1' -f_sig1 = 'd1' -f_sig2 = 'd1' diff --git a/configuration/scripts/options/set_nml.gbox12 b/configuration/scripts/options/set_nml.gbox12 index 96701522f..b8a07840b 100644 --- a/configuration/scripts/options/set_nml.gbox12 +++ b/configuration/scripts/options/set_nml.gbox12 @@ -2,7 +2,7 @@ ice_ic = 'internal' grid_type = 'rectangular' kmt_type = 'default' atm_data_type = 'box2001' -ocn_data_type = 'calm' +ocn_data_type = 'box2001' ice_data_type = 'box2001' ice_data_conc = 'box2001' ice_data_dist = 'box2001' diff --git a/configuration/scripts/options/set_nml.gbox180 b/configuration/scripts/options/set_nml.gbox180 index 96701522f..b8a07840b 100644 --- a/configuration/scripts/options/set_nml.gbox180 +++ b/configuration/scripts/options/set_nml.gbox180 @@ -2,7 +2,7 @@ ice_ic = 'internal' grid_type = 'rectangular' kmt_type = 'default' atm_data_type = 'box2001' -ocn_data_type = 'calm' +ocn_data_type = 'box2001' ice_data_type = 'box2001' ice_data_conc = 'box2001' ice_data_dist = 'box2001' diff --git a/configuration/scripts/options/set_nml.gbox80 b/configuration/scripts/options/set_nml.gbox80 index 96701522f..b8a07840b 100644 --- a/configuration/scripts/options/set_nml.gbox80 +++ b/configuration/scripts/options/set_nml.gbox80 @@ -2,7 +2,7 @@ ice_ic = 'internal' grid_type = 'rectangular' kmt_type = 'default' atm_data_type = 'box2001' -ocn_data_type = 'calm' +ocn_data_type = 'box2001' ice_data_type = 'box2001' ice_data_conc = 'box2001' ice_data_dist = 'box2001' diff --git a/configuration/scripts/options/set_nml.modal b/configuration/scripts/options/set_nml.modal new file mode 100644 index 000000000..767160a53 --- /dev/null +++ b/configuration/scripts/options/set_nml.modal @@ -0,0 +1,4 @@ +shortwave = 'dEdd' +tr_aero = .true. +modal_aero = .true. + diff --git a/configuration/scripts/options/set_nml.scale1 b/configuration/scripts/options/set_nml.scale1 new file mode 100644 index 000000000..af75bc151 --- /dev/null +++ b/configuration/scripts/options/set_nml.scale1 @@ -0,0 +1,2 @@ +dxscale = 1.d0 +dyscale = 1.d0 diff --git a/configuration/scripts/options/set_nml.vargrid b/configuration/scripts/options/set_nml.vargrid new file mode 100644 index 000000000..29dfcc8bb --- /dev/null +++ b/configuration/scripts/options/set_nml.vargrid @@ -0,0 +1,13 @@ +scale_dxdy = .true. +dxscale = 1.02d0 +dyscale = 1.01d0 +f_dxt = .true. +f_dyt = .true. +f_dxu = .true. +f_dyu = .true. +f_dxe = .true. +f_dye = .true. +f_dxn = .true. +f_dyn = .true. +f_HTN = .true. +f_HTE = .true. 
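Editorial aside on the three option files added above (``modal``, ``scale1``, ``vargrid``): these are plain namelist fragments that **cice.setup --set** merges into the case ``ice_in``. A minimal sketch of where the variable-grid settings land once merged, assuming the ``&grid_nml`` grouping documented in the ``ug_case_settings.rst`` hunk later in this diff and borrowing the 16.e5 base spacing used by the box configurations::

   &grid_nml
     grid_type  = 'rectangular'
     dxrect     = 16.e5      ! base x-direction grid spacing (cm)
     dyrect     = 16.e5      ! base y-direction grid spacing (cm)
     scale_dxdy = .true.     ! turn on variable grid spacing
     dxscale    = 1.02d0     ! multiplicative x scale factor, applied
     dyscale    = 1.01d0     !   symmetrically outward from the domain center
   /

Setting ``dxscale = dyscale = 1.d0`` (the new ``scale1`` option) recovers constant grid spacing.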
diff --git a/configuration/scripts/tests/base_suite.ts b/configuration/scripts/tests/base_suite.ts index d4bbe8031..3007380ab 100644 --- a/configuration/scripts/tests/base_suite.ts +++ b/configuration/scripts/tests/base_suite.ts @@ -43,8 +43,9 @@ smoke gbox128 4x4 boxrestore,short,debug restart gbox80 1x1 box2001 smoke gbox80 1x1 boxslotcyl smoke gbox12 1x1x12x12x1 boxchan,diag1,debug +restart gx3 8x2 modal smoke gx3 8x2 bgcz -smoke gx3 8x2 bgcz,debug +smoke gx3 8x2 bgczm,debug smoke gx3 8x1 bgcskl,debug #smoke gx3 4x1 bgcz,thread smoke_gx3_8x2_bgcz restart gx1 4x2 bgcsklclim,medium diff --git a/configuration/scripts/tests/baseline.script b/configuration/scripts/tests/baseline.script index bb8f50a1f..2700fe71f 100644 --- a/configuration/scripts/tests/baseline.script +++ b/configuration/scripts/tests/baseline.script @@ -29,7 +29,7 @@ if (${ICE_BASECOM} != ${ICE_SPVAL}) then set base_file = `ls -1t ${ICE_BASELINE}/${ICE_BASECOM}/${ICE_TESTNAME}/cice.runlog* | head -1` echo "" - echo "bfb Log Compare Mode:" + echo "Regression Log Compare Mode:" echo "base_file: ${base_file}" echo "test_file: ${test_file}" @@ -58,7 +58,7 @@ if (${ICE_BASECOM} != ${ICE_SPVAL}) then endif echo "" - echo "Regression Compare Mode:" + echo "Regression Restart Compare Mode:" echo "base_dir: ${base_dir}" echo "test_dir: ${test_dir}" @@ -71,7 +71,7 @@ if (${ICE_BASECOM} != ${ICE_SPVAL}) then set base_file = `ls -1t ${ICE_BASELINE}/${ICE_BASECOM}/${ICE_TESTNAME}/cice.runlog* | head -1` echo "" - echo "bfb Log Compare Mode:" + echo "Regression Log Compare Mode:" echo "base_file: ${base_file}" echo "test_file: ${test_file}" diff --git a/configuration/scripts/tests/cice.results.csh b/configuration/scripts/tests/cice.results.csh index 13580c0be..4074115cb 100644 --- a/configuration/scripts/tests/cice.results.csh +++ b/configuration/scripts/tests/cice.results.csh @@ -5,12 +5,13 @@ cat ./results.log | grep -iv "#machinfo" | grep -iv "#envinfo" set pends = `cat ./results.log | grep PEND | wc -l` set misses = `cat ./results.log | grep MISS | wc -l` set failures = `cat ./results.log | grep FAIL | wc -l` -set failbuild = `cat ./results.log | grep FAIL | grep " build " | wc -l` -set failrun = `cat ./results.log | grep FAIL | grep " run " | wc -l` -set failtest = `cat ./results.log | grep FAIL | grep " test " | wc -l` -set failcomp = `cat ./results.log | grep FAIL | grep " compare " | wc -l` -set failbfbc = `cat ./results.log | grep FAIL | grep " bfbcomp " | wc -l` -set failgen = `cat ./results.log | grep FAIL | grep " generate " | wc -l` +set failbuild = `cat ./results.log | grep FAIL | grep " build" | wc -l` +set failrun = `cat ./results.log | grep FAIL | grep " run" | wc -l` +set failtest = `cat ./results.log | grep FAIL | grep " test" | wc -l` +set failcomp = `cat ./results.log | grep FAIL | grep " compare" | wc -l` +set failclog = `cat ./results.log | grep FAIL | grep " complog" | wc -l` +set failbfbc = `cat ./results.log | grep FAIL | grep " bfbcomp" | wc -l` +set failgen = `cat ./results.log | grep FAIL | grep " generate" | wc -l` set success = `cat ./results.log | grep 'PASS\|COPY' | wc -l` set comments = `cat ./results.log | grep "#" | wc -l` set alltotal = `cat ./results.log | wc -l` @@ -29,9 +30,13 @@ echo " #failbuild = $failbuild" >> results.log echo " #failrun = $failrun" >> results.log echo " #failtest = $failtest" >> results.log echo " #failcomp = $failcomp" >> results.log +echo " #failclog = $failclog" >> results.log echo " #failbfbc = $failbfbc" >> results.log echo " #failgen = $failgen" >> results.log +set stamp 
= `date '+%y%m%d-%H%M%S'` +cp results.log results.$stamp.log + echo "" echo "Descriptors:" echo " PASS - successful completion" @@ -49,6 +54,7 @@ echo "$failures of $chkcnt tests FAILED" #echo " $failrun of $failures FAILED run" #echo " $failtest of $failures FAILED test" #echo " $failcomp of $failures FAILED compare" +#echo " $failclog of $failures FAILED complog" #echo " $failbfbc of $failures FAILED bfbcomp" #echo " $failgen of $failures FAILED generate" exit $failures diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts index 10d8ae36c..d9752073f 100644 --- a/configuration/scripts/tests/gridsys_suite.ts +++ b/configuration/scripts/tests/gridsys_suite.ts @@ -8,15 +8,10 @@ smoke gbox80 1x1 box2001 smoke gbox80 2x2 boxwallblock smoke gbox80 1x1 boxslotcyl smoke gbox80 2x4 boxnodyn -#smoke gbox80 2x2 boxsymn,run1day -smoke gbox80 4x2 boxsyme,run1day -#smoke gbox80 4x1 boxsymne,run1day -#smoke gbox80 2x2 boxsymn,run1day,kmtislands -smoke gbox80 4x1 boxsyme,run1day,kmtislands -#smoke gbox80 4x2 boxsymne,run1day,kmtislands -#smoke gbox80 8x1 boxislandsn,run1day -smoke gbox80 4x2 boxislandse,run1day -#smoke gbox80 2x4 boxislandsne,run1day +smoke gbox80 4x2 boxclosed,boxforcee,run1day +smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands +smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day +smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid smoke gx3 1x1x100x116x1 reprosum,run10day smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day @@ -35,15 +30,10 @@ smoke gbox80 1x1 box2001,gridcd smoke gbox80 2x2 boxwallblock,gridcd smoke gbox80 1x1 boxslotcyl,gridcd smoke gbox80 2x4 boxnodyn,gridcd -#smoke gbox80 2x2 boxsymn,run1day,gridcd -smoke gbox80 4x2 boxsyme,run1day,gridcd -#smoke gbox80 4x1 boxsymne,run1day,gridcd -#smoke gbox80 2x2 boxsymn,run1day,kmtislands,gridcd -smoke gbox80 4x1 boxsyme,run1day,kmtislands,gridcd -#smoke gbox80 4x2 boxsymne,run1day,kmtislands,gridcd -#smoke gbox80 8x1 boxislandsn,run1day,gridcd -smoke gbox80 4x2 boxislandse,run1day,gridcd -#smoke gbox80 2x4 boxislandsne,run1day,gridcd +smoke gbox80 4x2 boxclosed,boxforcee,run1day,gridcd +smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands,gridcd +smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day,gridcd +smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid,gridcd smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day @@ -62,15 +52,10 @@ smoke gbox80 1x1 box2001,gridc smoke gbox80 2x2 boxwallblock,gridc smoke gbox80 1x1 boxslotcyl,gridc smoke gbox80 2x4 boxnodyn,gridc -#smoke gbox80 2x2 boxsymn,run1day,gridc -smoke gbox80 4x2 boxsyme,run1day,gridc -#smoke gbox80 4x1 boxsymne,run1day,gridc -#smoke gbox80 2x2 boxsymn,run1day,kmtislands,gridc -smoke gbox80 4x1 boxsyme,run1day,kmtislands,gridc -#smoke gbox80 4x2 boxsymne,run1day,kmtislands,gridc -#smoke gbox80 8x1 boxislandsn,run1day,gridc -smoke gbox80 4x2 boxislandse,run1day,gridc -#smoke gbox80 2x4 boxislandsne,run1day,gridc +smoke gbox80 4x2 boxclosed,boxforcee,run1day,gridc +smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands,gridc +smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day,gridc +smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid,gridc smoke gx3
1x1x100x116x1 reprosum,run10day,gridc smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day @@ -78,4 +63,4 @@ smoke gx3 1x1x5x4x580 reprosum,run10day,gridc smoke smoke gx1 32x1x16x16x32 reprosum,run10day,gridc smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day -smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day +smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day diff --git a/configuration/scripts/tests/io_suite.ts b/configuration/scripts/tests/io_suite.ts index 01ea21ec2..84d064f32 100644 --- a/configuration/scripts/tests/io_suite.ts +++ b/configuration/scripts/tests/io_suite.ts @@ -23,7 +23,7 @@ restart gx3 8x4 alt04,histall,ionetcdf,cdf64 restart gx3 8x4 alt05,histall,ionetcdf,precision8,cdf64 restart gx3 16x2 alt06,histall,ionetcdf restart gx3 16x2 alt07,histall,ionetcdf -restart gx3 30x1 bgcz,histall,ionetcdf +restart gx3 30x1 bgczm,histall,ionetcdf restart gx3 15x2 bgcskl,histall,ionetcdf,precision8 restart gx3 31x1 isotope,histall,ionetcdf,cdf64 restart gx3 14x2 fsd12,histall,ionetcdf,precision8 @@ -37,7 +37,7 @@ restart gx3 8x4 alt04,histall,iopio1,precision8,cdf64 restart gx3 8x4 alt05,histall,iopio1,cdf64 restart gx3 32x1 alt06,histall,iopio1,precision8 restart gx3 32x1 alt07,histall,iopio1,precision8 -restart gx3 16x2 bgcz,histall,iopio1,precision8 +restart gx3 16x2 bgczm,histall,iopio1,precision8 restart gx3 30x1 bgcskl,histall,iopio1 restart gx3 8x4 isotope,histall,iopio1,precision8,cdf64 restart gx3 12x2 fsd12,histall,iopio1,cdf64 @@ -51,7 +51,7 @@ restart gx3 8x4 alt04,histall,iopio2 restart gx3 8x4 alt05,histall,iopio2,precision8,cdf64 restart gx3 16x2 alt06,histall,iopio2,cdf64 restart gx3 16x2 alt07,histall,iopio2,cdf64 -restart gx3 16x2 bgcz,histall,iopio2,cdf64 +restart gx3 16x2 bgczm,histall,iopio2,cdf64 restart gx3 30x1 bgcskl,histall,iopio2,precision8 restart gx3 8x4 isotope,histall,iopio2 restart gx3 12x2 fsd12,histall,iopio2,precision8,cdf64 @@ -65,7 +65,7 @@ restart gx3 8x4 alt04,histall,iopio1p,precision8 restart gx3 8x4 alt05,histall,iopio1p restart gx3 6x4 alt06,histall,iopio1p,precision8,cdf64 restart gx3 6x4 alt07,histall,iopio1p,precision8,cdf64 -restart gx3 16x2 bgcz,histall,iopio1p,precision8,cdf64 +restart gx3 16x2 bgczm,histall,iopio1p,precision8,cdf64 restart gx3 30x1 bgcskl,histall,iopio1p,cdf64 restart gx3 8x4 isotope,histall,iopio1p,precision8 restart gx3 12x2 fsd12,histall,iopio1p @@ -79,7 +79,7 @@ restart gx3 8x4 alt04,histall,iopio2p,cdf64 restart gx3 8x4 alt05,histall,iopio2p,precision8 restart gx3 24x1 alt06,histall,iopio2p restart gx3 24x1 alt07,histall,iopio2p -restart gx3 16x2 bgcz,histall,iopio2p +restart gx3 16x2 bgczm,histall,iopio2p restart gx3 30x1 bgcskl,histall,iopio2p,precision8,cdf64 restart gx3 8x4 isotope,histall,iopio2p,cdf64 restart gx3 12x2 fsd12,histall,iopio2p,precision8 diff --git a/configuration/scripts/tests/nothread_suite.ts b/configuration/scripts/tests/nothread_suite.ts index 93839b000..a262ec135 100644 --- a/configuration/scripts/tests/nothread_suite.ts +++ b/configuration/scripts/tests/nothread_suite.ts @@ -54,7 +54,7 @@ restart gx3 12x1 short #restart gx1 24x1 short smoke 
gx3 16x1 bgcz -smoke gx3 16x1 bgcz,debug +smoke gx3 16x1 bgczm,debug smoke gx3 24x1 bgcskl,debug #tcraig, hangs nodes intermittently on izumi #restart gx1 128x1 bgcsklclim,medium diff --git a/configuration/scripts/tests/omp_suite.ts b/configuration/scripts/tests/omp_suite.ts index 5d5e18376..686fa72db 100644 --- a/configuration/scripts/tests/omp_suite.ts +++ b/configuration/scripts/tests/omp_suite.ts @@ -8,7 +8,7 @@ smoke gx3 4x4 alt04,reprosum,run10day smoke gx3 4x4 alt05,reprosum,run10day smoke gx3 8x2 alt06,reprosum,run10day smoke gx3 7x2 alt07,reprosum,run10day -smoke gx3 8x2 bgcz,reprosum,run10day +smoke gx3 8x2 bgczm,reprosum,run10day smoke gx1 15x2 reprosum,run10day smoke gx1 15x2 seabedprob,reprosum,run10day smoke gx3 14x2 fsd12,reprosum,run10day @@ -34,7 +34,7 @@ smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread smoke_gx3_4x4_alt05_reprosum_run10day smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_alt06_reprosum_run10day smoke gx3 8x1 alt07,reprosum,run10day,cmplogrest,thread smoke_gx3_7x2_alt07_reprosum_run10day -smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_bgcz_reprosum_run10day +smoke gx3 8x1 bgczm,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_bgczm_reprosum_run10day smoke gx1 18x1 reprosum,run10day,cmplogrest,thread smoke_gx1_15x2_reprosum_run10day smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread smoke_gx1_15x2_reprosum_run10day_seabedprob smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread smoke_gx3_14x2_fsd12_reprosum_run10day @@ -62,7 +62,7 @@ smoke gx3 4x4 alt04,reprosum,run10day,gridc smoke gx3 4x4 alt05,reprosum,run10day,gridc smoke gx3 8x2 alt06,reprosum,run10day,gridc smoke gx3 7x2 alt07,reprosum,run10day,gridc -smoke gx3 8x2 bgcz,reprosum,run10day,gridc +smoke gx3 8x2 bgczm,reprosum,run10day,gridc smoke gx1 15x2 reprosum,run10day,gridc smoke gx1 15x2 seabedprob,reprosum,run10day,gridc smoke gx3 14x2 fsd12,reprosum,run10day,gridc @@ -88,7 +88,7 @@ smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread,grid smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_4x4_alt05_gridc_reprosum_run10day smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_alt06_gridc_reprosum_run10day smoke gx3 8x1 alt07,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_7x2_alt07_gridc_reprosum_run10day -smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_bgcz_gridc_reprosum_run10day +smoke gx3 8x1 bgczm,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_bgczm_gridc_reprosum_run10day smoke gx1 18x1 reprosum,run10day,cmplogrest,thread,gridc smoke_gx1_15x2_gridc_reprosum_run10day smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread,gridc smoke_gx1_15x2_gridc_reprosum_run10day_seabedprob smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_14x2_fsd12_gridc_reprosum_run10day @@ -116,7 +116,7 @@ smoke gx3 4x4 alt04,reprosum,run10day,gridcd smoke gx3 4x4 alt05,reprosum,run10day,gridcd smoke gx3 8x2 alt06,reprosum,run10day,gridcd smoke gx3 7x2 alt07,reprosum,run10day,gridcd -smoke gx3 8x2 bgcz,reprosum,run10day,gridcd +smoke gx3 8x2 bgczm,reprosum,run10day,gridcd smoke gx1 15x2 reprosum,run10day,gridcd smoke gx1 15x2 seabedprob,reprosum,run10day,gridcd smoke gx3 14x2 fsd12,reprosum,run10day,gridcd @@ -142,7 +142,7 @@ smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread,grid smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread,gridcd 
smoke_gx3_4x4_alt05_gridcd_reprosum_run10day smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_alt06_gridcd_reprosum_run10day smoke gx3 8x1 alt07,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_7x2_alt07_gridcd_reprosum_run10day -smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_bgcz_gridcd_reprosum_run10day +smoke gx3 8x1 bgczm,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_bgczm_gridcd_reprosum_run10day smoke gx1 18x1 reprosum,run10day,cmplogrest,thread,gridcd smoke_gx1_15x2_gridcd_reprosum_run10day smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx1_15x2_gridcd_reprosum_run10day_seabedprob smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_14x2_fsd12_gridcd_reprosum_run10day diff --git a/configuration/tools/cice4_restart_conversion/convert_restarts.f90 b/configuration/tools/cice4_restart_conversion/convert_restarts.f90 index 30c952510..3717f7d86 100644 --- a/configuration/tools/cice4_restart_conversion/convert_restarts.f90 +++ b/configuration/tools/cice4_restart_conversion/convert_restarts.f90 @@ -39,12 +39,8 @@ program convert_restarts logical (kind=log_kind), parameter :: & oceanmixed_ice = .true., & ! if true, read/write ocean mixed layer fields heat_capacity = .true., & ! if true, ice has nonzero heat capacity -#ifdef UNDEPRECATE_0LAYER - ! if false, use zero-layer thermodynamics -#else ! heat_capacity = .false. (zero-layer thermodynamics) ! has been deprecated in CICE and Icepack -#endif diag = .true. ! write min/max diagnostics for fields ! file names diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index 99679e791..a8b9d08f1 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -9,7 +9,7 @@ This index defines many of the symbols used frequently in the CICE model code. Namelist variables are partly included here but also documented elsewhere, see Section :ref:`tabnamelist`. All quantities in the code are expressed in MKS units (temperatures may take -either Celsius or Kelvin units). +either Celsius or Kelvin units). Deprecated parameters are listed at the end. .. csv-table:: *Alphabetical Index* :header: " ", " ", " " @@ -72,7 +72,7 @@ either Celsius or Kelvin units). "awtvdf", "weighting factor for visible, diffuse albedo", "0.63282" "awtvdr", "weighting factor for visible, direct albedo", "0.00318" "**B**", "", "" - "bfb_flag", "for bit-for-bit reproducible diagnostics", "" + "bfbflag", "for bit-for-bit reproducible diagnostics, and reproducible outputs when using the VP solver", "" "bgc_data_dir", "data directory for bgc", "" "bgc_data_type", "source of silicate, nitrate data", "" "bgc_flux_type", "ice–ocean flux velocity", "" @@ -330,7 +330,7 @@ either Celsius or Kelvin units). "hpmin", "minimum melt pond depth (shortwave)", "0.005 m" "hpondn", "melt pond depth", "m" "hs_min", "minimum thickness for which :math:`T_s` is computed", "1.\ :math:`\times`\ 10\ :math:`^{-4}` m" - "hs0", "snow depth at which transition to ice occurs (dEdd)", "0.03 m" + "hs0", "snow depth at which transition to ice occurs (dEdd)", "m" "hs1", "snow depth of transition to pond ice", "0.03 m" "hs_ssl", "snow surface scattering layer thickness", "0.04 m" "Hstar", "determines mean thickness of ridged ice", "25. m" @@ -354,10 +354,10 @@ either Celsius or Kelvin units). 
"icells", "number of grid cells with specified property (for vectorization)", "" "iceruf", "ice surface roughness at atmosphere interface", "5.\ :math:`\times`\ 10\ :math:`^{-4}` m" "iceruf_ocn", "under-ice roughness (at ocean interface)", "0.03 m" - "iceemask", "ice extent mask (E-cell)", "" - "icenmask", "ice extent mask (N-cell)", "" - "icetmask", "ice extent mask (T-cell)", "" - "iceumask", "ice extent mask (U-cell)", "" + "iceEmask", "dynamics ice extent mask (E-cell)", "" + "iceNmask", "dynamics ice extent mask (N-cell)", "" + "iceTmask", "dynamics ice extent mask (T-cell)", "" + "iceUmask", "dynamics ice extent mask (U-cell)", "" "idate", "the date at the end of the current time step (yyyymmdd)", "" "idate0", "initial date", "" "ierr", "general-use error flag", "" @@ -699,7 +699,6 @@ either Celsius or Kelvin units). "tr_FY", "if true, use first-year area tracer", "" "tr_iage", "if true, use ice age tracer", "" "tr_lvl", "if true, use level ice area and volume tracers", "" - "tr_pond_cesm", "if true, use CESM melt pond scheme", "" "tr_pond_lvl", "if true, use level-ice melt pond scheme", "" "tr_pond_topo", "if true, use topo melt pond scheme", "" "trcr", "ice tracers", "" @@ -772,9 +771,11 @@ either Celsius or Kelvin units). "zref", "reference height for stability", "10. m" "zTrf", "reference height for :math:`T_{ref}`, :math:`Q_{ref}`, :math:`U_{ref}`", "2. m" "zvir", "gas constant (water vapor)/gas constant (air) - 1", "0.606" - -.. - ktherm=0 has been deprecated + "**Deprecated options and parameters**", "", "" "heat_capacity", "if true, use salinity-dependent thermodynamics", "T" "kseaice", "thermal conductivity of ice for zero-layer thermodynamics", "2.0 W/m/deg" "ktherm", "thermodynamic formulation (0 = zero-layer, 1 = :cite:`Bitz99`, 2 = mushy)", "" + "tr_pond_cesm", "if true, use CESM melt pond scheme", "" + +.. + new deprecation comments diff --git a/doc/source/developer_guide/dg_other.rst b/doc/source/developer_guide/dg_other.rst index 8d4f9716e..308c2629c 100644 --- a/doc/source/developer_guide/dg_other.rst +++ b/doc/source/developer_guide/dg_other.rst @@ -30,11 +30,11 @@ To run with an interactive debugger, the following general steps should be taken Reproducible Sums ---------------------- -Reproducible sums in the CICE diagnostics are set with the namelist `bfbflag`. -CICE prognostics results do NOT depend on the global sum implementation. The +Reproducible sums in CICE are set with the namelist `bfbflag`. +CICE prognostics results do NOT depend on the global sum implementation when using the default dynamics solver (EVP) or the EAP solver. With these solvers, the results are bit-for-bit identical with any `bfbflag`. The `bfbflag` only impacts the results and performance of the global diagnostics written to the CICE -log file. For best performance, the off (or lsum8 which is equivalent) setting is recommended. +log file (for all dynamics solvers), as well as the model results when using the VP solver. For best performance, the off setting is recommended. This will probably not produce bit-for-bit results with different decompositions. For bit-for-bit results, the reprosum setting is recommended. This should be only slightly slower than the lsum8 implementation. @@ -50,7 +50,7 @@ The `bfbflag` namelist is a character string with several valid settings. The tradeoff in these settings is the likelihood for bit-for-bit results versus their cost. The `bfbflag` settings are implemented as follows, -off is the default and equivalent to lsum8. 
+off is the default and mostly equivalent to lsum8 (some computations in the VP solver use a different code path when lsum8 is chosen). lsum4 is a local sum computed with single precision (4 byte) data and a scalar mpi allreduce. This is extremely unlikely to be bit-for-bit for different decompositions. diff --git a/doc/source/intro/quickstart.rst b/doc/source/intro/quickstart.rst index 56d19ee70..b68fc32d7 100644 --- a/doc/source/intro/quickstart.rst +++ b/doc/source/intro/quickstart.rst @@ -6,13 +6,14 @@ Quick Start =========== -Download the model from the CICE-Consortium repository, - https://github.com/CICE-Consortium/CICE +Clone the model from the CICE-Consortium repository:: -Instructions for working in github with CICE (and Icepack) can be -found in the `CICE Git and Workflow Guide `_. + git clone --recurse-submodules https://github.com/CICE-Consortium/CICE -You will probably have to download some inputdata, see the `CICE wiki `_ or :ref:`force`. +Instructions for using Git and GitHub with CICE (and Icepack) can be +found in the `CICE Git Workflow Guide `_. + +You will probably have to download some input data, see the `CICE wiki `_ or :ref:`force`. Software requirements are noted in this :ref:`software` section. diff --git a/doc/source/science_guide/sg_dynamics.rst b/doc/source/science_guide/sg_dynamics.rst index 241fa05fe..e6b918538 100644 --- a/doc/source/science_guide/sg_dynamics.rst +++ b/doc/source/science_guide/sg_dynamics.rst @@ -92,8 +92,7 @@ dynamics into CICE is described in detail in The VP solver implementation mostly follows :cite:`Lemieux08`, with FGMRES :cite:`Saad93` as the linear solver and GMRES as the preconditioner. -Note that the VP solver has not yet been tested on the ``tx1`` grid or with -threading enabled. +Note that the VP solver has not yet been tested on the ``tx1`` grid. The EVP, rEVP, EAP and VP approaches are all available with the B grid. However, at the moment, only the EVP and rEVP schemes are possible with the C grid. diff --git a/doc/source/science_guide/sg_fundvars.rst b/doc/source/science_guide/sg_fundvars.rst index e80b68f4a..5b5703266 100644 --- a/doc/source/science_guide/sg_fundvars.rst +++ b/doc/source/science_guide/sg_fundvars.rst @@ -14,7 +14,7 @@ modeling is to describe the evolution of the ice thickness distribution In addition to an ice thickness distribution, CICE includes an optional capability for a floe size distribution. -Ice floe horizontal size may change through vertical and lateral growth and melting of existing floes, freezing of new ice, wave breaking, and welding of floes in freezing conditions. The floe size distribution (FSD) is a probability function that characterizes this variability. The scheme is based on the theoretical framework described in :cite:`Horvat15` for a joint floe size and thickness distribution (FSTD), and was implemented by :cite:`Roach18` and :cite:`Roach19`. The joint floe size distribution is carried as an area-weighted tracer, defined as the fraction of ice belonging to a given thickness category with lateral floe size belong to a given floe size class. This development includes interactions between sea ice and ocean surface waves. Input data on ocean surface wave spectra at a single time is provided for testing, but as with the other CICE datasets, it should not be used for production runs or publications. +Ice floe horizontal size may change through vertical and lateral growth and melting of existing floes, freezing of new ice, wave breaking, and welding of floes in freezing conditions.
The floe size distribution (FSD) is a probability function that characterizes this variability. The scheme is based on the theoretical framework described in :cite:`Horvat15` for a joint floe size and thickness distribution (FSTD), and was implemented by :cite:`Roach18` and :cite:`Roach19`. The joint floe size distribution is carried as an area-weighted tracer, defined as the fraction of ice belonging to a given thickness category with lateral floe size belonging to a given floe size class. This development includes interactions between sea ice and ocean surface waves. Input data on ocean surface wave spectra at a single time is provided for testing, but as with the other CICE datasets, it should not be used for production runs or publications. It is not recommended to use the FSD without ocean surface waves. Additional information about the ITD and joint FSTD for CICE can be found in the `Icepack documentation `_. diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 64264613c..a34c69822 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -247,7 +247,9 @@ grid_nml "", "``pop``", "pop thickness file in cm in ascii format", "" "``close_boundaries``", "logical", "force two gridcell wide land mask on boundaries for rectangular grids", "``.false.``" "``dxrect``", "real", "x-direction grid spacing for rectangular grid in cm", "0.0" + "``dxscale``", "real", "user defined rectgrid x-grid scale factor", "1.0" "``dyrect``", "real", "y-direction grid spacing for rectangular grid in cm", "0.0" + "``dyscale``", "real", "user defined rectgrid y-grid scale factor", "1.0" "``gridcpl_file``", "string", "input file for coupling grid info", "'unknown_gridcpl_file'" "``grid_atm``", "``A``", "atm forcing/coupling grid, all fields on T grid", "``A``" "", "``B``", "atm forcing/coupling grid, thermo fields on T grid, dyn fields on U grid", "" @@ -277,12 +279,15 @@ grid_nml "", "default", "ocean/land mask set internally, land in upper left and lower right of domain, ", "" "", "file", "ocean/land mask setup read from file, see kmt_file", "" "", "wall", "ocean/land mask set at right edge of domain", "" + "``latrefrect``", "real", "lower left corner lat for rectgrid in deg", "71.35" + "``lonrefrect``", "real", "lower left corner lon for rectgrid in deg", "-156.5" "``nblyr``", "integer", "number of zbgc layers", "0" "``ncat``", "integer", "number of ice thickness categories", "0" "``nfsd``", "integer", "number of floe size categories", "1" "``nilyr``", "integer", "number of vertical layers in ice", "0" "``nslyr``", "integer", "number of vertical layers in snow", "0" "``orca_halogrid``", "logical", "use orca haloed grid for data/grid read", "``.false.``" + "``scale_dxdy``", "logical", "apply dxscale, dyscale to rectgrid", "``.false.``" "``use_bathymetry``", "logical", "use read in bathymetry file for seabedstress option", "``.false.``" "", "", "", "" @@ -398,9 +403,6 @@ thermo_nml "``Rac_rapid_mode``", "real", "critical Rayleigh number", "10.0" "", "", "", "" -.. - ktherm=0 has been deprecated - "", "``0``", "zero-layer thermodynamic model", "" ..
_dynamics_nml: @@ -464,7 +466,7 @@ dynamics_nml "``k1``", "real", "1st free parameter for landfast parameterization", "7.5" "``k2``", "real", "2nd free parameter (N/m\ :math:`^3`) for landfast parameterization", "15.0" "``maxits_fgmres``", "integer", "maximum number of restarts for FGMRES solver", "1" - "``maxits_nonlin``", "integer", "maximum number of nonlinear iterations for VP solver", "1000" + "``maxits_nonlin``", "integer", "maximum number of nonlinear iterations for VP solver", "10" "``maxits_pgmres``", "integer", "maximum number of restarts for PGMRES preconditioner", "1" "``monitor_fgmres``", "logical", "write velocity norm at each FGMRES iteration", "``.false.``" "``monitor_nonlin``", "logical", "write velocity norm at each nonlinear iteration", "``.false.``" @@ -477,7 +479,7 @@ dynamics_nml "", "``ident``", "Don't use a preconditioner for the FGMRES solver", "" "", "``pgmres``", "Use GMRES as preconditioner for FGMRES solver", "" "``Pstar``", "real", "constant in Hibler strength formula (N/m\ :math:`^2`)", "2.75e4" - "``reltol_fgmres``", "real", "relative tolerance for FGMRES solver", "1e-2" + "``reltol_fgmres``", "real", "relative tolerance for FGMRES solver", "1e-1" "``reltol_nonlin``", "real", "relative tolerance for nonlinear solver", "1e-8" "``reltol_pgmres``", "real", "relative tolerance for PGMRES preconditioner", "1e-6" "``revised_evp``", "logical", "use revised EVP formulation", "``.false.``" @@ -533,7 +535,7 @@ ponds_nml "``frzpnd``", "``cesm``", "CESM pond refreezing forumulation", "``cesm``" "", "``hlid``", "Stefan refreezing with pond ice thickness", "" "``hp1``", "real", "critical ice lid thickness for topo ponds in m", "0.01" - "``hs0``", "real", "snow depth of transition to bare sea ice in m", "0.03" + "``hs0``", "real", "snow depth of transition to bare sea ice in m", "" "``hs1``", "real", "snow depth of transition to pond ice in m", "0.03" "``pndaspect``", "real", "aspect ratio of pond changes (depth:area)", "0.8" "``rfracmax``", ":math:`0 \le r_{max} \le 1`", "maximum melt water added to ponds", "0.85" diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index a3f7d11bc..a7cc66948 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -312,6 +312,34 @@ Special treatment is also required in the scattering routine, and when computing global sums one of each pair of coincident points has to be excluded. +***************** +Rectangular grids +***************** + +Rectangular test grids can be defined for CICE. They are generated +internally and controlled by several namelist +settings including ``grid_type`` = ``rectangular``, ``nx_global``, ``ny_global``, +``dxrect``, ``dyrect``, ``lonrefrect``, and ``latrefrect``. Forcing and +initial conditions can be set via the namelists ``atm_data_type``, ``ocn_data_type``, +``ice_data_type``, ``ice_data_conc``, ``ice_data_dist``. Variable grid spacing +is also supported with the namelist setting ``scale_dxdy``, which turns on +the option, and ``dxscale`` and ``dyscale``, which set the variable grid scaling +factors. Values of 1.0 produce constant grid spacing. For rectangular grids, +``lonrefrect`` and ``latrefrect`` define the lower left longitude and latitude +values of the grid, ``dxrect`` and ``dyrect`` define the base grid spacing, and +``dxscale`` and ``dyscale`` provide the grid space scaling. The base spacing
The base spacing +is set in the center of the rectangular domain and the scaling is applied symetrically +outward as a multiplicative factor in the x and y directions. + +Several predefined rectangular grids are available in CICE with +**cice.setup --grid** including ``gbox12``, ``gbox80``, ``gbox128``, and ``gbox180`` +where 12, 80, 128, and 180 are the number of gridcells in each direction. +Several predefined options also exist, set with **cice.setup --set**, to +establish varied idealized configurations of box tests including ``box2001``, +``boxadv``, ``boxchan``, ``boxnodyn``, ``boxrestore``, ``boxslotcyl``, and +``boxopen``, ``boxclosed``, and ``boxforcee``. See **cice.setup --help** for a current +list of supported settings. + ************** Vertical Grids ************** @@ -398,16 +426,16 @@ respectively) are useful in conditional statements. In addition to the land masks, two other masks are implemented in *dyn\_prep* in order to reduce the dynamics component’s work on a global -grid. At each time step the logical masks ``icetmask`` and ``iceumask`` are +grid. At each time step the logical masks ``iceTmask`` and ``iceUmask`` are determined from the current ice extent, such that they have the value “true” wherever ice exists. They also include a border of cells around the ice pack for numerical purposes. These masks are used in the dynamics component to prevent unnecessary calculations on grid points where there is no ice. They are not used in the thermodynamics component, so that ice may form in previously ice-free cells. Like the -land masks ``hm`` and ``uvm``, the ice extent masks ``icetmask`` and ``iceumask`` +land masks ``hm`` and ``uvm``, the ice extent masks ``iceTmask`` and ``iceUmask`` are for T-cells and U-cells, respectively. Note that the ice extent masks -``iceemask`` and ``icenmask`` are also defined when using the C or CD grid. +``iceEmask`` and ``iceNmask`` are also defined when using the C or CD grid. Improved parallel performance may result from utilizing halo masks for boundary updates of the full ice state, incremental remapping transport, diff --git a/icepack b/icepack index 3a039e598..18fc1c9b7 160000 --- a/icepack +++ b/icepack @@ -1 +1 @@ -Subproject commit 3a039e598e6395333a278bb1822f03e9bc954ac6 +Subproject commit 18fc1c9b79d81604eafdb1fac4ddd039b78ad390