# generate_new_bdy_geom.py
import sys
import time

import numpy as np
from netCDF4 import Dataset
  5. # Script for generating a NEMO BDY geometry file from a full 2D coordinates file.
  6. # Author: C. Pelletier
  7. # Note: this script just takes a NEMO grid file and "peels it off" (like a potato)
  8. # to get the input BDY file for a geometry.
  9. # All nodes are included, regardless of their mask status.
  10. # I think it's better to include them all, so that if the mask evolves (e.g. by changing
  11. # topography, or changing the minimum bathymetry), the BDY data and geometry will remain
  12. # relevant. The only drawback is that there are superfluous data (corresponding to masked cells),
  13. # but in practise it only means that data files are slightly larger. Since BDY stored are stored as
  14. # small 1D array, I do not consider this drawback a problem.
  15. data_folder="/storepelican/pelletie/nemo_bdy/grids"
  16. coord_file = "coord_paraso.nc"
  17. # must be the full domain and contain glamt, gphit, glamu, gphiu, glamv, gphiv
  18. # taking the "mesh mask" or the "coordinates" file from a run is OK
  19. is_size_bdy = [False, True, False, False]
  20. # boolean array: are the 4 sides of the domain an open bdy? Order: x=1, y=ny, x=nx, y=1 (in FORTRAN indexing convention; W/N/E/S if grid is orientated "normally", i.e. North upwards)
  21. nbr_out = 1
  22. # "rim width" of the BDY. NEMO documentation says it should be 8-9,
  23. # but at UCL et al. in 2018 - 2021, I've only seen 1 used (Huot, Van Achter, Pelletier).
  24. # N. Jourdain (IGE, Grenoble) also uses 1 so...
  25. # I've tried setting 8 over my config (circumpolar 0.25°), it ran a bit but was numerically unstable.
  26. # I am pretty confident about the "geometric" implementation of nbr_out > 1, though...
  27. out_file = "eORCA025_SO_BDY-COORDS_rim%d"%nbr_out+".nc"
  28. extra_dbg = False
  29. # output extra dbg vars in out_file
  30. ### END INPUT
  31. coord_nc = Dataset(data_folder+'/'+coord_file, mode='r')
  32. grids = ['t', 'u', 'v']
  33. lons = {}
  34. lats = {}
  35. for grid in grids:
  36. lons[grid] = coord_nc.variables['glam'+grid][:]
  37. lats[grid] = coord_nc.variables['gphi'+grid][:]
  38. coord_nc.close()
  39. ny, nx = np.shape(lons['t'])
  40. idx_ifort = np.arange(nx) + 1
  41. idx_jfort = np.arange(ny) + 1
  42. idx_ifort_2d = idx_ifort[None,:] * np.ones(shape=[ny], dtype=int)[:,None]
  43. idx_jfort_2d = idx_jfort[:,None] * np.ones(shape=[nx], dtype=int)[None,:]
  44. idx_ifort_flat = np.ndarray.flatten(idx_ifort_2d)
  45. idx_jfort_flat = np.ndarray.flatten(idx_jfort_2d)
# nbrt_3d_bdy[j, i, b]: "rim distance" of T-cell (j, i) to each of the 4 candidate
# open boundaries b (order: x=1, y=ny, x=nx, y=1 — cf. is_size_bdy above).
# Distances start at 1 one cell inside the domain edge; closed boundaries are
# fully masked, and the outermost ring is masked afterwards.
# NOTE(review): np.ndarray(...) leaves the data buffer uninitialized — entries
# never written below are only meaningful through the mask.
nbrt_3d_bdy = np.ma.array( data = np.ndarray(shape=[ny,nx,4], dtype=int), mask = False)
if(is_size_bdy[0]):
    # West (x=1): distance = i - 1 (FORTRAN i), set for i >= 2.
    nbrt_3d_bdy[:,1:,0] = (idx_ifort[1:])[None,:] * (np.ones(shape=[ny], dtype=int))[:,None] - 1
else:
    nbrt_3d_bdy.mask[:,:,0] = True
if(is_size_bdy[1]):
    # North (y=ny): distance = ny - j, set for j <= ny-1.
    nbrt_3d_bdy[:-1,:,1] = ny - (idx_jfort[:-1])[:,None] * (np.ones(shape=[nx], dtype=int))[None,:]
else:
    nbrt_3d_bdy.mask[:,:,1] = True
if(is_size_bdy[2]):
    # East (x=nx): distance = nx - i, set for i <= nx-1.
    nbrt_3d_bdy[:,:-1,2] = nx - (idx_ifort[:-1])[None,:] * (np.ones(shape=[ny], dtype=int))[:,None]
else:
    nbrt_3d_bdy.mask[:,:,2] = True
if(is_size_bdy[3]):
    # South (y=1): distance = j - 1, set for j >= 2.
    nbrt_3d_bdy[1:,:,3] = (idx_jfort[1:])[:,None] * (np.ones(shape=[nx], dtype=int))[None,:] - 1
else:
    nbrt_3d_bdy.mask[:,:,3] = True
# For every cell, index of the closest open boundary (argmin over the 4 slots);
# masked where no boundary is open at all.
which_bdy = np.ma.array(data = np.argmin(nbrt_3d_bdy, axis=-1), \
                        mask = np.all(nbrt_3d_bdy.mask, axis=-1))
# Mask the outermost ring of the domain: it lies on the boundary itself,
# not inside the rim.
nbrt_3d_bdy.mask[0,:,:] = True
nbrt_3d_bdy.mask[-1,:,:] = True
nbrt_3d_bdy.mask[:,0,:] = True
nbrt_3d_bdy.mask[:,-1,:] = True
  69. nbr_flats = {}
  70. nbrs = {}
  71. # prendre j*nx*4 + i*4 + which_bdy[grid]
  72. idx_take = np.repeat(np.arange(ny), nx) * nx * 4 + np.tile(np.arange(nx), ny) * 4 + np.ndarray.flatten(which_bdy)
  73. nbr_flats['t'] = (np.ndarray.flatten(nbrt_3d_bdy))[idx_take]
  74. nbrs['t'] = np.ma.array( data = np.reshape( nbr_flats['t'], (ny,nx)), \
  75. mask = np.all(nbrt_3d_bdy.mask))
# U-grid rim distances: a U-point sits between T-cells (j, i) and (j, i+1);
# take the smaller of the two T distances, masked if either neighbour is masked.
nbrs['u'] = np.ma.array(data = np.ndarray(shape=[ny, nx], dtype=int), \
                        mask = False)
# Last column has no eastern T neighbour.
nbrs['u'][:,-1].mask = True
nbrs['u'][:,:-1] = np.minimum(nbrs['t'][:,:-1], nbrs['t'][:,1:])
nbrs['u'].mask[:,:-1] = np.logical_or(nbrs['t'].mask[:,:-1], nbrs['t'].mask[:,1:])
# V-grid: same construction between T-cells (j, i) and (j+1, i).
nbrs['v'] = np.ma.array(data = np.ndarray(shape=[ny, nx], dtype=int), \
                        mask = False)
# Last row has no northern T neighbour.
nbrs['v'][-1,:].mask = True
nbrs['v'][:-1,:] = np.minimum(nbrs['t'][:-1,:], nbrs['t'][1:,:])
nbrs['v'].mask[:-1,:] = np.logical_or(nbrs['t'].mask[:-1,:], nbrs['t'].mask[1:,:])
  86. out_nc = Dataset(data_folder+'/'+out_file, mode='w')
  87. if(extra_dbg):
  88. out_nc.createDimension('y', ny)
  89. out_nc.createDimension('x', nx)
  90. out_nc.createDimension('bdy', 4)
  91. tmp = out_nc.createVariable('which_'+'t', datatype='i2', dimensions=['y', 'x'], fill_value = -10)
  92. tmp[:] = which_bdy
  93. nbr_3d = out_nc.createVariable("nbr"+'t'+"_3d", datatype='i4', dimensions=['y', 'x', 'bdy'], fill_value = -10)
  94. nbr_3d[:] = nbrt_3d_bdy
  95. for grid in grids:
  96. tmp = out_nc.createVariable('nbr'+grid+'_2d', datatype='i4', dimensions=['y', 'x'], fill_value = -10)
  97. tmp[:] = nbrs[grid]
  98. n_xbs = {}
  99. out_nc.createDimension('yb', 1)
  100. for grid in grids:
  101. cnt = 0
  102. iidx_tmp = []
  103. jidx_tmp = []
  104. lat_tmp = []
  105. lon_tmp = []
  106. nbr_tmp = []
  107. curr_nbrflat = np.ndarray.flatten(nbrs[grid])
  108. for l in range(1,nbr_out+1):
  109. print(l)
  110. idx_take = np.nonzero(curr_nbrflat == l)[0].astype(int)
  111. idx_ifort_take = (np.ndarray.flatten(idx_ifort_2d))[idx_take]
  112. idx_jfort_take = (np.ndarray.flatten(idx_jfort_2d))[idx_take]
  113. sort_order = []
  114. n_take = np.size(idx_take)
  115. already_in = np.zeros(shape=[n_take], dtype=bool)
  116. idx_sorted = []
  117. check = np.logical_and(np.logical_not(already_in), \
  118. idx_ifort_take == np.amin(idx_ifort_take))
  119. if(np.any(check)):
  120. where_line = (np.nonzero(check))[0]
  121. tmp_idx = np.argsort(idx_jfort_take[where_line])
  122. idx_sorted = np.concatenate(( idx_sorted, (idx_take[where_line])[tmp_idx] ))
  123. already_in[where_line] = True
  124. check = np.logical_and(np.logical_not(already_in), \
  125. idx_jfort_take == np.amax(idx_jfort_take))
  126. if(np.any(check)):
  127. where_line = (np.nonzero(check))[0]
  128. tmp_idx = np.argsort(idx_ifort_take[where_line])
  129. idx_sorted = np.concatenate(( idx_sorted, (idx_take[where_line])[tmp_idx] ))
  130. already_in[where_line] = True
  131. check = np.logical_and(np.logical_not(already_in), \
  132. idx_ifort_take == np.amax(idx_ifort_take))
  133. if(np.any(check)):
  134. where_line = (np.nonzero(check))[0]
  135. tmp_idx = (np.argsort(idx_jfort_take[where_line]))[::-1]
  136. idx_sorted = np.concatenate(( idx_sorted, (idx_take[where_line])[tmp_idx] ))
  137. already_in[where_line] = True
  138. check = np.logical_and(np.logical_not(already_in), \
  139. idx_jfort_take == np.amin(idx_jfort_take))
  140. if(np.any(check)):
  141. where_line = (np.nonzero(check))[0]
  142. tmp_idx = (np.argsort(idx_ifort_take[where_line]))[::-1]
  143. idx_sorted = np.concatenate(( idx_sorted, (idx_take[where_line])[tmp_idx] ))
  144. already_in[where_line] = True
  145. check = np.logical_not(already_in)
  146. if(np.count_nonzero(check) > 0):
  147. print('error in reordering')
  148. sys.exit()
  149. idx_sorted = idx_sorted.astype(int)
  150. iidx_tmp = np.concatenate(( iidx_tmp, idx_ifort_flat[idx_sorted] ))
  151. jidx_tmp = np.concatenate(( jidx_tmp, idx_jfort_flat[idx_sorted] ))
  152. lon_tmp = np.concatenate(( lon_tmp, (np.ndarray.flatten(lons[grid]))[idx_sorted]))
  153. lat_tmp = np.concatenate(( lat_tmp, (np.ndarray.flatten(lats[grid]))[idx_sorted]))
  154. curr_size = np.size(idx_take)
  155. nbr_tmp = np.concatenate(( nbr_tmp, np.repeat(l, curr_size) ))
  156. cnt+=curr_size
  157. out_nc.createDimension('xb'+grid.upper(), cnt)
  158. tmp = out_nc.createVariable(varname='nbi'+grid, datatype='i4', dimensions=['yb', 'xb'+grid.upper()])
  159. tmp[:] = iidx_tmp
  160. tmp = out_nc.createVariable(varname='nbj'+grid, datatype='i4', dimensions=['yb', 'xb'+grid.upper()])
  161. tmp[:] = jidx_tmp
  162. tmp = out_nc.createVariable(varname='nbr'+grid, datatype='i4', dimensions=['yb', 'xb'+grid.upper()])
  163. tmp[:] = nbr_tmp
  164. tmp = out_nc.createVariable(varname='glam'+grid, datatype='f8', dimensions=['yb', 'xb'+grid.upper()])
  165. tmp[:] = lon_tmp
  166. tmp = out_nc.createVariable(varname='gphi'+grid, datatype='f8', dimensions=['yb', 'xb'+grid.upper()])
  167. tmp[:] = lat_tmp
  168. out_nc.close()