François Massonnet 1 year ago
commit
685af59a93
100 changed files with 22079 additions and 0 deletions
  1. 6 0
      diagnostics/README
  2. 184 0
      diagnostics/plot_maps.R
  3. 212 0
      diagnostics/plot_timeseries.R
  4. 1982 0
      eigentechniques/CFU_Rfunc.txt
  5. 71 0
      eigentechniques/unit_testing/test_EOF.R
  6. 31 0
      interpolation/README
  7. 94 0
      interpolation/extrap.py
  8. 99 0
      interpolation/extrap2d.py
  9. 119 0
      interpolation/interp
  10. 192 0
      interpolation/interp_vert.py
  11. 17 0
      interpolation/rotateUVorca_sources/README
  12. 189 0
      interpolation/rotateUVorca_sources/dom_oce.f90
  13. 287 0
      interpolation/rotateUVorca_sources/geo2ocean.f90
  14. 22 0
      interpolation/rotateUVorca_sources/handle_err.f90
  15. 137 0
      interpolation/rotateUVorca_sources/lbclnk.f90
  16. 189 0
      interpolation/rotateUVorca_sources/lbcnfd.f90
  17. 61 0
      interpolation/rotateUVorca_sources/makefile
  18. 49 0
      interpolation/rotateUVorca_sources/par_kind.f90
  19. 95 0
      interpolation/rotateUVorca_sources/phycst.f90
  20. 249 0
      interpolation/rotateUVorca_sources/rotateUVorca.f90
  21. 65 0
      interpolation/scrip_sources/constants.f
  22. 26 0
      interpolation/scrip_sources/copyright
  23. 831 0
      interpolation/scrip_sources/grids.f
  24. 797 0
      interpolation/scrip_sources/grids_one.f
  25. 154 0
      interpolation/scrip_sources/iounits.f
  26. 53 0
      interpolation/scrip_sources/kinds_mod.f
  27. 146 0
      interpolation/scrip_sources/makefile
  28. 79 0
      interpolation/scrip_sources/netcdf.f
  29. 276 0
      interpolation/scrip_sources/read_input_file.f
  30. 272 0
      interpolation/scrip_sources/read_input_file.save.fr
  31. 163 0
      interpolation/scrip_sources/remap.f
  32. 844 0
      interpolation/scrip_sources/remap_bicubic.f
  33. 781 0
      interpolation/scrip_sources/remap_bilinear.f
  34. 2197 0
      interpolation/scrip_sources/remap_conserv.f
  35. 499 0
      interpolation/scrip_sources/remap_distwgt.f
  36. 1027 0
      interpolation/scrip_sources/remap_read.f
  37. 302 0
      interpolation/scrip_sources/remap_vars.f
  38. 1763 0
      interpolation/scrip_sources/remap_write.f
  39. 214 0
      interpolation/scrip_sources/scrip.f
  40. 981 0
      interpolation/scrip_sources/scrip_test.f
  41. 612 0
      interpolation/scrip_sources/scrip_use.f
  42. 682 0
      interpolation/scrip_sources/scrip_use_extrap.f
  43. 343 0
      interpolation/scrip_sources/timers.f
  44. 69 0
      interpolation/vertextrap.py
  45. 20 0
      prep_nem_forcings/DFS5.2/README
  46. 188 0
      prep_nem_forcings/DFS5.2/mkpert.R
  47. 66 0
      prep_nem_forcings/DFS5.2/plot.py
  48. 165 0
      prep_nem_forcings/DFS5.2/postprocess.bash
  49. 151 0
      prep_nem_forcings/DFS5.2/preprocess.bash
  50. 7 0
      prep_nem_forcings/DFS5.2/script.bash
  51. 60 0
      prep_nem_forcings/eraint/orca1/README
  52. 55 0
      prep_nem_forcings/eraint/orca1/addpert_eraint.sh
  53. 51 0
      prep_nem_forcings/eraint/orca1/comp_q2.sh
  54. 40 0
      prep_nem_forcings/eraint/orca1/download_u10v10.py
  55. 61 0
      prep_nem_forcings/eraint/orca1/interp_eraint.sh
  56. 33 0
      prep_nem_forcings/eraint/orca1/prep_eraint_prlr_prsn_forcings.sh
  57. 33 0
      prep_nem_forcings/eraint/orca1/prep_eraint_qsw_qlw_forcings.sh
  58. 26 0
      prep_nem_forcings/eraint/orca1/prep_eraint_t2_q2_forcings.sh
  59. 63 0
      prep_nem_forcings/eraint/orca1/prep_eraint_u10_v10_forcings.sh
  60. 58 0
      prep_nem_forcings/eraint/orca1/rotate_perturb.sh
  61. 329 0
      prep_nem_forcings/eraint/orca1/wndpert.sh
  62. 20 0
      prep_restarts/README
  63. 51 0
      prep_restarts/build_Tofill.bash
  64. 27 0
      prep_restarts/comp_rhop/check_stat.f90
  65. 143 0
      prep_restarts/comp_rhop/comp_rhop.f90
  66. 185 0
      prep_restarts/comp_rhop/eosbn2.f90
  67. 25 0
      prep_restarts/comp_rhop/makefile
  68. 113 0
      prep_restarts/dispatch.bash
  69. 49 0
      prep_restarts/example/loop_outputs_oras4.sh
  70. 56 0
      prep_restarts/example/loop_restarts_oras4.sh
  71. 78 0
      prep_restarts/gener_perturbation.bash
  72. 76 0
      prep_restarts/library/library.bash
  73. 39 0
      prep_restarts/script_clim_restart.sh
  74. 72 0
      prep_restarts/script_interp_vert_ocean_outputs.bash
  75. 135 0
      prep_restarts/script_interp_vert_ocean_restart.bash
  76. 120 0
      regression/PoissonReg.R
  77. 174 0
      regression/PoissonRegwTrend.R
  78. 19 0
      regression/README
  79. 128 0
      regression/filteroutreg.R
  80. 123 0
      regression/multipleregress.R
  81. 128 0
      regression/regressedts.R
  82. 82 0
      transfer/chkexpjlt.sh
  83. 63 0
      transfer/chkexpout.sh
  84. 161 0
      transfer/data_transfer.sh
  85. 32 0
      transfer/download_atm_nudging.sh
  86. 32 0
      transfer/download_atm_perturb.sh
  87. 265 0
      transfer/download_hsm
  88. 33 0
      transfer/download_ic.sh
  89. 66 0
      transfer/exp_job_info.sh
  90. 148 0
      transfer/migrate_exp.sh
  91. 59 0
      transfer/plot_job_info.py
  92. 98 0
      transfer/prepare_atm_ic.cmd
  93. 53 0
      transfer/prepare_atm_ic_i05e.sh
  94. 55 0
      transfer/prepare_atm_ic_i05f.sh
  95. 54 0
      transfer/prepare_atm_ic_i05i.sh
  96. 53 0
      transfer/prepare_atm_ic_i05j.sh
  97. 62 0
      transfer/prepare_atm_nudging.cmd
  98. 51 0
      transfer/prepare_atm_perturb.cmd
  99. 39 0
      transfer/prepare_oce_ic.sh
  100. 45 0
      transfer/update_ic.sh

+ 6 - 0
diagnostics/README

@@ -0,0 +1,6 @@
+In this directory, you can find two examples of scripts that 
+compute various prediction scores and plot them based on the 
+s2dverification package.
+
+More information can be found here:
+http://ic3.cat/wikicfu/index.php/Tools/s2dverification

+ 184 - 0
diagnostics/plot_maps.R

@@ -0,0 +1,184 @@
+#!/usr/bin/env Rscript
+
+library(s2dverification)
+args <- commandArgs(TRUE)
+
+comptrend <- T    # Trend as a function of the start date for each leadtime
+compcor <- T      # Correlation Coefficient
+comprms <- T      # Root Mean Square Error
+comprmsss <- T    # Root Mean Square Skill Score
+compratrms <- T   # Ratio RMSE expid1 / expid2
+
+var <- args[1]    # tos/tas/prlr
+season <- args[2] # Year/DJF/MAM/JJA/SON
+ly <- args[3]     # ly1/ly2-5/ly6-9 for Forecast year 1 / years 2 to 5 / years 
+                  # 6 to 9
+nltimemax <- 124  # number of leadtimes max in the experiments (in months)
+lstexpid <- c('i00k','b02p') # list of ids
+mon0 <- 11        # initial month
+year0 <- 1960     # first start date
+yearf <- 2005     # last start date
+intsdate <- 5     # interval between start dates
+
+obs <- switch(var, 'tas' = 'GHCNERSSTGISS', 'tos' = 'ERSST', 'prlr' = 'GPCC')
+syears <- seq(year0, yearf, intsdate)
+imon2 <- paste("0", as.character(mon0), sep = "")
+sdates <- paste(as.character(syears), substr(imon2, nchar(imon2) - 1, 
+                nchar(imon2)), '01', sep = "")
+
+savename <- paste(var, '_', season, '_', ly, sep = '')
+for (expid in lstexpid ) {
+  savename <- paste(savename, '_', expid, sep = '')
+}
+savename <- paste(savename, '.sav', sep = '')
+
+if (file.exists(savename)) {
+  load(savename)
+} else {
+  if (is.na(match('b02p', lstexpid)) == TRUE) {
+    lstload <- lstexpid
+  } else {
+    lstload <- lstexpid[-match('b02p', lstexpid)]
+  }
+  toto <- Load(var, lstload, obs,sdates, nleadtime = nltimemax,
+               leadtimemin = switch(ly, 'ly1' = 1, 'ly2-5' = 13, 'ly6-9' = 61),
+               leadtimemax = switch(ly, 'ly1' = switch(season, 'SON' = 13, 12),
+               'ly2-5' = switch(season, 'SON' = 61, 60), 
+               'ly6-9' = switch(season, 'SON' = 109, 108)), output = 'lonlat')
+  if (is.na(match('b02p', lstexpid)) == FALSE) {
+    toto1bis <- Load(var, 'b02p', obs = NULL, '19501101', output = 'lonlat')
+    toto1ter <- Histo2Hindcast(toto1bis$mod, '19501101', paste(as.character(
+                syears + switch(ly, 'ly1' = 0, 'ly2-5' = 1, 'ly6-9' = 5)),
+                substr(imon2, nchar(imon2) - 1, nchar(imon2)), '01', sep = ""),
+                nleadtimesout = switch(ly, 'ly1' = switch(season, 'SON' = 13,
+                12), switch(season, 'SON' = 49, 48)))
+    toto1beta <- array(dim = c(dim(toto$mod)[1] + dim(toto1ter)[1], 
+                 max(dim(toto$mod)[2], dim(toto1ter)[2]), dim(toto$mod)[3:6]))
+    toto1beta[1:dim(toto$mod)[1], 1:dim(toto$mod)[2], , , , ] <- toto$mod
+    toto1beta[(dim(toto$mod)[1] + 1):(dim(toto$mod)[1] + dim(toto1ter)[1]),
+              1:dim(toto1ter)[2], , , , ] <- toto1ter
+    toto$mod <- toto1beta
+    lstexpid <- c(lstload, 'b02p')
+  }
+  toto_exp <- InsertDim(Mean1Dim(Season(toto$mod, 4, mon0, switch(season,
+                        'Year' = mon0, 'DJF' = 12, 'MAM' = 3, 'JJA' = 6,
+                        'SON' = 9), switch(season, 
+                        'Year' = (mon0 + 12 - 2) %% 12 + 1, 'DJF' = 2, 
+                        'MAM' = 5, 'JJA' = 8, 'SON' = 11)), 4), 4, 1)
+  toto_obs <- InsertDim(Mean1Dim(Season(toto$obs, 4, mon0, switch(season,
+                        'Year' = mon0, 'DJF' = 12, 'MAM' = 3, 'JJA' = 6,
+                        'SON' = 9), switch(season, 
+                        'Year' = (mon0 + 12 - 2) %% 12 + 1, 'DJF' = 2, 
+                        'MAM' = 5, 'JJA' = 8, 'SON' = 11)), 4), 4, 1)
+  if (var == 'prlr') {
+    toto$mod <- toto$mod * 1000 * 3600 * 24
+    toto$obs <- toto$obs * 1000 * 3600 * 24
+  }
+  toto=list(mod=toto_exp,obs=toto_obs,lat=toto$lat,lon=toto$lon)
+  save(toto,file=savename)
+}
+
+clims <- Clim(toto$mod, toto$obs)
+ano_exp <- Ano(toto$mod, clims$clim_exp)
+ano_obs <- Ano(toto$obs, clims$clim_obs)
+
+if (compcor) {
+  cor <- Corr(Mean1Dim(ano_exp, 2), Mean1Dim(ano_obs, 2), 1, 2)
+  cols <- c("dodgerblue4", "dodgerblue1", "forestgreen", "yellowgreen",
+            "white", "white", "yellow", "orange", "red", "saddlebrown")
+  lims <- seq(-1, 1, 0.2)
+  for (jexp in 1:length(lstexpid)) {
+    flag <- array(F, dim = dim(cor[jexp, 1, 2, 1, , ]))
+    flag[which(cor[jexp, 1, 2, 1, , ] > cor[jexp, 1, 4, 1, , ])] <- T
+    postscript(paste('CorCoef2d_', var, '_', lstexpid[jexp], '_', season, '_',
+               ly, '.eps', sep = ''))
+    PlotEquiMap(cor[jexp, 1, 2, 1, , ], toto$lon, toto$lat, 
+                toptitle = paste('Correlation Coefficient', lstexpid[jexp],
+                switch(season, 'Year' = 'annual', season), switch(var, 
+                'tas' = 'near surface temperature', 
+                'tos' = 'sea surface temperature', 'prlr' = 'precipitation'),
+                switch(var, 'tas' = 'GHCNv2+ERSSTv3b+GISTEMP', 
+                'tos' = 'ERSSTv3b', 'prlr' = 'GPCC'), switch(ly,  # fix: 'tos' key (was duplicate 'tas')
+                'ly1' = 'Year1', 'ly2-5' = 'Year2-5', 'ly6-9' = 'Year6-9')),
+                sizetit = 0.8, brks = lims, cols = cols, colNA = switch(var,
+                'prlr' = 'white', grey(0.4)), filled.continents = switch(var,
+                'tas' = F, 'tos' = T, 'prlr' = F), dots = t(flag), intylat = 45)
+    dev.off()
+  }
+}
+
+if (comprms) {
+  rmse <- RMS(Mean1Dim(ano_exp, 2), Mean1Dim(ano_obs, 2), 1, 2)
+  cols <- rev(c("dodgerblue4", "dodgerblue1", "forestgreen", "yellowgreen",
+                "white", "white", "yellow", "orange", "red", "saddlebrown"))
+  lims <- c(0, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.7, 1, 1.5, 2)
+  lims <- switch(var, 'tas' = lims * 2, 'tos' = lims * 2, lims)
+  rmse[which(rmse > max(lims))] <- max(lims)
+  for (jexp in 1:length(lstexpid)) {
+    postscript(paste('RMSE2d_', var, '_', lstexpid[jexp], '_', season, '_', ly,
+               '.eps', sep = ''))
+    PlotEquiMap(rmse[jexp, 1, 2, 1, , ], toto$lon, toto$lat, 
+                toptitle = paste('RMSE', lstexpid[jexp], switch(season, 
+                'Year' = 'annual', season), switch(var, 
+                'tas' = 'near surface temperature', 
+                'tos' = 'sea surface temperature', 'prlr' = 'precipitation'),
+                switch(var, 'tas' = 'GHCNv2+ERSSTv3b+GISTEMP', 
+                'tos' = 'ERSSTv3b', 'prlr' = 'GPCC'), switch(ly,  # fix: 'tos' key (was duplicate 'tas')
+                'ly1' = 'Year1', 'ly2-5' = 'Year2-5', 'ly6-9' = 'Year6-9')),
+                sizetit = 0.8, brks = lims, cols = cols, colNA = switch(var, 
+                'prlr' = 'white', grey(0.4)), filled.continents = switch(var,
+                'tas' = F, 'tos' = T, 'prlr' = F), intylat = 45)
+    dev.off()
+  }
+}
+
+if (comprmsss) {
+  rmsss <- RMSSS(Mean1Dim(ano_exp, 2), Mean1Dim(ano_obs, 2), 1, 2)
+  cols <- c("dodgerblue4", "dodgerblue1", "forestgreen", "yellowgreen", 
+            "white", "white", "yellow", "orange", "red", "saddlebrown")
+  lims <- seq(-1, 1, 0.2)
+  for (jexp in 1:length(lstexpid)) {
+    flag <- array(F, dim = dim(rmsss[jexp, 1, 2, 1, , ]))
+    flag[which(rmsss[jexp, 1, 2, 1, , ] < 0.05)] <- T
+    rmsss[which(-1 > rmsss)] = -1
+    postscript(paste('RMSSS2d_', var, '_', lstexpid[jexp], '_', season, '_', ly,
+               '.eps', sep = ''))
+    PlotEquiMap(rmsss[jexp, 1, 1, 1, , ], toto$lon, toto$lat, 
+                toptitle = paste('RMSSS', lstexpid[jexp], switch(season,
+                'Year' = 'annual', season), switch(var, 
+                'tas' = 'near surface temperature', 
+                'tos' = 'sea surface temperature', 'prlr' = 'precipitation'), 
+                switch(var, 'tas' = 'GHCNv2+ERSSTv3b+GISTEMP', 
+                'tos' = 'ERSSTv3b', 'prlr' = 'GPCC'), switch(ly,  # fix: 'tos' key (was duplicate 'tas')
+                'ly1' = 'Year1', 'ly2-5' = 'Year2-5', 'ly6-9' = 'Year6-9')),
+                sizetit = 0.8, brks = lims, cols = cols, colNA = switch(var,
+                'prlr' = 'white', grey(0.4)), filled.continents = switch(var,
+                'tas' = F, 'tos' = T, 'prlr' = F), dots = t(flag), intylat = 45)
+    dev.off()
+  }
+}
+
+if (compratrms) { 
+  ratrms <- RatioRMS(Mean1Dim(ano_exp, 2)[1, , 1, , ], 
+                     Mean1Dim(ano_exp, 2)[2, , 1, , ], 
+                     Mean1Dim(ano_obs, 2)[1, , 1, , ], 1)
+  cols <- rev(c("dodgerblue4", "dodgerblue1", "forestgreen", "yellowgreen",
+                "white", "white", "yellow", "orange", "red", "saddlebrown"))
+  lims <- c(0, 0.5, 0.8, 0.9, 0.95, 1, 1.05, 1.1, 1.2, 2, 6)
+  flag <- array(F, dim = dim(ratrms[1, , ]))
+  flag[which(ratrms[2, , ] < 0.05)] <- T
+  postscript(paste('Rati_RMSE2d_', var, '_', lstexpid[1], '_', lstexpid[2], 
+                   '_', season, '_', ly, '.eps', sep = ''))
+  PlotEquiMap(ratrms[1, , ], toto$lon, toto$lat, toptitle = paste('RMSE',
+              lstexpid[1], '/ RMSE', lstexpid[2], switch(season, 
+              'Year' = 'annual', season), switch(var, 
+              'tas' = 'near surface temperature', 
+              'tos' = 'sea surface temperature', 'prlr' = 'precipitation'),
+              switch(var, 'tas' = 'GHCNv2+ERSSTv3b+GISTEMP', 'tos' = 'ERSSTv3b',  # fix: 'tos' key (was duplicate 'tas')
+              'prlr' = 'GPCC'), switch(ly, 'ly1' = 'Year1', 'ly2-5' = 'Year2-5',
+              'ly6-9' = 'Year6-9')), sizetit = 0.8, brks = lims, cols = cols,
+              colNA = switch(var, 'prlr' = 'white', grey(0.4)), 
+              filled.continents = switch(var, 'tas' = F, 'tos' = T, 'prlr' = F),
+              dots = t(flag), intylat = 45)
+  dev.off()
+}

+ 212 - 0
diagnostics/plot_timeseries.R

@@ -0,0 +1,212 @@
+#!/usr/bin/env Rscript
+
+library(s2dverification)
+args <- commandArgs(TRUE)
+
+comptrend <- T
+compcor <- T
+comprms <- T
+compspread <- T
+plotano <- T
+
+var <- args[1]   # sie/sia/siv/tos/tas/prlr/ohc/lohc/mohc/uohc/amoc
+pole <- args[2]  # N/S only for sia/sie
+nltimemax <- 124 # number of leadtimes max in the experiments (in months)
+nltimeout <- 60  # number of leadtimes to postprocess(in months)
+lstexpid <- c('i00k', 'b02p') # list of ids
+mon0 <- 11       # initial month
+year0 <- 1960    # first start date
+yearf <- 2005    # last start date
+intsdate <- 5    # interval between start dates
+runmeanlen <- 12 # length of the window for running mean (in months)
+
+obs <- switch(var, 'sia' = c('HadISST'), 'sie' = c('HadISST'), 
+              'tas' = c('NCEP', 'ERA40'), 'tos' = c('ERSST', 'HadISST'),
+              'prlr' = c('CRU', 'GPCC'), 'ohc' = c('NEMOVAR_S4'),
+              'mohc' = c('NEMOVAR_S4'), 'uohc' = c('NEMOVAR_S4'),
+              'lohc' = c('NEMOVAR_S4'), 'amoc' = c('NEMOVAR_S4'),
+              'siv' = 'PIOMAS')
+toptitle2 <- switch(var, 'sia' = "sea ice area", 'sie' = "sea ice extent",
+                    'siv' = "sea ice volume", 'tas' = "global T2m", 
+                    'tos' = "global SST (60S-65N)", 
+                    'prlr' = 'land precipitation (60S-65N)', 
+                    'ohc' = "global ocean heat content", 
+                    'lohc' = 'global 800m-bottom ocean heat content',
+                    'mohc' = 'global 350m-800m ocean heat content',
+                    'uohc' = 'global 0-350m ocean heat content',
+                    'amoc' = 'Atlantic Overturning Streamfunction (40-55N, 1-2km)'
+                    )
+ytitle1 <- switch(var, 'sia' = "Millions km2", 'sie' = "Millions km2", 
+                  'siv' = 'Thousands km3', 'tas' = 'K', 'tos' = 'K', 
+                  'prlr' = 'mm/day', 'ohc' = '10e22 Joules', 
+                  'lohc' = '10e22 Joules', 'mohc' = '10e22 Joules',
+                  'uohc' = '10e22 Joules', 'amoc' = 'Sv')
+
+syears <- seq(year0, yearf, intsdate)
+imon2 <- paste("0", as.character(mon0), sep = "")
+sdates <- paste(as.character(syears), substr(imon2, nchar(imon2) - 1, 
+                nchar(imon2)), '01', sep = "")
+toptitle1 <- paste(switch(pole, 'N' = "Arctic", 'S' = "Antarctic", ""),
+                   toptitle2)
+
+savename <- paste(var, switch(pole, 'N' = paste('_', pole, sep = ''),
+                  'S' = paste('_', pole, sep = ''), ''), sep = '')
+for (expid in lstexpid ) {
+  savename <- paste(savename, '_', expid, sep = '')
+}
+if (file.exists(paste(savename, '.sav', sep = ''))) {
+  load(paste(savename, '.sav', sep = ''))
+} else {
+  if (var == 'prlr' | var == 'tos' ) {
+    fnc <- open.ncdf('/home/vguemas/analysis_hui/constant_files/ecearth_mm_lsm.nc'
+                     )
+    mask <- get.var.ncdf(fnc, 'LSM')
+    close.ncdf(fnc)
+    if (var == 'prlr') {
+      fnc <- open.ncdf('/cfu/data/dwd/gpcc_combined1x1_v4/constant_fields/land_sea_mask.nc'
+                       )
+      mask_gpcc <- get.var.ncdf(fnc, 'lsm')
+      close.ncdf(fnc)
+      fnc <- open.ncdf('/cfu/data/cru/mask_cru_land.nc')
+      mask_cru <- get.var.ncdf(fnc, 'pre')
+      close.ncdf(fnc)
+      fnc <- open.ncdf('/cfu/data/noaa/gpcp_v2.2/constant_fields/land_sea_mask.nc'
+                       )
+      mask_gpcp <- get.var.ncdf(fnc, 'LSM')
+      close.ncdf(fnc)
+      lstmaskobs <- list(mask_cru, mask_gpcc)
+    } else {
+      mask <- 1 - mask
+      lstmaskobs <- list(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
+                         NULL)
+    }
+    lstmask <- list()
+    for (iexp in 1:length(lstexpid)) {
+       lstmask[[iexp]] <- mask
+    }
+  } else {
+    lstmask <- list(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
+                    NULL, NULL, NULL, NULL, NULL) 
+    lstmaskobs <- list(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+                       NULL)
+  }
+  latbnd <- switch(var, 'tos' = c(-60, 65), 'prlr' = c(-60, 65), c(-90, 90))
+  varname <- switch(var, 'sia' = paste(var, pole, sep = ''), 'sie' = paste(var, 
+             pole, sep = ''), 'siv' = paste(var, pole, sep = ''), 
+             'ohc' = 'heatc', 'uohc' = '0-315_heatc', 'mohc' = '373-657_heatc',
+             'lohc' = '800-5350_heatc', 'amoc' = 'moc_40N55N_1-2km', var)
+  if (is.na(match('b02p', lstexpid)) == TRUE) { 
+    lstload <- lstexpid
+  } else {
+    lstload <- lstexpid[-match('b02p', lstexpid)]
+  }       
+  toto1 <- Load(varname, lstload, obs, sdates, latmin = latbnd[1],
+                latmax = latbnd[2], nleadtime = nltimemax, 
+                leadtimemax = nltimeout, maskmod = lstmask, 
+                maskobs = lstmaskobs)
+  if (is.na(match('b02p', lstexpid)) == FALSE) {
+    toto1bis <- Load(varname, 'b02p', obs = NULL, '19501101', 
+                     latmin = latbnd[1], latmax = latbnd[2], maskmod = lstmask,
+                     maskobs = lstmaskobs)
+    toto1ter <- Histo2Hindcast(toto1bis$mod, '19501101', sdates, 
+                               nleadtimesout = nltimeout)
+    toto1beta <- array(dim = c(dim(toto1$mod)[1] + dim(toto1ter)[1], 
+                       max(dim(toto1$mod)[2], dim(toto1ter)[2]),
+                       dim(toto1$mod)[3:4]))
+    toto1beta[1:dim(toto1$mod)[1], 1:dim(toto1$mod)[2], , ] <- toto1$mod
+    toto1beta[(dim(toto1$mod)[1] + 1):(dim(toto1$mod)[1] + dim(toto1ter)[1]), 
+              1:dim(toto1ter)[2], , ] <- toto1ter
+    toto1$mod <- toto1beta
+    lstexpid <- c(lstload, 'b02p')
+  }
+  if (var == 'prlr') {
+    toto1$mod <- toto1$mod * 1000 * 3600 * 24
+    toto1$obs <- toto1$obs * 1000 * 3600 * 24
+  }
+  if (var == 'ohc' | var == 'lohc' | var == 'mohc' | var == 'uohc') {
+    toto1$mod <- toto1$mod / 1e22
+    toto1$obs <- toto1$obs / 1e22
+  }
+  if (var == 'sia' | var=='sie' | var=='siv') {
+    toto1$mod <- toto1$mod/1000
+    if (var == 'siv') {
+      toto1$obs <- toto1$obs/1000
+    }
+  }
+  save(toto1, file = paste(savename, '.sav', sep = ''))
+}
+
+toto2a <- Clim(toto1$mod, toto1$obs, memb = F)
+toto2b_ano_exp <- Ano(toto1$mod, toto2a$clim_exp)
+toto2b_ano_obs <- Ano(toto1$obs, toto2a$clim_obs)
+toto3 <- Smoothing(toto2b_ano_exp, runmeanlen, 4)
+toto4 <- Smoothing(toto2b_ano_obs, runmeanlen, 4)
+suf <- switch(pole, 'N' = paste('_', pole, sep = ''), 'S' = paste('_', pole,
+              sep = ''), '')
+PlotAno(toto1$mod, toto1$obs, sdates, toptitle = paste(lstexpid, toptitle1),
+        ytitle = c(ytitle1, ytitle1, ytitle1), legends = obs, biglab = F, 
+        fileout = paste(var, '_', lstexpid, suf, '.eps', sep = ''))
+PlotAno(Smoothing(toto1$mod, runmeanlen, 4), 
+        Smoothing(toto1$obs, runmeanlen, 4), sdates, 
+        toptitle = paste("smoothed", lstexpid, toptitle1),
+        ytitle = c(ytitle1, ytitle1, ytitle1), legends = obs, biglab = F, 
+        fileout = paste(var, '_', lstexpid, suf, '_smoothed.eps', sep = ''))
+
+if (plotano) {
+  PlotAno(toto3, toto4, sdates, toptitle = paste("smoothed", lstexpid,
+          toptitle1, "anomalies"), ytitle = c(ytitle1, ytitle1, ytitle1), 
+          legends = obs, biglab = F, fileout = paste(var, '_', lstexpid,suf, 
+          '_ano.eps', sep = ''))
+  PlotClim(toto2a$clim_exp, toto2a$clim_obs, toptitle = paste(switch(pole,
+           'N' = "Arctic", 'S' = "Antarctic", ""), toptitle2, "climatologies"),
+           ytitle = ytitle1, monini = mon0, listexp = lstexpid, listobs = obs,
+           biglab = F, fileout = paste(savename, '_clim.eps', sep = ''))
+} 
+
+if (compspread) {
+  toto5 <- toto3 - InsertDim(Mean1Dim(toto3, 2, narm = T), 2, dim(toto3)[2])
+  toto6 <- Spread(toto5, c(2, 3))
+  PlotVsLTime(toto6$iqr, toptitle = paste("InterQuartile Range", toptitle1),
+              ytitle = ytitle1, monini = mon0, listexp = lstexpid, biglab = F,
+              fileout = paste("IQR_", savename, ".eps", sep = ''))
+  PlotVsLTime(toto6$maxmin, toptitle = paste("Maximum-Minimum for", toptitle1),
+              ytitle = ytitle1, monini = mon0, listexp = lstexpid, biglab = F, 
+              fileout = paste("MaxMin_", savename, ".eps", sep = ''))
+  PlotVsLTime(toto6$sd, toptitle = paste("Standard Deviation for", toptitle1),
+              ytitle = ytitle1, monini = mon0, listexp = lstexpid, biglab = F,
+              fileout = paste("SD_", savename, ".eps", sep = ''))
+  PlotVsLTime(toto6$mad, toptitle = paste("Median Absolute Deviation for",
+              toptitle1), ytitle = ytitle1, monini = mon0, listexp = lstexpid,
+              biglab = F, fileout = paste("Mad_", savename, ".eps", sep = ''))
+}
+
+if (compcor) {
+  cor <- Corr(Mean1Dim(toto3, 2), Mean1Dim(toto4, 2), 1, 2, compROW = 3, 
+              limits = c(ceiling((runmeanlen + 1) / 2), 
+              nltimeout - floor(runmeanlen / 2)))
+  PlotVsLTime(cor, toptitle = paste("Correlations for", toptitle1), 
+              ytitle = "correlation", monini = mon0, limits = c(-1, 2),
+              listexp = lstexpid, listobs = obs, biglab = F, 
+              hlines = c(-1, 0, 1), fileout = paste("cor_", savename, ".eps",
+              sep = ''))
+}
+
+if (comprms) {
+  rms <- RMS(Mean1Dim(toto3, 2), Mean1Dim(toto4, 2), 1, 2, compROW = 3, 
+             limits = c(ceiling((runmeanlen + 1) / 2), 
+             nltimeout - floor(runmeanlen / 2)))
+  PlotVsLTime(rms, toptitle = paste("RMSE for", toptitle1), ytitle = ytitle1,
+              monini = mon0, listexp = lstexpid, listobs = obs, biglab = F,
+              fileout = paste("rms_", savename, ".eps", sep = ""))
+}
+
+if (comptrend) {
+  trends <- Consist_Trend(Mean1Dim(toto3, 2), Mean1Dim(toto4, 2), intsdate / 12)
+  PlotVsLTime(trends$trend, toptitle = paste("Trend for", toptitle1), 
+              ytitle = paste(ytitle1, "/ year"), monini = mon0,
+              listexp = c(lstexpid, obs), biglab = F, fileout = paste("trend_",
+              savename, ".eps", sep = ""))
+}
+
+rm(list = ls())
+quit()

+ 1982 - 0
eigentechniques/CFU_Rfunc.txt

@@ -0,0 +1,1982 @@
+CFU_PCA<-function(data, cl = 0.98, ofiles = FALSE) {
+#
+# Computes a Principal Component Analysis (PCA/EOF) of data.
+#
+# Description:
+#
+#      Returns an array of EOFs, an array of PCs, a vector of FVARs (fractions of explained variance), 
+#      an array of CORRs (correlation maps), and the threshold for the correlation coefficient based 
+#      on a two-tailed Student's t-test.
+#      If applied, also returns ascii files for PCs and FVARs.
+#
+# Usage:
+#
+#      CFU_PCA(data, cl = 0.98, ofiles = FALSE)
+#
+# Arguments:
+#
+#   data:     Array containing the anomalies field for the analysis 
+#
+#   cl:       Value of the confidence level for the correlation map.
+#             It is based on a two-tailed Student's t-test.
+#             Default is cl=0.98 (98% confidence level; 2% significance level).
+#
+#   ofiles:   Logical. 
+#
+# Output:
+#
+#   $PCs:    Array of principal components (nt, nm)
+#   $FVARs:  Vector of fractions of explained variance (nm)
+#   $EOFs:   Array of empirical orthogonal functions (c(dim), nm) 
+#   $CORRs:  Array of correlation maps associated with EOFs (c(dim), nm) 
+#   $Rtest:  Value of the statistically significant corr. coefficient at cl
+#
+# Author:
+#
+#      Javier Garcia-Serrano <jgarcia@ic3.cat> 17 September 2010
+#      Modified by jgarcia@ic3.cat (29 August 2011): The calculation is now done by a 
+#      singular value decomposition of the data matrix (prcomp), not by using 'eigen'
+#      on the covariance matrix (princomp).   
+#
+
+ 
+d=dim(data)
+if (length(d) == 3){
+    nlon=d[1]; nlat=d[2]; ns=nlon*nlat; nt=d[3]   
+} else {
+    nlon=1; nlat=d[1]; ns=nlon*nlat; nt=d[2]
+}
+
+M=array(dim=c(ns,nt))
+for (i in 1:nlat){
+    M[(nlon*(i-1)+1):(nlon*(i-1)+nlon),]=data[,i,]
+}
+not=is.na(M[,1])
+yes=array(dim=c(ns))
+counter=0
+for (i in 1:ns){
+    if (not[i]=='FALSE'){counter=counter+1; yes[counter]=i}
+}
+yes=yes[1:counter]
+Myes=M[yes,]          #(ns_with_data,nt)
+
+#covariance matrix
+
+#covM=(Myes%*%t(Myes))/(nt-1) 
+
+#PCA analysis
+
+#PCA=princomp(covmat=covM)
+PCA=prcomp(t(Myes))
+
+#outputs
+
+nm=10                  #number of modes to be retained (aprox 80% of FVAR)
+
+std=PCA$sdev
+lambda=std*std; fvar=lambda/sum(lambda)
+fvar=round(fvar,digits=4); fvar=fvar[1:nm]*100
+
+#U=PCA$loadings   #(ns_with_data,ns_with_data)
+U=PCA$rotation
+Un=U[,1:nm]
+
+PC=t(Myes)%*%Un       #(nt,nm)
+PCstd=array(dim=c(nt,nm))
+for (i in 1:nm){
+    S=sd(PC[,i]); PCstd[,i]=PC[,i]/S
+}
+
+EOFyes=(Myes%*%PCstd)/nt #(ns_with_data,nm)
+eof=array(dim=c(ns,nm))
+eof[yes,]=EOFyes
+EOF=array(dim=c(nlon,nlat,nm))
+for (i in 1:nlat){
+    EOF[,i,]=eof[(nlon*(i-1)+1):(nlon*(i-1)+nlon),]
+}
+
+CORRyes=array(dim=c(counter,nm))
+for (i in 1:nm){
+    for (j in 1:counter){
+        CORRyes[j,i]=cor(Myes[j,],PCstd[,i])
+    }
+}
+corr=array(dim=c(ns,nm))
+corr[yes,]=CORRyes
+CORR=array(dim=c(nlon,nlat,nm))
+for (i in 1:nlat){
+    CORR[,i,]=corr[(nlon*(i-1)+1):(nlon*(i-1)+nlon),]
+}
+
+#t-Test
+
+cl=cl; cl2=(1-cl)/2
+t=qt(cl+cl2, nt-2); R=sqrt((t*t)/((t*t)+nt-2))
+
+#options
+
+#if(ofiles) {
+#    save(PCstd, ascii=TRUE, file="$plotsdir/PCs_of_data.dat")
+#    save(fvar, ascii=TRUE, file="$plotsdir/FVARs_of_data.dat")
+#}
+
+invisible(list(PCs=PCstd,FVARs=fvar,EOFs=EOF,CORRs=CORR,Rtest=R))
+
+}
+
+
+CFU_MCA<-function(datax, datay, cl = 0.98, ofiles = FALSE) {
+#
+# Computes a Maximum Covariance Analysis (MCA) between datax and datay.
+#
+# Description:
+#
+#      Returns a vector of squared covariance fraction (SCFs) explained by each pair of
+#      covariability modes, a vector of correlation coefficient (RUVs) between expansion 
+#      coefficients (ECs) that measures their linear relationship, and a set of regression (MCAs)
+#      and correlation (CORRs) maps associated with the covariability modes. Note that both, 
+#      MCAs and CORRs, are 'homogeneous' patterns obtained as regression/correlation between
+#      each field (predictor, predictand) and its expansion coefficient. 
+#      The threshold for the correlation coefficient, based on a two-tailed Student's t-test, 
+#      is also produced.
+#      If applied, also returns ascii files for SCFs, RUVs, and ECs.
+#
+# Usage:
+#
+#      CFU_MCA(datax, datay, cl = 0.98, ofiles = FALSE)
+#
+# Arguments:
+#
+#   datax:    Array containing the anomalies field for the predictor 
+#
+#   datay:    Array containing the anomalies field for the predictand 
+#
+#   cl:       Value of the confidence level for the correlation map.
+#             It is based on a two-tailed Student's t-test.
+#             Default is cl=0.98 (98% confidence level; 2% significance level).
+#
+#   ofiles:   Logical. 
+#
+# Output:
+#
+#   $SCFs:   Vector of squared covariance fractions (nm)
+#   $RUVs:   Vector of correlations between expansion coefficients (nm)
+#   $ECs_U:  Array of expansion coefficients of predictor field (nt, nm) 
+#   $MCAs_U: Array of covariability patterns of predictor field (c(dim), nm) 
+#   $CORRs_U:Array of corr. maps associated with predictor field (c(dim), nm) 
+#   $ECs_V:  Array of expansion coefficients of predictand field (nt, nm) 
+#   $MCAs_V: Array of covariability patterns of predictand field (c(dim), nm) 
+#   $CORRs_V:Array of corr. maps associated with predictand field (c(dim), nm) 
+#   $Rtest:  Value of the statistically significant corr. coefficient at cl
+#
+# Author:
+#
+#      Javier Garcia-Serrano <jgarcia@ic3.cat> 24 September 2010
+#
+
+ 
+dx=dim(datax)
+if (length(dx) == 3){
+    nlonx=dx[1]; nlatx=dx[2]; nsx=nlonx*nlatx; nt=dx[3]   
+} else {
+    nlonx=1; nlatx=dx[1]; nsx=nlonx*nlatx; nt=dx[2]
+}
+
+X=array(dim=c(nsx,nt))
+for (i in 1:nlatx){
+    X[(nlonx*(i-1)+1):(nlonx*(i-1)+nlonx),]=datax[,i,]
+}
+not=is.na(X[,1])
+yesx=array(dim=c(nsx))
+counterx=0
+for (i in 1:nsx){
+    if (not[i]=='FALSE'){counterx=counterx+1; yesx[counterx]=i}
+}
+yesx=yesx[1:counterx]
+Xyes=X[yesx,]          #(nsx_with_data,nt)
+
+dy=dim(datay)
+if (length(dy) == 3){
+    nlony=dy[1]; nlaty=dy[2]; nsy=nlony*nlaty; nt=dy[3]   
+} else {
+    nlony=1; nlaty=dy[1]; nsy=nlony*nlaty; nt=dy[2]
+}
+
+Y=array(dim=c(nsy,nt))
+for (i in 1:nlaty){
+    Y[(nlony*(i-1)+1):(nlony*(i-1)+nlony),]=datay[,i,]
+}
+not=is.na(Y[,1])
+yesy=array(dim=c(nsy))
+countery=0
+for (i in 1:nsy){
+    if (not[i]=='FALSE'){countery=countery+1; yesy[countery]=i}
+}
+yesy=yesy[1:countery]
+Yyes=Y[yesy,]          #(nsy_with_data,nt)
+
+#covariance matrix
+
+covXY=(Xyes%*%t(Yyes))/(nt-1) 
+
+#MCA analysis
+
+MCA=svd(covXY)
+
+#outputs
+
+nm=10                  #number of modes to be retained
+
+SV=MCA$d               #singular values
+SV2=SV*SV; S=sum(SV2)
+SCF=SV2/S              #squared covariance fraction
+SCF=SCF[1:nm]*100
+
+Ux=MCA$u
+Vy=MCA$v
+Un=Ux[,1:nm]; Vn=Vy[,1:nm]
+ECu=t(Xyes)%*%Un; ECv=t(Yyes)%*%Vn   #(nt,nm)
+ECu_std=array(dim=c(nt,nm))
+ECv_std=array(dim=c(nt,nm))
+for (i in 1:nm){
+    Su=sd(ECu[,i]); ECu_std[,i]=ECu[,i]/Su
+    Sv=sd(ECv[,i]); ECv_std[,i]=ECv[,i]/Sv
+}
+
+RUV=array(dim=c(1,nm))
+for (i in 1:nm){
+    RUV[i]=cor(ECu_std[,i],ECv_std[,i])
+}
+
+Uyes=(Xyes%*%ECu_std)/nt            #(ns_with_data,nm)
+Vyes=(Yyes%*%ECv_std)/nt
+mcaU=array(dim=c(nsx,nm)); mcaU[yesx,]=Uyes
+mcaV=array(dim=c(nsy,nm)); mcaV[yesy,]=Vyes
+MCAU=array(dim=c(nlonx,nlatx,nm))
+MCAV=array(dim=c(nlony,nlaty,nm))
+for (i in 1:nlatx){
+    MCAU[,i,]=mcaU[(nlonx*(i-1)+1):(nlonx*(i-1)+nlonx),]
+}
+for (i in 1:nlaty){
+    MCAV[,i,]=mcaV[(nlony*(i-1)+1):(nlony*(i-1)+nlony),]
+}
+
+CORRUyes=array(dim=c(counterx,nm))
+CORRVyes=array(dim=c(countery,nm))
+for (i in 1:nm){
+    for (j in 1:counterx){
+        CORRUyes[j,i]=cor(Xyes[j,],ECu_std[,i])
+    }
+    for (j in 1:countery){
+        CORRVyes[j,i]=cor(Yyes[j,],ECv_std[,i])
+    }
+}
+corrU=array(dim=c(nsx,nm)); corrU[yesx,]=CORRUyes
+corrV=array(dim=c(nsy,nm)); corrV[yesy,]=CORRVyes
+CORRU=array(dim=c(nlonx,nlatx,nm))
+CORRV=array(dim=c(nlony,nlaty,nm))
+for (i in 1:nlatx){
+    CORRU[,i,]=corrU[(nlonx*(i-1)+1):(nlonx*(i-1)+nlonx),]
+}
+for (i in 1:nlaty){
+    CORRV[,i,]=corrV[(nlony*(i-1)+1):(nlony*(i-1)+nlony),]
+}
+
+#t-Test
+
+cl=cl; cl2=(1-cl)/2
+t=qt(cl+cl2, nt-2); R=sqrt((t*t)/((t*t)+nt-2))
+
+#options
+
+#if(ofiles) {
+#    save(SCF, ascii=TRUE, file="$plotsdir/SCFs_of_data.dat")
+#    save(RUV, ascii=TRUE, file="$plotsdir/RUVs_of_data.dat")
+#    save(ECu_std, ascii=TRUE, file="$plotsdir/ECsU_of_data.dat")
+#    save(ECv_std, ascii=TRUE, file="$plotsdir/ECsV_of_data.dat")
+#}
+
+invisible(list(SCFs=SCF,RUVs=RUV,ECs_U=ECu_std,MCAs_U=MCAU,CORRs_U=CORRU,ECs_V=ECv_std,MCAs_V=MCAV,CORRs_V=CORRV,Rtest=R))
+
+}
+
+
CFU_lonlat2ns<-function(data, miss.val = FALSE){
#
# Reshape a 3D array into a 2D array handling only spatial dimensions
#
# Description:
#
#       Returns a 2D array of the 3D input, stacking the two spatial dimensions
#       into a single 'ns' vector according to the GrADS protocol (longitude
#       varying fastest), and keeping the temporal dimension unchanged.
#       Missing values are located (from the first time step) and a spatial
#       index with the positions of non-missing values is returned.
#
# Usage: CFU_lonlat2ns(data, miss.val = FALSE)
#
# Input:
#
#       data:     3-dimensional array, e.g. (longitude, latitude, time)
#
#       miss.val: logical; if TRUE the full (ns, time) matrix including the
#                 missing values is returned instead of the NA-free one
#
# Output:
#
#       matrix:      2-dimensional array (ns, time), NA rows removed unless
#                    miss.val=TRUE
#       nt:          number of time steps
#       ns_original: total number of spatial points (nlon*nlat)
#       ns_noNaN:    number of spatial points holding valid data
#       index_noNaN: positions (within 1:ns) of the valid spatial points
#
# Authors:
#
#      Javier Garcia-Serrano <jgarcia@ic3.cat> 30 September 2010


d=dim(data)
nlon=d[1]; nlat=d[2]; ns=nlon*nlat; nt=d[3]

# Stack (lon,lat) into ns with longitude varying fastest (GrADS ordering).
M=array(dim=c(ns,nt))
for (i in 1:nlat){
    M[(nlon*(i-1)+1):(nlon*(i-1)+nlon),]=data[,i,]
}
# A point is assumed either always valid or always missing, so missing
# values are detected from the first time step only.  which(!is.na())
# replaces the original element-by-element scan and also behaves correctly
# when every point is missing (the old yes[1:0] indexed backwards).
yes=which(!is.na(M[,1]))
counter=length(yes)
Myes=M[yes,]          #(ns_with_data,nt)

#printed in screen

print(ns)
print(counter) 

#outputs

if(miss.val) { Myes <- M } 

invisible(list(matrix=Myes,nt=nt,ns_original=ns,ns_noNaN=counter,index_noNaN=yes))

}
+
+
CFU_ns2lonlat<-function(data, nlon=1, nlat=1, yes=NULL){
#
# Reshape a 2D array into a 3D array handling only spatial dimensions
#
# Description:
#
#       Inverse of CFU_lonlat2ns: unfolds the 'ns' spatial vector of a 2D
#       (ns, time) array back into (longitude, latitude) following the GrADS
#       protocol (longitude varying fastest); the temporal dimension is kept
#       unchanged.  If 'yes' is supplied, 'data' is assumed to contain only
#       the valid (non-NA) rows, which are scattered back to their original
#       positions; the remaining points are filled with NA.
#
# Usage: CFU_ns2lonlat(data, nlon=1, nlat=1, yes=NULL)
#
# Input:
#
#       data: 2-dimensional array, e.g. (ns, time)
#
#       nlon: number of longitudes of the target grid
#
#       nlat: number of latitudes of the target grid
#
#       yes:  optional vector with the positions (within 1:ns) of the rows
#             of 'data', as returned by CFU_lonlat2ns ($index_noNaN)
#
# Output:
#
#       3-dimensional array (nlon, nlat, time)
#
# Authors:
#
#      Javier Garcia-Serrano <jgarcia@ic3.cat> 06 October 2010


nlon=max(abs(nlon), 1); nlat=max(abs(nlat), 1); ns=nlon*nlat
d=dim(data)
nt=d[2]   

# When 'yes' is given, scatter the valid rows back to their ns positions
# first (missing points remain NA); otherwise 'data' is already (ns, nt).
if (is.null(yes)){
    Mns=data
} else {
    Mns=array(dim=c(ns,nt)); Mns[yes,]=data
}
# Unfold ns back into (lon, lat), longitude varying fastest.
M=array(dim=c(nlon,nlat,nt))
for (i in 1:nlat){
    M[,i,]=Mns[(nlon*(i-1)+1):(nlon*(i-1)+nlon),]    
}

M

}
+
+
CFU_detrend<-function(vector){
#
# Remove the linear trend in a time-series
#
# Description:
#
#       Returns a vector with the same length as the original (nt) but in which
#       the linear trend has been removed.   
#       If the input contains any missing value, a vector of NAs of the same
#       length is returned (lm() would otherwise silently drop the NAs and
#       return a residual vector shorter than the input).
#
# Usage: CFU_detrend(vector)
#
# Input:
#
#       vector:   1-dimensional array, time (nt dimension)
#
# Output:
#
#       1-dimensional array without the linear trend (nt)
#
# Authors:
#
#      Javier Garcia-Serrano <jgarcia@ic3.cat> 05 October 2010
#      Modified by jgarcia & vguemas@ic3.cat (27 April 2011) : to convert output into a vector (before, a list)

nt=length(vector)

# anyNA() scans the whole series; the previous check only looked at the
# first element, letting interior NAs through to lm() and producing a
# wrong-length result.
if (anyNA(vector)){
    print("vector contains missing values")
    detrend=array(dim=nt)
} else {
    # Residuals of an OLS fit against the time index = detrended series.
    detrend=resid(lm(vector ~ seq_along(vector)))
}
detrend=as.vector(detrend)

#outputs

detrend

}
+
+
CFU_testRR<-function(R1, N1, R2, N2, cl = 0.95){
#
# Testing the equality of correlation coefficients from independent samples 
#
# Description:
#
#       Bilateral test of H0: R1==R2 against H1: R1=/=R2 for two sample
#       correlation coefficients coming from independent populations
#       (e.g. a dynamical and a statistical prediction system).
#       Both correlations are Fisher z-transformed; the statistic Z follows
#       a N(0,1) distribution and H0 is rejected when |Z| > Z_alfa/2.
#
# Usage: CFU_testRR(R1, N1, R2, N2, cl)
#
# Input:
#
#       R1:   number, correlation of samples #1
#
#       N1:   number, nº of elements in the sample (length #1)
#
#       R2:   number, correlation of samples #2
#
#       N2:   number, nº of elements in the sample (length #2)
#
#       cl:   Value of the confidence level.
#             Default is cl=0.95 (95% confidence level; 5% significance level).
#
# Output:
#
#       value:   binary output (0,1) / 0 = H0 accepted, 1 = H0 rejected
# 
#       p_value: two-tailed p-value (area of N(0,1) to the right of |Z|, doubled)
#
# Authors:
#
#      Javier Garcia-Serrano <jgarcia@ic3.cat> 02 December 2010
#      Modified by jgarcia & lrodrigues@ic3.cat (13 December 2011) : Correcting p-values 
#      in two-tailed test (alfa is now multiplied by 2).

# Fisher z-transform of both sample correlations.
z1 <- 0.5*log((1+R1)/(1-R1))
z2 <- 0.5*log((1+R2)/(1-R2))

# Variance of the difference of the transformed correlations; |.| makes
# the statistic two-tailed.
var_diff <- (1/(N1-3)) + (1/(N2-3))
Z <- abs(z1-z2)/sqrt(var_diff)

# Critical value of N(0,1) at the requested two-sided confidence level.
Z_crit <- qnorm(cl + (1-cl)/2)

if (is.na(Z)){
    value <- NA
    p <- NA
} else {
    value <- if (Z > Z_crit) 1 else 0
    # Two-tailed p-value.
    p <- 2*pnorm(Z, lower.tail=FALSE)
}

#outputs

invisible(list(value=value, p_value=p))

}
+
+
CFU_testR<-function(R, N, cl = 0.95, test="two"){
#
# Testing the Null-Hypothesis of population correlation equal to zero 
# 
# Description:
# 
#       Statistical test for sample correlation coefficient (R) according to 
#       the Null Hypothesis of population correlation coefficient (RO) equal to zero.
# 
#                     H0: RO==0       vs.       H1:  RO=/=0
# 
#       The statistic t follows a Student's (T-test) distribution: 
#
#                     if t > t_alfa,N-2   (one-tailed) H0 is rejected
#
#                     if |t| > t_alfa/2,N-2 (two-tailed) H0 is rejected
# 
# Usage: CFU_testR(R, N, cl, test=c("one","two"))
# 
# Input:
# 
#       R:    number, sample correlation
# 
#       N:    number, nº of elements in the sample
# 
#       cl:   Value of the confidence level.
#             Default is cl=0.95 (95% confidence level; 5% significance level).
#
#       test: string of characters that can be "one" or "two" (by default) according 
#             to required test; one-tailed, unilateral (the former) o two-tailed, 
#             bilateral (the latter). 
# 
# Output:
# 
#       value:   binary output (0,1) / 0 = H0 accepted, 1 = H0 rejected
#  
#       p_value: area under the Student's T density curve to the right of endpoint |t|   
# 
# Authors:
# 
#      Javier Garcia-Serrano <jgarcia@ic3.cat> 08 December 2010
#
#      Modified by jgarcia & lrodrigues@ic3.cat (13 July 2011) : Correcting p-values 
#      in two-tailed test (alfa is now multiplied by 2).

# Fail fast on an invalid 'test' argument (previously 'value'/'p' were left
# undefined and the function errored at the return statement).
test <- match.arg(test, c("one","two"))

# t-statistic of the sample correlation under H0: RO == 0.
tstat=R*sqrt((N-2)/(1-R**2))

if (test=="one"){
    t_cl=qt(cl, N-2)     
    # NOTE(review): |t| is compared even in the one-tailed case, which
    # matches the original code but not the header formula (t > t_alfa);
    # confirm intended behaviour before changing.
    if (abs(tstat) > t_cl){ value=1 } else { value=0 }
#
    if (tstat > 0){
        p=pt(tstat, N-2, lower.tail=FALSE) 
    } else { p=pt(tstat, N-2, lower.tail=TRUE) }
} else {
    cl2=(1-cl)/2; t_cl=qt(cl+cl2, N-2)
    if (abs(tstat) > t_cl){ value=1 } else { value=0 }
#
    if (tstat > 0){
        p=pt(tstat, N-2, lower.tail=FALSE)*2 
    } else { p=pt(tstat, N-2, lower.tail=TRUE)*2 }
}

#outputs

invisible(list(value=value, p_value=p))

}
+
+
CFU_AA<-function(data, lat, yes=NULL){
#
# Compute the area-average of a 2D (longitude x latitude) array 
#
# Description:
#
#       Area-average (AA) of "data" over the spatial domain described by 'longitude' and 
#       'latitude'.
#       The weights for the field-mean (AA) are based on the cosine of the latitude at each    
#       gridpoint, i.e. all data in a latitudinal circle (all longitudes) have the same
#       weight, cosine("lat").
#
# Usage: CFU_AA(data, lat, yes)
#
# Input:
#
#       data: 2-dimensional array (longitude, latitude; in this order)
#
#       lat:  vector containing the latitude coordinates
#
#       yes:  vector containing the position of non-NaN values, so 'good' values 
#             (in a ns=nlon*nlat, GrADS format) 
#
# Output:
#
#       data_ave; value of the area-average
#
# Authors:
#
#      Javier Garcia-Serrano <jgarcia@ic3.cat> 03 December 2010
#      Modified by Virginie Guemas <vguemas@ic3.cat> (March 2011) : Weight = 0 for NA grid point


nlon=dim(data)[1]; nlat=dim(data)[2]; ns=nlon*nlat

# Latitude weights: every point along a latitude circle gets cos(lat).
# weight[i,j] = cos(lat[j]) for all longitudes i.
ycos=cos((lat*pi)/180)
weight=matrix(ycos, nrow=nlon, ncol=nlat, byrow=TRUE)

data=data*weight                             #weightening

# Flatten to GrADS ordering (longitude varying fastest).  as.vector() on a
# matrix is column-major, i.e. identical to the row-block loop it replaces.
M=as.vector(data)
weight2=as.vector(weight)

# If no index of valid points is supplied, build it from the data itself;
# NA points get zero weight by being excluded from both sums.
if (is.null(yes)){
    yes=which(!is.na(M))
}

# Weighted sum over valid points, normalised by the sum of their weights.
data_ave=sum(M[yes])/sum(weight2[yes])

#outputs

data_ave

}
+
CFU_neff<-function(vector, fig=FALSE){
#
# Compute the effective degrees of freedom of a time series 
#
# Description:
#
#       Effective number of degrees of freedom of "vector", accounting for
#       its autocorrelation.
#       Based on: Zieba, A. (2010): Effective number of observations and unbiased estimators    
#       of variance for autocorrelated data - an overview. Metrol. Meas. Syst. Vol XVII, No. 1,
#       pp. 3-16; index 330930, ISSN 0860-8229. www.metrology.pg.gda.pl
#
# Usage: CFU_neff(vector)
#
# Input:
#
#       vector: 1-dimensional array, time (nt dimension)
#
#       fig: logical, if plot in the autocorrelation of "vector" is desired 
#
# Output:
#
#       neff; number of effective degrees of freedom
#
# Authors:
#
#      Created by Caio A. S. Coelho <caio.coelho@cptec.inpe.br>
#      Implemented in CFU_Rfunc by Javier García-Serrano <jgarcia@ic3.cat> (August 2011)


n_obs=length(vector)

# Sample autocorrelation at lags 1 .. n-1 (optionally plotted when fig=TRUE).
rho=acf(vector, lag.max=n_obs-1, plot=fig)$acf[2:n_obs, 1, 1]

# Weighted sum of squared autocorrelations with weights (n-k)/n.
lags=1:(n_obs-1)
s=sum(((n_obs-lags)/n_obs)*(rho**2))

neff=(n_obs/(1+(2*s)))-1

#outputs

neff

}
+
CFU_BS <- function(obs,pred,thresholds = seq(0,1,0.1)) {
#
# Compute the BS and its decomposition given a set of probability predictions of binary event
#
# Description:
#
#      Returns the values of the BS and its standard decomposition as well as the addition of the
#      two winthin-bin extra components (Stephenson et al., 2008). It also solves the bias-corrected 
#      decomposition of the BS (Ferro and Fricker, 2012). BSS having the climatology as the reference
#      forecast.
#
# Usage:
#
#      CFU_BS(obs,pred,thresholds = seq(0,1,0.1))
#
# Input:
#
#      obs: Vector of binary observations (1 or 0) 
#
#      pred: Vector of probablistic predictions [0,1]
#
#      thresholds: Values used to bin the forecasts.  By default the bins are 
#      {[0,0.1), [0.1, 0.2), ... [0.9, 1]}.
#
# Output:
#
#      $rel:                 standard reliability
#      $res:                 standard resolution
#      $unc:                 standard uncertainty
#      $bs:                  Brier score
#      $bs_check_res:        rel-res+unc
#      $bss_res:             res-rel/unc
#      $gres:                generalized resolution
#      $bs_check_gres:       rel-gres+unc
#      $bss_gres:            gres-rel/unc
#      $rel_bias_corrected:  bias-corrected rel
#      $gres_bias_corrected: bias-corrected gres
#      $unc_bias_corrected:  bias-corrected unc
#      $bss_bias_corrected:  gres_bias_corrected-rel_bias_corrected/unc_bias_corrected
#      $nk:                  number of forecast in each bin
#      $fkbar:               average probability of each bin
#      $okbar:               relative frequency that the observed event occurred
#      $bins:                bins used
#      $pred:                values with which the forecasts are verified
#      $obs:                 probability forecasts of the event 
#
# References:
#
#      Wilks (2006) Statistical Methods in the Atmospheric Sciences.
#      Stephenson et al. (2008). Two extra components in the Brier score decomposition. Weather
#      and Forecasting, 23: 752-757.
#      Ferro and Fricker (2012). A bias-corrected decomposition of the BS. Quarterly Journal of 
#      the Royal Meteorological Society, DOI: 10.1002/qj.1924.
#
# Example:
#
#      a=runif(10)
#      b=round(a)
#      x=CFU_BS(b,a)
#      x$bs-x$bs_check_res
#      x$bs-x$bs_check_gres
#      x$rel_bias_corrected-x$gres_bias_corrected+x$unc_bias_corrected
#
# Author:
#
#      Luis Rodrigues <lrodrigues@ic3.cat> 16 April 2012
#
  if (max(pred) > 1 | min(pred) < 0) {
      cat("Predictions outside [0,1] range.  \n Are you certain this is a probability forecast? \n")
  } else if (!all(obs %in% c(0,1))) {
      # Every observation must be exactly 0 or 1.  The previous check
      # (max(obs)!=1 & min(obs)!=0) wrongly accepted mixed inputs such as
      # c(0, 0.5).
      cat("Binary events must be either 0 or 1.  \n Are you certain this is a binary event? \n")
  } else {
    nbins=length(thresholds)-1                     # Number of bins
    n=length(pred)
    bins=as.list(paste("bin",1:nbins,sep=""))
    # Assign each forecast to a bin; the last bin is closed on the right so
    # that pred == 1 is included.
    for (i in 1:nbins) {
      if (i == nbins) {
        bins[[i]]=list(which(pred>=thresholds[i]   & pred<=thresholds[i+1]))
      } else {
        bins[[i]]=list(which(pred>=thresholds[i]   & pred<thresholds[i+1]))
      }
    }
#
    # Per-bin counts, mean forecast probability and observed frequency.
    fkbar=okbar=nk=array(0,dim=nbins)
    for (i in 1:nbins) {
      nk[i]=length(bins[[i]][[1]])
      fkbar[i]=sum(pred[bins[[i]][[1]]])/nk[i]
      okbar[i]=sum(obs[bins[[i]][[1]]])/nk[i]
    }
#
    obar=sum(obs)/length(obs)
    # term1/term2 are the two within-bin extra components of Stephenson et al. (2008).
    relsum=ressum=term1=term2=0
    for (i in 1:nbins) {
      if (nk[i] > 0) {
        relsum=relsum+nk[i]*(fkbar[i]-okbar[i])^2
        ressum=ressum+nk[i]*(okbar[i]-obar)^2
        for (j in 1:nk[i]) {
          term1=term1+(pred[bins[[i]][[1]][j]]-fkbar[i])^2
          term2=term2+(pred[bins[[i]][[1]][j]]-fkbar[i])*(obs[bins[[i]][[1]][j]]-okbar[i])
        }
      }
    }
    rel=relsum/n
    res=ressum/n
    unc=obar*(1-obar)
    bs=sum((pred-obs)^2)/n
    bs_check_res=rel-res+unc
    bss_res=(res-rel)/unc
    gres=res-term1*(1/n)+term2*(2/n)   # Generalized resolution
    bs_check_gres=rel-gres+unc         # BS using GRES
    bss_gres=(gres-rel)/unc            # BSS using GRES
#
# Estimating the bias-corrected components of the BS 
#
    # Bins with nk == 1 yield NaN here (0/0 structure) and are dropped by
    # na.rm=T below; empty bins yield NaN as well.
    term3=array(0,nbins)
    for (i in 1:nbins) {
      term3[i]=(nk[i]/(nk[i]-1))*okbar[i]*(1-okbar[i])
    }
    term_a=sum(term3,na.rm=T)/n
    term_b=(obar*(1-obar))/(n-1)
    rel_bias_corrected=rel-term_a
    gres_bias_corrected=gres-term_a+term_b
    if (rel_bias_corrected < 0 || gres_bias_corrected < 0 ) {
      rel_bias_corrected2=max(rel_bias_corrected,rel_bias_corrected-gres_bias_corrected,0)
      gres_bias_corrected2=max(gres_bias_corrected,gres_bias_corrected-rel_bias_corrected,0)
      rel_bias_corrected=rel_bias_corrected2
      gres_bias_corrected=gres_bias_corrected2
    }
    unc_bias_corrected=unc+term_b
    bss_bias_corrected=(gres_bias_corrected-rel_bias_corrected)/unc_bias_corrected
#
    # Consistency check of the two decompositions (diagnostic only).
    if (round(bs,8)==round(bs_check_gres,8) & round(bs_check_gres,8)==round((rel_bias_corrected-gres_bias_corrected+unc_bias_corrected),8)) {
#      cat("No error found \n")
#      cat("BS = REL-GRES+UNC = REL_lessbias-GRES_lessbias+UNC_lessbias \n")
    }
#
    invisible(list(rel=rel,res=res,unc=unc,bs=bs,bs_check_res=bs_check_res,bss_res=bss_res,gres=gres,bs_check_gres=bs_check_gres,bss_gres=bss_gres,rel_bias_corrected=rel_bias_corrected,gres_bias_corrected=gres_bias_corrected,unc_bias_corrected=unc_bias_corrected,bss_bias_corrected=bss_bias_corrected,nk=nk,fkbar=fkbar,okbar=okbar,bins=bins,pred=pred,obs=obs))
#
  }      # end of if
#
}
+
CFU_plotmapmostliktercile <- function(lon,lat,ptc,
                    tit='Probability forecast of most likely tercile (%)',file='G_PFC_map.ps') {
#  
# Produce probabilistic forecast map of most likely tercile of a forecast.
#
# Description:
#
#      Generates vector graphics file of a probabilistic forecast map showing the most likely tercile.
#      Based on an adapted version of Caio Coelho's EURO-BRISA function.
#
# Usage:
#
#      CFU_plotmapmostliktercile(lon,lat,ptc,tit='Probability forecast of most likely tercile (%)',file='G_PFC_map.ps')
#
# Arguments:
#
#   	 	lon: A vector of longitudes.
#
#	 	lat: A vector of latitudes.
#  
#               ptc: Array of forecast terciles information for each ensemble member, output of CFU_PB().
#
#               tit: Title of the Figure.
#
#              file: Name of the postscript output file.
#
# Output:
#
#              Vector graphics file of a probabilistic forecast map showing the most likely tercile.
#
# Author:
#
#              Fabian Lienert <flienert@ic3.cat>  29 Oct 2012.
#
  # NOTE(review): the sourced rclim.txt presumably provides bluered() and the
  # map utilities used below -- confirm the path exists on the target system.
  source('/cfu/pub/scripts/R/rclim.txt')
  nlon <- length(lon); nlat <- length(lat)
  nfc <- dim(ptc)[1]
  # Split index used to rotate longitudes for plotting (points above 180 are
  # wrapped to negative longitudes in the plotmapmostliktercile call below);
  # assumes lon is given in 0..360 -- TODO confirm.
  nlonr <- nlon-ceiling(length(lon[lon<180 & lon > 0]))
  # Probability of most likely tercile.
  # Sign encodes which tercile wins: >0 upper, <0 lower, 0 middle (drawn
  # white by the colour scale); ties leave the point NA.
  PMLT <- (ptc[1,1,,]*NA)
  for (i in 1:nlon) {
    for (j in 1:nlat) {
      # Fraction of ensemble members falling in each tercile at this point.
      lo <- sum(ptc[,1,j,i],na.rm=T)/nfc
      mid <- sum(ptc[,2,j,i],na.rm=T)/nfc
      up <- sum(ptc[,3,j,i],na.rm=T)/nfc
      if (all(up > c(mid,lo))) {
        PMLT[j,i] <- up
      } else if (all(mid > c(up,lo))) {
        PMLT[j,i] <- 0
      } else if (all(lo > c(mid,up))) {
        PMLT[j,i] <- -lo
      }
    }
  }
  # EURO-BRISA figure. Modified Caio Coelho's function.
  # Inner helper: draws the filled map plus its colour bar on the current
  # graphics device; only the 'equi' branch is exercised by this wrapper.
  plotmapmostliktercile <- function(lon,lat,data,maintit='',legtit='',equi=TRUE,bw=FALSE, 
                    cont=FALSE,reg=FALSE,...,lonlim=c(min(floor(lon)),max(ceiling(lon))),latlim=c(min(floor(lat)),max(ceiling(lat))),orientation=NULL,
		    mapdat="world",xmaplim=c(min(floor(lon)),max(ceiling(lon))),ymaplim=c(min(floor(lat)),max(ceiling(lat))),
		    longrds=NULL,latgrds=NULL,
		    breaks=NULL,n=11,colours=NULL,
		    roundleg=1,invert=FALSE) {
    rg<-range(data, na.rm=T)
    lowerlim <- rg[1]; upperlim <- rg[2]
    maximum <- max(abs(c(lowerlim,upperlim)))
    # Default breaks: symmetric around zero when the data has negative values.
    if(is.null(breaks)){
      if(lowerlim<0) breaks <- seq(-maximum,maximum,(maximum-(-maximum))/n)
      else breaks <- seq(lowerlim,upperlim,(upperlim-(lowerlim))/n)
    }
    if(is.null(colours)){    
      if(!bw){
        # Check if range includes negative values to use appropriate colour scale.
        if (rg[1] <0) {
          if (!invert) { 
            colours <- bluered(seq(0,n-1,by=1), "linear", yellow =TRUE)
          } else {
            colours <- rev(bluered(seq(0,n-1,by=1), "linear", yellow =TRUE))
          }
        } else {
          colours <- bluered(seq(0,n-1,by=1), "linear", white=0, invert=T)
        }
      } else {
        # Black-and-white fallback: grey ramp, mirrored when data spans zero.
        if (rg[1] <0) {
          colours <- grey(seq(0, 1, length = length(breaks)-1))
          colours <- c(colours[1:((n-1)/2)],rev(colours[(((n-1)/2)+1):n]))
        } else {
          colours <- grey(seq(0, 1, length = length(breaks)-1))
        }
      }
    }  
    if (equi) {
      # Two-row layout: map on top, colour bar below.
      layout(matrix(1:2,ncol=1,nrow=2),heights=c(5,1))
      par(mar=c(6,2,6.5,1),cex.axis=1.0)
      if (reg) {
        layout(matrix(1:2,ncol=1,nrow=2),heights=c(5,1))
        par(mar=c(2,3,4,1),las=1,cex=1.4)
      }
      # Create probabilistic map.
      image(lon,lat,data,axes=F,col=colours,xlab='',ylab='',breaks=breaks)
      par(cex=2.1)
      if (cont) {contour(lon,lat,data,levels=round(breaks,1),labels=round(breaks,1),add=T,...)}
      # Check if lon is from 0 to 360 or -180 to 180 to use appropriate world map.
      if (min(lon)<0) {
        map('world',interior = F,add = T, lwd=2) # Low resolution world map (lon -180 to 180).
      } else {
        map('world2',interior = F,add = T, lwd=2) # Low resolution world map (lon 0 to 360).
      }
      box()
      map.axes()
      par(font.main=1,cex=1.4)
      title(maintit)
      # Adding colorbar.
      par(mar=c(4.5,2,0,1),mgp=c(1.5,0.3,0),las=1)
      if (reg) {
        par(mar=c(3,0.3,1.5,0.3),mgp=c(1.5,0.3,0),las=1)
      }
      image(c(1:n),1,t(t(c(1:n))),axes=F,col=colours,xlab='',ylab='')
      box()
      par(font.main=1,cex=1.3)
      # Tercile-probability labelling is used when values exceed 1 (i.e. the
      # caller passed percentages); otherwise plain numeric break labels.
      if (maximum > 1) {
        axis(1,at=seq(1,9),tick=F,pos=0.8,cex.axis=1.5,
             labels = c("85-100","70-85","55-70","40-55"," ","40-55","55-70","70-85","85-100"))
        axis(3,at=5,tick=F,cex.axis=1.5,pos=-1.3,labels=c("White=central tercile most likely"))
        axis(3,at=2.5,tick=F,cex.axis=1.5,pos=1.3,labels=c("Below normal"))
        axis(3,at=7.5,tick=F,cex.axis=1.5,pos=1.3,labels=c("Above normal"))
      } else {
        axis(1,at=seq(0.5, length(breaks)-0.5),tick=F,
             labels=round(breaks[1:(length(breaks))],roundleg))
      }
      # Redefine font size.
      par(cex=1.4)
      # Add the title.
      title(xlab = legtit,cex.lab=1.4)
    }
  }
  # Open the postscript device, draw the rotated/flipped field (latitudes
  # reversed, longitudes wrapped to -180..180, scaled to %), close the device.
  postscript(paste(file,sep=''),paper="special",width=12,height=9.25,horizontal=F)
  plotmapmostliktercile(c(lon[c((nlonr+1):nlon)]-360,lon[1:nlonr]),rev(lat),t(PMLT[nlat:1,c((nlonr+1):nlon,1:nlonr)]*100),
                        reg=T,maintit=tit,invert=F,breaks=c(-100,-85,-70,-55,-40,40,55,70,85,100),n=9)
  dev.off()
}
+
CFU_PB <- function(lon,lat,ano,fcavt=2:4,fcyr,fcsys=1,qt=3,ncpu=4) {
#  
# Compute probabilistic bins (qt) of a forecast (fcyr) relative to the forecast climatology of the residual
# forecasts excluding the selected forecast year (fcyr).
# The maximal values of the lowest/highest quantiles is +/-10^+06 due to limitation in the function hist().
# By default (qt=3) CFU_PB() computes terciles that can be plotted with CFU_plotmapmostliktercile().
#
# Description:
#
#      Returns probabilistic bins of a forecast (fcyr) relative to the forecast climatology without selected forecast.
#
# Usage:
#
#      CFU_PB(lon,lat,ano,fcavt=2:4,fcyr,fcsys=1,qt=3,ncpu=4) 
#
# Arguments:
#
#   	 	lon: A vector of longitudes.
#
#	 	lat: A vector of latitudes.
#  
#               ano: Array of forecast anomalies in CFU_load format.
#
#             fcavt: A vector of time steps to average across.
#
#              fcyr: Number of the forecast start year of the forecast.
#
#             fcsys: Number of the forecast system.
#
#                qt: Number of probabilistic bins 3: terciles, 4: quartiles, etc.
#
#              ncpu: Number of CPUs to be used.
#
# Output:
#
#              Probabilistic forecast information (1 or 0) at each grid point with
#              dimensions (number of ensemble members, qt, number of longitudes, latitudes).
# Author:
#
#              Fabian Lienert <flienert@ic3.cat>  29 Oct 2012.
#
  library(doMC)
  registerDoMC(ncpu)
  # Probabilistic forecast.
  nlon <- length(lon); nlat <- length(lat)
  nyr <- dim(ano)[3]; nfc <- dim(ano)[2]; lt <- dim(ano)[4]
  # NOTE(review): the bin dimension is hard-coded to 3 although PL below has
  # qt rows -- the assignment PTC[im,,,i] <- XY.p[[i]] should fail for
  # qt != 3; confirm whether other qt values were ever intended to work.
  PTC <- array(NA, dim=c(nfc,3,nlat,nlon))
  for (im in 1:nfc) {
    # Parallel over longitudes; each worker returns a (qt x nlat) slab.
    XY.p <- foreach(i=1:nlon) %dopar% {
      PL <- array(NA, dim=c(qt,nlat))
      for (j in 1:nlat) {
        # Mean over forecast range.
        f.p <- apply(ano[fcsys,im,-fcyr,fcavt,j,i],c(1),mean)
        # PDF of model climatology without selected forecast. 
        # NOTE(review): the bare 'prob' in this call looks like a typo (an
        # undefined object passed positionally next to the named na.rm) --
        # verify this call actually runs as intended.
        qum <- quantile(f.p,probs=c((1:(qt-1))/qt),prob,na.rm=F,names=F,type=8)
        # Bins of selected forecast.
        f.p <- mean(ano[fcsys,im,fcyr,fcavt,j,i])
        if (all(c(qum,f.p)==0)) {
          # Degenerate all-zero case: put the forecast in the middle bin.
          f0 <- rep(0,qt)
          # NOTE(review): is.integer(qt/2) is always FALSE in R ('/' yields a
          # double), so this branch always executes; for even qt the
          # fractional index median(1:qt) is truncated. Suspected intent was
          # an odd-qt check (qt %% 2 == 1) -- confirm before changing.
          if (!is.integer(qt/2)) { f0[median(1:qt)] <- 1 }
          PL[,j] <- f0
        } else {
          # 1 in the bin containing the forecast, 0 elsewhere; +/-1e+06 act
          # as open-ended outer bin edges (see header note).
          PL[,j] <- hist(f.p,breaks=c(-1e+06,qum,1e+06),plot=F)$counts
        }
      }
      PL
    }
    # Collect the per-longitude slabs into the output array.
    for (i in 1:nlon) { PTC[im,,,i] <- XY.p[[i]] }
  }
  PTC
}
+
+CFU_MODE <- function(ano,eof,mode=1) {
+#  
+# Description:
+#  
+#      Project anomalies onto modes to get temporal evolution of the EOF mode selected.
+#      Returns principal components (PCs) by area-weighted projection onto EOF pattern (from CFU_EOF()).
+#      Able to handle NAs.
+#
+# Arguments:
+#
+#               ano: Array of observed or forecast anomalies from CFU_ano() with dimensions
+#                    (number of forecast systems, ensemble members, start years, forecast months, latitudes, longitudes).  
+#  
+#               eof: EOF object from CFU_EOF() 
+#
+#              mode: mode number in EOF object onto which to project
+#
+# Output:
+#   
+#    	     PC.ver: Array of PCs in verification format
+#                    (number of forecast systems, ensemble members, start years, forecast months).  
+#    	  PC.ver.em: Array of ensemble-mean PCs in verification format
+#                    (number of forecast systems, start years, forecast months).  
+#
+# Author:
+#
+#              Fabian Lienert <flienert@ic3.cat>  19 Nov 2012.
+#        
+#              Lauriane Batte <lauriane.batte@ic3.cat> March 2014. Bug-fixes: 1-Extra weighting of the anomalies before projection.
+#                                                                             2-Reversion of the anomalies along latitudes.                            
+#                                                                             3-Extra-normalisation not necessary. 
+#
+#              Virginie Guemas <virginie.guemas@ic3.cat> 17 March 2014. Bug-fixes:1-Another extra-normalisation.
+#                                                                                 2-15 lines to compute the em reduced to 1.
+#              Lauriane Batte <lauriane.batte@ic3.cat> 18 March 2014. Normalization by std before returning PCs to be coherent with CFU_EOF
+#
+#              Virginie Guemas <virginie.guemas@ic3.cat> 11 April 2014. 1 - Removal of lon, lat, ncpu and neofs argument unused
+#                                                                       2 - Security checks ano and eof consistency
+#                                                                       3 - Removal of the mask which is already contained in the EOFs
+#                                                                       4 - Removal of the PC normalization since we have chosen in CFU_EOF
+#                                                                           to normalize the EOFs and multiply the PCs by the normalization
+#                                                                           factor and the eigenvalue so that the restitution of the original
+#                                                                           field is done simply by PC * EOFs 
+#                                                                       5 - The new convention in CFU_EOF is to divide by the weights so that the
+#                                                                           reconstruction of the original field rather than the weighted field 
+#                                                                           is obtained by PC * EOFs. The EOFs need therefore to be multiplied
+#                                                                           back by the weights before projection so that EOF * t(EOF) = 1
+#                                                                       6 - Since W *X = PC * EOF if EOF is multiplied back by the weights,
+#                                                                           PC = W * X * t(EOF) and X the input field to be projected (X)
+#                                                                           needs to be multiplied by W.
+#                                                                     
+# library(s2dverification) # To simplify some lines. Virginie
+# Getting input dimensions. Fabian
+  nlon<- dim(ano)[6]; nlat <- dim(ano)[5]
+  nyr <- dim(ano)[3]; lt <- dim(ano)[4]
+  nfc <- dim(ano)[2]; nmod <- dim(ano)[1]
+  # Security checks. Virginie
+  if ( dim(eof$EOFs)[2] != nlat ) {
+    stop("Inconsistent number of latitudes between eof and input field")
+  }
+  if ( dim(eof$EOFs)[3] != nlon ) {
+    stop("Inconsistent number of longitudes between eof and input field")
+  }
+  # Initialization of pc.ver. Fabian.  
+  pc.ver <- array(NA,dim=c(nmod,nfc,nyr,lt))
+         # Weights are already in EOF and shouldn't be accounted for twice. Lauriane
+         #e.1 <- eof$EOFs[mode,,]; mask <- eof$mask; wght <- eof$wght
+         # Now the EOF outputs from CFU_EOF are masked . Virginie
+         #e.1 <- eof$EOFs[mode,,]; mask <- eof$mask; wght <- rep(1,nlon*nlat)
+         #dim(wght) <- c(nlon,nlat)
+         # Since we have chosen as a new convention in CFU_EOF to divide by the
+         # weights so that the restitution of the original field is only obtained
+         # by PCs*EOFs, I multiply here back by the weigths to ensure that the
+         # projection is correct. Virginie
+  e.1 <- eof$EOFs[mode,,]*eof$wght
+         # Since W *X = PC * EOF after such multiplication by the weights,
+         # the PC is obtained by W * X * t(EOF) so we multiply the field to project
+         # by the weights. Virginie
+  ano <- ano*InsertDim(InsertDim(InsertDim(InsertDim(eof$wght,1,nmod),2,nfc),3,nyr),4,lt)
+  # The 100 lines below can be simplified in a few lines as done below. 
+  # Indeed, we can loop on nmod, nfc, nyr and lt even if the length of these dimentions is only 1.
+  # Furthermore, the PC of the ensemble mean anomaly equals the ensemble means of the PC of single
+  # members. [ 1/n sum(Xi) ] * t(EOF) = 1/n sum [ Xi * t(EOF) ]. Virginie
+         #  # For one realization only. Fabian
+         #  if (nfc==1) {
+         #    # Case for only one member should not be separate from several members. We can loop over 1 member only. Virginie.
+         #    pc.ver.em <- FALSE
+         #    # Why is there no loop over the models ? Virginie.
+         #    for (iy in 1:nyr) {
+         #      for (il in 1:lt) {
+         #        if (!all(is.na(ano[1,1,iy,il,,]))) {
+         #          #t1 <- (t(ano[nmod,nfc,iy,il,nlat:1,])*e.1)
+         #          # Latitudes should not be reversed to stay coherent with CFU_EOF. Lauriane
+         #          t1 <- (ano[nmod,nfc,iy,il,,]*e.1)
+         #          # The division by sum(mask) is an additional extra-normalisation. Virginie
+         #          #nominator <- (sum(t1*wght*mask,na.rm=TRUE)/sum(wght*mask,na.rm=TRUE))
+         #          # The EOFs are already masked now in CFU_EOF. Virginie
+         #          #nominator <- sum(t1*mask,na.rm=TRUE)
+         #          nominator <- sum(t1,na.rm=TRUE)
+         #          # Normalization should not be necessary. Projection is a simple scalar product. Lauriane
+         #          #denominator <- (sum(e.1^2*wght*mask,na.rm=TRUE)/sum(wght*mask,na.rm=TRUE))
+         #          denominator <- 1.
+         #          pc.ver[1,1,iy,il] <- nominator/denominator
+         #        }
+         #      }
+         #    }
+         #    # Normalization by standard deviation. Lauriane
+         #    #for (il in 1:lt) {
+         #    #    pc.ver[1,1,,il] <- pc.ver[1,1,,il]/sd(pc.ver[1,1,,il])
+         #    #}
+         #    # I have removed this normalization in agreement with the conventions chosen for CFU_EOF.
+         #    # The EOFs are normalized to 1 and the simple product EOF*PC should restitute the original field.
+         #    # Virginie
+         #  } else {
+         #    pc.ver.em <- array(NA,dim=c(nmod,nyr,lt))
+         #    # Virginie. This single line below gives the same result as l. 1162-1176
+         #    ano.em <- apply(ano,c(1,3,4,5,6),mean,na.rm=TRUE)
+         #    for (imo in 1:nmod) {
+         #      # Ensemble mean anomalies.
+         #      #if (lt==1 | nyr==1 ) {
+         #      #  if (lt==1 & !nyr==1 ) {          
+         #      #    ano.em <- apply(ano[imo,,,,,],c(2,3,4),mean,na.rm=TRUE)
+         #      #    dim(ano.em) <- c(nyr,1,nlat,nlon)
+         #      #  } else if (!lt==1 & nyr==1 ) {
+         #      #    ano.em <- apply(ano[imo,,,,,],c(2,3,4),mean,na.rm=TRUE)
+         #      #    dim(ano.em) <- c(1,lt,nlat,nlon)
+         #      #  } else if (lt==1 & nyr==1 ) {
+         #      #    ano.em <- apply(ano[imo,,,,,],c(2,3),mean,na.rm=TRUE)
+         #      #    dim(ano.em) <- c(1,1,nlat,nlon)
+         #      #  }
+         #      #} else {
+         #      # ano.em <- apply(ano[imo,,,,,],c(2,3,4,5),mean,na.rm=TRUE)
+         #      #}
+         #      for (iy in 1:nyr) {
+         #        for (il in 1:lt) {
+         #          if (!all(is.na(ano.em[imo,iy,il,,]))) {
+         #            #t1 <- (t(ano.em[imo,iy,il,nlat:1,])*e.1)
+         #            # Latitudes should not be reversed to stay coherent with CFU_EOF. Lauriane.
+         #            t1 <- (ano.em[imo,iy,il,,]*e.1)
+         #            # The division by sum(mask) is an additional extra-normalisation. Virginie
+         #            #nominator <- (sum(t1*wght*mask,na.rm=TRUE)/sum(wght*mask,na.rm=TRUE))
+         #            # The EOFs are already masked now in CFU_EOF. Virginie
+         #            #nominator <- sum(t1*mask,na.rm=TRUE)
+         #            nominator <- sum(t1,na.rm=TRUE)
+         #            # Normalization should not be necessary. Projection is a simple scalar product. Lauriane
+         #            #denominator <- (sum(e.1^2*wght*mask,na.rm=TRUE)/sum(wght*mask,na.rm=TRUE))
+         #            denominator <- 1.
+         #            pc.ver.em[imo,iy,il] <- nominator/denominator
+         #          }
+         #        }
+         #      }
+         #      # Normalization by standard deviation: done for each model independently. Lauriane
+         #      #for (il in 1:lt) {
+         #      #  pc.ver.em[imo,,il] <- pc.ver.em[imo,,il]/sd(pc.ver.em[imo,,il])
+         #      #}
+         #      # I have removed this normalization in agreement with the conventions chosen for CFU_EOF.
+         #      # The EOFs are normalized to 1 and the simple product EOF*PC should restitute the original field.
+         #      # Virginie
+         #      # Ensemble member anomalies.
+         #      for (im in 1:nfc) {
+         #        for (iy in 1:nyr) {
+         #          for (il in 1:lt) {
+         #            if (!all(is.na(ano[imo,im,iy,il,,]))) {
+         #              #t1 <- (t(ano[imo,im,iy,il,nlat:1,])*e.1)
+         #              # Latitudes should not be reversed to stay coherent with CFU_EOF. Lauriane.
+         #              t1 <- (ano[imo,im,iy,il,,]*e.1)
+         #              # The division by sum(mask) is an additional extra-normalisation. Virginie
+         #              #nominator <- (sum(t1*wght*mask,na.rm=TRUE)/sum(wght*mask,na.rm=TRUE))
+         #              # The EOFs are already masked now in CFU_EOF. Virginie
+         #              #nominator <- sum(t1*mask,na.rm=TRUE)
+         #              nominator <- sum(t1,na.rm=TRUE)
+         #              # Normalization should not be necessary. Projection is a simple scalar product. Lauriane
+         #              #denominator <- (sum(e.1^2*wght*mask,na.rm=TRUE)/sum(wght*mask,na.rm=TRUE))
+         #              denominator <- 1.
+         #              pc.ver[imo,im,iy,il] <- nominator/denominator
+         #            }
+         #          }
+         #        }
+         #      # Normalization by standard deviation: done for each model and ensemble member independently. Lauriane
+         #      #for (il in 1:lt) {
+         #      #   pc.ver[imo,im,,il] <- pc.ver[imo,im,,il]/sd(pc.ver[imo,im,,il])
+         #      #}
+         #      # I have removed this normalization in agreement with the conventions chosen for CFU_EOF.
+         #      # The EOFs are normalized to 1 and the simple product EOF*PC should restitute the original field.
+         #      # Virginie
+         #    }
+         #  }
+         #  }
+  for (imo in 1:nmod) {
+    for (im in 1:nfc) {
+      for (iy in 1:nyr) {
+        for (il in 1:lt) {
+          if (!all(is.na(ano[imo,im,iy,il,,]))) {
+            pc.ver[imo,im,iy,il] <- sum(ano[imo,im,iy,il,,]*e.1,na.rm=TRUE)
+          }
+        }
+      }
+    }
+  }
+  # For compatibility with previous version, I keep the output of the ensemble mean although it is useless
+  # because the operation can be done easily outside of CFU_MODE if necessary. This should be removed in 
+  # future. Virginie
+  pc.ver.em=Mean1Dim(pc.ver,2)
+
+  return(list(PC.ver=pc.ver,PC.ver.em=pc.ver.em))
+}
+
+
+
+CFU_DTF <- function(lon,lat,ano,gmt,ncpu=4) {
+#
+# Linearly regress out forecast global mean temperature trend (GMT) from forecast monthly anomalies
+# of ten-year hindcasts started each year. The regression depends on the forecast lead month across all forecasts.
+# NAs are excluded from the linear model fit, but kept in the output.
+#  
+# Description:
+#  
+#      Returns linearly detrended forecast anomalies in the verification format (from CFU_ano()).
+#
+# Usage:
+#
+#      CFU_DTF(lon,lat,ano,gmt)
+#    
+# Arguments:
+#
+#   	 	lon: A vector of longitudes.
+#
+#	 	lat: A vector of latitudes.
+#
+#               ano: Array of forecast anomalies from CFU_ano() with dimensions
+#                    (number of forecast systems, ensemble members, start years, forecast months, latitudes, longitudes).
+#  
+#               gmt: Array of forecast global mean temperature anomalies with dimensions 
+#                    (number of forecast systems, ensemble members, start years, forecast months).
+#                      
+#              ncpu: Number of CPUs to be used.
+#
+# Output:
+#   
+#    	 ano.dt.ver: Array of detrended forecast anomalies from CFU_ano() with dimensions
+#                    (number of forecast systems, ensemble members, start years, forecast months, latitudes, longitudes).
+#
+#               reg: Map of regression slopes with dimensions
+#                    (number of forecast systems, forecast months, latitudes, longitudes).
+#
+# Author:
+#
+#              Fabian Lienert <flienert@ic3.cat>  16 Nov 2012.
+#
+  library(doMC)
+  # Parallel backend: the foreach below distributes longitudes over ncpu workers.
+  registerDoMC(ncpu)
+  nlon <- length(lon); nlat <- length(lat)
+  nyr <- dim(ano)[3]; lt <- dim(ano)[4]
+  nfc <- dim(ano)[2]; nmod <- dim(ano)[1]
+  ano.dt.ver <- array(NA,dim=c(nmod,nfc,nyr,lt,nlat,nlon))
+  ## Map of regression slopes.
+  r.XY <- array(NA,dim=c(nmod,lt,nlat,nlon))
+  for (imo in 1:nmod) {
+      # Each worker handles one longitude i and returns its detrended column
+      # (DFC) and slope column (REG); results are gathered afterwards.
+      XY.p <- foreach(i=1:nlon, .verbose=F) %dopar% {
+        PL1 <- array(NA,dim=c(nfc,nyr,lt,nlat))
+        PL2 <- array(NA,dim=c(lt,nlat))
+        for (j in 1:nlat) {
+          # NOTE(review): 'iy' here indexes the forecast lead month (1:lt), not
+          # the start year.
+          for (iy in 1:lt) {
+            # Forecast anomaly.
+            fctpr <- as.vector(ano[imo,,,iy,j,i])
+            # Forecast trend wrt. 1961-2011.
+            trend.f <- as.vector(gmt[imo,,,iy])
+            # NOTE(review): mean() without na.rm=TRUE -- 'trend' becomes all-NA
+            # if gmt contains any NA at this lead; confirm gmt is complete.
+            trend <- trend.f-mean(trend.f)
+            # Regress forecast anomaly onto trend and remove it from the forecast. Keep residual forecast anomaly.
+            if (!all(is.na(fctpr))) {
+              # na.exclude keeps NAs at their original positions in residuals(fit).
+              fit <- lm(fctpr~trend,na.action=na.exclude)
+              fctpr.dt <- residuals(fit)
+              # Residuals come back as a flat vector over (member, start year).
+              dim(fctpr.dt) <- c(nfc,nyr)
+              PL1[,,iy,j] <- fctpr.dt
+              # Regression coefficient map.
+              PL2[iy,j] <- coefficients(fit)[[2]] 
+            }
+          }
+        }
+        return(list(DFC=PL1,REG=PL2))
+      }
+      # Gather the per-longitude worker results back into the full arrays.
+      for (i in 1:nlon) {
+        ano.dt.ver[imo,,,,,i] <- XY.p[[i]]$DFC
+        r.XY[imo,,,i] <- XY.p[[i]]$REG
+      }
+  }
+  gc()
+  return(list(ano.dt.ver=ano.dt.ver,reg=r.XY))
+}
+
+CFU_DTO <- function(lon,lat,ano,yr1,yr2,yrint,gmt=F) {
+#
+# Linearly regress out global mean temperature trend (GMT) from observed monthly anomalies
+# that coincide with hindcasts started on November 1st every 1,2,3, etc. years.
+# The anomalies are selected from the first year that coincide with each hindcast.
+# NAs are excluded from the linear model fit, but kept in the output.
+#  
+# Description:
+#  
+#      Returns linearly detrended and original anomalies in a format to be used for CFU_EOF().
+#
+# Usage:
+#
+#      CFU_DTO(lon,lat,ano,yr1,yr2,yrint,gmt=F)
+#    
+# Arguments:
+#
+#   	 	lon: A vector of longitudes.
+#
+#	 	lat: A vector of latitudes.
+#
+#               ano: Array of anomalies from CFU_ano() with dimensions
+#                    (1, 1, number of start years, forecast months, latitudes, longitudes).
+#  
+#               yr1: First start year.
+#
+#               yr2: Last start year.
+#
+#             yrint: Interval between hindcasts in years.
+#
+#               gmt: Global mean temperature time series. (optional)
+#  
+# Output:
+#  
+#        ano.dt.ver: An array of the linearly detrended anomalies in CFU verification format.
+#   
+#    	     ano.dt: An array of the linearly detrended anomalies with dimensions
+#                    (number of months, latitudes, longitudes).
+#
+#   	        ano: An array of the original input anomalies with dimensions
+#                    (number of months, latitudes, longitudes).
+#  
+# 	 	reg: An array containing a map of regression coefficients
+#                    (number of latitudes, longitudes).
+#
+#               gmt: The GMT time series actually used for the regression.
+#
+#           tgl.obs: Full HadCRUT GMT monthly series; NULL when gmt was supplied
+#                    by the caller (it is then never built).
+#
+# Author:
+#
+#              Fabian Lienert <flienert@ic3.cat>  19 Apr 2013.
+#
+  nlon <- length(lon); nlat <- length(lat)
+  nyr <- dim(ano)[3]; lt <- dim(ano)[4]
+  print(nyr); print(lt)  # Debug output of the input dimensions.
+  # Bug-fix: tgl.obs is only created in the default branch below. Initialize it
+  # here so the final return() does not fail with "object 'tgl.obs' not found"
+  # when the caller provides its own gmt series.
+  tgl.obs <- NULL
+  if (all(gmt==F)) {
+    # Global mean temperature GMT from HadCRUT, climate explorer.
+    # http://climexp.knmi.nl/data/icrutem3_hadsst2_0-360E_-90-90N_n.dat
+    T1 <- read.table("/cfu/data/ukmo/hadcrut/icrutem3_hadsst2_0-360E_-90-90N_n.dat",row.names=1)
+    # Reshape to 12 months x 113 years (1900-2012).
+    XT <- array(NA,dim=c(12,113))
+    for (im in 1:12) {
+      XT[im,] <- T1[[im]]
+    }
+    # Missing-value flag used in the HadCRUT file.
+    XT[XT==-9.999000e+02] <- NA
+    # GMT anomaly wrt 1356 mo, 113 yr, padded with NAs up to Dec 2022.
+    tgl.obs <- ts(c(as.vector(XT),rep(NA,120)),deltat=1/12,start=c(1900,1),end=c(2022,12))
+    # GMT anomaly restricted to the verification period (Nov yr1 to Oct yr2+lt/12).
+    tgl.vp.nov.obs <- window(tgl.obs,start=c(yr1,11),end=c((yr2+lt/12),10))
+    # NOTE(review): rescaling by (verification period length)/113 -- presumably
+    # to keep the regression slopes comparable across period lengths; confirm.
+    tgl.vp.nov.obs <- tgl.vp.nov.obs*((yr2-yr1+lt/12)/113)
+    gmt <- tgl.vp.nov.obs
+  }
+  # Consistency check up front, before any heavy reshaping is done.
+  if (length(gmt)!=((yr2-yr1+lt/12)*12)) {
+    stop(paste('GMT time series does not have the same length as the array of anomalies.',length(gmt),((yr2-yr1+lt/12)*12)))
+  }
+  # Rebuild one continuous monthly series per grid point (latitudes reversed):
+  # the first yrint*12 months of every hindcast, then the remaining months of
+  # the last hindcast.
+  o.XY <- array(NA,dim=c(((yr2-yr1+lt/12)*12),nlat,nlon))
+  TP1 <- ano[1,1,,1:(yrint*12),nlat:1,]
+  TP1 <- aperm(TP1, c(2,1,3,4))
+  dim(TP1) <- c(nyr*yrint*12,nlat,nlon)
+  o.XY[1:(nyr*yrint*12),,] <- TP1
+  o.XY[((nyr*yrint*12)+1):((yr2-yr1+lt/12)*12),,] <- ano[1,1,nyr,((yrint*12)+1):lt,nlat:1,]
+  # Remove trend from the obs for the whole period.
+  o.XY.dt <- array(NA,dim=c(((yr2-yr1+lt/12)*12),nlat,nlon))
+  r.XY <- (o.XY.dt[1,,]*NA)
+  for (i in 1:nlon) {
+    for (j in 1:nlat) {
+      if (!is.na(o.XY[1,j,i])) {
+        # Regress anomaly onto GMT and remove it from the anomaly.
+        a1 <- o.XY[,j,i]
+        # Exclude NAs from linear model estimate, but keep them in the residual time series. (!=$residuals)
+        fit <- lm(a1~as.vector(gmt),na.action=na.exclude)
+        a1.dt <- residuals(fit)
+        o.XY.dt[,j,i] <- a1.dt
+        # Regression coefficient map.
+        r.XY[j,i] <- coefficients(fit)[[2]]
+      }
+    }
+  }
+  # Put detrended obs in verification format.
+  ano.dt.ver <- array(NA,dim=c(1,1,nyr,lt,nlat,nlon))
+  # Works for forecast start date in each year.
+  for (i in 1:nlon) {
+    for (j in 1:nlat) {
+      d.all <- ts(o.XY.dt[,j,i],deltat=1/12,start=c(yr1,11),end=c((yr2+lt/12),10))
+      if (!is.na(o.XY[1,j,i])) {
+        for (iy in 1:nyr) {
+          # Window of this hindcast's target period; latitudes are un-reversed
+          # ((nlat+1)-j) on the way back into verification format.
+          d.sl <- window(d.all,start=c((yr1+iy*yrint-yrint),11),end=c((yr1+lt/12+iy*yrint-yrint),10))
+          ano.dt.ver[1,1,iy,,((nlat+1)-j),i] <- d.sl
+        }
+      }
+    }
+  }
+  return(list(ano.dt=o.XY.dt,ano=o.XY,reg=r.XY,ano.dt.ver=ano.dt.ver,gmt=gmt,tgl.obs=tgl.obs))
+}
+
+CFU_DTO_an <- function(lon,lat,ano,yr1,yr2,yrint,gmt=F) {
+#
+# Linearly regress out global mean temperature trend (GMT) from observed annual anomalies
+# that coincide with hindcasts started on November 1st every 1,2,3, etc. years.
+# The anomalies are selected from the first year that coincide with each hindcast.
+# NAs are excluded from the linear model fit, but kept in the output.
+#  
+# Description:
+#  
+#      Returns linearly detrended and original anomalies in a format to be used for CFU_EOF().
+#
+# Usage:
+#
+#      CFU_DTO_an(lon,lat,ano,yr1,yr2,yrint,gmt=F)
+#    
+# Arguments:
+#
+#   	 	lon: A vector of longitudes.
+#
+#	 	lat: A vector of latitudes.
+#
+#               ano: Array of anomalies from CFU_ano() with dimensions
+#                    (1, 1, number of start years, forecast years, latitudes, longitudes).
+#  
+#               yr1: First start year.
+#
+#               yr2: Last start year.
+#
+#             yrint: Interval between hindcasts in years.
+#
+#               gmt: Global mean temperature time series. (optional)
+#  
+# Output:
+#  
+#        ano.dt.ver: An array of the linearly detrended anomalies in CFU verification format.
+#  
+#    	     ano.dt: An array of the linearly detrended anomalies with dimensions
+#                    (number of years, latitudes, longitudes).
+#
+#   	        ano: An array of the original input anomalies with dimensions
+#                    (number of years, latitudes, longitudes).
+#  
+# 	 	reg: An array containing a map of regression coefficients
+#                    (number of latitudes, longitudes).
+#
+# Author:
+#
+#              Fabian Lienert <flienert@ic3.cat>  15 May 2013.
+#
+  nlon <- length(lon); nlat <- length(lat)
+  nyr <- dim(ano)[3]; lt <- dim(ano)[4]
+  if (all(gmt==F)) {
+    # Global mean temperature GMT from HadCRUT, climate explorer.
+    # http://climexp.knmi.nl/data/icrutem3_hadsst2_0-360E_-90-90N_n.dat
+    # NOTE(review): hardcoded absolute path -- only works on the CFU cluster.
+    T1 <- read.table("/cfu/data/ukmo/hadcrut/icrutem3_hadsst2_0-360E_-90-90N_n.dat",row.names=1)
+    # Reshape to 12 months x 113 years (1900-2012).
+    XT <- array(NA,dim=c(12,113))
+    for (im in 1:12) {
+      XT[im,] <- T1[[im]]
+    }
+    # Missing-value flag used in the HadCRUT file.
+    XT[XT==-9.999000e+02] <- NA
+    # GMT anomaly wrt 1356 mo, 113 yr.
+    tgl.obs <- ts(c(as.vector(XT),rep(NA,120)),deltat=1/12,start=c(1900,1),end=c(2022,12))
+    # GMT anomaly wrt verification period.
+    tgl.vp.nov.obs <- window(tgl.obs,start=c(yr1,11),end=c(((yr2+lt)),10))
+    # NOTE(review): rescaling by (period length)/113 -- presumably to keep the
+    # regression slopes comparable across period lengths; confirm the intent.
+    tgl.vp.nov.obs <- tgl.vp.nov.obs*((yr2-yr1+lt)/113)
+    # aggregate() collapses the monthly series to one (annual) mean per cycle.
+    gmt <- aggregate(tgl.vp.nov.obs,FUN=mean)
+  }
+  # One continuous annual series per grid point, latitudes reversed.
+  o.XY <- array(NA,dim=c(((yr2-yr1+lt)),nlat,nlon))
+  if (yrint>1) {
+    TP1 <- ano[1,1,,1:(yrint),nlat:1,] 
+    TP1 <- aperm(TP1, c(2,1,3,4))
+  } else {
+    # With yrint==1 the lead dimension drops out, so no transpose is needed.
+    TP1 <- ano[1,1,,1,nlat:1,]
+  }
+  dim(TP1) <- c(nyr*yrint,nlat,nlon)
+  o.XY[1:(nyr*yrint),,] <- TP1
+  # Append the remaining forecast years of the last hindcast.
+  o.XY[((nyr*yrint)+1):((yr2-yr1+lt)),,] <- ano[1,1,nyr,((yrint)+1):lt,nlat:1,]
+  if (length(gmt)!=((yr2-yr1+lt))) {
+    print(paste('GMT time series has not the same length than the array of anomalies.',length(gmt),((yr2-yr1+lt))))
+    stop()
+  }
+  # Remove trend from the obs for the whole period.
+  o.XY.dt <- array(NA,dim=c(((yr2-yr1+lt)),nlat,nlon))
+  r.XY <- (o.XY.dt[1,,]*NA)
+  for (i in 1:nlon) {
+    for (j in 1:nlat) {
+      if (!is.na(o.XY[1,j,i])) {
+        # Regress anomaly onto GMT and remove it from the anomaly.
+        a1 <- o.XY[,j,i] 
+        # Exclude NAs from linear model estimate, but keep them in the residual time series. (!=$residuals)
+        fit <- lm(a1~as.vector(gmt),na.action=na.exclude)
+        a1.dt <- residuals(fit)
+        o.XY.dt[,j,i] <- a1.dt
+        # Regression coefficient map.
+        r.XY[j,i] <- coefficients(fit)[[2]]
+      }
+    }
+  }
+  # Put detrended obs in verification format.
+  ano.dt.ver <- array(NA,dim=c(1,1,nyr,lt,nlat,nlon))
+  # Works for forecast start date in each year.
+  for (i in 1:nlon) {
+    for (j in 1:nlat) {
+      d.all <- ts(o.XY.dt[,j,i],deltat=1,start=c(yr1,11),end=c((yr2+lt),10))
+      if (!is.na(o.XY[1,j,i])) {
+        for (iy in 1:nyr) {
+          # Window of this hindcast's target years; ((nlat+1)-j) un-reverses
+          # the latitude ordering on the way back into verification format.
+          d.sl <- window(d.all,start=c((yr1+iy*yrint-yrint),11),end=c((yr1+lt+iy*yrint-yrint),10))
+          ano.dt.ver[1,1,iy,,((nlat+1)-j),i] <- d.sl
+        }
+      }
+    }
+  }
+  # Unlike CFU_DTO(), tgl.obs is deliberately not part of the return list here.
+  return(list(ano.dt=o.XY.dt,ano=o.XY,reg=r.XY,ano.dt.ver=ano.dt.ver,gmt=gmt))
+}
+
+CFU_EOF <- function(lon,lat,ano,neofs=15,corr=FALSE) {
+# 
+# Description:
+#
+#      Performs an area-weighted EOF analysis using SVD based on a covariance matrix by default, 
+#             based on a correlation matrix if corr argument is set to TRUE.
+#   
+# Arguments:
+#
+#   	 	   lon: A vector of longitudes.
+#
+#              lat: A vector of latitudes.
+#  
+#              ano: Array of anomalies with dimensions
+#                    (number of timesteps, number of latitudes, number of longitudes).
+#
+#              neofs: Number of modes to be kept. Default = 15
+#
+#              corr: based on correlation matrix rather than covariance matrix. Default = FALSE
+#
+# Output:
+#
+#   	       EOFs: An array of EOF patterns normalized to 1 with dimensions
+#                    (number of modes, number of latitudes, number of longitudes)
+#  
+#   	       PCs: An array of principal components with dimensions
+#                    (number of timesteps, number of modes)
+#  
+#              Var: Percentage (%) of variance fraction of total variance explained by each mode (number of modes).
+#  
+#              mask: Mask with dimensions (number of latitudes, number of longitudes)
+#  
+#              wght: Weights with dimensions (number of latitudes, number of longitudes)
+#                    
+# Author:
+#
+#              Fabian Lienert <flienert@ic3.cat>  29 Oct 2012. Inspired by R. Benestad's EOF() in R-package clim.pact.
+#
+#              Lauriane Batte <lauriane.batte@ic3.cat> March 2014. Bug-fixes : 1-reversion of latitudes in the weights
+#                                                                              2-correlation matrix was used instead of covariance
+#                                                                              3-double use of the weights
+#
+#              Virginie Guemas <virginie.guemas@ic3.cat> 17 March 2014. Bug-fixes: 1-weight computation - division by sum of cos(lat)
+#                                                                                  2-shuffling of EOFs in EOF.2 intermediate vector                   
+#                                                                                  3-crash when neofs=1 sorted out
+#                                                                                  4-crash when neofs>nt sorted out
+#
+#              Lauriane Batte <lauriane.batte@ic3.cat>  19 March 2014 : BIG cleanup of code and clarification
+#                                                                       Reduction of the number of transpositions and bug-fixes associated
+#                                                                       Remove of the obsolete LINPACK options
+#                             
+#              Virginie Guemas <virginie.guemas@ic3.cat> 11 April 2014.
+#                                                       1 - Bug-fix in dimensions handling 
+#                                                           EOF composition restitutes now the original field in all cases
+#                                                       2 - Simplification of the convention transpose
+#                                                       3 - Options to use the correlation matrix rather than the covariance matrix
+#                                                       4 - Security checks
+#                                                       5 - New normalization of PCs so that PC*EOF only reconstruct the original file
+#                                                       6 - Weights = sqrt(cos(lat)) for ano so that covariance matrice weighted by cos(lat)
+#                                                       7 - Division of EOF by weights so that the reconstruction is simply EOF * PC
+#
+  # s2dverification provides InsertDim() used below.
+  library(s2dverification)
+  nlon <- length(lon); nlat <- length(lat)
+  # The two lines below have been moved up for the security checks. Virginie
+  dim.dat <- dim(ano)
+  ny <- dim.dat[2]; nx <- dim.dat[3]; nt <- dim.dat[1]
+  # Security check. Virginie
+  if ( ny != nlat ) {
+    stop("Inconsistent number of latitudes and input field dimensions")
+  }
+  if ( nx != nlon ) {
+    stop("Inconsistent number of longitudes and input field dimensions")
+  }
+  # Buildup of the mask. Fabian.
+  # Mask is 1 where the first timestep is finite, NA elsewhere.
+  mask <- ano[1,,]; mask[!is.finite(mask)] <- NA
+  mask[is.finite(mask)] <- 1
+  # Replace mask of NAs with 0s for EOF analysis. Fabian
+  ano[!is.finite(ano)] <- 0
+  # Area weighting. Weights for EOF. Fabian. Bug below. Lauriane
+       # wght <- array(cos(rev(lats)*pi/180),dim=(c(nlat,nlon)))
+  # The reversion of latitudes is not suitable. Lauriane
+  wght <- array(cos(lat*pi/180),dim=(c(nlat,nlon)))
+  # The sum of the weights should equal to 1. Virginie
+  wght <- wght/sum(wght,na.rm=T)
+  # We want the covariance matrix to be weigthed by the grid cell area
+  # so the anomaly field should be weighted by its square root since
+  # the covariance matrix equals transpose(ano) times ano. Virginie.
+  wght <- sqrt(wght)
+  # Fabian was initially dividing by stdv to get the correlation matrix.
+  # Weighting of the anomalies that corresponds to the use of a covariance matrix. Lauriane.
+       #for (it in 1:nt) {ano[it,,] <- ano[it,,]*wght/stdv}
+  ano <- ano*InsertDim(wght,1,nt)  
+  # The use of the correlation matrix is done under the option corr. Virginie. 
+  if (corr==TRUE) {
+    # NOTE(review): sd() over the full array returns a single scalar (and
+    # errors on matrix input in R >= 3.0); a per-gridpoint standard deviation
+    # may be what a true correlation matrix requires -- confirm the intent.
+    stdv <- sd(ano,na.rm=T)
+    ano <- ano/InsertDim(stdv,1,nt)
+  }
+  # Time/space matrix for SVD. Fabian
+  dim(ano) <- c(nt,ny*nx)
+  dim.dat <- dim(ano)
+
+  # "transposed" means already transposed. Fabian's convention. 
+  # This is very misleading because the user has actually not transposed its array.
+  # Only by chance, the spatial dimension is smaller than the time dimension.
+  # I therefore change the convention to:
+  # "transpose" means the array needs to be transposed before calling La.svd for
+  # computational efficiency because the spatial dimension is larger than the time 
+  # dimension. This goes with transposing the outputs of LA.svd also. Virginie
+  #print(c('dim (nt,npts):',dim.dat))
+       #if (dim.dat[2] < dim.dat[1]) {transposed <- TRUE} else {transposed <- FALSE}
+  if (dim.dat[2] > dim.dat[1]) {transpose <- TRUE} else {transpose <- FALSE}
+  if (transpose) {
+    pca <- La.svd(t(ano))
+  } else {
+    pca <- La.svd(ano)
+  }
+  # Remove of the obsolete LINPACK cases. Lauriane.
+
+  # The number of output eofs might be lower than 15 if less than 15 grid points are used. Fabian.
+       #if (neofs > dim(ano)[2]) {neofs <- dim(ano)[2]}
+  # Whatever the spatial or temporal dimension is smaller than neofs, neofs should be bounded. Virginie.
+  neofs=min(dim(ano),neofs) 
+  # The line below by Fabian is not used. Virginie
+       #dim.v <- dim(pca$v); dim.u <- dim(pca$v); dim.x <- dim(ano)
+  # 
+  # Lauriane :
+  # La.svd conventions: decomposition X = U D t(V)
+  # La.svd$u returns U
+  # La.svd$d returns diagonal values of D
+  # La.svd$v returns t(V) !!
+  # The usual convention is PC=U and EOF=V. 
+  # If La.svd is called for ano (transpose=FALSE case):
+  #      EOFs: $v
+  #      PCs:  $u
+  # If La.svd is called for t(ano) (transposed=TRUE case):
+  #      EOFs: t($u)
+  #      PCs:  t($v)
+  if (transpose) {
+      pca.EOFs <- t(pca$u)
+      pca.PCs <- t(pca$v)
+      #print('transpose')
+      #print(c('Dim(pca.EOFs) :',dim(pca.EOFs)))
+      #print(c('Dim(pca.PCs) :',dim(pca.PCs)))
+  } else {
+      pca.EOFs <- pca$v
+      pca.PCs <- pca$u
+      #print('not transpose')
+      #print(c('Dim(pca.EOFs) :',dim(pca.EOFs)))
+      #print(c('Dim(pca.PCs) :',dim(pca.PCs)))
+  }
+  # The numbers of transposition has been reduced. Virginie
+       #PC <- pca.PCs[1:neofs,]
+  PC <- pca.PCs[,1:neofs]
+  EOF <- pca.EOFs[1:neofs,]
+       # The lines seem to be at the origin of the lack of recomposition in last version. Virginie.
+       #dim(EOF) <- c(neofs,nx,ny)
+       #EOF <- aperm(EOF, c(1,3,2)) 
+  dim(EOF) <- c(neofs,ny,nx)
+  # To sort out crash when neofs=1. Virginie 
+  if (neofs==1) { 
+    PC=InsertDim(PC,2,1) 
+    #EOF=InsertDim(EOF,1,1) # This is not necessary with line dim(EOF) above. Lauriane
+  }
+  # Computation of the % of variance associated with each mode. Fabian.
+  W <- pca$d[1:neofs]
+  tot.var <- sum(pca$d^2)
+  Var.eof <- 100 * pca$d[1:neofs]^2/tot.var
+  for (e in 1:neofs) { 
+    # sg is initialized here. It detects the sign of the pattern and latter the pattern is
+    # multiply by -1 if necessary to obtain the nominal IPO or PDO. Fabian
+    # CFU_EOF should be generic enough to compute any type of EOF. The change of sign of 
+    # the pattern should be done outside CFU_EOF. Lauriane & Virginie.
+         #sg <- 1
+    # Factor to normalize the PC.
+    eof.pc.sd <- sd(PC[,e])
+    # Factor to normalize the EOF.
+    # Multiplying by mask re-introduces NAs over masked grid points.
+    eof.patt.nn <- EOF[e,,]*mask
+         # Weights have already been taken into account in covariance matrix. Lauriane.
+         #eof.patt.ms <- sum(eof.patt.nn^2*t(wght*mask),na.rm=TRUE)/sum(t(wght*mask),na.rm=TRUE)
+    eof.patt.ms <- sum(eof.patt.nn^2,na.rm=TRUE)
+    # Normalize PC and EOF
+    eof.patt <- eof.patt.nn/eof.patt.ms
+    # I would rather multiply eof.pc by eof.patt.ms and W[e] so that the user can reconstruct
+    # the original field by EOF * PC only (*weight) rather than EOF * PC * (normalization 
+    # factor that the user does not know) * (eigenvalue that is not straightforward to get back
+    # from the % of variance explained) (*weight). The division by sd(pc) is very easy to do 
+    # outside CFU_EOF in case the user needs a normalized PC but it is more difficult to get 
+    # back the amplitude of the variability represented by a given mode outside CFU_EOF if 
+    # we do not output such a muliplied PC multiplied. Virginie. 
+    #eof.pc <- PC[,e]/eof.pc.sd
+    eof.pc <- PC[,e]*eof.patt.ms*W[e]
+    # I am also wondering if we should not output the EOF divided by the weight
+    # so that the reconstruction is only EOF * PC only rather than EOF * PC * weight
+    # since we have multiplied ano by weight. Virginie
+    eof.patt <- eof.patt/wght  
+    # Change the sign of first EOF pattern for PDO, IPO, etc. Fabian
+         #if (e==1) if (eof.patt[round(nlon/2),round(nlat/2)] > 0) sg <- -1
+    EOF[e,,] <- eof.patt
+    PC[,e] <- eof.pc
+  }
+  # With the conventions above, the original field is reconstructed as
+  # sum over modes of PCs[,e] outer EOFs[e,,] (no extra weighting needed).
+  return(list(EOFs=EOF,PCs=PC,Var=Var.eof,mask=mask,wght=wght))
+}
+
+
+CFU_plotbox <- function(NAO,yr1,yr2,tit='NAO index',tar='DJF',pr.qu=expression('NAO index (PC1) SLP'~(sigma)),obs='ERAInterim',fcsys='EC-Earth 3',file='NAO_fc.ps') {
+##  
+## Produce time series plot showing the distribution of the forecast members with box-and-whisker plot.
+##
+## Description:
+##
+##      Generates vector graphics file of a forecast box-and-whisker plot time series vs. the observed evolution.
+##      Only works for re-forecasts started each year.
+##  
+## Usage:
+##
+##      CFU_plotbox(lon,lat,NAO,yr1,yr2,tit='NAO index',tar='DJF',pr.qu='NAO index (PC1) SLP',
+##                  obs='ERAInterim',fcsys='EC-Earth 3',file='NAO_fc.ps')
+## Arguments:
+##
+##               NAO: List of obs, forecast arrays of, e.g., the NAO index for each ensemble member, output of CFU_NAO().
+##               yr1: Year of first forecast target period.
+##
+##               yr2: Year of last forecast target period.
+##
+##               tit: Title of the Figure.
+##
+##               tar: Forecast target period.
+##  
+##             pr.qu: Prognostic quantity.
+##  
+##               obs: Observational dataset.
+##  
+##             fcsys: Climate forecast system.
+##  
+##              file: Name of the postscript output file.
+##
+## Output:
+##
+##              Vector graphics file of a forecast box-and-whisker plot time series vs. the observed evolution.
+##
+## Author:
+##
+##              Fabian Lienert <flienert@ic3.cat>  09 Aug 2013.
+##
+## Modifications:
+##
+##              Lauriane Batte <lauriane.batte@ic3.cat> 18 Mar 2014
+##                  1. No normalization of the indices (already taken care of
+##                     in previous functions)
+##                  2. Taking into account NA cases (missing data)
+##
+##
+  nyr <- dim(NAO$NAOO.ver)[3]
+  # Open a landscape PostScript device; closed by dev.off() at the end.
+  postscript(file,horizontal=T)
+  ## Observed time series.
+  pc.o <- ts(NAO$NAOO.ver[1,1,],deltat=1,start=yr1,end=yr2)
+  ## Normalization of obs, forecast members.
+  ## This has already been done in CFU_MODE
+  #  pc.o <- pc.o/sd(pc.o)
+  #  sd.fc <- apply(NAO$NAOF.ver[1,,],c(1),sd)
+  #  NAO$NAOF.ver[1,,] <- NAO$NAOF.ver[1,,]/sd.fc
+  ## Produce plot.
+  par(mar = c(5, 6, 4, 2))
+  # Symmetric y-limits around zero, spanning obs and all forecast members.
+  boxplot(NAO$NAOF.ver[1,,],add=F,main=paste(tit,tar),ylab='',xlab='',
+          col='red',lwd=2,t='b',axes=F,cex.main=2,
+          ylim=c(-max(abs(c(NAO$NAOF.ver[1,,],pc.o)),na.rm=TRUE),max(abs(c(NAO$NAOF.ver[1,,],pc.o)),na.rm=TRUE)))
+  lines(1:nyr,pc.o,lwd=3,col='blue'); abline(h=0,lty=1)
+  legend('bottomleft',c(obs,fcsys),lty=c(1,1),lwd=c(3,3),pch=c(NA,NA),
+         col=c('blue','red'),horiz=T,bty='n',cex=2.2) 
+  axis(1,c(1:nyr),NA,cex.axis=2.0)
+  axis(1,seq(1,nyr,by=1),seq(start(pc.o)[1],end(pc.o)[1],by=1),cex.axis=2.0)
+  mtext(1,line=3,text=tar,cex=1.9)
+  # AC: correlation between obs and the forecast ensemble mean.
+  # NOTE(review): cor() here has no use= argument, so any NA in pc.o makes the
+  # printed AC value NA -- confirm that is acceptable.
+  mtext(3,line=-2,text=paste(' AC =',round(cor(pc.o,apply(NAO$NAOF.ver[1,,],c(2),mean,na.rm=TRUE)),2)),cex=1.9,adj=0)
+  axis(2,cex.axis=2.0)
+  mtext(2,line=3,text=pr.qu,cex=1.9)
+  box()
+  dev.off()
+}
+
CFU_NAO <- function(lon,lat,ano.f,ano.o,fcavt=2:4,fcsys=1) {
##
## Compute the North Atlantic Oscillation (NAO) index based on the leading EOF of sea level pressure (SLP) anomalies.
## The PCs are obtained by projecting the forecast and observed anomalies onto the observed EOF pattern.
## By default (fcavt=2:4) CFU_NAO() computes the NAO index for 1-month lead seasonal forecasts that can be
## plotted with CFU_boxplot().
##
## Description:
##
##      Returns cross-validated PCs of the NAO index for forecast (ano.f) and observations (ano.o) based on the
##      observed leading EOF pattern. Cross-validation is leave-one-out over start years: for each start year,
##      the observed EOF is recomputed excluding that year before projecting.
##
## Usage:
##
##      CFU_NAO(lon,lat,ano.f,ano.o,fcavt=2:4,fcsys=1)
##
## Arguments:
##
##             lon: A vector of longitudes.
##
##             lat: A vector of latitudes.
##
##             ano.f: Array of North Atlantic SLP (0-60N, 80W-0) forecast anomalies from CFU_ano() with dimensions
##                     (number of forecast systems, ensemble members, start years, forecast months, latitudes, longitudes).
##
##             ano.o: Array of North Atlantic SLP (0-60N, 80W-0) observed anomalies from CFU_ano() with dimensions
##                     (number of forecast systems, ensemble members, start years, forecast months, latitudes, longitudes).
##
##             fcavt: A vector of time steps to average across defining the target period.
##
##             fcsys: Number of the forecast system.
##                    NOTE(review): fcsys is only referenced by the commented-out
##                    rotation code below; the active code always reads forecast
##                    system index 1 - confirm this is intended.
##
## Output:
##
##         NAOF.ver: Array of forecast NAO index in verification format
##                    (number of forecast systems, ensemble members, start years, forecast months).
##
##         NAOO.ver: Array of observed NAO index in verification format
##                    (1, 1, number of start years, forecast months).
##
## Author:
##
##              Fabian Lienert <flienert@ic3.cat>  08 Aug 2013.
##
##              Lauriane Batte <lauriane.batte@ic3.cat>   March 2013.
##
##              Virginie Guemas <virginie.guemas@ic3.cat> 17 March 2013. Removing the rotation.
##
  nlon <- length(lon); nlat <- length(lat)
  nyr <- dim(ano.f)[3]; nfc <- dim(ano.f)[2]; lt <- dim(ano.f)[4]
  # A security check should be added nlon==nx & nlat==ny. Virginie.
  # NOTE(review): nlonr is only used by the commented-out rotation code below.
  nlonr <- ceiling(length(lon[lon<180 & lon>=0]))

  ## Rotate field to start west of 0 longitude. Fabian.
  # This rotation should be done after the projection to output the
  # NAO pattern only. Virginie.
  #ano.o.sh <- (ano.o*NA)
  #ano.o.sh[1,1,,,,] <- ano.o[1,1,,,,c((nlonr+1):nlon,1:nlonr)]
  #ano.f.sh <- (ano.f*NA)
  #ano.f.sh[1,,,,,] <- ano.f[fcsys,,,,,c((nlonr+1):nlon,1:nlonr)]
  #remove(ano.o,ano.f);
  ano.f.sh <- ano.f; ano.o.sh=ano.o
  gc()

  ## Target period mean. Fabian.
  # The same operations can be done in half the lines using s2dverification. Virginie
  # Also this operation could be more general using CFU_season. Virginie
  # Average over the fcavt forecast months, keeping a singleton lead-time dim.
  o1 <- apply(ano.o.sh[1,1,,fcavt,,],c(1,3,4),mean,na.rm=T)
  dim(o1) <- c(1,1,nyr,1,nlat,nlon)
  f1 <- apply(ano.f.sh[1,,,fcavt,,],c(1,2,4,5),mean,na.rm=T)
  dim(f1) <- c(1,nfc,nyr,1,nlat,nlon)

  ## Cross-validated PCs. Fabian.
  # This should be extended to nmod and nlt by simple loops. Virginie
  NAOF.ver <- array(NA,c(1,nfc,nyr))
  NAOO.ver <- array(NA,c(1,1,nyr))
  for (iy in 1:nyr) {
    ## Observed EOF excluding one forecast start year. Fabian
    # Without the rotation and only 1 EOF. Virginie
    # OEOF <- CFU_EOF(c(lon[c((nlonr+1):nlon)]-360,lon[1:nlonr]),lat,o1[1,1,-iy,1,,],neofs=3)
    OEOF <- CFU_EOF(lon,lat,o1[1,1,-iy,1,,],neofs=1)
    ## Correct polarity of pattern. Fabian
    # Flip the sign so the northern-lobe (around 2/3 of the lat range) loading is negative.
    sign <- 1; if ( 0 < mean(OEOF$EOFs[1,,round(2*nlat/3)],na.rm=T) ) {sign <- -1}
    OEOF$EOFs <- OEOF$EOFs*sign; OEOF$PCs <- OEOF$PCs*sign
    # There should be an option that allows compute the model EOFs instead of only projecting on the observed EOFS. Virginie
    # for (ifc in 1:nfc) {
    #   FEOF <- CFU_EOF(lon,lat,f1[1,ifc,,1,,],neofs=1)
    #   NAOF.ver[1,ifc,]=FEOF$PCs
    # }
    ## Project forecast anomalies. Fabian
    # Without the rotation. Virginie
    #PCF <- CFU_MODE(c(lon[c((nlonr+1):nlon)]-360,lon[1:nlonr]),lat,f1,OEOF,mode=1)
    PCF <- CFU_MODE(lon,lat,f1,OEOF,mode=1)
    # for (imo in 1:nmod) { for (il in 1:lt) {NAOF.ver[imo,,iy,il] <- PCF$PC.ver[imo,,iy,imo]}}
    # All years are projected, but only the excluded year iy is kept (cross-validation).
    NAOF.ver[1,,iy] <- PCF$PC.ver[1,,iy,1]
    ## Project observed anomalies. Fabian.
    # Without the rotation. Virginie.
    #PCO <- CFU_MODE(c(lon[c((nlonr+1):nlon)]-360,lon[1:nlonr]),lat,o1,OEOF,mode=1)
    PCO <- CFU_MODE(lon,lat,o1,OEOF,mode=1)
    ## Keep PCs of excluded forecast start year. Fabian.
    NAOO.ver[1,1,iy] <- PCO$PC.ver[1,1,iy,1]
  }
  # Here there could be a rotation of the pattern. Virginie
  # OEOF$EOFs[1,,] <- OEOF$EOFs[1,c((nlonr+1):nlon,1:nlonr),]
  # The problem with this rotation is that the user does not have the corresponding longitudes anymore.
  # I don't think if should be rotated at all then. Virginie
  # NOTE(review): OEOF returned here is the pattern from the LAST loop iteration
  # only (computed with start year nyr excluded), not an all-years EOF - confirm
  # this is intended before using it.
  return(list(NAOF.ver=NAOF.ver,NAOO.ver=NAOO.ver,OEOF=OEOF))
}

+ 71 - 0
eigentechniques/unit_testing/test_EOF.R

@@ -0,0 +1,71 @@
source('../CFU_Rfunc.txt')
library(s2dverification)
#
# A little testing script by Virginie
#
# Purpose: sanity-check CFU_EOF and CFU_MODE on a small random field:
#   1) the field must be exactly reconstructed (to machine precision)
#      from the PCs and EOFs returned by CFU_EOF,
#   2) projecting the field with CFU_MODE must recover the CFU_EOF PCs.
#

nt=6                  # number of time steps
ny=5                  # number of latitudes
nx=4                  # number of longitudes
neofs=min(nt,nx*ny)   # maximum number of resolvable EOFs
a=array(rnorm(nt*ny*nx),dim=c(nt,ny,nx))
a=a-InsertDim(Mean1Dim(a,1),1,nt)   # remove the time mean -> anomalies
lon=c(20,25,30,35)
lat=c(20,25,30,35,40)
a[c(18,22)]=NA        # introduce missing values to test NA handling

# Weight computation
#wgt=array(cos(lat*pi/180),dim=c(ny,nx))
#wgt <- wgt/sum(wgt,na.rm=T)
#wgt <- sqrt(wgt)
#wgt <- array(1,dim=c(ny,nx))

#Within CFU_EOF
b=CFU_EOF(lon,lat,a,neofs=neofs)

#Without CFU_EOF
#abis=array(a*InsertDim(wgt,1,nt),dim=c(nt,ny*nx))
#b=La.svd(abis)
#EOF=array(b$v,dim=c(neofs,ny,nx))
#PC=b$u

# Rebuild the field as the sum over modes of PC * EOF.
test=array(0,dim=dim(a))
for (i in 1:nt) {
  for (j in 1:neofs) {
    # Check within CFU_EOF
    #test[i,,]=test[i,,]+b$PCs[i,j]*b$EOFs[j,,]*b$D[j]/wgt
    #test[i,,]=test[i,,]+b$PCs[i,j]*b$EOFs[j,,]/wgt # new PC normalization
    test[i,,]=test[i,,]+b$PCs[i,j]*b$EOFs[j,,] # new EOF and PC normalization

    # Check without CFU_EOF
    #test[i,,]=test[i,,]+PC[i,j]*EOF[j,,]*b$d[j]/wgt 
  }
}

print("Restitution of the original field after decomposing by CFU_EOF")
print("The difference below should be of the order of the machine precision:")
print(max(abs(test-a),na.rm=T))

# Project the field on each mode and compare with the PCs from CFU_EOF.
test2=array(0,dim=c(nt,neofs))
for (j in 1:neofs) {
  # NOTE(review): CFU_MODE is called here without lon/lat arguments, whereas
  # CFU_NAO calls CFU_MODE(lon,lat,...) - confirm which signature matches
  # the current CFU_Rfunc.txt.
  proj=CFU_MODE(InsertDim(InsertDim(InsertDim(a,1,1),2,1),4,1),b,mode=j)
  for (i in 1:nt) {
    # Check within CFU_MODE
    test2[i,j]=test2[i,j]+proj$PC.ver[1,1,i,1]

    # Check without CFU_MODE
    #test2[i,j]=test2[i,j]+sum(a[i,,]*EOF[j,,]/b$d[j]/wgt)
  }
}
print("Restitution of the PC from CFU_EOF when projecting with CFU_MODE")
print("The difference below should be of the order of the machine precision:")
#diag=test2-PC
diag=test2-b$PCs
#diag[which(1e-14>abs(InsertDim(b$d,1,nt)))]=0 # If the eigenvalue is of the order 
# of the machine precision ~ 1e-15 then dividing by it gives a huge PC by projection
# hence I do not consider the reconstruction in such case. 
# Modes with (near-)zero explained variance give meaningless projected PCs:
# ignore them in the comparison.
diag[which(1e-5>abs(InsertDim(b$Var,1,nt)))]=0 # If the eigenvalue is of the order 
# of the machine precision ~ 1e-15 then dividing by it gives a huge PC by projection
# hence I do not consider the reconstruction in such case. 
print(max(abs(diag)))
+

+ 31 - 0
interpolation/README

@@ -0,0 +1,31 @@
+In this directory, you can find:
+
1) a bash script to interpolate horizontally: interp
This script relies on the SCRIP package (Spherical Coordinate
Remapping and Interpolation Package) for the computation
of the interpolation weights, and on a Fortran executable,
scrip_use, written by Virginie Guemas and added to the SCRIP
package to apply these weights.
You can also extrapolate after interpolating with
scrip_use_extrap.
+
+2) the fortran sources from SCRIP are also available in 
+case you need to compute new interpolation weights. 
+
+3) a python script to interpolate vertically : 
+interp_vert.py
+
+4) a python script to extrapolate vertically :
+vertextrap.py
+
+5) a python script to extrapolate horizontally a 
+3d field : extrap.py
+
+6) a python script to extrapolate horizontally a 
+2d field : extrap2d.py
+
+7) the fortran sources to rotate the U and V components 
+on the ORCA irregular grid.
+
+More information can be found here:
+http://ic3.cat/wikicfu/index.php/Tools/Interpolation

+ 94 - 0
interpolation/extrap.py

@@ -0,0 +1,94 @@
# This function extrapolates horizontally each level of a (x,y,z)
# field using the nearest neighbour method.
#
# Usage   : python extrap.py <input file> <input variable name>
#    <grid description file> <2d longitude name in grid file>
#    <2d latitude name in grid file> <mask 1=extrapolate>
#    <mask variable name> <output file>
#
# History : Virginie Guemas - Initial version     -       2012
#           Virginie Guemas - Masking the outputs - March 2014
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import cdms2 as cdms
import sys,string
from cdms2 import MV
import numpy as N
from numpy import ma
#
# 1. Input arguments
# ===================
#
# Input file and var names :
# ---------------------------
fileIN=sys.argv[1]
varIN=sys.argv[2]
#
# Meshmask file :
# ----------------
fileM=sys.argv[3]
varlon=sys.argv[4]
varlat=sys.argv[5]
#
# Location of points to fill :
# ----------------------------
fileF=sys.argv[6]
varF=sys.argv[7]
#
# Output file name :
# -------------------
fileOUT=sys.argv[8]
#
# 2. Get the input files
# =======================
#
f=cdms.open(fileIN)
var0_a=f(varIN,squeeze=1)   # 3d (z,y,x) masked variable to extrapolate
f.close()
mask3d=var0_a.mask          # True where the input value is missing
(lz1,ly0,lx0)=var0_a.shape
#
f=cdms.open(fileM)
lon=f(varlon,squeeze=1)     # 2d longitudes of the grid (degrees)
lat=f(varlat,squeeze=1)     # 2d latitudes of the grid (degrees)
f.close()
#
f=cdms.open(fileF)
Pfill=f(varF,squeeze=1)     # 3d flag field: >=1 where a value must be filled
f.close()
#
# Start from the valid input values; points to be filled start at 0.
var4=N.zeros((lz1,ly0,lx0))
var4=var4.astype('d')
var4=N.where(mask3d==False,var0_a,var4)

# Precompute spherical coordinates for the great-circle distance below.
pi=N.pi
coslat=N.cos(lat*pi/180)
coslon=N.cos(lon*pi/180)
sinlat=N.sin(lat*pi/180)
sinlon=N.sin(lon*pi/180)

# Loop over horizontal locations where at least one level needs filling.
indexes1=N.where(N.sum(Pfill[:,:,:],0)>=1)
for ind in N.arange(indexes1[0].shape[0]) :
  jy=indexes1[0][ind]
  jx=indexes1[1][ind]
  # Angular great-circle distance (radians) from (jy,jx) to every grid point.
  distance=MV.arccos(coslat[jy,jx]*coslat*(coslon[jy,jx]*coslon+sinlon[jy,jx]*sinlon)+sinlat[jy,jx]*sinlat)
  indexes2=N.where(Pfill[:,jy,jx]>=1)
  for ind2 in N.arange(indexes2[0].shape[0]) :
    jz=indexes2[0][ind2]
    if mask3d[jz,:,:].mean() < 1 :   # skip levels that contain no valid data
      # Exclude masked points from the nearest-neighbour search.
      # NOTE(review): 'distance' is overwritten in place across successive
      # levels jz, so points masked at a shallower level stay excluded at
      # deeper levels; correct only if the mask grows with depth - confirm.
      distance=N.where(mask3d[jz,:,:]==False,distance,1e20)
      dismin=distance.min()
      # Average the value(s) at the minimal distance (several in case of ties).
      test=cdms.createVariable(var0_a[jz,:,:],mask=MV.where(distance==dismin,False,True))
      var4[jz,jy,jx]=test.mean()

# Output mask: filled points become valid; everything else keeps its mask.
maskout=MV.where(Pfill>0.5,False,mask3d)
var4=cdms.createVariable(var4,id=var0_a.id)
var4=MV.where(maskout<0.5,var4,1e20)   # 1e20 marks still-missing points
var4.getAxis(0).id='z'
var4.getAxis(0)[:]=var0_a.getAxis(0)[:]
var4.getAxis(1).id='y'
var4.getAxis(2).id='x'
var4.id=var0_a.id

h=cdms.open(fileOUT,'w')
h.write(var4)
h.close()

+ 99 - 0
interpolation/extrap2d.py

@@ -0,0 +1,99 @@
# This function extrapolates horizontally a (x,y) field using the
# nearest neighbour method.
#
# Usage   : python extrap2d.py <input file> <input variable name>
#    <grid description file> <2d longitude name in grid file>
#    <2d latitude name in grid file> <mask 1=extrapolate>
#    <mask variable name> <output file>
#
# History : Virginie Guemas - Initial version     -       2012
#           Virginie Guemas - Masking the outputs - March 2014
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import cdms2 as cdms
import sys,string
from cdms2 import MV
import numpy as N
from numpy import ma
#
# 1. Input arguments
# ===================
#
# Input file and var names :
# ---------------------------
fileIN=sys.argv[1]
varIN=sys.argv[2]
#
# Meshmask file :
# ----------------
fileM=sys.argv[3]
varlon=sys.argv[4]
varlat=sys.argv[5]
#
# Location of points to fill :
# ----------------------------
fileF=sys.argv[6]
varF=sys.argv[7]
#
# Output file name :
# -------------------
fileOUT=sys.argv[8]
#
# 2. Get the input files
# =======================
#
f=cdms.open(fileIN)
var0_a=f(varIN,squeeze=1)   # 2d (y,x) masked variable to extrapolate
f.close()
(ly0,lx0)=var0_a.shape
mask3d=var0_a.mask          # True where the input value is missing
if mask3d.size==1 :
 # The variable carries no mask (mask collapsed to a scalar): rebuild it from
 # a NEMO mesh-mask file, picking umask/vmask/tmask from the variable name.
 if var0_a.id in ('gsu','gru','gtu','ssu_m'):
   varmask='umask'
 elif var0_a.id in ('gsv','grv','gtv','ssv_m'):
   varmask='vmask'
 else:
   varmask='tmask'
 if (ly0,lx0) == (1021,1442) :
   # NOTE(review): hard-coded to the ORCA025/glorys2v1 grid and a user-local
   # path; any other unmasked grid silently keeps the scalar mask - confirm.
   f=cdms.open('/home/vguemas/SCRIP/interpvert/mesh_mask_glorys2v1_lev0.nc')
   mask3d=1-f(varmask,squeeze=1)
   f.close()
#
f=cdms.open(fileM)
lon=f(varlon,squeeze=1)     # 2d longitudes of the grid (degrees)
lat=f(varlat,squeeze=1)     # 2d latitudes of the grid (degrees)
f.close()
#
f=cdms.open(fileF)
Pfill=f(varF,squeeze=1)     # 2d flag field: >=1 where a value must be filled
f.close()
#
# Start from the valid input values; points to be filled start at 0.
var4=N.zeros((ly0,lx0))
var4=var4.astype('d')
var4=N.where(mask3d==False,var0_a,var4)

# Precompute spherical coordinates for the great-circle distance below.
pi=N.pi
coslat=N.cos(lat*pi/180)
coslon=N.cos(lon*pi/180)
sinlat=N.sin(lat*pi/180)
sinlon=N.sin(lon*pi/180)

# Nearest-neighbour fill: for each flagged point, average the valid value(s)
# at the minimal great-circle distance.
indexes1=N.where(Pfill[:,:]>=1)
for ind in N.arange(indexes1[0].shape[0]) :
  jy=indexes1[0][ind]
  jx=indexes1[1][ind]
  # Angular great-circle distance (radians) from (jy,jx) to every grid point.
  distance=MV.arccos(coslat[jy,jx]*coslat*(coslon[jy,jx]*coslon+sinlon[jy,jx]*sinlon)+sinlat[jy,jx]*sinlat)
  distance=N.where(mask3d[:,:]==False,distance,1e20)   # exclude masked points
  dismin=distance.min()
  test=cdms.createVariable(var0_a[:,:],mask=MV.where(distance==dismin,False,True))
  var4[jy,jx]=test.mean()

# Output mask: filled points become valid; everything else keeps its mask.
maskout=MV.where(Pfill>0.5,False,mask3d)
var4=cdms.createVariable(var4,id=var0_a.id)
var4=MV.where(maskout<0.5,var4,1e20)   # 1e20 marks still-missing points
var4.getAxis(0).id='y'
var4.getAxis(1).id='x'
var4.id=var0_a.id

h=cdms.open(fileOUT,'w')
h.write(var4)
h.close()

+ 119 - 0
interpolation/interp

@@ -0,0 +1,119 @@
#!/usr/bin/env bash
#
# interp, scrip_use and scrip_use_extrap have been written by Virginie Guemas
# in 2011 based on SCRIP software that computes interpolation weights:
# Spherical Coordinate Remapping and Interpolation Package
#
# Horizontally interpolates <input_var> from <input_file> onto <gridout>
# by applying precomputed SCRIP weights with the scrip_use executable.

#~~~~~~~~~~~~~~~
# get arguments
#~~~~~~~~~~~~~~~

if [ $# -eq 0 ]; then
  echo
  echo "USAGE: $(basename "$0") <input_file> <input_var> <method> <gridin> <gridout> <output_file> <debug>"
  echo "<method> can be bilinear/bicubic/conserv/distwgt"
  echo "<gridin> and <gridout> can be ORCA1t_v2.2/ORCA1t_v3.2/ORCA025t_v3.2/ERA40/ERAint/EcEarth2/EcEarth3/HadISST/NCEP/NOAAv3b/NSIDC"
  echo "<debug> optional argument in case default choices not convenient : TRUE/FALSE"
  exit 1
fi

filein=$1
varin=$2
method=$3
gridin=$4
gridout=$5
fileout=$6
# Optional 7th argument: keep intermediate files for debugging.
if [ $# -eq 7 ]; then
  debug=$7
else
  debug='FALSE'
fi

exec='scrip_use'

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# /cfu/data conventions or not ?
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

# flag1 is passed as 'invertlat' to scrip_use (TRUE for these datasets).
case $gridin in
  'ORCA1t_v2.2'|'ORCA1t_v3.2'|'ORCA025t_v3.2'|'NSIDC')
   flag1='FALSE'
   ;;
  'NCEP'|'ERAint'|'ERA40'|'EcEarth2'|'EcEarth3'|'HadISST'|'NOAAv3b')
   flag1='TRUE'
   ;;
  *)
   flag1='FALSE'
   ;;
esac

# flag2 is passed as 'fromregular' to scrip_use (TRUE for regular grids).
case $gridin in
  'ERA40'|'ERAint'|'EcEarth2'|'EcEarth3'|'HadISST'|'NCEP'|'NOAAv3b'|'NSIDC')
   flag2='TRUE'
   ;;
  'ORCA1t_v2.2'|'ORCA1t_v3.2'|'ORCA025t_v3.2')
   flag2='FALSE'
   ;;
  *)
   flag2='FALSE'
   ;;
esac

#~~~~~~~~~~~~~~~~~~~~
# land data or not ?
#~~~~~~~~~~~~~~~~~~~~

# Ocean variables use the '_ocean' variant of the weight files.
case $varin in
  'tas'|'prlr'|'g500'|'g200'|'psl'|'hflsd'|'hfssd'|'hus'|'rls'|'rss'|'rsds'|'vas'|'uas'|'tasmax'|'tasmin') ;;
  'tos'|'ice'|'mld'|'sic'|'sit'|'sea_ice_area_fraction') gridin=${gridin}'_ocean' ;;
  'ice_pres'|'somixhgt'|'sosstsst'|'sosaline'|'iicethic'|'ileadfra'|'isnowthi'|'iicetemp'|'somxl010'|'somxlheatc'|'somxlsaltc'|'sobarstfu'|'sobarstfv'|'sobarstf'|'heatc_sl'|'vertmeansal'|'votemper'|'vosaline'|'rhopn') gridout=${gridout}'_ocean' ;;
  *) echo "unexpected input variable : contact Virginie" ; exit 1 ;;
esac

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Weigths available or not ?
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~

if [[ ! -e "/cfu/pub/scripts/interpolation/weigths/rmp_${gridin}_to_${gridout}_${method}.nc" ]] ; then
  echo "unexpected input or output grid : contact Virginie"
  exit 1
fi

#~~~~~~~~~~~~~~~
# Interpolation
#~~~~~~~~~~~~~~~

if [[ ! -e "${filein}" ]] ; then
  echo "${filein} does not exist"
  exit 1
fi

# Check that the requested variable is present in the input file.
vars=$(cdo showvar "${filein}")
exist='FALSE'
for var in $vars ; do
  if [[ $var == "${varin}" ]] ; then exist='TRUE' ; fi
done
if [[ $exist == 'FALSE' ]] ; then
  # Bug fix: report the requested variable; the loop variable held the
  # LAST name listed by cdo, not the variable that was searched for.
  echo "${varin} is not in input file"
  exit 1
fi

# Build the scrip_use namelist and run the executable from here.
cat > scrip_use_in <<EOF
&remap_inputs
    remap_wgt   = '/cfu/pub/scripts/interpolation/weigths/rmp_${gridin}_to_${gridout}_${method}.nc'
    infile      = '${filein}'
    invertlat   = ${flag1}
    var         = '${varin}'
    fromregular = ${flag2}
    outfile     = '${fileout}'
/
EOF

ln -sf "/cfu/pub/scripts/interpolation/${exec}" "${exec}"
./${exec}

# Clean up unless debugging; in debug mode also expose scrip_use_extrap.
if [[ $debug == 'FALSE' ]] ; then
  rm -f "${exec}" scrip_use_in
else
  ln -sf "/cfu/pub/scripts/interpolation/${exec}_extrap" "${exec}_extrap"
fi

+ 192 - 0
interpolation/interp_vert.py

@@ -0,0 +1,192 @@
# This function interpolates vertically a (x,y,z) field using the
# conservative method (thickness-weighted averaging of overlapping
# source levels onto each target level).
#
# NOTE(review): Python 2 syntax (print statements, cdms variable .rank()).
#
# Usage   : python interp_vert.py <input file> <input variable name>
#    <input grid description file> <input level thickness in grid
#    file> <input mask file> <input mask variable> <output grid
#    description file> <output level thickness in grid file>
#    <output level depth in grid file> <output file>
#
# History : Virginie Guemas - Initial version adpated
#                             from PhD tools        -       2012
#                             Cleaning              - March 2014
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#import cdms,regrid,sys,string
import cdms2 as cdms
import sys,string
#import MLab,MV,MA,Numeric as N
from cdms2 import MV
import numpy as N
from numpy import ma
#import vcs,time
#x=vcs.init()
#
# 1. Input arguments
# ===================
#
# Input file and var names :
# ---------------------------
fileIN=sys.argv[1]
varIN=sys.argv[2]
#
# Input grid file :
# ------------------
fileIG=sys.argv[3]
varIT=sys.argv[4]
#
# Input mask file :
# ------------------
fileM=sys.argv[5]
varM=sys.argv[6]
#
# Output grid file :
# -------------------
fileOG=sys.argv[7]
varOT=sys.argv[8]
varOD=sys.argv[9]
#
# Output file name :
# -------------------
fileOUT=sys.argv[10]
#
# 2. Get the input files
# =======================
#
f=cdms.open(fileIN)
var0_a=f(varIN,squeeze=1)
f.close()

# NOTE(review): these dimension checks only print a warning; the script
# continues (and will likely fail later) if they are violated.
if var0_a.rank() != 3 :
    print "3d input file please !"
# Work out which axis is x, y and z from the cdms axis order string.
dims_var0=var0_a.getOrder()
if dims_var0[0] == 'x' : 
    lx0=var0_a.shape[0]
    if dims_var0[1] == 'y' : 
        ly0=var0_a.shape[1]
        lz0=var0_a.shape[2]
    else :
        ly0=var0_a.shape[2]
        lz0=var0_a.shape[1]
else :
    if dims_var0[0] == 'y' :
        ly0=var0_a.shape[0]
        if dims_var0[1] == 'x' :
            lx0=var0_a.shape[1]
            lz0=var0_a.shape[2]
        else : 
            lx0=var0_a.shape[2]
            lz0=var0_a.shape[1]	
    else :
        lz0=var0_a.shape[0]
        if dims_var0[1] == 'x' :
            lx0=var0_a.shape[1]
            ly0=var0_a.shape[2]
        else :
            lx0=var0_a.shape[2]
            ly0=var0_a.shape[1]
#
# Reorder the input into a (z,y,x) array whatever its original axis order.
var0=N.zeros((lz0,ly0,lx0))
var0=var0.astype('d')
#
if dims_var0[0] == 'x' : 
    if dims_var0[1] == 'y' : 
        for jk in N.arange(lz0) :
            var0[jk,:,:]=N.transpose(var0_a[:,:,jk])	
    else :
        for ji in N.arange(lx0) :
            var0[:,:,ji]=var0_a[ji,:,:]
else :
    if dims_var0[0] == 'y' :
        if dims_var0[1] == 'x' :
            for jk in N.arange(lz0) :
                var0[jk,:,:]=var0_a[:,:,jk]	
        else : 
            for jk in N.arange(lz0) :
                var0[jk,:,:]=var0_a[:,jk,:]
    else :
        if dims_var0[1] == 'x' :
            for ji in N.arange(lx0) :
                var0[:,:,ji]=var0_a[:,ji,:]	    
        else :
            var0[:,:,:]=var0_a[:,:,:]
var0=ma.masked_array(var0,mask=None,fill_value=None)
#
f=cdms.open(fileIG)
thick_in=f(varIT,squeeze=1)     # source level thicknesses
f.close()
#
f=cdms.open(fileM)
mask=f(varM,squeeze=1)          # 2d field; value = number of ocean levels (see below)
f.close()
if mask.rank() != 2 :
    print "2d input mask please !"
dims_mask=mask.getOrder()
if dims_mask[0] == 'x' : 
    mask=N.transpose(mask)
if mask.shape[1] != lx0 or mask.shape[0] != ly0 :
    print "same dims for mask and input file please !"
#
f=cdms.open(fileOG)
thick_out=f(varOT,squeeze=1)    # target level thicknesses
depth_out=f(varOD,squeeze=1)    # target level depths (for the output z axis)
f.close()
lz1=thick_out.shape[0]
if depth_out.shape[0] != lz1 :
    print "same dims for depth and thickness please !"
#
# 2. 2D to 3D mask file
# =======================
#
# mask[j,i] is interpreted as the number of valid (ocean) levels at (j,i):
# levels 0..mask[j,i]-1 are marked valid in the 3d mask.
mask_in=N.zeros((lz0,ly0,lx0))
mask_in=mask_in.astype('d')

for i in N.arange(lx0) :
    for j in N.arange(ly0) :
        if mask[j,i] > 0 :
            down=mask[j,i]
            for k in N.arange(down) :
                 mask_in[k,j,i]=1.	    

# 3. Vertical interpolation
# ==========================
#
# For each target level jk1, accumulate source values weighted by the
# thickness of the overlap between source level jk0 and target level jk1,
# then divide by the total overlapping thickness.
var1=N.zeros((lz1,ly0,lx0),'d')
thick_bis=N.zeros((ly0,lx0),'d')     # overlap thickness of current source level
thick_jk1=N.zeros((ly0,lx0),'d')     # accumulated valid thickness in target level
mask_out=N.zeros((lz1,ly0,lx0),'d')
#
jk0=0
for jk1 in N.arange(lz1) :
    # Source levels entirely above the bottom of target level jk1.
    while ( (jk0 <= (lz0-1)) & (N.add.reduce(thick_in[0:(jk0+1)]) <= N.add.reduce(thick_out[0:(jk1+1)])) ) :
        thick_bis=N.multiply(N.minimum(thick_in[jk0],N.add.reduce(thick_in[0:(jk0+1)])-N.add.reduce(thick_out[0:jk1])),mask_in[jk0,:,:])
        var1[jk1,:,:]=var1[jk1,:,:]+N.multiply(var0[jk0,:,:],thick_bis)
        thick_jk1=thick_jk1+thick_bis
        jk0=jk0+1
    # Partial overlap with the source level straddling the target bottom.
    if ( jk0 <= (lz0-1)) :
      thick_bis=N.multiply(N.minimum(thick_out[jk1],N.add.reduce(thick_out[0:(jk1+1)])-N.add.reduce(thick_in[0:jk0])),mask_in[jk0,:,:])
      var1[jk1,:,:]=var1[jk1,:,:]+N.multiply(var0[jk0,:,:],thick_bis)
      thick_jk1=thick_jk1+thick_bis
    # A target level is valid only where some valid thickness contributed.
    mask_out[jk1,:,:]=N.where(thick_jk1>0.01,1.,0.)
    thick_jk1=N.where(thick_jk1==0.,1.,thick_jk1)   # avoid division by zero
    var1[jk1,:,:]=var1[jk1,:,:]/thick_jk1
    thick_jk1[:,:]=0

lon=cdms.createAxis(N.arange(lx0),id='x')
lat=cdms.createAxis(N.arange(ly0),id='y')
depth=cdms.createAxis(depth_out,id='z')

# NOTE(review): var2 is built but never written; only var3 (the masked
# version, with 1e20 as missing value) goes to the output file.
var2=cdms.createVariable(var1,id=var0_a.id)
var2.getAxis(0)[:]=depth_out
var2.getAxis(0).id='z'
var2.getAxis(1).id='y'
var2.getAxis(2).id='x'
var3=MV.where(mask_out>0.,var1,1e20)
var3.id=var0_a.id
var3.getAxis(1).id='y'
var3.getAxis(2).id='x'
var3.getAxis(0).id='z'
var3.getAxis(0)[:]=depth_out

g=cdms.open(fileOUT,'w')
g.write(var3)
g.close()

+ 17 - 0
interpolation/rotateUVorca_sources/README

@@ -0,0 +1,17 @@
+The dimensions and the periodicity of the grid are hard-coded. It
+is therefore compulsory to recompile for each new configuration.
+The information can be found in the par_ORCA*.h90 in the model
+sources you are using.
+
+ORCA1:
+jpi = 362
+jpj = 292
+jpiglo = 362
+nperio = 6
+
+ORCA025:
+jpi = 1442
+jpj = 1021
+jpiglo = 1442
+nperio = 4
+

+ 189 - 0
interpolation/rotateUVorca_sources/dom_oce.f90

@@ -0,0 +1,189 @@
+MODULE dom_oce
+   !!======================================================================
+   !!                       ***  MODULE dom_oce  ***
+   !!       
+   !! ** Purpose :   Define in memory all the ocean space domain variables
+   !!======================================================================
+   !! History :  1.0  ! 2005-10  (A. Beckmann, G. Madec)  reactivate s-coordinate 
+   !!----------------------------------------------------------------------
+   !! NEMO/OPA 3.2 , LOCEAN-IPSL (2009) 
+   !! $Id: dom_oce.F90 1886 2010-05-27 10:13:51Z rblod $ 
+   !! Software governed by the CeCILL licence (modipsl/doc/NEMO_CeCILL.txt)
+   !!----------------------------------------------------------------------
+   use par_kind
+!   USE par_oce      ! ocean parameters
+
+   IMPLICIT NONE
+   PUBLIC           ! allows the acces to par_oce when dom_oce is used
+   !                ! exception to coding rules... to be suppressed ???
+
+   !!----------------------------------------------------------------------
+   !! time & space domain namelist
+   !! ----------------------------
+   !                                              !!* Namelist namdom : time & space domain 
+   INTEGER , PUBLIC ::   nn_bathy     =  0         !: = 0/1 ,compute/read the bathymetry file
+   REAL(wp), PUBLIC ::   rn_e3zps_min = 5.0_wp     !: miminum thickness for partial steps (meters)
+   REAL(wp), PUBLIC ::   rn_e3zps_rat = 0.1_wp     !: minimum thickness ration for partial steps
+   INTEGER , PUBLIC ::   nn_msh       = 0          !: = 1 create a mesh-mask file
+   INTEGER , PUBLIC ::   nn_acc       = 0          !: = 0/1 use of the acceleration of convergence technique
+   REAL(wp), PUBLIC ::   rn_atfp      = 0.1_wp     !: asselin time filter parameter
+   REAL(wp), PUBLIC ::   rn_rdt       = 3600._wp   !: time step for the dynamics (and tracer if nacc=0)
+   REAL(wp), PUBLIC ::   rn_rdtmin    = 3600._wp   !: minimum time step on tracers
+   REAL(wp), PUBLIC ::   rn_rdtmax    = 3600._wp   !: maximum time step on tracers
+   REAL(wp), PUBLIC ::   rn_rdth      =  800._wp   !: depth variation of tracer step
+   INTEGER , PUBLIC ::   nn_baro      = 64         !: number of barotropic time steps (key_dynspg_ts)
+   INTEGER , PUBLIC ::   nn_closea    =  0         !: =0 suppress closed sea/lake from the ORCA domain or not (=1)
+
+   !                                          ! old non-DOCTOR names still used in the model
+   INTEGER , PUBLIC ::   ntopo                !: = 0/1 ,compute/read the bathymetry file
+   REAL(wp), PUBLIC ::   e3zps_min            !: miminum thickness for partial steps (meters)
+   REAL(wp), PUBLIC ::   e3zps_rat            !: minimum thickness ration for partial steps
+   INTEGER , PUBLIC ::   nmsh                 !: = 1 create a mesh-mask file
+   INTEGER , PUBLIC ::   nacc                 !: = 0/1 use of the acceleration of convergence technique
+   REAL(wp), PUBLIC ::   atfp                 !: asselin time filter parameter
+   REAL(wp), PUBLIC ::   rdt                  !: time step for the dynamics (and tracer if nacc=0)
+   REAL(wp), PUBLIC ::   rdtmin               !: minimum time step on tracers
+   REAL(wp), PUBLIC ::   rdtmax               !: maximum time step on tracers
+   REAL(wp), PUBLIC ::   rdth                 !: depth variation of tracer step
+   INTEGER , PUBLIC ::   nclosea              !: =0 suppress closed sea/lake from the ORCA domain or not (=1)
+
+
+   !                                         !!! associated variables
+   INTEGER , PUBLIC                 ::   neuler  = 0   !: restart euler forward option (0=Euler)
+   REAL(wp), PUBLIC                 ::   atfp1         !: asselin time filter coeff. (atfp1= 1-2*atfp)
+   REAL(wp), PUBLIC, DIMENSION(jpk) ::   rdttra        !: vertical profile of tracer time step
+
+   !                                         !!* Namelist namcla : cross land advection
+   INTEGER, PUBLIC ::   nn_cla = 0            !: =1 cross land advection for exchanges through some straits (ORCA2)
+
+   !                                          ! old non-DOCTOR names still used in the model
+   INTEGER, PUBLIC ::   n_cla = 0             !: =1 cross land advection for exchanges through some straits (ORCA2)
+
+   !!----------------------------------------------------------------------
+   !! space domain parameters
+   !!----------------------------------------------------------------------
+   LOGICAL, PUBLIC ::   lzoom      =  .FALSE.   !: zoom flag
+   LOGICAL, PUBLIC ::   lzoom_e    =  .FALSE.   !: East  zoom type flag
+   LOGICAL, PUBLIC ::   lzoom_w    =  .FALSE.   !: West  zoom type flag
+   LOGICAL, PUBLIC ::   lzoom_s    =  .FALSE.   !: South zoom type flag
+   LOGICAL, PUBLIC ::   lzoom_n    =  .FALSE.   !: North zoom type flag
+   LOGICAL, PUBLIC ::   lzoom_arct =  .FALSE.   !: ORCA    arctic zoom flag
+   LOGICAL, PUBLIC ::   lzoom_anta =  .FALSE.   !: ORCA antarctic zoom flag
+
+   !                                     !!! domain parameters linked to mpp
+   INTEGER, PUBLIC ::   nperio = 4        !: type of lateral boundary condition
+   INTEGER, PUBLIC ::   nimpp, njmpp      !: i- & j-indexes for mpp-subdomain left bottom
+   INTEGER, PUBLIC ::   nreci, nrecj      !: overlap region in i and j
+   INTEGER, PUBLIC ::   nproc             !: number for local processor
+   INTEGER, PUBLIC ::   narea             !: number for local area
+   INTEGER, PUBLIC ::   nbondi, nbondj    !: mark of i- and j-direction local boundaries
+   INTEGER, PUBLIC ::   npolj             !: north fold mark (0, 3 or 4)
+   INTEGER, PUBLIC ::   nlci, nldi, nlei  !: i-dimensions of the local subdomain and its first and last indoor indices
+   INTEGER, PUBLIC ::   nlcj, nldj, nlej  !: i-dimensions of the local subdomain and its first and last indoor indices
+   INTEGER, PUBLIC ::   noea, nowe        !: index of the local neighboring processors in
+   INTEGER, PUBLIC ::   noso, nono        !: east, west, south and north directions
+   INTEGER, PUBLIC ::   npne, npnw        !: index of north east and north west processor
+   INTEGER, PUBLIC ::   npse, npsw        !: index of south east and south west processor
+   INTEGER, PUBLIC ::   nbne, nbnw        !: logical of north east & north west processor
+   INTEGER, PUBLIC ::   nbse, nbsw        !: logical of south east & south west processor
+   INTEGER, PUBLIC ::   nidom             !: ???
+
+   INTEGER, PUBLIC, DIMENSION(jpi)    ::   mig        !: local  ==> global domain i-index
+   INTEGER, PUBLIC, DIMENSION(jpj)    ::   mjg        !: local  ==> global domain j-index
+   !                                                  ! (mi0=1 and mi1=0 if the global index is not in the local domain)
+
+   !!----------------------------------------------------------------------
+   !! horizontal curvilinear coordinate and scale factors
+   !! ---------------------------------------------------------------------
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   glamt, glamu   !: longitude of t-, u-, v- and f-points (degre)
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   glamv, glamf   !:
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   gphit, gphiu   !: latitude  of t-, u-, v- and f-points (degre)
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   gphiv, gphif   !:
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   e1t, e2t       !: horizontal scale factors at t-point (m)
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   e1u, e2u       !: horizontal scale factors at u-point (m)
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   e1v, e2v       !: horizontal scale factors at v-point (m)
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   e1f, e2f       !: horizontal scale factors at f-point (m)
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   ff             !: coriolis factor (2.*omega*sin(yphi) ) (s-1)
+
+   !!----------------------------------------------------------------------
+   !! vertical coordinate and scale factors
+   !! ---------------------------------------------------------------------
+   !                                           !!* Namelist namzgr : vertical coordinate *
+   LOGICAL, PUBLIC ::   ln_zco     =  .TRUE.    !: z-coordinate - full step
+   LOGICAL, PUBLIC ::   ln_zps     =  .FALSE.   !: z-coordinate - partial step
+   LOGICAL, PUBLIC ::   ln_sco     =  .FALSE.   !: s-coordinate or hybrid z-s coordinate
+
+   !! All coordinates
+   !! ---------------
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj,jpk) ::   gdep3w          !: depth of T-points (sum of e3w) (m)
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj,jpk) ::   gdept , gdepw   !: analytical depth at T-W  points (m)
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj,jpk) ::   e3v   , e3f     !: analytical vertical scale factors at  V--F
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj,jpk) ::   e3t   , e3u     !:                                       T--U  points (m)
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj,jpk) ::   e3vw            !: analytical vertical scale factors at  VW--
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj,jpk) ::   e3w   , e3uw    !:                                        W--UW  points (m)
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   hur  , hvr    !: inverse of u and v-points ocean depth (1/m)
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   hu   , hv     !: depth at u- and v-points (meters)
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   hu_0 , hv_0   !: reference depth at u- and v-points (meters)
+
+   INTEGER, PUBLIC ::   nla10              !: deepest    W level Above  ~10m (nlb10 - 1)
+   INTEGER, PUBLIC ::   nlb10              !: shallowest W level Below  ~10m (nla10 + 1) 
+
+   !! z-coordinate with full steps (also used in the other cases as reference z-coordinate)
+   !! =-----------------====------
+   REAL(wp), PUBLIC, DIMENSION(jpk)     ::   gdept_0, gdepw_0   !: reference depth of t- and w-points (m)
+   REAL(wp), PUBLIC, DIMENSION(jpk)     ::   e3t_0  , e3w_0     !: reference vertical scale factors at T- and W-pts (m)
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   hdept  , hdepw     !: ocean bottom depth at T and W points
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   e3tp   , e3wp      !: ocean bottom level thickness at T and W points
+
+   !! s-coordinate and hybrid z-s-coordinate
+   !! =----------------======---------------
+   REAL(wp), PUBLIC, DIMENSION(jpk) ::   gsigt, gsigw   !: model level depth coefficient at t-, w-levels (analytic)
+   REAL(wp), PUBLIC, DIMENSION(jpk) ::   gsi3w          !: model level depth coefficient at w-level (sum of gsigw)
+   REAL(wp), PUBLIC, DIMENSION(jpk) ::   esigt, esigw   !: vertical scale factor coef. at t-, w-levels
+
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   hbatv , hbatf    !: ocean depth at the vertical of  V--F
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   hbatt , hbatu    !:                                 T--U  points (m)
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   scosrf, scobot   !: ocean surface and bottom topographies 
+   !                                                          !  (if deviating from coordinate surfaces in HYBRID)
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   hifv  , hiff     !: interface depth between stretching    at  V--F
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   hift  , hifu     !: and quasi-uniform spacing                 T--U  points (m)
+
+   !!----------------------------------------------------------------------
+   !! masks, bathymetry
+   !! ---------------------------------------------------------------------
+   INTEGER , PUBLIC, DIMENSION(jpi,jpj) ::   mbathy    !: number of ocean level (=0, 1, ... , jpk-1)
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   bathy     !: ocean depth (meters)
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   tmask_i   !: interior domain T-point mask
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj) ::   bmask     !: land/ocean mask of barotropic stream function
+
+   REAL(wp), PUBLIC, DIMENSION(jpi,jpj,jpk) ::   tmask, umask, vmask, fmask   !: land/ocean mask at T-, U-, V- and F-points
+
+
+
+   !!----------------------------------------------------------------------
+   !! calendar variables
+   !! ---------------------------------------------------------------------
+   INTEGER , PUBLIC ::   nyear       !: current year
+   INTEGER , PUBLIC ::   nmonth      !: current month
+   INTEGER , PUBLIC ::   nday        !: current day of the month
+   INTEGER , PUBLIC ::   ndastp      !: time step date in yyyymmdd format
+   INTEGER , PUBLIC ::   nday_year   !: current day counted from jan 1st of the current year
+   INTEGER , PUBLIC ::   nsec_year   !: current time step counted in second since 00h jan 1st of the current year
+   INTEGER , PUBLIC ::   nsec_month  !: current time step counted in second since 00h 1st day of the current month
+   INTEGER , PUBLIC ::   nsec_day    !: current time step counted in second since 00h of the current day
+   REAL(wp), PUBLIC ::   fjulday     !: julian day 
+   REAL(wp), PUBLIC ::   adatrj      !: number of elapsed days since the beginning of the whole simulation
+   !                                 !: (cumulative duration of previous runs that may have used different time-step size)
+   INTEGER , PUBLIC, DIMENSION(0: 1) ::   nyear_len     !: length in days of the previous/current year
+   INTEGER , PUBLIC, DIMENSION(0:13) ::   nmonth_len    !: length in days of the months of the current year
+   INTEGER , PUBLIC, DIMENSION(0:13) ::   nmonth_half   !: second since Jan 1st 0h of the current year and the half of the months
+   INTEGER , PUBLIC, DIMENSION(0:13) ::   nmonth_end    !: second since Jan 1st 0h of the current year and the end of the months
+   INTEGER , PUBLIC                  ::   nsec1jan000   !: second since Jan 1st 0h of nit000 year and Jan 1st 0h the current year
+
+   !!----------------------------------------------------------------------
+   !! agrif domain
+   !!----------------------------------------------------------------------
+   LOGICAL, PUBLIC, PARAMETER ::   lk_agrif = .FALSE.   !: agrif flag
+
+   !!======================================================================
+END MODULE dom_oce

+ 287 - 0
interpolation/rotateUVorca_sources/geo2ocean.f90

@@ -0,0 +1,287 @@
+MODULE geo2ocean
+   !!======================================================================
+   !!                     ***  MODULE  geo2ocean  ***
+   !! Ocean mesh    :  rotation of vector components between the geographic
+   !!                  (east-north) directions and the model (i-j) grid lines
+   !!======================================================================
+   !! History :  OPA  !  07-1996  (O. Marti)  Original code
+   !!   NEMO     1.0  !  02-2008  (G. Madec)  F90: Free form
+   !!            3.0  !  
+   !!----------------------------------------------------------------------
+
+   !!----------------------------------------------------------------------
+   !!   rot_rep     : rotate vector components, geographic grid <--> model grid
+   !!   angle       : compute the sin/cos of the angle between the model grid
+   !!                 lines and the North direction at T, U, V and F points
+   !!----------------------------------------------------------------------
+   USE dom_oce         ! mesh and scale factors
+   USE phycst          ! physical constants
+   USE par_kind        ! precision 
+   USE lbclnk          ! lateral boundary conditions (lbc_lnk)
+   IMPLICIT NONE
+   PRIVATE
+
+   PUBLIC   rot_rep    !: vector rotation (the only public entry point)
+
+   REAL(wp), DIMENSION(jpi,jpj) ::   &
+      gsint, gcost,   &  ! sin/cos between model grid lines and NP direction at T point
+      gsinu, gcosu,   &  ! sin/cos between model grid lines and NP direction at U point
+      gsinv, gcosv,   &  ! sin/cos between model grid lines and NP direction at V point
+      gsinf, gcosf       ! sin/cos between model grid lines and NP direction at F point
+
+   LOGICAL ::   lmust_init = .TRUE.        !: used to initialize the sin/cos variables (see above)
+
+   !! * Substitutions
+   !!----------------------------------------------------------------------
+   !! NEMO/OPA 3.0 , LOCEAN-IPSL (2008) 
+   !! $Id: geo2ocean.F90 1833 2010-04-13 17:44:52Z smasson $ 
+   !! Software governed by the CeCILL licence (modipsl/doc/NEMO_CeCILL.txt)
+   !!----------------------------------------------------------------------
+
+CONTAINS
+
+
+   SUBROUTINE rot_rep ( pxin, pyin, cd_type, cdtodo, prot )
+      !!----------------------------------------------------------------------
+      !!                  ***  ROUTINE rot_rep  ***
+      !!
+      !! ** Purpose :   Rotate the reference frame: change vector components
+      !!                between geographic grid <--> stretched coordinates grid.
+      !!                The rotation angles are computed once (CALL angle) and
+      !!                cached in the module arrays gsin* / gcos*.
+      !!
+      !! History :
+      !!   9.2  !  07-04  (S. Masson)  
+      !!                  (O. Marti ) Original code (repere and repcmo)
+      !!----------------------------------------------------------------------
+      REAL(wp), DIMENSION(jpi,jpj), INTENT( IN ) ::   pxin, pyin   ! input vector components
+      CHARACTER(len=1),             INTENT( IN ) ::   cd_type      ! define the nature of pt2d array grid-points
+      CHARACTER(len=5),             INTENT( IN ) ::   cdtodo       ! specify the work to do:
+      !!                                                           ! 'en->i' east-north components to model i component
+      !!                                                           ! 'en->j' east-north components to model j component
+      !!                                                           ! 'ij->e' model i-j components to east  component
+      !!                                                           ! 'ij->n' model i-j components to north component
+      REAL(wp), DIMENSION(jpi,jpj), INTENT(out) ::   prot      
+
+      !!----------------------------------------------------------------------
+
+      ! Initialization of gsin* and gcos* at first call
+      ! -----------------------------------------------
+
+      IF( lmust_init ) THEN
+
+         CALL angle       ! initialization of the transformation
+         lmust_init = .FALSE.
+
+      ENDIF 
+      
+      SELECT CASE (cdtodo)
+      CASE ('en->i')      ! 'en->i' east-north components to model i component
+         SELECT CASE (cd_type)
+         CASE ('T')   ;   prot(:,:) = pxin(:,:) * gcost(:,:) + pyin(:,:) * gsint(:,:)
+         CASE ('U')   ;   prot(:,:) = pxin(:,:) * gcosu(:,:) + pyin(:,:) * gsinu(:,:)
+         CASE ('V')   ;   prot(:,:) = pxin(:,:) * gcosv(:,:) + pyin(:,:) * gsinv(:,:)
+         CASE ('F')   ;   prot(:,:) = pxin(:,:) * gcosf(:,:) + pyin(:,:) * gsinf(:,:)
+         CASE DEFAULT   ;   STOP 'Only T, U, V and F grid points are coded' 
+         END SELECT
+      CASE ('en->j')      ! 'en->j' east-north components to model j component
+         SELECT CASE (cd_type)
+         CASE ('T')   ;   prot(:,:) = pyin(:,:) * gcost(:,:) - pxin(:,:) * gsint(:,:)
+         CASE ('U')   ;   prot(:,:) = pyin(:,:) * gcosu(:,:) - pxin(:,:) * gsinu(:,:)
+         CASE ('V')   ;   prot(:,:) = pyin(:,:) * gcosv(:,:) - pxin(:,:) * gsinv(:,:)   
+         CASE ('F')   ;   prot(:,:) = pyin(:,:) * gcosf(:,:) - pxin(:,:) * gsinf(:,:)   
+         CASE DEFAULT   ;   STOP 'Only T, U, V and F grid points are coded'
+         END SELECT
+      CASE ('ij->e')      ! 'ij->e' model i-j components to east component
+         SELECT CASE (cd_type)
+         CASE ('T')   ;   prot(:,:) = pxin(:,:) * gcost(:,:) - pyin(:,:) * gsint(:,:)
+         CASE ('U')   ;   prot(:,:) = pxin(:,:) * gcosu(:,:) - pyin(:,:) * gsinu(:,:)
+         CASE ('V')   ;   prot(:,:) = pxin(:,:) * gcosv(:,:) - pyin(:,:) * gsinv(:,:)
+         CASE ('F')   ;   prot(:,:) = pxin(:,:) * gcosf(:,:) - pyin(:,:) * gsinf(:,:)
+         CASE DEFAULT   ;   STOP 'Only T, U, V and F grid points are coded' 
+         END SELECT
+      CASE ('ij->n')      ! 'ij->n' model i-j components to north component
+         SELECT CASE (cd_type)
+         CASE ('T')   ;   prot(:,:) = pyin(:,:) * gcost(:,:) + pxin(:,:) * gsint(:,:)
+         CASE ('U')   ;   prot(:,:) = pyin(:,:) * gcosu(:,:) + pxin(:,:) * gsinu(:,:)
+         CASE ('V')   ;   prot(:,:) = pyin(:,:) * gcosv(:,:) + pxin(:,:) * gsinv(:,:)
+         CASE ('F')   ;   prot(:,:) = pyin(:,:) * gcosf(:,:) + pxin(:,:) * gsinf(:,:)
+         CASE DEFAULT   ;   STOP 'Only T, U, V and F grid points are coded' 
+         END SELECT
+      CASE DEFAULT   ;   STOP 'rot_rep: Syntax Error in the definition of cdtodo' 
+      END SELECT
+      
+   END SUBROUTINE rot_rep
+
+
+   SUBROUTINE angle
+      !!----------------------------------------------------------------------
+      !!                  ***  ROUTINE angle  ***
+      !! 
+      !! ** Purpose :   Compute angles between model grid lines and the North direction
+      !!
+      !! ** Method  :   computation done on the north stereographic polar plane
+      !!
+      !! ** Action  :   Compute (gsint, gcost, gsinu, gcosu, gsinv, gcosv, gsinf, gcosf) arrays:
+      !!      sine and cosine of the angle between the north-south axis and the 
+      !!      j-direction at t, u, v and f-points
+      !!
+      !! History :
+      !!   7.0  !  96-07  (O. Marti )  Original code
+      !!   8.0  !  98-06  (G. Madec )
+      !!   8.5  !  98-06  (G. Madec )  Free form, F90 + opt.
+      !!   9.2  !  07-04  (S. Masson)  Add T, F points and bugfix in cos lateral boundary
+      !!----------------------------------------------------------------------
+      INTEGER ::   ji, jj      ! dummy loop indices
+      !!
+      REAL(wp) ::   &
+         zlam, zphi,            &  ! temporary scalars
+         zlan, zphh,            &  !    "         "
+         zxnpt, zynpt, znnpt,   &  ! x,y components and norm of the vector: T point to North Pole
+         zxnpu, zynpu, znnpu,   &  ! x,y components and norm of the vector: U point to North Pole
+         zxnpv, zynpv, znnpv,   &  ! x,y components and norm of the vector: V point to North Pole
+         zxnpf, zynpf, znnpf,   &  ! x,y components and norm of the vector: F point to North Pole
+         zxvvt, zyvvt, znvvt,   &  ! x,y components and norm of the vector: between V points below and above a T point
+         zxffu, zyffu, znffu,   &  ! x,y components and norm of the vector: between F points below and above a U point
+         zxffv, zyffv, znffv,   &  ! x,y components and norm of the vector: between F points left  and right a V point
+         zxuuf, zyuuf, znuuf       ! x,y components and norm of the vector: between U points below and above a F point
+      !!----------------------------------------------------------------------
+
+      ! ============================= !
+      ! Compute the cosinus and sinus !
+      ! ============================= !
+      ! (computation done on the north stereographic polar plane)
+      ! Rows jj=1 and jj=jpj are not computed here; they are filled by the
+      ! lateral boundary conditions (lbc_lnk) at the end of this routine.
+      
+      DO jj = 2, (jpj-1)
+!CDIR NOVERRCHK
+         DO ji = 2, jpi   ! vector opt.
+
+            ! north pole direction & modulus (at t-point)
+            zlam = glamt(ji,jj)
+            zphi = gphit(ji,jj)
+            zxnpt = 0. - 2. * COS( rad*zlam ) * TAN( rpi/4. - rad*zphi/2. )
+            zynpt = 0. - 2. * SIN( rad*zlam ) * TAN( rpi/4. - rad*zphi/2. )
+            znnpt = zxnpt*zxnpt + zynpt*zynpt
+
+            ! north pole direction & modulus (at u-point)
+            zlam = glamu(ji,jj)
+            zphi = gphiu(ji,jj)
+            zxnpu = 0. - 2. * COS( rad*zlam ) * TAN( rpi/4. - rad*zphi/2. )
+            zynpu = 0. - 2. * SIN( rad*zlam ) * TAN( rpi/4. - rad*zphi/2. )
+            znnpu = zxnpu*zxnpu + zynpu*zynpu
+
+            ! north pole direction & modulus (at v-point)
+            zlam = glamv(ji,jj)
+            zphi = gphiv(ji,jj)
+            zxnpv = 0. - 2. * COS( rad*zlam ) * TAN( rpi/4. - rad*zphi/2. )
+            zynpv = 0. - 2. * SIN( rad*zlam ) * TAN( rpi/4. - rad*zphi/2. )
+            znnpv = zxnpv*zxnpv + zynpv*zynpv
+
+            ! north pole direction & modulus (at f-point)
+            zlam = glamf(ji,jj)
+            zphi = gphif(ji,jj)
+            zxnpf = 0. - 2. * COS( rad*zlam ) * TAN( rpi/4. - rad*zphi/2. )
+            zynpf = 0. - 2. * SIN( rad*zlam ) * TAN( rpi/4. - rad*zphi/2. )
+            znnpf = zxnpf*zxnpf + zynpf*zynpf
+
+            ! j-direction: v-point segment direction (around t-point)
+            zlam = glamv(ji,jj  )
+            zphi = gphiv(ji,jj  )
+            zlan = glamv(ji,jj-1)
+            zphh = gphiv(ji,jj-1)
+            zxvvt =  2. * COS( rad*zlam ) * TAN( rpi/4. - rad*zphi/2. )   &
+               &  -  2. * COS( rad*zlan ) * TAN( rpi/4. - rad*zphh/2. )
+            zyvvt =  2. * SIN( rad*zlam ) * TAN( rpi/4. - rad*zphi/2. )   &
+               &  -  2. * SIN( rad*zlan ) * TAN( rpi/4. - rad*zphh/2. )
+            znvvt = SQRT( znnpt * ( zxvvt*zxvvt + zyvvt*zyvvt )  )
+            znvvt = MAX( znvvt, 1.e-14 )
+
+            ! j-direction: f-point segment direction (around u-point)
+            zlam = glamf(ji,jj  )
+            zphi = gphif(ji,jj  )
+            zlan = glamf(ji,jj-1)
+            zphh = gphif(ji,jj-1)
+            zxffu =  2. * COS( rad*zlam ) * TAN( rpi/4. - rad*zphi/2. )   &
+               &  -  2. * COS( rad*zlan ) * TAN( rpi/4. - rad*zphh/2. )
+            zyffu =  2. * SIN( rad*zlam ) * TAN( rpi/4. - rad*zphi/2. )   &
+               &  -  2. * SIN( rad*zlan ) * TAN( rpi/4. - rad*zphh/2. )
+            znffu = SQRT( znnpu * ( zxffu*zxffu + zyffu*zyffu )  )
+            znffu = MAX( znffu, 1.e-14 )
+
+            ! i-direction: f-point segment direction (around v-point)
+            zlam = glamf(ji  ,jj)
+            zphi = gphif(ji  ,jj)
+            zlan = glamf(ji-1,jj)
+            zphh = gphif(ji-1,jj)
+            zxffv =  2. * COS( rad*zlam ) * TAN( rpi/4. - rad*zphi/2. )   &
+               &  -  2. * COS( rad*zlan ) * TAN( rpi/4. - rad*zphh/2. )
+            zyffv =  2. * SIN( rad*zlam ) * TAN( rpi/4. - rad*zphi/2. )   &
+               &  -  2. * SIN( rad*zlan ) * TAN( rpi/4. - rad*zphh/2. )
+            znffv = SQRT( znnpv * ( zxffv*zxffv + zyffv*zyffv )  )
+            znffv = MAX( znffv, 1.e-14 )
+
+            ! j-direction: u-point segment direction (around f-point)
+            zlam = glamu(ji,jj+1)
+            zphi = gphiu(ji,jj+1)
+            zlan = glamu(ji,jj  )
+            zphh = gphiu(ji,jj  )
+            zxuuf =  2. * COS( rad*zlam ) * TAN( rpi/4. - rad*zphi/2. )   &
+               &  -  2. * COS( rad*zlan ) * TAN( rpi/4. - rad*zphh/2. )
+            zyuuf =  2. * SIN( rad*zlam ) * TAN( rpi/4. - rad*zphi/2. )   &
+               &  -  2. * SIN( rad*zlan ) * TAN( rpi/4. - rad*zphh/2. )
+            znuuf = SQRT( znnpf * ( zxuuf*zxuuf + zyuuf*zyuuf )  )
+            znuuf = MAX( znuuf, 1.e-14 )
+
+            ! cosinus and sinus using scalar and vectorial products
+            gsint(ji,jj) = ( zxnpt*zyvvt - zynpt*zxvvt ) / znvvt
+            gcost(ji,jj) = ( zxnpt*zxvvt + zynpt*zyvvt ) / znvvt
+
+            gsinu(ji,jj) = ( zxnpu*zyffu - zynpu*zxffu ) / znffu
+            gcosu(ji,jj) = ( zxnpu*zxffu + zynpu*zyffu ) / znffu
+
+            gsinf(ji,jj) = ( zxnpf*zyuuf - zynpf*zxuuf ) / znuuf
+            gcosf(ji,jj) = ( zxnpf*zxuuf + zynpf*zyuuf ) / znuuf
+
+            ! (caution, rotation of 90 degrees: at V points the i-direction
+            !  segment is used, so sin and cos are swapped)
+            gsinv(ji,jj) = ( zxnpv*zxffv + zynpv*zyffv ) / znffv
+            gcosv(ji,jj) =-( zxnpv*zyffv - zynpv*zxffv ) / znffv
+
+         END DO
+      END DO
+
+      ! =============== !
+      ! Geographic mesh !
+      ! =============== !
+      ! Where the mesh is locally geographic (constant longitude along j, or
+      ! constant latitude along i for V points), force the angle to exactly
+      ! zero to remove round-off noise from the stereographic computation.
+
+      DO jj = 2, (jpj-1)
+         DO ji = 2, jpi   ! vector opt.
+            IF( MOD( ABS( glamv(ji,jj) - glamv(ji,jj-1) ), 360. ) < 1.e-8 ) THEN
+               gsint(ji,jj) = 0.
+               gcost(ji,jj) = 1.
+            ENDIF
+            IF( MOD( ABS( glamf(ji,jj) - glamf(ji,jj-1) ), 360. ) < 1.e-8 ) THEN
+               gsinu(ji,jj) = 0.
+               gcosu(ji,jj) = 1.
+            ENDIF
+            IF(      ABS( gphif(ji,jj) - gphif(ji-1,jj) )         < 1.e-8 ) THEN
+               gsinv(ji,jj) = 0.
+               gcosv(ji,jj) = 1.
+            ENDIF
+            IF( MOD( ABS( glamu(ji,jj) - glamu(ji,jj+1) ), 360. ) < 1.e-8 ) THEN
+               gsinf(ji,jj) = 0.
+               gcosf(ji,jj) = 1.
+            ENDIF
+         END DO
+      END DO
+
+      ! =========================== !
+      ! Lateral boundary conditions !
+      ! =========================== !
+
+      ! lateral boundary cond.: T-, U-, V-, F-pts, sgn
+      CALL lbc_lnk( gcost, 'T', -1._wp )   ;   CALL lbc_lnk( gsint, 'T', -1._wp )
+      CALL lbc_lnk( gcosu, 'U', -1._wp )   ;   CALL lbc_lnk( gsinu, 'U', -1._wp )
+      CALL lbc_lnk( gcosv, 'V', -1._wp )   ;   CALL lbc_lnk( gsinv, 'V', -1._wp )
+      CALL lbc_lnk( gcosf, 'F', -1._wp )   ;   CALL lbc_lnk( gsinf, 'F', -1._wp )
+
+   END SUBROUTINE angle
+
+
+END MODULE geo2ocean

+ 22 - 0
interpolation/rotateUVorca_sources/handle_err.f90

@@ -0,0 +1,22 @@
+module handerr
+
+contains
+
+  !-------------------------------------------------------------------
+  ! handle_err: report a netCDF error and abort the program.
+  !
+  ! Arguments:
+  !   errcode - status code returned by an nf_* netCDF call
+  !   var     - (optional) name of the variable being processed,
+  !             appended to the message to help locate the failure
+  !
+  ! The message is written to the error unit (stderr) and the program
+  ! stops with exit status 2.
+  !-------------------------------------------------------------------
+  subroutine handle_err(errcode, var)
+
+    use iso_fortran_env, only: error_unit
+    implicit none
+    include 'netcdf.inc'
+  
+    integer,intent(in) :: errcode
+    ! assumed-length dummy: accepts an actual argument of any length
+    ! (the original character*20 was nonconforming for shorter actual
+    ! arguments and silently truncated longer ones)
+    character(len=*),intent(in),optional :: var
+  
+    if (present(var)) then
+      write(error_unit,*) 'Error: ', nf_strerror(errcode), " var: ", var
+    else
+      write(error_unit,*) 'Error: ', nf_strerror(errcode)
+    endif
+    stop 2
+
+  end subroutine handle_err
+
+end module handerr

+ 137 - 0
interpolation/rotateUVorca_sources/lbclnk.f90

@@ -0,0 +1,137 @@
+MODULE lbclnk
+   !!======================================================================
+   !!                       ***  MODULE  lbclnk  ***
+   !! Ocean        : lateral boundary conditions
+   !!=====================================================================
+   !!  OPA 9.0 , LOCEAN-IPSL (2005) 
+   !! $Id: lbclnk.F90 1344 2009-03-27 14:02:19Z rblod $
+   !! This software is governed by the CeCILL licence see modipsl/doc/NEMO_CeCILL.txt 
+   !!----------------------------------------------------------------------
+   !!----------------------------------------------------------------------
+   !!   Default option                              shared memory computing
+   !!----------------------------------------------------------------------
+   !!   lbc_lnk      : set the lateral boundary condition on a 2D variable
+   !!                  on the OPA ocean mesh (2D-only, single-process
+   !!                  version of the NEMO generic lbc_lnk interface)
+   !!----------------------------------------------------------------------
+   !! * Modules used
+!   USE oce             ! ocean dynamics and tracers   
+   USE dom_oce         ! ocean space and time domain 
+!   USE in_out_manager  ! I/O manager
+   USE lbcnfd          ! north fold
+
+   IMPLICIT NONE
+   PRIVATE
+
+   PUBLIC lbc_lnk       ! ocean/ice  lateral boundary conditions
+   !!----------------------------------------------------------------------
+
+CONTAINS
+
+
+   SUBROUTINE lbc_lnk( pt2d, cd_type, psgn, cd_mpp, pval )
+      !!---------------------------------------------------------------------
+      !!                 ***  ROUTINE lbc_lnk_2d  ***
+      !!
+      !! ** Purpose :   set lateral boundary conditions (non mpp case)
+      !!
+      !! ** Method  :   the treatment depends on nperio (from dom_oce), which
+      !!                follows the NEMO jperio conventions:
+      !!                  1, 4, 6 : cyclic east-west
+      !!                  2       : south symmetric, north closed
+      !!                  3 - 6   : north fold (T- or F-pivot, via lbc_nfd)
+      !!                  other   : closed boundaries (set to zland)
+      !!
+      !! History :
+      !!        !  97-06  (G. Madec)  Original code
+      !!        !  01-05  (E. Durand)  correction
+      !!   8.5  !  02-09  (G. Madec)  F90: Free form and module
+      !!        !  09-03  (R. Benshila)  External north fold treatment  
+      !!----------------------------------------------------------------------
+      !! * Arguments
+      CHARACTER(len=1), INTENT( in ) ::   &
+         cd_type       ! nature of pt2d grid-point
+         !             !   = T , U , V , F or W  gridpoints
+         !             !   = I sea-ice U-V gridpoint (= F ocean grid point with indice shift)
+      REAL(wp), INTENT( in ) ::   &
+         psgn          ! control of the sign change
+         !             !   =-1 , the sign is modified following the type of b.c. used
+         !             !   = 1 , no sign change
+      REAL(wp), DIMENSION(jpi,jpj), INTENT( inout ) ::   &
+         pt2d          ! 2D array on which the boundary condition is applied
+      CHARACTER(len=3), INTENT( in ), OPTIONAL ::    &
+         cd_mpp        ! fill the overlap area only (here do nothing)
+      REAL(wp)        , INTENT(in   ), OPTIONAL           ::   pval      ! background value (used at closed boundaries)
+
+      !! * Local declarations
+      REAL(wp) ::   zland   ! value assigned at closed boundaries
+
+      IF( PRESENT( pval ) ) THEN      ! set land value (zero by default)
+         zland = pval
+      ELSE
+         zland = 0.e0
+      ENDIF 
+
+      IF (PRESENT(cd_mpp)) THEN
+         ! only fill the overlap area and extra allows 
+         ! this is in mpp case. In this module, just do nothing
+      ELSE      
+         !                                     ! East-West boundaries
+         !                                     ! ====================
+         SELECT CASE ( nperio )
+         !
+         CASE ( 1 , 4 , 6 )                       !** cyclic east-west
+            pt2d( 1 ,:) = pt2d(jpim1,:)               ! all points
+            pt2d(jpi,:) = pt2d(  2  ,:)
+            !
+         CASE DEFAULT                             !** East closed  --  West closed
+            SELECT CASE ( cd_type )
+            CASE ( 'T' , 'U' , 'V' , 'W' )            ! T-, U-, V-, W-points
+               pt2d( 1 ,:) = zland
+               pt2d(jpi,:) = zland
+            CASE ( 'F' )                              ! F-point
+               ! NOTE(review): only the east column is closed for F-points,
+               ! as in NEMO's lbc_lnk_2d -- confirm this is intended here
+               pt2d(jpi,:) = zland
+            END SELECT
+            !
+         END SELECT
+ 
+         !                                     ! North-South boundaries
+         !                                     ! ======================
+         SELECT CASE ( nperio )
+         !
+         CASE ( 2 )                               !**  South symmetric  --  North closed
+            SELECT CASE ( cd_type )
+            CASE ( 'T' , 'U' , 'W' )                   ! T-, U-, W-points
+               pt2d(:, 1 ) = pt2d(:,3)
+               pt2d(:,jpj) = zland
+            CASE ( 'V' , 'F' )                         ! V-, F-points
+               pt2d(:, 1 ) = psgn * pt2d(:,2)
+               pt2d(:,jpj) = zland
+            END SELECT
+            !
+         CASE ( 3 , 4 , 5 , 6 )                   !**  North fold  T or F-point pivot  --  South closed
+            SELECT CASE ( cd_type )                    ! South : closed
+            CASE ( 'T' , 'U' , 'V' , 'W' , 'I' )             ! all points except F-point
+               pt2d(:, 1 ) = zland
+            END SELECT
+            !                                          ! North fold
+            pt2d( 1 ,1  ) = zland 
+            pt2d( 1 ,jpj) = zland 
+            pt2d(jpi,jpj) = zland
+            CALL lbc_nfd( pt2d(:,:), cd_type, psgn )
+            !
+         CASE DEFAULT                             !**  North closed  --  South closed
+            SELECT CASE ( cd_type )
+            CASE ( 'T' , 'U' , 'V' , 'W' )             ! T-, U-, V-, W-points
+               pt2d(:, 1 ) = zland
+               pt2d(:,jpj) = zland
+            CASE ( 'F' )                               ! F-point
+               pt2d(:,jpj) = zland
+            END SELECT
+            !
+         END SELECT
+
+      ENDIF
+      
+   END SUBROUTINE lbc_lnk
+
+
+   !!======================================================================
+END MODULE lbclnk

+ 189 - 0
interpolation/rotateUVorca_sources/lbcnfd.f90

@@ -0,0 +1,189 @@
+MODULE lbcnfd
+   !!======================================================================
+   !!                       ***  MODULE  lbcnfd  ***
+   !! Ocean        : north fold  boundary conditions
+   !!======================================================================
+   !!             9.0  !  09-03  (R. Benshila) Initial version 
+   !!----------------------------------------------------------------------
+   !! * Modules used
+   !USE oce             ! ocean dynamics and tracers   
+   USE dom_oce         ! ocean space and time domain 
+   !USE in_out_manager  ! I/O manager
+
+   IMPLICIT NONE
+   PRIVATE
+
+   PUBLIC lbc_nfd       ! north fold conditions
+   !!----------------------------------------------------------------------
+
+CONTAINS
+
+   SUBROUTINE lbc_nfd( pt2d, cd_type, psgn, pr2dj )
+      !!----------------------------------------------------------------------
+      !!                  ***  routine lbc_nfd_2d  ***
+      !!
+      !! ** Purpose :   2D lateral boundary condition : North fold treatment
+      !!       without processor exchanges. 
+      !!
+      !! ** Method  :   the northernmost row(s) of pt2d are rebuilt from the
+      !!       rows just south of the fold, mirrored in i; the pivot type
+      !!       (T-point or F-point) is selected by npolj (from dom_oce)
+      !!
+      !! ** Action  :   pt2d with update value at its periphery
+      !!
+      !!----------------------------------------------------------------------
+      !! * Arguments
+      CHARACTER(len=1) , INTENT( in ) ::   &
+         cd_type       ! define the nature of ptab array grid-points
+      !             ! = T , U , V , F , W points
+      !             ! = S : T-point, north fold treatment ???
+      !             ! = G : F-point, north fold treatment ???
+      REAL(wp), INTENT( in ) ::   &
+         psgn          ! control of the sign change
+      !             !   = -1. , the sign is changed if north fold boundary
+      !             !   =  1. , the sign is kept  if north fold boundary
+      REAL(wp), DIMENSION(:,:), INTENT( inout ) ::   &
+         pt2d          ! 2D array on which the boundary condition is applied
+      INTEGER, OPTIONAL, INTENT(in) :: pr2dj   ! number of additional halo rows
+      !             ! (NOTE(review): integer despite the NEMO 'p' prefix,
+      !             !  which normally denotes a real dummy argument)
+
+      !! * Local declarations
+      INTEGER  ::   ji, jl, ipr2dj
+      INTEGER  ::   ijt, iju, ijpj, ijpjm1
+
+      ! ijpj = row index on which the fold is applied
+      ! (jpni = number of processors along i, from dom_oce)
+      SELECT CASE ( jpni )
+      CASE ( 1 )  ! only one proc along I
+         ijpj = nlcj
+      CASE DEFAULT 
+         ijpj = 4
+      END SELECT
+
+
+      IF( PRESENT(pr2dj) ) THEN
+         ipr2dj = pr2dj
+         IF (jpni .GT. 1) ijpj = ijpj + ipr2dj
+      ELSE
+         ipr2dj = 0 
+      ENDIF
+
+      ijpjm1 = ijpj-1
+
+
+      SELECT CASE ( npolj )
+
+      CASE ( 3, 4 )                       ! *  North fold  T-point pivot
+
+         SELECT CASE ( cd_type )
+
+         CASE ( 'T', 'S', 'W' )
+            DO jl = 0, ipr2dj
+               DO ji = 2, jpiglo
+                  ijt=jpiglo-ji+2
+                  pt2d(ji,ijpj+jl) = psgn * pt2d(ijt,ijpj-2-jl)
+               END DO
+            END DO
+            DO ji = jpiglo/2+1, jpiglo
+               ijt=jpiglo-ji+2
+               pt2d(ji,ijpj-1) = psgn * pt2d(ijt,ijpj-1)
+            END DO
+         CASE ( 'U' )                                     ! U-point
+            DO jl =0, ipr2dj
+               DO ji = 1, jpiglo-1
+                  iju = jpiglo-ji+1
+                  pt2d(ji,ijpj+jl) = psgn * pt2d(iju,ijpj-2-jl)
+               END DO
+            END DO
+            DO ji = jpiglo/2, jpiglo-1
+               iju = jpiglo-ji+1
+               pt2d(ji,ijpjm1) = psgn * pt2d(iju,ijpjm1)
+            END DO
+         CASE ( 'V' )                                     ! V-point
+            DO jl =-1, ipr2dj
+               DO ji = 2, jpiglo
+                  ijt = jpiglo-ji+2
+                  pt2d(ji,ijpj+jl) = psgn * pt2d(ijt,ijpj-3-jl)
+               END DO
+            END DO
+         CASE ( 'F' , 'G' )                               ! F-point
+            DO jl =-1, ipr2dj
+               DO ji = 1, jpiglo-1
+                  iju = jpiglo-ji+1
+                  pt2d(ji,ijpj+jl) = psgn * pt2d(iju,ijpj-3-jl)
+               END DO
+            END DO
+         CASE ( 'I' )                                     ! ice U-V point
+            DO jl =0, ipr2dj
+               pt2d(2,ijpj+jl) = psgn * pt2d(3,ijpj-1+jl)
+               DO ji = 3, jpiglo
+                  iju = jpiglo - ji + 3
+                  pt2d(ji,ijpj+jl) = psgn * pt2d(iju,ijpj-1-jl)
+               END DO
+            END DO
+         END SELECT
+
+      CASE ( 5, 6 )                        ! *  North fold  F-point pivot
+
+         SELECT CASE ( cd_type )
+         CASE ( 'T' , 'W' ,'S' )                          ! T-, W-point
+            DO jl = 0, ipr2dj
+               DO ji = 1, jpiglo
+                  ijt = jpiglo-ji+1
+                  pt2d(ji,ijpj+jl) = psgn * pt2d(ijt,ijpj-1-jl)
+               END DO
+            END DO
+         CASE ( 'U' )                                     ! U-point
+            DO jl = 0, ipr2dj
+               DO ji = 1, jpiglo-1
+                  iju = jpiglo-ji
+                  pt2d(ji,ijpj+jl) = psgn * pt2d(iju,ijpj-1-jl)
+               END DO
+            END DO
+         CASE ( 'V' )                                     ! V-point
+            DO jl = 0, ipr2dj
+               DO ji = 1, jpiglo
+                  ijt = jpiglo-ji+1
+                  pt2d(ji,ijpj+jl) = psgn * pt2d(ijt,ijpj-2-jl)
+               END DO
+            END DO
+            DO ji = jpiglo/2+1, jpiglo
+               ijt = jpiglo-ji+1
+               pt2d(ji,ijpjm1) = psgn * pt2d(ijt,ijpjm1)
+            END DO
+         CASE ( 'F' , 'G' )                               ! F-point
+            DO jl = 0, ipr2dj
+               DO ji = 1, jpiglo-1
+                  iju = jpiglo-ji
+                  pt2d(ji,ijpj+jl) = psgn * pt2d(iju,ijpj-2-jl)
+               END DO
+            END DO
+            DO ji = jpiglo/2+1, jpiglo-1
+               iju = jpiglo-ji
+               pt2d(ji,ijpjm1) = psgn * pt2d(iju,ijpjm1)
+            END DO
+         CASE ( 'I' )                                  ! ice U-V point
+            pt2d( 2 ,ijpj:ijpj+ipr2dj) = 0.e0
+            DO jl = 0, ipr2dj
+               DO ji = 2 , jpiglo-1
+                  ijt = jpiglo - ji + 2
+                  pt2d(ji,ijpj+jl)= 0.5 * ( pt2d(ji,ijpj-1-jl) + psgn * pt2d(ijt,ijpj-1-jl) )
+               END DO
+            END DO
+         END SELECT
+
+      CASE DEFAULT                           ! *  closed : the code probably never go through
+
+         !SELECT CASE ( cd_type)
+         !CASE ( 'T' , 'U' , 'V' , 'W' )                 ! T-, U-, V-, W-points
+         !   pt2d(:, 1:1-ipr2dj     ) = 0.e0
+         !   pt2d(:,ijpj:ijpj+ipr2dj) = 0.e0
+         !CASE ( 'F' )                                   ! F-point
+         !   pt2d(:,ijpj:ijpj+ipr2dj) = 0.e0
+         !CASE ( 'I' )                                   ! ice U-V point
+         !   pt2d(:, 1:1-ipr2dj     ) = 0.e0
+         !   pt2d(:,ijpj:ijpj+ipr2dj) = 0.e0
+         !END SELECT
+
+      END SELECT
+      
+
+   END SUBROUTINE lbc_nfd
+
+   !!======================================================================
+END MODULE lbcnfd

+ 61 - 0
interpolation/rotateUVorca_sources/makefile

@@ -0,0 +1,61 @@
+# Makefile for the rotateUVorca tool (GNU make + gfortran + netCDF).
+# Build with `make`; the executable is placed in $(EXEDIR).
+#
+COMPILE = gfortran
+FLAGS   = -O3 -I/usr/include -L/usr/lib
+LIB     = -lnetcdf -lnetcdff
+INCLUDE =
+SRCDIR  = .
+EXEDIR  = ../.
+
+# All objects linked into the executable (rotateUVorca.o is the main program).
+# BUGFIX: the former OBJTEST list duplicated OBJSET minus rotateUVorca.o and
+# was used only as prerequisites while OBJSET was linked — a drift hazard.
+# A single list is now both the prerequisite set and the link line.
+OBJSET  = \
+	phycst.o \
+	geo2ocean.o \
+	dom_oce.o \
+	lbcnfd.o \
+	lbclnk.o \
+	par_kind.o \
+	handle_err.o \
+	rotateUVorca.o
+
+all: $(EXEDIR)/rotateUVorca
+
+$(EXEDIR)/rotateUVorca: $(OBJSET)
+	$(COMPILE) $(FLAGS) $(OBJSET) $(LIB) -o $(EXEDIR)/rotateUVorca
+
+handle_err.o: $(SRCDIR)/handle_err.f90 $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/handle_err.f90
+
+phycst.o: $(SRCDIR)/phycst.f90 par_kind.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/phycst.f90
+
+dom_oce.o: $(SRCDIR)/dom_oce.f90 par_kind.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/dom_oce.f90
+
+lbcnfd.o: $(SRCDIR)/lbcnfd.f90 par_kind.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/lbcnfd.f90
+
+lbclnk.o: $(SRCDIR)/lbclnk.f90 lbcnfd.o par_kind.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/lbclnk.f90
+
+geo2ocean.o: $(SRCDIR)/geo2ocean.f90 dom_oce.o phycst.o \
+	par_kind.o lbclnk.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/geo2ocean.f90
+
+par_kind.o: $(SRCDIR)/par_kind.f90 $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/par_kind.f90
+
+rotateUVorca.o: $(SRCDIR)/rotateUVorca.f90 geo2ocean.o \
+	par_kind.o handle_err.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/rotateUVorca.f90
+
+# BUGFIX: -f so clean does not fail when there is nothing to remove.
+clean:
+	/bin/rm -f *.o *.mod
+
+

+ 49 - 0
interpolation/rotateUVorca_sources/par_kind.f90

@@ -0,0 +1,49 @@
+MODULE par_kind
+   !!======================================================================
+   !!                   ***  MODULE par_kind  ***
+   !! Ocean :  define the kind of real for the whole model
+   !! Also hard-codes the grid dimensions (jpi, jpj, ...) used by the
+   !! rotateUVorca tool -- edit these when targeting a different grid.
+   !!======================================================================
+   !! History :
+   !!   8.5   02/06  (G. Madec)  Original code
+   !!----------------------------------------------------------------------
+   !!   OPA 9.0 , LOCEAN-IPSL (2005) 
+   !! $Id: par_kind.F90 1152 2008-06-26 14:11:13Z rblod $ 
+   !! This software is governed by the CeCILL licence see modipsl/doc/NEMO_CeCILL.txt 
+   !!----------------------------------------------------------------------
+
+   IMPLICIT NONE
+   PRIVATE
+
+   INTEGER, PUBLIC, PARAMETER ::    &  !: byte sizes (legacy NEMO constants)
+      jpbyt   = 8       ,           &  !: real size for mpp communications
+      jpbytda = 4       ,           &  !: real size in input data files 4 or 8
+      jpbi3e  = 4                      !: real size for T3E
+
+   ! Number model from which the SELECTED_*_KIND are requested:
+   !             4 byte REAL       8 byte REAL
+   ! CRAY:           -            precision = 13
+   !                              exponent = 2465
+   ! IEEE:      precision = 6     precision = 15
+   !            exponent = 37     exponent = 307
+
+   INTEGER, PUBLIC, PARAMETER ::        &  !: Floating point section
+      !sp = SELECTED_REAL_KIND( 6, 37),  &  !: single precision (real 4)
+      sp = KIND(1.0),  &  !: single precision: kind of the default real
+      !dp = SELECTED_REAL_KIND(12,307),  &  !: double precision (real 8)
+      dp = SELECTED_REAL_KIND(2*precision(1.0_sp)),  &  !: double precision: twice the default real precision
+      wp = dp                              !: working precision
+
+   INTEGER, PUBLIC, PARAMETER ::        &  !: Integer section
+      !i4 = SELECTED_INT_KIND(9) ,       &  !: single precision (integer 4)
+      i4 = KIND(1) ,       &  !: single precision (default integer kind)
+      i8 = KIND(1)           !: NOTE(review): KIND(1) is the DEFAULT integer
+                             !  kind, NOT an 8-byte integer -- confirm intended
+
+   ! Grid sizes: 1442 x 1021 (presumably the ORCA025 grid -- confirm
+   ! against the meshmask file actually used).
+   INTEGER , PUBLIC, PARAMETER ::   jpi = 1442 
+   INTEGER , PUBLIC, PARAMETER ::   jpim1 = jpi-1
+   INTEGER , PUBLIC, PARAMETER ::   jpj = 1021
+   INTEGER , PUBLIC, PARAMETER ::   jpk = 2
+   INTEGER , PUBLIC, PARAMETER ::   jpiglo = 1442
+   INTEGER , PUBLIC, PARAMETER ::   jpni =1
+
+!!----------------------------------------------------------------------
+END MODULE par_kind

+ 95 - 0
interpolation/rotateUVorca_sources/phycst.f90

@@ -0,0 +1,95 @@
+MODULE phycst
+   !!======================================================================
+   !!                    ***  MODULE  phycst  ***
+   !!     Definition of both ocean and ice parameters used in the code
+   !!=====================================================================
+   !! History :   OPA  !  1990-10  (C. Levy - G. Madec)  Original code
+   !!             8.1  !  1991-11  (G. Madec, M. Imbard)  cosmetic changes
+   !!   NEMO      1.0  !  2002-08  (G. Madec, C. Ethe)  F90, add ice constants
+   !!              -   !  2006-08  (G. Madec)  style 
+   !!             3.2  !  2006-08  (S. Masson, G. Madec)  suppress useless variables + style 
+   !!----------------------------------------------------------------------
+
+   !!----------------------------------------------------------------------
+   !!   phy_cst  : define and print physical constant and domain parameters
+   !!   NOTE: the derived constants (rsiyea, rsiday, omega, rau0r, ro0cpr)
+   !!   are only valid after phy_cst has been called.
+   !!----------------------------------------------------------------------
+
+   use par_kind 
+
+   IMPLICIT NONE
+   PRIVATE
+
+   PUBLIC   phy_cst     ! routine called by inipar.F90
+
+   REAL(wp), PUBLIC ::   rpi = 3.141592653589793_wp             !: pi
+   REAL(wp), PUBLIC ::   rad = 3.141592653589793_wp / 180._wp   !: conversion from degrees into radians
+   REAL(wp), PUBLIC ::   rsmall = 0.5 * EPSILON( 1.e0 )         !: smallest real computer value
+   
+   REAL(wp), PUBLIC ::   rday = 24.*60.*60.       !: day (s)
+   REAL(wp), PUBLIC ::   rsiyea                   !: sidereal year (s) -- set in phy_cst
+   REAL(wp), PUBLIC ::   rsiday                   !: sidereal day (s)  -- set in phy_cst
+   REAL(wp), PUBLIC ::   raamo =  12._wp          !: number of months in one year
+   REAL(wp), PUBLIC ::   rjjhh =  24._wp          !: number of hours in one day
+   REAL(wp), PUBLIC ::   rhhmm =  60._wp          !: number of minutes in one hour
+   REAL(wp), PUBLIC ::   rmmss =  60._wp          !: number of seconds in one minute
+!! REAL(wp), PUBLIC ::   omega = 7.292115083046061e-5_wp ,  &  !: change the last digit!
+   REAL(wp), PUBLIC ::   omega                    !: earth rotation parameter -- set in phy_cst
+   REAL(wp), PUBLIC ::   ra    = 6371229._wp      !: earth radius (meter)
+   REAL(wp), PUBLIC ::   grav  = 9.80665_wp       !: gravity (m/s2)
+   
+   REAL(wp), PUBLIC ::   rtt      = 273.16_wp     !: triple point of temperature (Kelvin)
+   REAL(wp), PUBLIC ::   rt0      = 273.15_wp     !: freezing point of water (Kelvin)
+   REAL(wp), PUBLIC ::   rt0_snow = 273.15_wp     !: melting point of snow  (Kelvin)
+   REAL(wp), PUBLIC ::   rt0_ice  = 273.05_wp     !: melting point of ice   (Kelvin)
+
+   REAL(wp), PUBLIC ::   rau0     = 1020._wp      !: reference volumic mass (density)  (kg/m3)
+   REAL(wp), PUBLIC ::   rau0r                    !: reference specific volume (m3/kg) -- set in phy_cst
+   REAL(wp), PUBLIC ::   rcp      =    4.e+3_wp   !: ocean specific heat
+   REAL(wp), PUBLIC ::   ro0cpr                   !: = 1. / ( rau0 * rcp ) -- set in phy_cst
+
+   REAL(wp), PUBLIC ::   rcdsn   =   0.22_wp      !: conductivity of the snow
+   REAL(wp), PUBLIC ::   rcdic   =   2.034396_wp  !: conductivity of the ice
+   REAL(wp), PUBLIC ::   rcpsn   =   6.9069e+5_wp !: density times specific heat for snow
+   REAL(wp), PUBLIC ::   rcpic   =   1.8837e+6_wp !: volumetric latent heat fusion of sea ice
+   REAL(wp), PUBLIC ::   xlsn    = 110.121e+6_wp  !: volumetric latent heat fusion of snow
+   REAL(wp), PUBLIC ::   xlic    = 300.33e+6_wp   !: volumetric latent heat fusion of ice
+   REAL(wp), PUBLIC ::   xsn     =   2.8e+6       !: latent heat of sublimation of snow
+   REAL(wp), PUBLIC ::   rhoic   = 900._wp        !: volumic mass of sea ice (kg/m3)
+   REAL(wp), PUBLIC ::   rhosn   = 330._wp        !: volumic mass of snow (kg/m3)
+   REAL(wp), PUBLIC ::   emic    =   0.97_wp      !: emissivity of snow or ice
+   REAL(wp), PUBLIC ::   sice    =   6.0_wp       !: reference salinity of ice (psu)
+   REAL(wp), PUBLIC ::   soce    =  34.7_wp       !: reference salinity of sea (psu)
+   REAL(wp), PUBLIC ::   cevap   =   2.5e+6_wp    !: latent heat of evaporation (water)
+   REAL(wp), PUBLIC ::   srgamma =   0.9_wp       !: correction factor for solar radiation (Oberhuber, 1974)
+   REAL(wp), PUBLIC ::   vkarmn  =   0.4_wp       !: von Karman constant
+   REAL(wp), PUBLIC ::   stefan  =   5.67e-8_wp   !: Stefan-Boltzmann constant 
+   !!----------------------------------------------------------------------
+   !! NEMO/OPA 3.2 , LOCEAN-IPSL (2009) 
+   !! $Id: phycst.F90 1932 2010-06-15 10:28:20Z smasson $ 
+   !! Software governed by the CeCILL licence (modipsl/doc/NEMO_CeCILL.txt)
+   !!----------------------------------------------------------------------
+   
+CONTAINS
+   
+   SUBROUTINE phy_cst
+      !!----------------------------------------------------------------------
+      !!                       ***  ROUTINE phy_cst  ***
+      !!
+      !! ** Purpose :   Print model parameters and set and print the constants
+      !!                (the print statements of the original NEMO routine were
+      !!                removed here; only the derived constants are computed)
+      !!----------------------------------------------------------------------
+      CHARACTER (len=64) ::   cform = "(A12, 3(A13, I7) )"   ! print format, unused in this stripped-down version
+      !!----------------------------------------------------------------------
+
+      !                                   ! Define additional parameters
+      rsiyea = 365.25 * rday * 2. * rpi / 6.283076
+      rsiday = rday / ( 1. + rday / rsiyea )
+      omega  = 2. * rpi / rsiday 
+
+      rau0r  = 1. /   rau0  
+      ro0cpr = 1. / ( rau0 * rcp )
+
+
+   END SUBROUTINE phy_cst
+
+   !!======================================================================
+END MODULE phycst

+ 249 - 0
interpolation/rotateUVorca_sources/rotateUVorca.f90

@@ -0,0 +1,249 @@
+program rotateUVorca
+!==============================================================================
+! This program rotates U and V components from the geographical directions 
+! toward the spherical grid directions based on NEMO3.2 routines 
+!
+! Parameters (input/output file names, variable names, meshmask path) are
+! read from the namelist file 'namelist_rotateUVorca' in the working
+! directory.  Input fields may be (lon, lat) or (lon, lat, time).
+!
+! Written on 2012/02/21
+! Author : Virginie Guemas
+!==============================================================================
+!  
+     use par_kind
+     use netcdf
+     use geo2ocean
+     use dom_oce
+     use handerr
+
+     implicit none
+
+     ! NOTE(review): this program mixes the Fortran 77 (nf_*) and Fortran 90
+     ! (nf90_*) netCDF interfaces; both must be available at link time.
+     include 'netcdf.inc'
+  
+     character (80) :: &
+     & Ufilein,  &  ! filename containing the Eastward component
+     & Uvarin,   &  ! name of the Eastward component
+     & Vfilein,  &  ! filename containing the Northward component
+     & Vvarin,   &  ! name of the Northward component
+     & meshmask, &  ! name of the meshmask 
+     & Ufileout, &  ! U output file
+     & Vfileout     ! V output file
+
+     integer :: nc_fileU_id, nc_fileV_id, nc_filemask_id, nc_varU_id, &
+     & nc_varV_id, nc_time_id, nc_outfile_id, ncstat,                 &
+     & nc_glamt_id, nc_glamu_id, nc_glamv_id, nc_glamf_id,            &
+     & nc_gphit_id, nc_gphiu_id, nc_gphiv_id, nc_gphif_id
+
+     integer, dimension(:), allocatable :: nc_dims_ids   ! dimension ids of the field (size = ndims)
+ 
+     character (80) :: timename
+
+     integer :: ndims, ntime, jtime
+
+     real (kind=wp), dimension(:), allocatable ::           &
+     &    time
+
+     real (kind=wp), dimension(:,:,:), allocatable ::       &
+     &    Ufield, Vfield, Ufield2, Vfield2
+!
+!==============================================================================
+! 
+     namelist /nam_rotUV/ Ufilein, Uvarin, Vfilein, Vvarin, &
+     &                    meshmask, Ufileout, Vfileout
+!
+!==============================================================================
+!
+!                             Read namelist
+!
+!============================================================================== 
+!
+      open(80, file='namelist_rotateUVorca', status='old', form='formatted')
+      read(80, nml=nam_rotUV)
+      write(*,nml=nam_rotUV)
+      close(80)     ! BUGFIX: the namelist unit was never closed
+!
+!============================================================================== 
+!
+!                       Read input (U,V) components
+!
+!==============================================================================
+!
+     ncstat = nf_open(Ufilein, NF90_NOWRITE, nc_fileU_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Open file Ufilein   ")
+     ncstat = nf_inq_varid(nc_fileU_id, Uvarin, nc_varU_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Inq ID varU         ")
+     ncstat = nf_inq_varndims(nc_fileU_id, nc_varU_id, ndims)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Inq dims varU       ")
+     if ( ndims < 2 .or. ndims > 3) then
+        stop "Input files should have (lon, lat) or (lon, lat, time) &
+     &       dimensions"
+     endif
+
+     ! BUGFIX: nc_dims_ids used to be allocated only in the 3D branch, so a
+     ! purely 2D (lon, lat) input later hit size() on an unallocated array,
+     ! used it in nf_def_dim, and deallocated it at the end of the program.
+     allocate(nc_dims_ids(ndims))
+     ncstat = nf_inq_vardimid(nc_fileU_id, nc_varU_id, nc_dims_ids)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Inq ID dims varU    ")
+
+     if ( ndims == 3) then
+       ncstat = nf90_inquire_dimension(nc_fileU_id, nc_dims_ids(3), & 
+     & timename, ntime)
+       if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Inq. dims time      ")
+       allocate(time(ntime))
+       time=0.
+       ! NOTE(review): the time coordinate is not read back from the input
+       ! file (nc_time_id is never inquired); the output gets a dummy axis.
+       !ncstat = nf90_get_var(nc_fileU_id, nc_time_id, time)
+       !if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Get var time        ")
+     else 
+       ntime=1
+       allocate(time(1))
+       time=1.
+     endif
+
+     print*, jpi, jpj, ntime
+     allocate( Ufield (jpi, jpj, ntime))
+     ncstat = nf90_get_var(nc_fileU_id, nc_varU_id, Ufield)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Get var varU        ")
+     ncstat = nf_close(nc_fileU_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Close fileU         ")
+
+
+     ncstat = nf_open(Vfilein, NF90_NOWRITE, nc_fileV_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Open fileV          ")
+     ncstat = nf_inq_varid(nc_fileV_id, Vvarin, nc_varV_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Inq. ID varV        ")
+     ncstat = nf_inq_varndims(nc_fileV_id, nc_varV_id, ndims) 
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Inq. dims varV      ")
+     if ( ndims /= size(nc_dims_ids) ) then
+        stop "Input files should have the same dimensions"
+     endif
+
+     allocate( Vfield (jpi, jpj, ntime))
+     ncstat = nf90_get_var(nc_fileV_id, nc_varV_id, Vfield)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Get var varV        ")
+     ncstat = nf_close(nc_fileV_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Close fileV         ")
+!==============================================================================
+!
+!                           Read meshmask
+!
+!==============================================================================
+!
+     ! The glam* / gphi* targets are module arrays provided by dom_oce.
+     ncstat = nf_open(meshmask, NF90_NOWRITE, nc_filemask_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Open filemask       ")
+
+     ncstat = nf_inq_varid(nc_filemask_id, 'glamt', nc_glamt_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Inq ID glamt        ")
+     ncstat = nf90_get_var(nc_filemask_id, nc_glamt_id, glamt)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Get var glamt       ")
+
+     ncstat = nf_inq_varid(nc_filemask_id, 'glamf', nc_glamf_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Inq ID glamf        ")
+     ncstat = nf90_get_var(nc_filemask_id, nc_glamf_id, glamf)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Get var glamf       ")
+  
+     ncstat = nf_inq_varid(nc_filemask_id, 'glamu', nc_glamu_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Inq ID glamu        ")
+     ncstat = nf90_get_var(nc_filemask_id, nc_glamu_id, glamu)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Get var glamu       ")
+
+     ncstat = nf_inq_varid(nc_filemask_id, 'glamv', nc_glamv_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Inq ID glamv        ")
+     ncstat = nf90_get_var(nc_filemask_id, nc_glamv_id, glamv)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Get var glamv       ")
+
+     ncstat = nf_inq_varid(nc_filemask_id, 'gphit', nc_gphit_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Inq ID gphit        ")
+     ncstat = nf90_get_var(nc_filemask_id, nc_gphit_id, gphit)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Get var gphit       ")
+
+     ncstat = nf_inq_varid(nc_filemask_id, 'gphif', nc_gphif_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Inq ID gphif        ")
+     ncstat = nf90_get_var(nc_filemask_id, nc_gphif_id, gphif)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Get var gphif       ")
+
+     ncstat = nf_inq_varid(nc_filemask_id, 'gphiu', nc_gphiu_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Inq ID gphiu        ")
+     ncstat = nf90_get_var(nc_filemask_id, nc_gphiu_id, gphiu)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Get var gphiu       ")
+
+     ncstat = nf_inq_varid(nc_filemask_id, 'gphiv', nc_gphiv_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Inq ID gphiv        ")
+     ncstat = nf90_get_var(nc_filemask_id, nc_gphiv_id, gphiv)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Get var gphiv       ")
+
+     ncstat = nf_close(nc_filemask_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat, "Close filemask      ")
+!
+!==============================================================================
+! 
+!                          Perform rotation
+!
+!==============================================================================
+!
+      ! rot_rep (geo2ocean) projects the geographic (east,north) vector onto
+      ! the model grid i- and j- directions, one time record at a time.
+      allocate( Ufield2 (jpi, jpj, ntime))
+      allocate( Vfield2 (jpi, jpj, ntime))
+      do jtime = 1,ntime
+        call rot_rep(Ufield(:,:,jtime),Vfield(:,:,jtime),'T','en->i',Ufield2(:,:,jtime))
+        call rot_rep(Ufield(:,:,jtime),Vfield(:,:,jtime),'T','en->j',Vfield2(:,:,jtime))
+      end do
+      deallocate(Ufield,Vfield)
+!
+!============================================================================== 
+!
+!                        Create output netcdf
+!
+!============================================================================== 
+!
+     ncstat = nf_create (Ufileout, NF_CLOBBER, nc_outfile_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+     ncstat = nf_def_dim (nc_outfile_id, 'x', jpi, nc_dims_ids(1))
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+     ncstat = nf_def_dim (nc_outfile_id, 'y', jpj, nc_dims_ids(2))
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+     if ( ndims > 2) then 
+       ncstat = nf_def_dim (nc_outfile_id, 'time', ntime, nc_dims_ids(3))
+       if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+       ncstat = nf_def_var (nc_outfile_id, 'time', NF_DOUBLE, 1, &
+    &           nc_dims_ids(3), nc_time_id)
+       if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+     endif
+     ! BUGFIX: the variable was defined with a hard-coded 3 dimensions even
+     ! for 2D input; use the actual rank instead.
+     ncstat = nf_def_var (nc_outfile_id, Uvarin, NF_DOUBLE, ndims,      &
+    &                    nc_dims_ids, nc_varU_id )
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+     ncstat = nf_enddef(nc_outfile_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+
+     ncstat = nf_put_var_double(nc_outfile_id, nc_varU_id, Ufield2)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+     if ( ndims > 2) then
+       ncstat = nf_put_var_double(nc_outfile_id, nc_time_id, time)
+       if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+     endif
+     ncstat = nf_close(nc_outfile_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+!
+!============================================================================== 
+!
+     ncstat = nf_create (Vfileout, NF_CLOBBER, nc_outfile_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+     ncstat = nf_def_dim (nc_outfile_id, 'x', jpi, nc_dims_ids(1))
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+     ncstat = nf_def_dim (nc_outfile_id, 'y', jpj, nc_dims_ids(2))
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+     if ( ndims > 2) then 
+       ncstat = nf_def_dim (nc_outfile_id, 'time', ntime, nc_dims_ids(3))
+       if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+       ncstat = nf_def_var (nc_outfile_id, 'time', NF_DOUBLE, 1, &
+    &           nc_dims_ids(3), nc_time_id)
+       if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+     endif
+     ! BUGFIX: same hard-coded rank for the V variable; use ndims.
+     ncstat = nf_def_var (nc_outfile_id, Vvarin, NF_DOUBLE, ndims,      &
+    &                    nc_dims_ids, nc_varV_id )
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+     ncstat = nf_enddef(nc_outfile_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+
+     ncstat = nf_put_var_double(nc_outfile_id, nc_varV_id, Vfield2)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+     if ( ndims > 2) then
+       ncstat = nf_put_var_double(nc_outfile_id, nc_time_id, time)
+       if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+     endif
+     ncstat = nf_close(nc_outfile_id)
+     if (ncstat .ne. nf_noerr) call handle_err(ncstat)
+     deallocate(Ufield2,Vfield2,time,nc_dims_ids)
+
+end program rotateUVorca

+ 65 - 0
interpolation/scrip_sources/constants.f

@@ -0,0 +1,65 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+!     This module defines common constants used in many routines.
+!
+!-----------------------------------------------------------------------
+!
+!     CVS:$Id: constants.f,v 1.2 2000/04/19 21:56:25 pwjones Exp $
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!***********************************************************************
+
+      module constants
+
+!-----------------------------------------------------------------------
+!     Common numeric constants (small integers, simple fractions and
+!     multiples of pi) in double precision, shared by the SCRIP sources.
+!     NOTE(review): the parameter 'tiny' shadows the Fortran intrinsic
+!     TINY() in any routine that uses this module.
+!-----------------------------------------------------------------------
+
+      use kinds_mod  ! defines common data types
+
+      implicit none
+
+      save
+
+!-----------------------------------------------------------------------
+
+      real (kind = dbl_kind), parameter :: 
+     &                        zero   = 0.0_dbl_kind,
+     &                        one    = 1.0_dbl_kind,
+     &                        two    = 2.0_dbl_kind,
+     &                        three  = 3.0_dbl_kind,
+     &                        four   = 4.0_dbl_kind,
+     &                        five   = 5.0_dbl_kind,
+     &                        half   = 0.5_dbl_kind,
+     &                        quart  = 0.25_dbl_kind,
+     &                        bignum = 1.e+20_dbl_kind,
+     &                        tiny   = 1.e-14_dbl_kind,
+     &                        pi     = 3.14159265359_dbl_kind,
+     &                        pi2    = two*pi,
+     &                        pih    = half*pi
+
+!-----------------------------------------------------------------------
+
+      end module constants
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

+ 26 - 0
interpolation/scrip_sources/copyright

@@ -0,0 +1,26 @@
+!-----------------------------------------------------------------------
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!-----------------------------------------------------------------------

+ 831 - 0
interpolation/scrip_sources/grids.f

@@ -0,0 +1,831 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+!     This module reads in and initializes two grids for remapping.
+!     NOTE: grid1 must be the master grid -- the grid that determines
+!           which cells participate (e.g. land mask) and the fractional
+!           area of grid2 cells that participate in the remapping.
+!
+!-----------------------------------------------------------------------
+!
+!     CVS:$Id: grids.f,v 1.6 2001/08/21 21:06:41 pwjones Exp $
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!***********************************************************************
+
+      module grids
+
+!-----------------------------------------------------------------------
+
+      use kinds_mod    ! defines data types
+      use constants    ! common constants
+      use iounits      ! I/O unit manager
+      use netcdf_mod   ! netCDF stuff
+
+      implicit none
+
+!-----------------------------------------------------------------------
+!
+!     variables that describe each grid
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), save ::
+     &             grid1_size, grid2_size, ! total points on each grid
+     &             grid1_rank, grid2_rank, ! rank of each grid
+     &             grid1_corners, grid2_corners ! number of corners
+                                                ! for each grid cell
+
+      integer (kind=int_kind), dimension(:), allocatable, save ::
+     &             grid1_dims, grid2_dims  ! size of each grid dimension
+
+      character(char_len), save :: 
+     &             grid1_name, grid2_name  ! name for each grid
+
+      character (char_len), save :: 
+     &             grid1_units, ! units for grid coords (degs/radians)
+     &             grid2_units  ! units for grid coords
+
+      real (kind=dbl_kind), parameter ::
+     &      deg2rad = pi/180.   ! conversion for deg to rads
+
+!-----------------------------------------------------------------------
+!
+!     grid coordinates and masks
+!
+!-----------------------------------------------------------------------
+
+      logical (kind=log_kind), dimension(:), allocatable, save ::
+     &             grid1_mask,        ! flag which cells participate
+     &             grid2_mask         ! flag which cells participate
+
+      real (kind=dbl_kind), dimension(:), allocatable, save ::
+     &             grid1_center_lat,  ! lat/lon coordinates for
+     &             grid1_center_lon,  ! each grid center in radians
+     &             grid2_center_lat, 
+     &             grid2_center_lon,
+     &             grid1_area,        ! tot area of each grid1 cell
+     &             grid2_area,        ! tot area of each grid2 cell
+     &             grid1_area_in,     ! area of grid1 cell from file
+     &             grid2_area_in,     ! area of grid2 cell from file
+     &             grid1_frac,        ! fractional area of grid cells
+     &             grid2_frac         ! participating in remapping
+
+      real (kind=dbl_kind), dimension(:,:), allocatable, save ::
+     &             grid1_corner_lat,  ! lat/lon coordinates for
+     &             grid1_corner_lon,  ! each grid corner in radians
+     &             grid2_corner_lat, 
+     &             grid2_corner_lon
+
+      logical (kind=log_kind), save ::
+     &             luse_grid_centers ! use centers for bounding boxes
+     &,            luse_grid1_area   ! use area from grid file
+     &,            luse_grid2_area   ! use area from grid file
+
+      real (kind=dbl_kind), dimension(:,:), allocatable, save ::
+     &             grid1_bound_box,  ! lat/lon bounding box for use
+     &             grid2_bound_box   ! in restricting grid searches
+
+!-----------------------------------------------------------------------
+!
+!     bins for restricting searches
+!
+!-----------------------------------------------------------------------
+
+      character (char_len), save ::
+     &        restrict_type  ! type of bins to use
+
+      integer (kind=int_kind), save ::
+     &        num_srch_bins  ! num of bins for restricted srch
+
+      integer (kind=int_kind), dimension(:,:), allocatable, save ::
+     &        bin_addr1, ! min,max addresses of grid1 cells in each bin
+     &        bin_addr2  ! min,max addresses of grid2 cells in each bin
+
+      real(kind=dbl_kind), dimension(:,:), allocatable, save ::
+     &        bin_lats   ! min,max latitude for each search bin
+     &,       bin_lons   ! min,max longitude for each search bin
+
+!***********************************************************************
+
+      contains
+
+!***********************************************************************
+
+      subroutine grid_init(grid1_file, grid2_file)
+
+!-----------------------------------------------------------------------
+!
+!     this routine reads grid info from grid files and makes any
+!     necessary changes (e.g. for 0,2pi longitude range)
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     input variables
+!
+!-----------------------------------------------------------------------
+
+      character(char_len), intent(in) :: 
+     &             grid1_file, grid2_file  ! grid data files
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind) :: 
+     &  n      ! loop counter
+     &, nele   ! element loop counter
+     &, iunit  ! unit number for opening files
+     &, i,j    ! logical 2d addresses
+     &, ip1,jp1
+     &, n_add, e_add, ne_add
+     &, nx, ny
+
+      integer (kind=int_kind) :: 
+     &         ncstat,           ! netCDF status variable
+     &         nc_grid1_id,       ! netCDF grid file id
+     &         nc_grid2_id,       ! netCDF grid file id
+     &         nc_grid1size_id,   ! netCDF grid size dim id
+     &         nc_grid2size_id,   ! netCDF grid size dim id
+     &         nc_grid1corn_id,   ! netCDF grid corner dim id
+     &         nc_grid2corn_id,   ! netCDF grid corner dim id
+     &         nc_grid1rank_id,   ! netCDF grid rank dim id
+     &         nc_grid2rank_id,   ! netCDF grid rank dim id
+     &         nc_grid1area_id,   ! netCDF grid area var id
+     &         nc_grid2area_id,   ! netCDF grid area var id
+     &         nc_grid1dims_id,   ! netCDF grid dimension size id
+     &         nc_grid2dims_id,   ! netCDF grid dimension size id
+     &         nc_grd1imask_id,   ! netCDF grid imask var id
+     &         nc_grd2imask_id,   ! netCDF grid imask var id
+     &         nc_grd1crnrlat_id, ! netCDF grid corner lat var id
+     &         nc_grd2crnrlat_id, ! netCDF grid corner lat var id
+     &         nc_grd1crnrlon_id, ! netCDF grid corner lon var id
+     &         nc_grd2crnrlon_id, ! netCDF grid corner lon var id
+     &         nc_grd1cntrlat_id, ! netCDF grid center lat var id
+     &         nc_grd2cntrlat_id, ! netCDF grid center lat var id
+     &         nc_grd1cntrlon_id, ! netCDF grid center lon var id
+     &         nc_grd2cntrlon_id  ! netCDF grid center lon var id
+
+      integer (kind=int_kind), dimension(:), allocatable :: 
+     &                            imask ! integer mask read from file
+
+      real (kind=dbl_kind) :: 
+     &  dlat,dlon           ! lat/lon intervals for search bins
+
+      real (kind=dbl_kind), dimension(4) ::
+     &  tmp_lats, tmp_lons  ! temps for computing bounding boxes
+
+!-----------------------------------------------------------------------
+!
+!     open grid files and read grid size/name data
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_open(grid1_file, NF_NOWRITE, nc_grid1_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_open(grid2_file, NF_NOWRITE, nc_grid2_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_grid1_id, 'grid_size', nc_grid1size_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_grid1_id, nc_grid1size_id, grid1_size)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_grid2_id, 'grid_size', nc_grid2size_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_grid2_id, nc_grid2size_id, grid2_size)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_grid1_id, 'grid_rank', nc_grid1rank_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_grid1_id, nc_grid1rank_id, grid1_rank)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_grid2_id, 'grid_rank', nc_grid2rank_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_grid2_id, nc_grid2rank_id, grid2_rank)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_grid1_id,'grid_corners',nc_grid1corn_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_grid1_id,nc_grid1corn_id,grid1_corners)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_grid2_id,'grid_corners',nc_grid2corn_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_grid2_id,nc_grid2corn_id,grid2_corners)
+      call netcdf_error_handler(ncstat)
+
+      allocate( grid1_dims(grid1_rank),
+     &          grid2_dims(grid2_rank))
+
+      ncstat = nf_get_att_text(nc_grid1_id, nf_global, 'title',
+     &                         grid1_name)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_att_text(nc_grid2_id, nf_global, 'title',
+     &                         grid2_name)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     allocate grid coordinates/masks and read data
+!
+!-----------------------------------------------------------------------
+
+      allocate( grid1_mask      (grid1_size),
+     &          grid2_mask      (grid2_size),
+     &          grid1_center_lat(grid1_size), 
+     &          grid1_center_lon(grid1_size),
+     &          grid2_center_lat(grid2_size), 
+     &          grid2_center_lon(grid2_size),
+     &          grid1_area      (grid1_size),
+     &          grid2_area      (grid2_size),
+     &          grid1_frac      (grid1_size),
+     &          grid2_frac      (grid2_size),
+     &          grid1_corner_lat(grid1_corners, grid1_size),
+     &          grid1_corner_lon(grid1_corners, grid1_size),
+     &          grid2_corner_lat(grid2_corners, grid2_size),
+     &          grid2_corner_lon(grid2_corners, grid2_size),
+     &          grid1_bound_box (4            , grid1_size),
+     &          grid2_bound_box (4            , grid2_size))
+
+      allocate(imask(grid1_size))
+
+      ncstat = nf_inq_varid(nc_grid1_id, 'grid_dims', nc_grid1dims_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid1_id, 'grid_imask', nc_grd1imask_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid1_id, 'grid_center_lat', 
+     &                                   nc_grd1cntrlat_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid1_id, 'grid_center_lon', 
+     &                                   nc_grd1cntrlon_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid1_id, 'grid_corner_lat', 
+     &                                   nc_grd1crnrlat_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid1_id, 'grid_corner_lon', 
+     &                                   nc_grd1crnrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_grid1_id, nc_grid1dims_id, grid1_dims)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_grid1_id, nc_grd1imask_id, imask)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_grid1_id, nc_grd1cntrlat_id, 
+     &                                       grid1_center_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_grid1_id, nc_grd1cntrlon_id, 
+     &                                       grid1_center_lon)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_grid1_id, nc_grd1crnrlat_id, 
+     &                                       grid1_corner_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_grid1_id, nc_grd1crnrlon_id, 
+     &                                       grid1_corner_lon)
+      call netcdf_error_handler(ncstat)
+
+      if (luse_grid1_area) then
+        allocate (grid1_area_in(grid1_size))
+        ncstat = nf_inq_varid(nc_grid1_id, 'grid_area', nc_grid1area_id)
+        call netcdf_error_handler(ncstat)
+        ncstat = nf_get_var_double(nc_grid1_id, nc_grid1area_id, 
+     &                                          grid1_area_in)
+        call netcdf_error_handler(ncstat)
+      endif
+
+      grid1_area = zero
+      grid1_frac = zero
+
+!-----------------------------------------------------------------------
+!
+!     initialize logical mask and convert lat/lon units if required
+!
+!-----------------------------------------------------------------------
+
+      where (imask == 1)
+        grid1_mask = .true.
+      elsewhere
+        grid1_mask = .false.
+      endwhere
+      deallocate(imask)
+
+      grid1_units = ' '
+      ncstat = nf_get_att_text(nc_grid1_id, nc_grd1cntrlat_id, 'units',
+     &                         grid1_units)
+      call netcdf_error_handler(ncstat)
+
+      select case (grid1_units(1:7))
+      case ('degrees')
+
+        grid1_center_lat = grid1_center_lat*deg2rad
+        grid1_center_lon = grid1_center_lon*deg2rad
+
+      case ('radians')
+
+        !*** no conversion necessary
+
+      case default
+
+        print *,'unknown units supplied for grid1 center lat/lon: '
+        print *,'proceeding assuming radians'
+
+      end select
+
+      grid1_units = ' '
+      ncstat = nf_get_att_text(nc_grid1_id, nc_grd1crnrlat_id, 'units',
+     &                         grid1_units)
+      call netcdf_error_handler(ncstat)
+
+      select case (grid1_units(1:7))
+      case ('degrees')
+
+        grid1_corner_lat = grid1_corner_lat*deg2rad
+        grid1_corner_lon = grid1_corner_lon*deg2rad
+
+      case ('radians')
+
+        !*** no conversion necessary
+
+      case default
+
+        print *,'unknown units supplied for grid1 corner lat/lon: '
+        print *,'proceeding assuming radians'
+
+      end select
+
+      ncstat = nf_close(nc_grid1_id)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     read data for grid 2
+!
+!-----------------------------------------------------------------------
+
+      allocate(imask(grid2_size))
+
+      ncstat = nf_inq_varid(nc_grid2_id, 'grid_dims', nc_grid2dims_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid2_id, 'grid_imask', nc_grd2imask_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid2_id, 'grid_center_lat', 
+     &                                   nc_grd2cntrlat_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid2_id, 'grid_center_lon', 
+     &                                   nc_grd2cntrlon_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid2_id, 'grid_corner_lat', 
+     &                                   nc_grd2crnrlat_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid2_id, 'grid_corner_lon', 
+     &                                   nc_grd2crnrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_grid2_id, nc_grid2dims_id, grid2_dims)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_grid2_id, nc_grd2imask_id, imask)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_grid2_id, nc_grd2cntrlat_id, 
+     &                                       grid2_center_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_grid2_id, nc_grd2cntrlon_id, 
+     &                                       grid2_center_lon)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_grid2_id, nc_grd2crnrlat_id, 
+     &                                       grid2_corner_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_grid2_id, nc_grd2crnrlon_id, 
+     &                                       grid2_corner_lon)
+      call netcdf_error_handler(ncstat)
+
+      if (luse_grid2_area) then
+        allocate (grid2_area_in(grid2_size))
+        ncstat = nf_inq_varid(nc_grid2_id, 'grid_area', nc_grid2area_id)
+        call netcdf_error_handler(ncstat)
+        ncstat = nf_get_var_double(nc_grid2_id, nc_grid2area_id, 
+     &                                          grid2_area_in)
+        call netcdf_error_handler(ncstat)
+      endif
+
+      grid2_area = zero
+      grid2_frac = zero
+
+!-----------------------------------------------------------------------
+!
+!     initialize logical mask and convert lat/lon units if required
+!
+!-----------------------------------------------------------------------
+
+      where (imask == 1)
+        grid2_mask = .true.
+      elsewhere
+        grid2_mask = .false.
+      endwhere
+      deallocate(imask)
+
+      grid2_units = ' '
+      ncstat = nf_get_att_text(nc_grid2_id, nc_grd2cntrlat_id, 'units',
+     &                         grid2_units)
+      call netcdf_error_handler(ncstat)
+
+      select case (grid2_units(1:7))
+      case ('degrees')
+
+        grid2_center_lat = grid2_center_lat*deg2rad
+        grid2_center_lon = grid2_center_lon*deg2rad
+
+      case ('radians')
+
+        !*** no conversion necessary
+
+      case default
+
+        print *,'unknown units supplied for grid2 center lat/lon: '
+        print *,'proceeding assuming radians'
+
+      end select
+
+      grid2_units = ' '
+      ncstat = nf_get_att_text(nc_grid2_id, nc_grd2crnrlat_id, 'units',
+     &                         grid2_units)
+      call netcdf_error_handler(ncstat)
+
+      select case (grid2_units(1:7))
+      case ('degrees')
+
+        grid2_corner_lat = grid2_corner_lat*deg2rad
+        grid2_corner_lon = grid2_corner_lon*deg2rad
+
+      case ('radians')
+
+        !*** no conversion necessary
+
+      case default
+
+        print *,'no units supplied for grid2 corner lat/lon: '
+        print *,'proceeding assuming radians'
+
+      end select
+
+      ncstat = nf_close(nc_grid2_id)
+      call netcdf_error_handler(ncstat)
+
+
+!-----------------------------------------------------------------------
+!
+!     convert longitudes to 0,2pi interval
+!
+!-----------------------------------------------------------------------
+
+      where (grid1_center_lon .gt. pi2)  grid1_center_lon =
+     &                                   grid1_center_lon - pi2
+      where (grid1_center_lon .lt. zero) grid1_center_lon =
+     &                                   grid1_center_lon + pi2
+      where (grid2_center_lon .gt. pi2)  grid2_center_lon =
+     &                                   grid2_center_lon - pi2
+      where (grid2_center_lon .lt. zero) grid2_center_lon =
+     &                                   grid2_center_lon + pi2
+      where (grid1_corner_lon .gt. pi2)  grid1_corner_lon =
+     &                                   grid1_corner_lon - pi2
+      where (grid1_corner_lon .lt. zero) grid1_corner_lon =
+     &                                   grid1_corner_lon + pi2
+      where (grid2_corner_lon .gt. pi2)  grid2_corner_lon =
+     &                                   grid2_corner_lon - pi2
+      where (grid2_corner_lon .lt. zero) grid2_corner_lon =
+     &                                   grid2_corner_lon + pi2
+
+!-----------------------------------------------------------------------
+!
+!     make sure input latitude range is within the machine values
+!     for +/- pi/2 
+!
+!-----------------------------------------------------------------------
+
+      where (grid1_center_lat >  pih) grid1_center_lat =  pih
+      where (grid1_corner_lat >  pih) grid1_corner_lat =  pih
+      where (grid1_center_lat < -pih) grid1_center_lat = -pih
+      where (grid1_corner_lat < -pih) grid1_corner_lat = -pih
+
+      where (grid2_center_lat >  pih) grid2_center_lat =  pih
+      where (grid2_corner_lat >  pih) grid2_corner_lat =  pih
+      where (grid2_center_lat < -pih) grid2_center_lat = -pih
+      where (grid2_corner_lat < -pih) grid2_corner_lat = -pih
+
+!-----------------------------------------------------------------------
+!
+!     compute bounding boxes for restricting future grid searches
+!
+!-----------------------------------------------------------------------
+
+      if (.not. luse_grid_centers) then
+        grid1_bound_box(1,:) = minval(grid1_corner_lat, DIM=1)
+        grid1_bound_box(2,:) = maxval(grid1_corner_lat, DIM=1)
+        grid1_bound_box(3,:) = minval(grid1_corner_lon, DIM=1)
+        grid1_bound_box(4,:) = maxval(grid1_corner_lon, DIM=1)
+
+        grid2_bound_box(1,:) = minval(grid2_corner_lat, DIM=1)
+        grid2_bound_box(2,:) = maxval(grid2_corner_lat, DIM=1)
+        grid2_bound_box(3,:) = minval(grid2_corner_lon, DIM=1)
+        grid2_bound_box(4,:) = maxval(grid2_corner_lon, DIM=1)
+
+      else
+
+        nx = grid1_dims(1)
+        ny = grid1_dims(2)
+
+        do n=1,grid1_size
+
+          !*** find E, N and NE neighbour points to this grid point
+
+          j = (n - 1)/nx +1
+          i = n - (j-1)*nx
+
+          if (i < nx) then
+            ip1 = i + 1
+          else
+            !*** assume cyclic
+            ip1 = 1
+            !*** but if it is not, correct
+            e_add = (j - 1)*nx + ip1
+            if (abs(grid1_center_lat(e_add) - 
+     &              grid1_center_lat(n   )) > pih) then
+              ip1 = i
+            endif
+          endif
+
+          if (j < ny) then
+            jp1 = j+1
+          else
+            !*** assume cyclic
+            jp1 = 1
+            !*** but if it is not, correct
+            n_add = (jp1 - 1)*nx + i
+            if (abs(grid1_center_lat(n_add) - 
+     &              grid1_center_lat(n   )) > pih) then
+              jp1 = j
+            endif
+          endif
+
+          n_add = (jp1 - 1)*nx + i
+          e_add = (j - 1)*nx + ip1
+          ne_add = (jp1 - 1)*nx + ip1
+
+          !*** find E, N and NE lat/lon coords to form the bounding box
+
+          tmp_lats(1) = grid1_center_lat(n)
+          tmp_lats(2) = grid1_center_lat(e_add)
+          tmp_lats(3) = grid1_center_lat(ne_add)
+          tmp_lats(4) = grid1_center_lat(n_add)
+
+          tmp_lons(1) = grid1_center_lon(n)
+          tmp_lons(2) = grid1_center_lon(e_add)
+          tmp_lons(3) = grid1_center_lon(ne_add)
+          tmp_lons(4) = grid1_center_lon(n_add)
+
+          grid1_bound_box(1,n) = minval(tmp_lats)
+          grid1_bound_box(2,n) = maxval(tmp_lats)
+          grid1_bound_box(3,n) = minval(tmp_lons)
+          grid1_bound_box(4,n) = maxval(tmp_lons)
+        end do
+
+        nx = grid2_dims(1)
+        ny = grid2_dims(2)
+
+        do n=1,grid2_size
+
+          !*** find E, N and NE neighbour points to this grid point
+
+          j = (n - 1)/nx +1
+          i = n - (j-1)*nx
+
+          if (i < nx) then
+            ip1 = i + 1
+          else
+            !*** assume cyclic
+            ip1 = 1
+            !*** but if it is not, correct
+            e_add = (j - 1)*nx + ip1
+            if (abs(grid2_center_lat(e_add) - 
+     &              grid2_center_lat(n   )) > pih) then
+              ip1 = i
+            endif
+          endif
+
+          if (j < ny) then
+            jp1 = j+1
+          else
+            !*** assume cyclic
+            jp1 = 1
+            !*** but if it is not, correct
+            n_add = (jp1 - 1)*nx + i
+            if (abs(grid2_center_lat(n_add) - 
+     &              grid2_center_lat(n   )) > pih) then
+              jp1 = j
+            endif
+          endif
+
+          n_add = (jp1 - 1)*nx + i
+          e_add = (j - 1)*nx + ip1
+          ne_add = (jp1 - 1)*nx + ip1
+
+          !*** find E, N and NE lat/lon coords to form the bounding box
+
+          tmp_lats(1) = grid2_center_lat(n)
+          tmp_lats(2) = grid2_center_lat(e_add)
+          tmp_lats(3) = grid2_center_lat(ne_add)
+          tmp_lats(4) = grid2_center_lat(n_add)
+
+          tmp_lons(1) = grid2_center_lon(n)
+          tmp_lons(2) = grid2_center_lon(e_add)
+          tmp_lons(3) = grid2_center_lon(ne_add)
+          tmp_lons(4) = grid2_center_lon(n_add)
+
+          grid2_bound_box(1,n) = minval(tmp_lats)
+          grid2_bound_box(2,n) = maxval(tmp_lats)
+          grid2_bound_box(3,n) = minval(tmp_lons)
+          grid2_bound_box(4,n) = maxval(tmp_lons)
+        end do
+
+      endif
+
+      where (abs(grid1_bound_box(4,:) - grid1_bound_box(3,:)) > pi)
+        grid1_bound_box(3,:) = zero
+        grid1_bound_box(4,:) = pi2
+      end where
+
+      where (abs(grid2_bound_box(4,:) - grid2_bound_box(3,:)) > pi)
+        grid2_bound_box(3,:) = zero
+        grid2_bound_box(4,:) = pi2
+      end where
+
+      !***
+      !*** try to check for cells that overlap poles
+      !***
+
+      where (grid1_center_lat > grid1_bound_box(2,:))
+     &  grid1_bound_box(2,:) = pih
+
+      where (grid1_center_lat < grid1_bound_box(1,:))
+     &  grid1_bound_box(1,:) = -pih
+
+      where (grid2_center_lat > grid2_bound_box(2,:))
+     &  grid2_bound_box(2,:) = pih
+
+      where (grid2_center_lat < grid2_bound_box(1,:))
+     &  grid2_bound_box(1,:) = -pih
+
+!-----------------------------------------------------------------------
+!
+!     set up and assign address ranges to search bins in order to 
+!     further restrict later searches
+!
+!-----------------------------------------------------------------------
+
+      select case (restrict_type)
+
+      case ('latitude')
+        write(stdout,*) 'Using latitude bins to restrict search.'
+
+        allocate(bin_addr1(2,num_srch_bins))
+        allocate(bin_addr2(2,num_srch_bins))
+        allocate(bin_lats (2,num_srch_bins))
+        allocate(bin_lons (2,num_srch_bins))
+
+        dlat = pi/num_srch_bins
+
+        do n=1,num_srch_bins
+          bin_lats(1,n) = (n-1)*dlat - pih
+          bin_lats(2,n) =     n*dlat - pih
+          bin_lons(1,n) = zero
+          bin_lons(2,n) = pi2
+          bin_addr1(1,n) = grid1_size + 1
+          bin_addr1(2,n) = 0
+          bin_addr2(1,n) = grid2_size + 1
+          bin_addr2(2,n) = 0
+        end do
+
+        do nele=1,grid1_size
+          do n=1,num_srch_bins
+            if (grid1_bound_box(1,nele) <= bin_lats(2,n) .and.
+     &          grid1_bound_box(2,nele) >= bin_lats(1,n)) then
+              bin_addr1(1,n) = min(nele,bin_addr1(1,n))
+              bin_addr1(2,n) = max(nele,bin_addr1(2,n))
+            endif
+          end do
+        end do
+
+        do nele=1,grid2_size
+          do n=1,num_srch_bins
+            if (grid2_bound_box(1,nele) <= bin_lats(2,n) .and.
+     &          grid2_bound_box(2,nele) >= bin_lats(1,n)) then
+              bin_addr2(1,n) = min(nele,bin_addr2(1,n))
+              bin_addr2(2,n) = max(nele,bin_addr2(2,n))
+            endif
+          end do
+        end do
+
+      case ('latlon')
+        write(stdout,*) 'Using lat/lon boxes to restrict search.'
+
+        dlat = pi /num_srch_bins
+        dlon = pi2/num_srch_bins
+
+        allocate(bin_addr1(2,num_srch_bins*num_srch_bins))
+        allocate(bin_addr2(2,num_srch_bins*num_srch_bins))
+        allocate(bin_lats (2,num_srch_bins*num_srch_bins))
+        allocate(bin_lons (2,num_srch_bins*num_srch_bins))
+
+        n = 0
+        do j=1,num_srch_bins
+        do i=1,num_srch_bins
+          n = n + 1
+
+          bin_lats(1,n) = (j-1)*dlat - pih
+          bin_lats(2,n) =     j*dlat - pih
+          bin_lons(1,n) = (i-1)*dlon
+          bin_lons(2,n) =     i*dlon
+          bin_addr1(1,n) = grid1_size + 1
+          bin_addr1(2,n) = 0
+          bin_addr2(1,n) = grid2_size + 1
+          bin_addr2(2,n) = 0
+        end do
+        end do
+
+        num_srch_bins = num_srch_bins**2
+
+        do nele=1,grid1_size
+          do n=1,num_srch_bins
+            if (grid1_bound_box(1,nele) <= bin_lats(2,n) .and.
+     &          grid1_bound_box(2,nele) >= bin_lats(1,n) .and.
+     &          grid1_bound_box(3,nele) <= bin_lons(2,n) .and.
+     &          grid1_bound_box(4,nele) >= bin_lons(1,n)) then
+              bin_addr1(1,n) = min(nele,bin_addr1(1,n))
+              bin_addr1(2,n) = max(nele,bin_addr1(2,n))
+            endif
+          end do
+        end do
+
+        do nele=1,grid2_size
+          do n=1,num_srch_bins
+            if (grid2_bound_box(1,nele) <= bin_lats(2,n) .and.
+     &          grid2_bound_box(2,nele) >= bin_lats(1,n) .and.
+     &          grid2_bound_box(3,nele) <= bin_lons(2,n) .and.
+     &          grid2_bound_box(4,nele) >= bin_lons(1,n)) then
+              bin_addr2(1,n) = min(nele,bin_addr2(1,n))
+              bin_addr2(2,n) = max(nele,bin_addr2(2,n))
+            endif
+          end do
+        end do
+
+      case default
+        stop 'unknown search restriction method'
+      end select
+
+!-----------------------------------------------------------------------
+
+      end subroutine grid_init
+
+!***********************************************************************
+
+      end module grids
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+

+ 797 - 0
interpolation/scrip_sources/grids_one.f

@@ -0,0 +1,797 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+!     This module reads in and initializes two grids for remapping.
+!     NOTE: grid1 must be the master grid -- the grid that determines
+!           which cells participate (e.g. land mask) and the fractional
+!           area of grid2 cells that participate in the remapping.
+!
+!-----------------------------------------------------------------------
+!
+!     CVS:$Id: grids.f,v 1.6 2001/08/21 21:06:41 pwjones Exp $
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!***********************************************************************
+
+      module grids_one
+
+!-----------------------------------------------------------------------
+!     Single-file variant of module grids: grid data structures are
+!     filled by grid_init_one from one netCDF grid file (see below),
+!     instead of from two separate files as in module grids.
+!-----------------------------------------------------------------------
+
+      use kinds_mod    ! defines data types
+      use constants    ! common constants
+      use iounits      ! I/O unit manager
+      use netcdf_mod   ! netCDF stuff
+
+      implicit none
+
+!-----------------------------------------------------------------------
+!
+!     variables that describe each grid
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), save ::
+     &             grid1_size, grid2_size, ! total points on each grid
+     &             grid1_rank, grid2_rank, ! rank of each grid
+     &             grid1_corners, grid2_corners ! number of corners
+                                                ! for each grid cell
+
+      integer (kind=int_kind), dimension(:), allocatable, save ::
+     &             grid1_dims, grid2_dims  ! size of each grid dimension
+
+      character(char_len), save :: 
+     &             grid1_name, grid2_name  ! name for each grid
+
+      character (char_len), save :: 
+     &             grid1_units, ! units for grid coords (degs/radians)
+     &             grid2_units  ! units for grid coords
+
+      real (kind=dbl_kind), parameter ::
+     &      deg2rad = pi/180.   ! conversion for deg to rads
+
+!-----------------------------------------------------------------------
+!
+!     grid coordinates and masks
+!     (all angles are stored in radians after grid_init_one)
+!
+!-----------------------------------------------------------------------
+
+      logical (kind=log_kind), dimension(:), allocatable, save ::
+     &             grid1_mask,        ! flag which cells participate
+     &             grid2_mask         ! flag which cells participate
+
+      real (kind=dbl_kind), dimension(:), allocatable, save ::
+     &             grid1_center_lat,  ! lat/lon coordinates for
+     &             grid1_center_lon,  ! each grid center in radians
+     &             grid2_center_lat, 
+     &             grid2_center_lon,
+     &             grid1_area,        ! tot area of each grid1 cell
+     &             grid2_area,        ! tot area of each grid2 cell
+     &             grid1_area_in,     ! area of grid1 cell from file
+     &             grid2_area_in,     ! area of grid2 cell from file
+     &             grid1_frac,        ! fractional area of grid cells
+     &             grid2_frac         ! participating in remapping
+
+      real (kind=dbl_kind), dimension(:,:), allocatable, save ::
+     &             grid1_corner_lat,  ! lat/lon coordinates for
+     &             grid1_corner_lon,  ! each grid corner in radians
+     &             grid2_corner_lat, 
+     &             grid2_corner_lon
+
+!     NOTE(review): the three flags below are expected to be set by the
+!     caller (input-file reader) before grid_init_one is invoked.
+      logical (kind=log_kind), save ::
+     &             luse_grid_centers ! use centers for bounding boxes
+     &,            luse_grid1_area   ! use area from grid file
+     &,            luse_grid2_area   ! use area from grid file
+
+      real (kind=dbl_kind), dimension(:,:), allocatable, save ::
+     &             grid1_bound_box,  ! lat/lon bounding box for use
+     &             grid2_bound_box   ! in restricting grid searches
+
+!-----------------------------------------------------------------------
+!
+!     bins for restricting searches
+!
+!-----------------------------------------------------------------------
+
+      character (char_len), save ::
+     &        restrict_type  ! type of bins to use
+
+      integer (kind=int_kind), save ::
+     &        num_srch_bins  ! num of bins for restricted srch
+                             ! (squared in place for 'latlon' bins)
+
+      integer (kind=int_kind), dimension(:,:), allocatable, save ::
+     &        bin_addr1, ! min,max adds for grid1 cells in this lat bin
+     &        bin_addr2  ! min,max adds for grid2 cells in this lat bin
+
+      real(kind=dbl_kind), dimension(:,:), allocatable, save ::
+     &        bin_lats   ! min,max latitude for each search bin
+     &,       bin_lons   ! min,max longitude for each search bin
+
+!***********************************************************************
+
+      contains
+
+!***********************************************************************
+
+      subroutine grid_init_one(grid_file)
+
+!-----------------------------------------------------------------------
+!
+!     this routine reads grid info from a single grid file and makes
+!     any necessary changes (e.g. for 0,2pi longitude range).  Both
+!     the grid1 and grid2 data structures are filled from the same
+!     file, so grid1 and grid2 are identical on exit.
+!     NOTE(review): this was adapted from grids.f (two-file version);
+!     all netCDF accesses now go through the single file id.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     input variables
+!
+!-----------------------------------------------------------------------
+
+      character(char_len), intent(in) :: 
+     &             grid_file  ! grid data file
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind) :: 
+     &  n      ! loop counter
+     &, nele   ! element loop counter
+     &, i,j    ! logical 2d addresses
+     &, ip1,jp1
+     &, n_add, e_add, ne_add
+     &, nx, ny
+
+      integer (kind=int_kind) :: 
+     &         ncstat,           ! netCDF status variable
+     &         nc_grid_id,       ! netCDF grid file id
+     &         nc_gridsize_id,   ! netCDF grid size dim id
+     &         nc_gridcorn_id,   ! netCDF grid corner dim id
+     &         nc_gridrank_id,   ! netCDF grid rank dim id
+     &         nc_gridarea_id,   ! netCDF grid area var id
+     &         nc_griddims_id,   ! netCDF grid dimension size id
+     &         nc_grdmask_id,    ! netCDF grid imask var id
+     &         nc_grdcrnrlat_id, ! netCDF grid corner lat var id
+     &         nc_grdcrnrlon_id, ! netCDF grid corner lon var id
+     &         nc_grdcntrlat_id, ! netCDF grid center lat var id
+     &         nc_grdcntrlon_id  ! netCDF grid center lon var id
+
+      integer (kind=int_kind), dimension(:), allocatable :: 
+     &                            imask ! integer mask read from file
+
+      real (kind=dbl_kind) :: 
+     &  dlat,dlon           ! lat/lon intervals for search bins
+
+      real (kind=dbl_kind), dimension(4) ::
+     &  tmp_lats, tmp_lons  ! temps for computing bounding boxes
+
+!-----------------------------------------------------------------------
+!
+!     open grid file and read grid size/rank/corner dims and name
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_open(grid_file, NF_NOWRITE, nc_grid_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_grid_id, 'grid_size', nc_gridsize_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_grid_id, nc_gridsize_id, grid1_size)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_grid_id, 'grid_rank', nc_gridrank_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_grid_id, nc_gridrank_id, grid1_rank)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_grid_id, 'grid_corners', nc_gridcorn_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_grid_id, nc_gridcorn_id, grid1_corners)
+      call netcdf_error_handler(ncstat)
+
+      !*** grid2 mirrors grid1 when reading a single file
+      grid2_size    = grid1_size
+      grid2_rank    = grid1_rank
+      grid2_corners = grid1_corners
+
+      allocate( grid1_dims(grid1_rank),
+     &          grid2_dims(grid2_rank))
+
+      ncstat = nf_get_att_text(nc_grid_id, nf_global, 'title',
+     &                         grid1_name)
+      call netcdf_error_handler(ncstat)
+      grid2_name = grid1_name
+
+!-----------------------------------------------------------------------
+!
+!     allocate grid coordinates/masks and read data
+!
+!-----------------------------------------------------------------------
+
+      allocate( grid1_mask      (grid1_size),
+     &          grid2_mask      (grid2_size),
+     &          grid1_center_lat(grid1_size), 
+     &          grid1_center_lon(grid1_size),
+     &          grid2_center_lat(grid2_size), 
+     &          grid2_center_lon(grid2_size),
+     &          grid1_area      (grid1_size),
+     &          grid2_area      (grid2_size),
+     &          grid1_frac      (grid1_size),
+     &          grid2_frac      (grid2_size),
+     &          grid1_corner_lat(grid1_corners, grid1_size),
+     &          grid1_corner_lon(grid1_corners, grid1_size),
+     &          grid2_corner_lat(grid2_corners, grid2_size),
+     &          grid2_corner_lon(grid2_corners, grid2_size),
+     &          grid1_bound_box (4            , grid1_size),
+     &          grid2_bound_box (4            , grid2_size))
+
+      allocate(imask(grid1_size))
+
+      ncstat = nf_inq_varid(nc_grid_id, 'grid_dims', nc_griddims_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid_id, 'grid_imask', nc_grdmask_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid_id, 'grid_center_lat', 
+     &                                   nc_grdcntrlat_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid_id, 'grid_center_lon', 
+     &                                   nc_grdcntrlon_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid_id, 'grid_corner_lat', 
+     &                                   nc_grdcrnrlat_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid_id, 'grid_corner_lon', 
+     &                                   nc_grdcrnrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_grid_id, nc_griddims_id, grid1_dims)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_grid_id, nc_grdmask_id, imask)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_grid_id, nc_grdcntrlat_id, 
+     &                                       grid1_center_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_grid_id, nc_grdcntrlon_id, 
+     &                                       grid1_center_lon)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_grid_id, nc_grdcrnrlat_id, 
+     &                                       grid1_corner_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_grid_id, nc_grdcrnrlon_id, 
+     &                                       grid1_corner_lon)
+      call netcdf_error_handler(ncstat)
+
+      if (luse_grid1_area) then
+        allocate (grid1_area_in(grid1_size))
+        ncstat = nf_inq_varid(nc_grid_id, 'grid_area', nc_gridarea_id)
+        call netcdf_error_handler(ncstat)
+        ncstat = nf_get_var_double(nc_grid_id, nc_gridarea_id, 
+     &                                         grid1_area_in)
+        call netcdf_error_handler(ncstat)
+      endif
+
+      grid1_area = zero
+      grid1_frac = zero
+
+!-----------------------------------------------------------------------
+!
+!     initialize logical mask and convert lat/lon units if required
+!
+!-----------------------------------------------------------------------
+
+      where (imask == 1)
+        grid1_mask = .true.
+      elsewhere
+        grid1_mask = .false.
+      endwhere
+      deallocate(imask)
+
+      grid1_units = ' '
+      ncstat = nf_get_att_text(nc_grid_id, nc_grdcntrlat_id, 'units',
+     &                         grid1_units)
+      call netcdf_error_handler(ncstat)
+
+      select case (grid1_units(1:7))
+      case ('degrees')
+
+        grid1_center_lat = grid1_center_lat*deg2rad
+        grid1_center_lon = grid1_center_lon*deg2rad
+
+      case ('radians')
+
+        !*** no conversion necessary
+
+      case default
+
+        print *,'unknown units supplied for grid1 center lat/lon: '
+        print *,'proceeding assuming radians'
+
+      end select
+
+      grid1_units = ' '
+      ncstat = nf_get_att_text(nc_grid_id, nc_grdcrnrlat_id, 'units',
+     &                         grid1_units)
+      call netcdf_error_handler(ncstat)
+
+      select case (grid1_units(1:7))
+      case ('degrees')
+
+        grid1_corner_lat = grid1_corner_lat*deg2rad
+        grid1_corner_lon = grid1_corner_lon*deg2rad
+
+      case ('radians')
+
+        !*** no conversion necessary
+
+      case default
+
+        print *,'unknown units supplied for grid1 corner lat/lon: '
+        print *,'proceeding assuming radians'
+
+      end select
+
+      !*** keep the file open: grid2 data are read from the same file
+
+!-----------------------------------------------------------------------
+!
+!     read data for grid 2 (same file, same variables as grid 1)
+!
+!-----------------------------------------------------------------------
+
+      allocate(imask(grid2_size))
+
+      ncstat = nf_inq_varid(nc_grid_id, 'grid_dims', nc_griddims_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid_id, 'grid_imask', nc_grdmask_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid_id, 'grid_center_lat', 
+     &                                   nc_grdcntrlat_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid_id, 'grid_center_lon', 
+     &                                   nc_grdcntrlon_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid_id, 'grid_corner_lat', 
+     &                                   nc_grdcrnrlat_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_varid(nc_grid_id, 'grid_corner_lon', 
+     &                                   nc_grdcrnrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_grid_id, nc_griddims_id, grid2_dims)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_grid_id, nc_grdmask_id, imask)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_grid_id, nc_grdcntrlat_id, 
+     &                                       grid2_center_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_grid_id, nc_grdcntrlon_id, 
+     &                                       grid2_center_lon)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_grid_id, nc_grdcrnrlat_id, 
+     &                                       grid2_corner_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_grid_id, nc_grdcrnrlon_id, 
+     &                                       grid2_corner_lon)
+      call netcdf_error_handler(ncstat)
+
+      if (luse_grid2_area) then
+        allocate (grid2_area_in(grid2_size))
+        ncstat = nf_inq_varid(nc_grid_id, 'grid_area', nc_gridarea_id)
+        call netcdf_error_handler(ncstat)
+        ncstat = nf_get_var_double(nc_grid_id, nc_gridarea_id, 
+     &                                         grid2_area_in)
+        call netcdf_error_handler(ncstat)
+      endif
+
+      grid2_area = zero
+      grid2_frac = zero
+
+!-----------------------------------------------------------------------
+!
+!     initialize logical mask and convert lat/lon units if required
+!
+!-----------------------------------------------------------------------
+
+      where (imask == 1)
+        grid2_mask = .true.
+      elsewhere
+        grid2_mask = .false.
+      endwhere
+      deallocate(imask)
+
+      grid2_units = ' '
+      ncstat = nf_get_att_text(nc_grid_id, nc_grdcntrlat_id, 'units',
+     &                         grid2_units)
+      call netcdf_error_handler(ncstat)
+
+      select case (grid2_units(1:7))
+      case ('degrees')
+
+        grid2_center_lat = grid2_center_lat*deg2rad
+        grid2_center_lon = grid2_center_lon*deg2rad
+
+      case ('radians')
+
+        !*** no conversion necessary
+
+      case default
+
+        print *,'unknown units supplied for grid2 center lat/lon: '
+        print *,'proceeding assuming radians'
+
+      end select
+
+      grid2_units = ' '
+      ncstat = nf_get_att_text(nc_grid_id, nc_grdcrnrlat_id, 'units',
+     &                         grid2_units)
+      call netcdf_error_handler(ncstat)
+
+      select case (grid2_units(1:7))
+      case ('degrees')
+
+        grid2_corner_lat = grid2_corner_lat*deg2rad
+        grid2_corner_lon = grid2_corner_lon*deg2rad
+
+      case ('radians')
+
+        !*** no conversion necessary
+
+      case default
+
+        print *,'no units supplied for grid2 corner lat/lon: '
+        print *,'proceeding assuming radians'
+
+      end select
+
+      ncstat = nf_close(nc_grid_id)
+      call netcdf_error_handler(ncstat)
+
+
+!-----------------------------------------------------------------------
+!
+!     convert longitudes to 0,2pi interval
+!
+!-----------------------------------------------------------------------
+
+      where (grid1_center_lon .gt. pi2)  grid1_center_lon =
+     &                                   grid1_center_lon - pi2
+      where (grid1_center_lon .lt. zero) grid1_center_lon =
+     &                                   grid1_center_lon + pi2
+      where (grid2_center_lon .gt. pi2)  grid2_center_lon =
+     &                                   grid2_center_lon - pi2
+      where (grid2_center_lon .lt. zero) grid2_center_lon =
+     &                                   grid2_center_lon + pi2
+      where (grid1_corner_lon .gt. pi2)  grid1_corner_lon =
+     &                                   grid1_corner_lon - pi2
+      where (grid1_corner_lon .lt. zero) grid1_corner_lon =
+     &                                   grid1_corner_lon + pi2
+      where (grid2_corner_lon .gt. pi2)  grid2_corner_lon =
+     &                                   grid2_corner_lon - pi2
+      where (grid2_corner_lon .lt. zero) grid2_corner_lon =
+     &                                   grid2_corner_lon + pi2
+
+!-----------------------------------------------------------------------
+!
+!     make sure input latitude range is within the machine values
+!     for +/- pi/2 
+!
+!-----------------------------------------------------------------------
+
+      where (grid1_center_lat >  pih) grid1_center_lat =  pih
+      where (grid1_corner_lat >  pih) grid1_corner_lat =  pih
+      where (grid1_center_lat < -pih) grid1_center_lat = -pih
+      where (grid1_corner_lat < -pih) grid1_corner_lat = -pih
+
+      where (grid2_center_lat >  pih) grid2_center_lat =  pih
+      where (grid2_corner_lat >  pih) grid2_corner_lat =  pih
+      where (grid2_center_lat < -pih) grid2_center_lat = -pih
+      where (grid2_corner_lat < -pih) grid2_corner_lat = -pih
+
+!-----------------------------------------------------------------------
+!
+!     compute bounding boxes for restricting future grid searches
+!
+!-----------------------------------------------------------------------
+
+      if (.not. luse_grid_centers) then
+        grid1_bound_box(1,:) = minval(grid1_corner_lat, DIM=1)
+        grid1_bound_box(2,:) = maxval(grid1_corner_lat, DIM=1)
+        grid1_bound_box(3,:) = minval(grid1_corner_lon, DIM=1)
+        grid1_bound_box(4,:) = maxval(grid1_corner_lon, DIM=1)
+
+        grid2_bound_box(1,:) = minval(grid2_corner_lat, DIM=1)
+        grid2_bound_box(2,:) = maxval(grid2_corner_lat, DIM=1)
+        grid2_bound_box(3,:) = minval(grid2_corner_lon, DIM=1)
+        grid2_bound_box(4,:) = maxval(grid2_corner_lon, DIM=1)
+
+      else
+
+        !*** use centers of this cell and its N, E, NE logical
+        !*** neighbors as the bounding box (assumes logically
+        !*** rectangular grid stored in row-major 1-d order)
+
+        nx = grid1_dims(1)
+        ny = grid1_dims(2)
+
+        do n=1,grid1_size
+
+          !*** find N,S and NE points to this grid point
+
+          j = (n - 1)/nx +1
+          i = n - (j-1)*nx
+
+          if (i < nx) then
+            ip1 = i + 1
+          else
+            !*** assume cyclic
+            ip1 = 1
+            !*** but if it is not, correct
+            e_add = (j - 1)*nx + ip1
+            if (abs(grid1_center_lat(e_add) - 
+     &              grid1_center_lat(n   )) > pih) then
+              ip1 = i
+            endif
+          endif
+
+          if (j < ny) then
+            jp1 = j+1
+          else
+            !*** assume cyclic
+            jp1 = 1
+            !*** but if it is not, correct
+            n_add = (jp1 - 1)*nx + i
+            if (abs(grid1_center_lat(n_add) - 
+     &              grid1_center_lat(n   )) > pih) then
+              jp1 = j
+            endif
+          endif
+
+          n_add = (jp1 - 1)*nx + i
+          e_add = (j - 1)*nx + ip1
+          ne_add = (jp1 - 1)*nx + ip1
+
+          !*** find N,S and NE lat/lon coords and check bounding box
+
+          tmp_lats(1) = grid1_center_lat(n)
+          tmp_lats(2) = grid1_center_lat(e_add)
+          tmp_lats(3) = grid1_center_lat(ne_add)
+          tmp_lats(4) = grid1_center_lat(n_add)
+
+          tmp_lons(1) = grid1_center_lon(n)
+          tmp_lons(2) = grid1_center_lon(e_add)
+          tmp_lons(3) = grid1_center_lon(ne_add)
+          tmp_lons(4) = grid1_center_lon(n_add)
+
+          grid1_bound_box(1,n) = minval(tmp_lats)
+          grid1_bound_box(2,n) = maxval(tmp_lats)
+          grid1_bound_box(3,n) = minval(tmp_lons)
+          grid1_bound_box(4,n) = maxval(tmp_lons)
+        end do
+
+        nx = grid2_dims(1)
+        ny = grid2_dims(2)
+
+        do n=1,grid2_size
+
+          !*** find N,S and NE points to this grid point
+
+          j = (n - 1)/nx +1
+          i = n - (j-1)*nx
+
+          if (i < nx) then
+            ip1 = i + 1
+          else
+            !*** assume cyclic
+            ip1 = 1
+            !*** but if it is not, correct
+            e_add = (j - 1)*nx + ip1
+            if (abs(grid2_center_lat(e_add) - 
+     &              grid2_center_lat(n   )) > pih) then
+              ip1 = i
+            endif
+          endif
+
+          if (j < ny) then
+            jp1 = j+1
+          else
+            !*** assume cyclic
+            jp1 = 1
+            !*** but if it is not, correct
+            n_add = (jp1 - 1)*nx + i
+            if (abs(grid2_center_lat(n_add) - 
+     &              grid2_center_lat(n   )) > pih) then
+              jp1 = j
+            endif
+          endif
+
+          n_add = (jp1 - 1)*nx + i
+          e_add = (j - 1)*nx + ip1
+          ne_add = (jp1 - 1)*nx + ip1
+
+          !*** find N,S and NE lat/lon coords and check bounding box
+
+          tmp_lats(1) = grid2_center_lat(n)
+          tmp_lats(2) = grid2_center_lat(e_add)
+          tmp_lats(3) = grid2_center_lat(ne_add)
+          tmp_lats(4) = grid2_center_lat(n_add)
+
+          tmp_lons(1) = grid2_center_lon(n)
+          tmp_lons(2) = grid2_center_lon(e_add)
+          tmp_lons(3) = grid2_center_lon(ne_add)
+          tmp_lons(4) = grid2_center_lon(n_add)
+
+          grid2_bound_box(1,n) = minval(tmp_lats)
+          grid2_bound_box(2,n) = maxval(tmp_lats)
+          grid2_bound_box(3,n) = minval(tmp_lons)
+          grid2_bound_box(4,n) = maxval(tmp_lons)
+        end do
+
+      endif
+
+      !*** cells spanning the branch cut get the full lon range
+
+      where (abs(grid1_bound_box(4,:) - grid1_bound_box(3,:)) > pi)
+        grid1_bound_box(3,:) = zero
+        grid1_bound_box(4,:) = pi2
+      end where
+
+      where (abs(grid2_bound_box(4,:) - grid2_bound_box(3,:)) > pi)
+        grid2_bound_box(3,:) = zero
+        grid2_bound_box(4,:) = pi2
+      end where
+
+      !***
+      !*** try to check for cells that overlap poles
+      !***
+
+      where (grid1_center_lat > grid1_bound_box(2,:))
+     &  grid1_bound_box(2,:) = pih
+
+      where (grid1_center_lat < grid1_bound_box(1,:))
+     &  grid1_bound_box(1,:) = -pih
+
+      where (grid2_center_lat > grid2_bound_box(2,:))
+     &  grid2_bound_box(2,:) = pih
+
+      where (grid2_center_lat < grid2_bound_box(1,:))
+     &  grid2_bound_box(1,:) = -pih
+
+!-----------------------------------------------------------------------
+!
+!     set up and assign address ranges to search bins in order to 
+!     further restrict later searches
+!
+!-----------------------------------------------------------------------
+
+      select case (restrict_type)
+
+      case ('latitude')
+        write(stdout,*) 'Using latitude bins to restrict search.'
+
+        allocate(bin_addr1(2,num_srch_bins))
+        allocate(bin_addr2(2,num_srch_bins))
+        allocate(bin_lats (2,num_srch_bins))
+        allocate(bin_lons (2,num_srch_bins))
+
+        dlat = pi/num_srch_bins
+
+        do n=1,num_srch_bins
+          bin_lats(1,n) = (n-1)*dlat - pih
+          bin_lats(2,n) =     n*dlat - pih
+          bin_lons(1,n) = zero
+          bin_lons(2,n) = pi2
+          bin_addr1(1,n) = grid1_size + 1
+          bin_addr1(2,n) = 0
+          bin_addr2(1,n) = grid2_size + 1
+          bin_addr2(2,n) = 0
+        end do
+
+        do nele=1,grid1_size
+          do n=1,num_srch_bins
+            if (grid1_bound_box(1,nele) <= bin_lats(2,n) .and.
+     &          grid1_bound_box(2,nele) >= bin_lats(1,n)) then
+              bin_addr1(1,n) = min(nele,bin_addr1(1,n))
+              bin_addr1(2,n) = max(nele,bin_addr1(2,n))
+            endif
+          end do
+        end do
+
+        do nele=1,grid2_size
+          do n=1,num_srch_bins
+            if (grid2_bound_box(1,nele) <= bin_lats(2,n) .and.
+     &          grid2_bound_box(2,nele) >= bin_lats(1,n)) then
+              bin_addr2(1,n) = min(nele,bin_addr2(1,n))
+              bin_addr2(2,n) = max(nele,bin_addr2(2,n))
+            endif
+          end do
+        end do
+
+      case ('latlon')
+        write(stdout,*) 'Using lat/lon boxes to restrict search.'
+
+        dlat = pi /num_srch_bins
+        dlon = pi2/num_srch_bins
+
+        allocate(bin_addr1(2,num_srch_bins*num_srch_bins))
+        allocate(bin_addr2(2,num_srch_bins*num_srch_bins))
+        allocate(bin_lats (2,num_srch_bins*num_srch_bins))
+        allocate(bin_lons (2,num_srch_bins*num_srch_bins))
+
+        n = 0
+        do j=1,num_srch_bins
+        do i=1,num_srch_bins
+          n = n + 1
+
+          bin_lats(1,n) = (j-1)*dlat - pih
+          bin_lats(2,n) =     j*dlat - pih
+          bin_lons(1,n) = (i-1)*dlon
+          bin_lons(2,n) =     i*dlon
+          bin_addr1(1,n) = grid1_size + 1
+          bin_addr1(2,n) = 0
+          bin_addr2(1,n) = grid2_size + 1
+          bin_addr2(2,n) = 0
+        end do
+        end do
+
+        !*** num_srch_bins now counts total 2-d bins
+        num_srch_bins = num_srch_bins**2
+
+        do nele=1,grid1_size
+          do n=1,num_srch_bins
+            if (grid1_bound_box(1,nele) <= bin_lats(2,n) .and.
+     &          grid1_bound_box(2,nele) >= bin_lats(1,n) .and.
+     &          grid1_bound_box(3,nele) <= bin_lons(2,n) .and.
+     &          grid1_bound_box(4,nele) >= bin_lons(1,n)) then
+              bin_addr1(1,n) = min(nele,bin_addr1(1,n))
+              bin_addr1(2,n) = max(nele,bin_addr1(2,n))
+            endif
+          end do
+        end do
+
+        do nele=1,grid2_size
+          do n=1,num_srch_bins
+            if (grid2_bound_box(1,nele) <= bin_lats(2,n) .and.
+     &          grid2_bound_box(2,nele) >= bin_lats(1,n) .and.
+     &          grid2_bound_box(3,nele) <= bin_lons(2,n) .and.
+     &          grid2_bound_box(4,nele) >= bin_lons(1,n)) then
+              bin_addr2(1,n) = min(nele,bin_addr2(1,n))
+              bin_addr2(2,n) = max(nele,bin_addr2(2,n))
+            endif
+          end do
+        end do
+
+      case default
+        stop 'unknown search restriction method'
+      end select
+
+!-----------------------------------------------------------------------
+
+      end subroutine grid_init_one
+
+!***********************************************************************
+
+      ! fix: end-name must match the module statement (grids_one),
+      ! otherwise this file does not compile
+      end module grids_one
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+

+ 154 - 0
interpolation/scrip_sources/iounits.f

@@ -0,0 +1,154 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+!     This module is a dynamic I/O unit manager.  It keeps track of
+!     which units are in use and reserves units for stdin, stdout, and
+!     stderr.
+!
+!-----------------------------------------------------------------------
+!
+!     CVS:$Id: iounits.f,v 1.2 2000/04/19 21:56:25 pwjones Exp $
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!***********************************************************************
+
+      module iounits
+
+!-----------------------------------------------------------------------
+
+      use kinds_mod   ! defines data types
+
+      implicit none
+
+!-----------------------------------------------------------------------
+
+!     one flag per Fortran I/O unit 1-99; .true. means the unit is
+!     available for get_unit to hand out
+      logical (kind=log_kind), dimension(99), save ::
+     &    unit_free   ! flags to determine whether unit is free for use
+
+      integer (kind=int_kind), parameter ::
+     &    stdin  = 5, ! reserves unit for standard input
+     &    stdout = 6, ! reserves unit for standard output
+     &    stderr = 6  ! reserves unit for standard error
+
+!     NOTE(review): stderr is mapped to the same unit as stdout (6),
+!     so "error" output is merged into standard output -- confirm
+!     this is intentional.
+
+!***********************************************************************
+
+      contains
+
+!***********************************************************************
+
+      subroutine get_unit(iunit)
+
+!-----------------------------------------------------------------------
+
+!     This routine returns the next available I/O unit number.
+
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+
+!     output variables
+
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), intent(out) ::
+     &     iunit   ! next free I/O unit
+
+!-----------------------------------------------------------------------
+
+!     local variables
+
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind) :: n
+
+!     first_call is a saved local so the reservation of the standard
+!     units below happens exactly once per program run
+      logical (kind=log_kind), save :: first_call = .true.
+
+!-----------------------------------------------------------------------
+
+!     if this is the first call, reserve stdout, stdin and stderr
+
+!-----------------------------------------------------------------------
+
+      if (first_call) then
+        unit_free = .true.
+        unit_free(stdin)  = .false.
+        unit_free(stdout) = .false.
+        unit_free(stderr) = .false.
+        first_call = .false.
+      endif
+
+!-----------------------------------------------------------------------
+
+!     search for next available unit
+
+!-----------------------------------------------------------------------
+
+      srch_unit: do n=1,99
+        if (unit_free(n)) then
+          iunit = n
+          unit_free(n) = .false.
+          exit srch_unit
+        endif
+      end do srch_unit
+
+!     NOTE(review): if all 99 units are in use, the loop above falls
+!     through without assigning iunit, so the caller receives an
+!     undefined unit number -- consider stopping with an error here.
+
+!-----------------------------------------------------------------------
+
+      end subroutine get_unit
+
+!***********************************************************************
+
+      subroutine release_unit(iunit)
+
+!-----------------------------------------------------------------------
+
+!     This routine releases the specified unit and closes the file.
+
+!-----------------------------------------------------------------------
+!-----------------------------------------------------------------------
+
+!     input variables
+
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), intent(in) ::
+     &     iunit   ! I/O unit to release
+
+!-----------------------------------------------------------------------
+
+!     closes I/O unit and declares it free
+
+!-----------------------------------------------------------------------
+
+!     the flag is cleared even if no file is connected; a CLOSE on an
+!     unconnected unit is permitted by the Fortran standard
+      unit_free(iunit) = .true.
+      close(iunit)
+
+!-----------------------------------------------------------------------
+
+      end subroutine release_unit
+
+!***********************************************************************
+
+      end module iounits
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

+ 53 - 0
interpolation/scrip_sources/kinds_mod.f

@@ -0,0 +1,53 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+!     This module defines the F90 kind parameter for common data types.
+!
+!-----------------------------------------------------------------------
+!
+!     CVS:$Id: kinds_mod.f,v 1.2 2000/04/19 21:56:25 pwjones Exp $
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!***********************************************************************
+
+      module kinds_mod
+
+!-----------------------------------------------------------------------
+
+      implicit none
+      save
+
+!-----------------------------------------------------------------------
+
+!     char_len  : fixed length used for character strings (filenames,
+!                 variable names, ...) throughout the package
+!     int_kind  : default integer kind
+!     log_kind  : default logical kind
+!     real_kind : real kind with at least  6 decimal digits of
+!                 precision (single precision on typical platforms)
+!     dbl_kind  : real kind with at least 13 decimal digits of
+!                 precision (double precision on typical platforms)
+      integer, parameter :: char_len  = 100,
+     &                      int_kind  = kind(1),
+     &                      log_kind  = kind(.true.),
+     &                      real_kind = selected_real_kind(6),
+     &                      dbl_kind  = selected_real_kind(13)
+
+!-----------------------------------------------------------------------
+
+      end module kinds_mod
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

+ 146 - 0
interpolation/scrip_sources/makefile

@@ -0,0 +1,146 @@
+#!/bin/csh
+# NOTE(review): the csh shebang above is inert -- make reads this file
+# directly and treats the line as a comment; it is never executed as a
+# csh script. Consider removing it to avoid confusion.
+#
+# Makefile for interpolation code
+#
+# CVS:$Id: makefile,v 1.7 2000/04/19 21:46:44 pwjones Exp $
+#
+# toolchain and library configuration; netCDF C (-lnetcdf) and Fortran
+# (-lnetcdff) libraries are assumed under /usr/include and /usr/lib
+COMPILE = gfortran
+FLAGS   = -O3 -I/usr/include -L/usr/lib
+LIB     =  -lnetcdf -lnetcdff  
+INCLUDE = 
+SRCDIR  = .
+EXEDIR  = ..
+# objects linked into the main 'scrip' weight-generation executable
+OBJSET  = \
+	kinds_mod.o \
+	constants.o \
+	iounits.o \
+	netcdf.o \
+	grids.o \
+	remap_vars.o \
+	remap_distwgt.o \
+	remap_conserv.o \
+	remap_bilinear.o \
+	remap_bicubic.o \
+	timers.o \
+	remap_write.o \
+	scrip.o
+
+# objects shared by the test/apply executables (scrip_test, scrip_use,
+# scrip_use_extrap); each adds its own driver object below
+OBJTEST  = \
+	kinds_mod.o \
+	constants.o \
+	iounits.o \
+	netcdf.o \
+	grids.o \
+	timers.o \
+	remap_vars.o \
+	remap_read.o \
+    read_input_file.o \
+	remap.o
+
+all: $(EXEDIR)/scrip $(EXEDIR)/scrip_test $(EXEDIR)/scrip_use $(EXEDIR)/scrip_use_extrap 
+
+$(EXEDIR)/scrip: $(OBJSET)
+	$(COMPILE) $(FLAGS) $(OBJSET) $(LIB) -o $(EXEDIR)/scrip
+
+$(EXEDIR)/scrip_test: $(OBJTEST) scrip_test.o
+	$(COMPILE) $(FLAGS) $(OBJTEST) scrip_test.o $(LIB) \
+	-o $(EXEDIR)/scrip_test
+
+$(EXEDIR)/scrip_use: $(OBJTEST) scrip_use.o
+	$(COMPILE) $(FLAGS) $(OBJTEST) scrip_use.o $(LIB) \
+	-o $(EXEDIR)/scrip_use
+
+$(EXEDIR)/scrip_use_extrap: $(OBJTEST) scrip_use_extrap.o
+	$(COMPILE) $(FLAGS) $(OBJTEST) scrip_use_extrap.o $(LIB) \
+	-o $(EXEDIR)/scrip_use_extrap
+
+# per-object compile rules; module dependencies are expressed through
+# the prerequisite .o files so modules are built before their users
+kinds_mod.o: $(SRCDIR)/kinds_mod.f $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/kinds_mod.f
+
+constants.o: $(SRCDIR)/constants.f kinds_mod.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/constants.f
+
+iounits.o: $(SRCDIR)/iounits.f kinds_mod.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/iounits.f
+
+netcdf.o: $(SRCDIR)/netcdf.f kinds_mod.o constants.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/netcdf.f
+
+grids.o: $(SRCDIR)/grids.f kinds_mod.o constants.o iounits.o netcdf.o \
+	$(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/grids.f
+
+remap_vars.o: $(SRCDIR)/remap_vars.f kinds_mod.o constants.o grids.o \
+	$(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/remap_vars.f
+
+remap_conserv.o: $(SRCDIR)/remap_conserv.f kinds_mod.o constants.o \
+		timers.o remap_vars.o grids.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/remap_conserv.f
+
+remap_distwgt.o: $(SRCDIR)/remap_distwgt.f kinds_mod.o constants.o \
+		remap_vars.o grids.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/remap_distwgt.f
+
+remap_bilinear.o: $(SRCDIR)/remap_bilinear.f kinds_mod.o constants.o \
+		remap_vars.o grids.o timers.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/remap_bilinear.f
+
+remap_bicubic.o: $(SRCDIR)/remap_bicubic.f kinds_mod.o constants.o \
+		remap_vars.o grids.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/remap_bicubic.f
+
+timers.o: $(SRCDIR)/timers.f kinds_mod.o constants.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/timers.f
+
+remap_write.o: $(SRCDIR)/remap_write.f kinds_mod.o constants.o \
+		netcdf.o remap_vars.o grids.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/remap_write.f
+
+remap_read.o: $(SRCDIR)/remap_read.f kinds_mod.o constants.o netcdf.o \
+		remap_vars.o grids.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/remap_read.f
+
+read_input_file.o: $(SRCDIR)/read_input_file.f kinds_mod.o constants.o netcdf.o \
+		remap_vars.o grids.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/read_input_file.f
+
+remap.o: $(SRCDIR)/remap.f kinds_mod.o constants.o 
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/remap.f
+
+scrip.o: $(SRCDIR)/scrip.f kinds_mod.o constants.o iounits.o timers.o \
+		remap_vars.o grids.o remap_conserv.o remap_distwgt.o \
+		remap_bilinear.o remap_bicubic.o remap_write.o \
+		$(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/scrip.f
+
+scrip_test.o: $(SRCDIR)/scrip_test.f kinds_mod.o constants.o iounits.o \
+		netcdf.o remap_vars.o grids.o remap.o remap_read.o \
+		$(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/scrip_test.f
+
+scrip_use.o: $(SRCDIR)/scrip_use.f kinds_mod.o constants.o iounits.o \
+                netcdf.o remap_vars.o grids.o remap.o \
+		remap_read.o read_input_file.o \
+                $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/scrip_use.f
+
+scrip_use_extrap.o: $(SRCDIR)/scrip_use_extrap.f kinds_mod.o \
+                constants.o iounits.o \
+                netcdf.o remap_vars.o grids.o remap.o \
+                remap_read.o read_input_file.o \
+                $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/scrip_use_extrap.f
+
+
+# extra objects not linked by 'all' (kept for manual builds)
+scripshape.o: $(SRCDIR)/scripshape.F90 $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/scripshape.F90
+
+scrip_test_repeat.o: $(SRCDIR)/scrip_test_repeat.f kinds_mod.o \
+		constants.o netcdf.o \
+		iounits.o remap_vars.o grids.o $(INCLUDE)
+	$(COMPILE) $(FLAGS) -c $(SRCDIR)/scrip_test_repeat.f
+
+# NOTE(review): 'rm' without -f exits non-zero when no .o/.mod files
+# exist, making 'make clean' fail on an already-clean tree.
+clean: 
+	/bin/rm *.o *.mod
+
+

+ 79 - 0
interpolation/scrip_sources/netcdf.f

@@ -0,0 +1,79 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+!     This module contains the netCDF include file and a netcdf error
+!     handling routine.
+!
+!-----------------------------------------------------------------------
+!
+!     CVS:$Id: netcdf.f,v 1.2 2000/04/19 21:56:25 pwjones Exp $
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!***********************************************************************
+
+      module netcdf_mod
+
+!-----------------------------------------------------------------------
+
+      use kinds_mod
+      use constants
+
+      implicit none
+
+!     pulls in the netCDF Fortran 77 interface: NF_* status/type
+!     constants and declarations for the nf_* functions used below
+      include 'netcdf.inc'
+
+!***********************************************************************
+
+      contains
+
+!***********************************************************************
+
+      subroutine netcdf_error_handler(istat)
+
+!-----------------------------------------------------------------------
+
+!     This routine provides a simple interface to netCDF error message
+!     routine.
+
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), intent(in) :: 
+     &    istat   ! integer status returned by netCDF function call
+
+!-----------------------------------------------------------------------
+
+!     any status other than NF_NOERR aborts the whole program after
+!     printing the netCDF error string; the message goes to stdout
+!     (via print *), not stderr
+      if (istat /= NF_NOERR) then
+        print *,'Error in netCDF: ',nf_strerror(istat)
+        stop
+      endif
+
+!-----------------------------------------------------------------------
+
+      end subroutine netcdf_error_handler
+
+!***********************************************************************
+
+      end module netcdf_mod
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

+ 276 - 0
interpolation/scrip_sources/read_input_file.f

@@ -0,0 +1,276 @@
+
+      module read_input_file
+
+!-----------------------------------------------------------------------
+
+!     contains routines for reading a file to interpolate
+
+!-----------------------------------------------------------------------
+
+      use kinds_mod     ! defines common data types
+      use constants     ! defines useful constants
+      use grids         ! includes all grid information
+      use netcdf_mod    ! module with netcdf vars and utilities
+      use remap_vars    ! module for all required remapping variables
+
+      implicit none
+
+!     field read from the input file, reshaped to (nlon*nlat, ntime)
+      real (kind=dbl_kind), dimension(:,:), allocatable ::
+     &    grid1_array
+
+!     dimension sizes and attribute counts, exported for the caller
+      integer (kind=int_kind) ::
+     &    ntime, nlat, nlon, natts, nvaratts, nglobatts
+
+!     netCDF ids; the input file is left open on return, presumably so
+!     the caller can still copy attributes from it -- confirm intended
+      integer (kind=int_kind) ::
+     &    nc_infile_id, nc_time_id, nc_invar_id, dimtime
+
+!     time coordinate values (defaults to 1..ntime when no time
+!     variable exists, or to a single value 1. when no time dimension)
+      real (kind=dbl_kind), dimension(:), allocatable ::
+     &    time
+
+!     NOTE(review): invertlat is never initialised in this module; the
+!     caller must set it before calling read_input, otherwise its value
+!     is undefined when the reshape at the end tests it.
+      logical :: invertlat
+
+
+      contains
+
+!***********************************************************************
+
+      subroutine read_input(infile, varname)
+
+!-----------------------------------------------------------------------
+
+!     reads variable 'varname' from netCDF file 'infile' into the
+!     module array grid1_array, checking its lon/lat sizes against the
+!     source grid (grid1_dims) and filling the module time array
+
+!-----------------------------------------------------------------------
+
+!     input variables
+
+!-----------------------------------------------------------------------
+
+      character(char_len), intent(in) ::
+     &    infile, varname       ! input filename an varname
+
+!-----------------------------------------------------------------------
+    
+      character(char_len) ::
+     &    timename, name2    
+
+!     temporaries for reading the data in its native netCDF type
+!     (int/real/double) before conversion to dbl_kind
+      real (kind=dbl_kind), dimension(:,:,:), allocatable ::
+     &    grid1_array_tmp
+
+      integer, dimension(:,:,:), allocatable ::
+     &    grid1_array_tmp1 
+ 
+      real, dimension(:,:,:), allocatable ::
+     &    grid1_array_tmp2
+
+      double precision, dimension(:,:,:), allocatable ::
+     &    grid1_array_tmp3
+
+      integer, dimension(:), allocatable ::
+     &    time1
+
+      real, dimension(:), allocatable ::
+     &    time2
+
+      double precision, dimension(:), allocatable ::
+     &    time3
+
+
+      integer (kind=int_kind) :: 
+     &    ncstat, nc_var_type, ndims, ji
+
+      integer (kind=int_kind), dimension(:), allocatable :: ! netCDF ids
+     &    nc_dims_ids
+
+!-----------------------------------------------------------------------
+
+!     read dimension information
+
+!-----------------------------------------------------------------------
+
+      ncstat = nf_open(infile, NF_NOWRITE, nc_infile_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_infile_id, varname, nc_invar_id)
+      call netcdf_error_handler(ncstat)
+   
+      ncstat = nf_inq_varndims(nc_infile_id, nc_invar_id, ndims)
+      call netcdf_error_handler(ncstat)
+
+      allocate(nc_dims_ids(ndims))
+      ncstat = nf_inq_vardimid(nc_infile_id, nc_invar_id, nc_dims_ids)
+      call netcdf_error_handler(ncstat)
+      if ( ndims < 1 .or. ndims > 3) then
+        stop "Input files should have (lon) or (lon,lat) or (lon,time) 
+     &        or (lon,lat,time) dimensions"
+      endif
+
+!     first dimension must match the source-grid longitude count
+      ncstat = nf_inq_dimlen(nc_infile_id, nc_dims_ids(1), nlon)
+      call netcdf_error_handler(ncstat)
+      if (nlon .ne. grid1_dims(1)) then
+        print*," Input file : ",nlon," longitudes"
+        print*," Weight file : ",grid1_dims(1) ," longitudes"
+        stop " Inconsistent number of longitudes. We stop "
+      endif
+
+!     decide whether the second dimension is latitude or time; dimtime
+!     records which dimension index (2 or 3) carries time
+      name2='none'
+      nlat=1
+      if ( ndims >= 2 ) then
+        ncstat = nf_inq_dimname(nc_infile_id, nc_dims_ids(2), name2)
+        
+        if ( name2 /= 'time' .and. name2 /= 'time_counter' .and. 
+     &     name2 /= 't' ) then
+ 
+          ncstat = nf_inq_dimlen(nc_infile_id, nc_dims_ids(2), nlat)
+          call netcdf_error_handler(ncstat)
+          if (nlat .ne. grid1_dims(2)) then
+            print*," Input file : ",nlat," latitudes"
+            print*," Weight file : ",grid1_dims(2)," latitudes"
+            stop " Inconsistent number of latitudes. We stop "
+          endif
+          dimtime=3
+        else
+          dimtime=2 
+        endif
+      endif
+!-----------------------------------------------------------------------
+
+!     read time dimension 
+
+!-----------------------------------------------------------------------
+
+!     NOTE(review): dimtime is only assigned when ndims >= 2, so for a
+!     1-d input (ndims == 1) the test below reads an undefined value;
+!     initialise dimtime (e.g. to 0) before the branch above.
+      nc_time_id=-1
+      if ( ndims == 3 .or. dimtime == 2 ) then
+        ncstat = nf_inq_dim(nc_infile_id, nc_dims_ids(dimtime), 
+     &           timename, ntime)
+        call netcdf_error_handler(ncstat)
+        
+        allocate(time(ntime))
+
+!       if a coordinate variable with the time dimension's name exists,
+!       read it in its native type; otherwise fall back to 1..ntime
+        ncstat = nf_inq_varid(nc_infile_id, timename, nc_time_id)
+        if (ncstat == NF_NOERR) then
+
+          ncstat = nf_inq_vartype(nc_infile_id, nc_time_id, nc_var_type)
+          call netcdf_error_handler(ncstat)
+        
+          allocate(time1(ntime))
+          allocate(time2(ntime))
+          allocate(time3(ntime))
+          if ( nc_var_type == NF_INT ) then
+            ncstat = nf_get_var_int(nc_infile_id, nc_time_id, time1)
+            if ( ncstat == NF_NOERR ) then
+              time = real(time1,kind=dbl_kind)
+            else
+              stop "Problem reading input data"
+            endif
+          elseif (nc_var_type == NF_REAL) then
+            ncstat = nf_get_var_real(nc_infile_id, nc_time_id, time2)
+            if ( ncstat == NF_NOERR ) then
+              time = real(time2,kind=dbl_kind)
+            else
+              stop "Problem reading input data"
+            endif
+          elseif ( nc_var_type == NF_DOUBLE) then
+            ncstat = nf_get_var_double(nc_infile_id,nc_time_id, time3)
+            if ( ncstat == NF_NOERR ) then
+              time = real(time3,kind=dbl_kind)
+            else
+              stop"Problem reading input data"
+            endif
+          else
+            stop"Problem with input data type"
+          endif 
+          deallocate(time1, time2, time3)
+
+          ncstat = nf_inq_varnatts(nc_infile_id, nc_time_id, natts)
+          call netcdf_error_handler(ncstat)
+        else
+          do ji=1,ntime
+            time(ji)=ji
+          enddo
+          natts=0
+        endif
+
+      else
+!       no time dimension: single record with dummy coordinate
+        ntime=1
+        allocate(time(1))
+        time=1.
+        natts=0
+      endif
+
+!-----------------------------------------------------------------------
+
+!     allocate arrays
+
+!-----------------------------------------------------------------------
+
+      allocate( grid1_array_tmp (nlon,nlat,ntime))
+      allocate( grid1_array_tmp1 (nlon,nlat,ntime))
+      allocate( grid1_array_tmp2 (nlon,nlat,ntime))
+      allocate( grid1_array_tmp3 (nlon,nlat,ntime))
+
+!-----------------------------------------------------------------------
+
+!     read variable 
+
+!-----------------------------------------------------------------------
+ 
+!     same native-type dispatch as for the time coordinate above
+      ncstat = nf_inq_vartype(nc_infile_id, nc_invar_id, nc_var_type) 
+      call netcdf_error_handler(ncstat)
+
+      if ( nc_var_type == NF_INT ) then
+        ncstat = nf_get_var_int(nc_infile_id, nc_invar_id, 
+     &                          grid1_array_tmp1)
+        if ( ncstat == NF_NOERR ) then
+          grid1_array_tmp = real(grid1_array_tmp1,kind=dbl_kind)
+        else
+          stop "Problem reading input data"
+        endif
+      elseif (nc_var_type == NF_REAL) then
+        ncstat = nf_get_var_real(nc_infile_id, nc_invar_id, 
+     &                           grid1_array_tmp2)
+        if ( ncstat == NF_NOERR ) then
+          grid1_array_tmp = real(grid1_array_tmp2,kind=dbl_kind)
+        else
+          stop "Problem reading input data"
+        endif
+      elseif ( nc_var_type == NF_DOUBLE) then
+        ncstat = nf_get_var_double(nc_infile_id,nc_invar_id, 
+     &                             grid1_array_tmp3)
+        if ( ncstat == NF_NOERR ) then
+          grid1_array_tmp = real(grid1_array_tmp3,kind=dbl_kind)
+        else
+          stop"Problem reading input data"
+        endif
+      else
+        stop"Problem with input data type"
+      endif
+
+      ncstat = nf_inq_varnatts(nc_infile_id, nc_invar_id, nvaratts)
+      call netcdf_error_handler(ncstat)
+ 
+      deallocate(grid1_array_tmp1, grid1_array_tmp2, grid1_array_tmp3)
+
+!-----------------------------------------------------------------------
+
+!     reshape input file
+
+!-----------------------------------------------------------------------
+     
+!     flatten (lon,lat) to a single cell index, optionally flipping the
+!     latitude axis first when invertlat is set by the caller
+      allocate( grid1_array (nlon*nlat,ntime)) 
+      if (invertlat) then
+        grid1_array = reshape ( grid1_array_tmp (:,nlat:1:-1,:), 
+     &                         (/ nlat*nlon, ntime /) )
+      else
+        grid1_array = reshape (grid1_array_tmp, (/ nlat*nlon, ntime /) )
+      endif 
+      deallocate(grid1_array_tmp)
+
+!     input file is intentionally not closed here; nc_infile_id remains
+!     valid for the caller (e.g. to copy global attributes)
+      ncstat = nf_inq_varnatts(nc_infile_id, NF_GLOBAL, nglobatts)
+      call netcdf_error_handler(ncstat)
+!-----------------------------------------------------------------------
+
+      end subroutine read_input
+
+!***********************************************************************
+
+      end module read_input_file
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+

+ 272 - 0
interpolation/scrip_sources/read_input_file.save.fr

@@ -0,0 +1,272 @@
+
+      module read_input_file
+
+!-----------------------------------------------------------------------
+
+!     contains routines for reading a file to interpolate
+
+!     NOTE(review): this is a saved backup (.save.fr) of
+!     read_input_file.f; it duplicates the module name and is not part
+!     of the build (the makefile compiles read_input_file.f only).
+
+!-----------------------------------------------------------------------
+
+      use kinds_mod     ! defines common data types
+      use constants     ! defines useful constants
+      use grids         ! includes all grid information
+      use netcdf_mod    ! module with netcdf vars and utilities
+      use remap_vars    ! module for all required remapping variables
+
+      implicit none
+
+!     field read from the input file, reshaped to (nlon*nlat, ntime)
+      real (kind=dbl_kind), dimension(:,:), allocatable ::
+     &    grid1_array
+
+      integer (kind=int_kind) ::
+     &    ntime, nlat, nlon, natts, nvaratts, nglobatts
+
+      integer (kind=int_kind) ::
+     &    nc_infile_id, nc_time_id, nc_invar_id, dimtime
+
+      real (kind=dbl_kind), dimension(:), allocatable ::
+     &    time
+
+!     NOTE(review): invertlat is never initialised in this module; the
+!     caller must set it before calling read_input.
+      logical :: invertlat
+
+
+      contains
+
+!***********************************************************************
+
+      subroutine read_input(infile, varname)
+
+!-----------------------------------------------------------------------
+
+!     input variables
+
+!-----------------------------------------------------------------------
+
+      character(char_len), intent(in) ::
+     &    infile, varname       ! input filename an varname
+
+!-----------------------------------------------------------------------
+    
+      character(char_len) ::
+     &    timename, name2    
+
+!     temporaries for reading the data in its native netCDF type
+!     (int/real/double) before conversion to dbl_kind
+      real (kind=dbl_kind), dimension(:,:,:), allocatable ::
+     &    grid1_array_tmp
+
+      integer, dimension(:,:,:), allocatable ::
+     &    grid1_array_tmp1 
+ 
+      real, dimension(:,:,:), allocatable ::
+     &    grid1_array_tmp2
+
+      double precision, dimension(:,:,:), allocatable ::
+     &    grid1_array_tmp3
+
+      integer, dimension(:), allocatable ::
+     &    time1
+
+      real, dimension(:), allocatable ::
+     &    time2
+
+      double precision, dimension(:), allocatable ::
+     &    time3
+
+
+      integer (kind=int_kind) :: 
+     &    ncstat, nc_var_type, ndims
+
+      integer (kind=int_kind), dimension(:), allocatable :: ! netCDF ids
+     &    nc_dims_ids
+
+!-----------------------------------------------------------------------
+
+!     read dimension information
+
+!-----------------------------------------------------------------------
+
+      ncstat = nf_open(infile, NF_NOWRITE, nc_infile_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_infile_id, varname, nc_invar_id)
+      call netcdf_error_handler(ncstat)
+   
+      ncstat = nf_inq_varndims(nc_infile_id, nc_invar_id, ndims)
+      call netcdf_error_handler(ncstat)
+
+      allocate(nc_dims_ids(ndims))
+      ncstat = nf_inq_vardimid(nc_infile_id, nc_invar_id, nc_dims_ids)
+      call netcdf_error_handler(ncstat)
+      if ( ndims < 1 .or. ndims > 3) then
+        stop "Input files should have (lon) or (lon,lat) or (lon,time) 
+     &        or (lon,lat,time) dimensions"
+      endif
+
+      ncstat = nf_inq_dimlen(nc_infile_id, nc_dims_ids(1), nlon)
+      call netcdf_error_handler(ncstat)
+      if (nlon .ne. grid1_dims(1)) then
+        print*," Input file : ",nlon," longitudes"
+        print*," Weight file : ",grid1_dims(1) ," longitudes"
+        stop " Inconsistent number of longitudes. We stop "
+      endif
+
+      name2='none'
+      if ( ndims >= 2 ) then
+        ncstat = nf_inq_dimname(nc_infile_id, nc_dims_ids(2), name2)
+      endif
+      
+!     NOTE(review): when ndims == 1, name2 stays 'none' so this branch
+!     is taken and nf_inq_dimlen indexes nc_dims_ids(2), which does not
+!     exist for a 1-d variable (out-of-bounds access); dimtime is also
+!     left unset on the (ndims == 1) path through the else branch. The
+!     restructured read_input_file.f guards this with 'ndims >= 2'.
+      if ( name2 /= 'time' .and. name2 /= 'time_counter' .and. 
+     &     name2 /= 't' ) then
+ 
+        ncstat = nf_inq_dimlen(nc_infile_id, nc_dims_ids(2), nlat)
+        call netcdf_error_handler(ncstat)
+        if (nlat .ne. grid1_dims(2)) then
+          print*," Input file : ",nlat," latitudes"
+          print*," Weight file : ",grid1_dims(2)," latitudes"
+          stop " Inconsistent number of latitudes. We stop "
+        endif
+        dimtime=3
+      else
+        nlat=1
+        if (ndims >=2) then
+          dimtime=2
+        endif
+      endif
+!-----------------------------------------------------------------------
+
+!     read time dimension 
+
+!-----------------------------------------------------------------------
+
+      nc_time_id=-1
+      if ( ndims == 3 .or. dimtime == 2 ) then
+        ncstat = nf_inq_dim(nc_infile_id, nc_dims_ids(dimtime), 
+     &           timename, ntime)
+        call netcdf_error_handler(ncstat)
+        
+        allocate(time(ntime))
+        allocate(time1(ntime))
+        allocate(time2(ntime))
+        allocate(time3(ntime))
+
+!       unlike read_input_file.f, a missing time coordinate variable is
+!       fatal here (error handler aborts instead of defaulting 1..ntime)
+        ncstat = nf_inq_varid(nc_infile_id, timename, nc_time_id)
+        call netcdf_error_handler(ncstat)
+
+        ncstat = nf_inq_vartype(nc_infile_id, nc_time_id, nc_var_type)
+        call netcdf_error_handler(ncstat)
+        
+        if ( nc_var_type == NF_INT ) then
+          ncstat = nf_get_var_int(nc_infile_id, nc_time_id, time1)
+          if ( ncstat == NF_NOERR ) then
+            time = real(time1,kind=dbl_kind)
+          else
+            stop "Problem reading input data"
+          endif
+        elseif (nc_var_type == NF_REAL) then
+          ncstat = nf_get_var_real(nc_infile_id, nc_time_id, time2)
+          if ( ncstat == NF_NOERR ) then
+            time = real(time2,kind=dbl_kind)
+          else
+            stop "Problem reading input data"
+          endif
+        elseif ( nc_var_type == NF_DOUBLE) then
+          ncstat = nf_get_var_double(nc_infile_id,nc_time_id, time3)
+          if ( ncstat == NF_NOERR ) then
+            time = real(time3,kind=dbl_kind)
+          else
+            stop"Problem reading input data"
+          endif
+        else
+          stop"Problem with input data type"
+        endif 
+        deallocate(time1, time2, time3)
+
+        ncstat = nf_inq_varnatts(nc_infile_id, nc_time_id, natts)
+        call netcdf_error_handler(ncstat)
+
+      else
+        ntime=1
+        allocate(time(1))
+        time=1.
+        natts=0
+      endif
+
+!-----------------------------------------------------------------------
+
+!     allocate arrays
+
+!-----------------------------------------------------------------------
+
+      allocate( grid1_array_tmp (nlon,nlat,ntime))
+      allocate( grid1_array_tmp1 (nlon,nlat,ntime))
+      allocate( grid1_array_tmp2 (nlon,nlat,ntime))
+      allocate( grid1_array_tmp3 (nlon,nlat,ntime))
+
+!-----------------------------------------------------------------------
+
+!     read variable 
+
+!-----------------------------------------------------------------------
+ 
+      ncstat = nf_inq_vartype(nc_infile_id, nc_invar_id, nc_var_type) 
+      call netcdf_error_handler(ncstat)
+
+      if ( nc_var_type == NF_INT ) then
+        ncstat = nf_get_var_int(nc_infile_id, nc_invar_id, 
+     &                          grid1_array_tmp1)
+        if ( ncstat == NF_NOERR ) then
+          grid1_array_tmp = real(grid1_array_tmp1,kind=dbl_kind)
+        else
+          stop "Problem reading input data"
+        endif
+      elseif (nc_var_type == NF_REAL) then
+        ncstat = nf_get_var_real(nc_infile_id, nc_invar_id, 
+     &                           grid1_array_tmp2)
+        if ( ncstat == NF_NOERR ) then
+          grid1_array_tmp = real(grid1_array_tmp2,kind=dbl_kind)
+        else
+          stop "Problem reading input data"
+        endif
+      elseif ( nc_var_type == NF_DOUBLE) then
+        ncstat = nf_get_var_double(nc_infile_id,nc_invar_id, 
+     &                             grid1_array_tmp3)
+        if ( ncstat == NF_NOERR ) then
+          grid1_array_tmp = real(grid1_array_tmp3,kind=dbl_kind)
+        else
+          stop"Problem reading input data"
+        endif
+      else
+        stop"Problem with input data type"
+      endif
+
+      ncstat = nf_inq_varnatts(nc_infile_id, nc_invar_id, nvaratts)
+      call netcdf_error_handler(ncstat)
+ 
+      deallocate(grid1_array_tmp1, grid1_array_tmp2, grid1_array_tmp3)
+
+!-----------------------------------------------------------------------
+
+!     reshape input file
+
+!-----------------------------------------------------------------------
+     
+!     flatten (lon,lat) to a single cell index, optionally flipping the
+!     latitude axis first when invertlat is set by the caller
+      allocate( grid1_array (nlon*nlat,ntime)) 
+      if (invertlat) then
+        grid1_array = reshape ( grid1_array_tmp (:,nlat:1:-1,:), 
+     &                         (/ nlat*nlon, ntime /) )
+      else
+        grid1_array = reshape (grid1_array_tmp, (/ nlat*nlon, ntime /) )
+      endif 
+      deallocate(grid1_array_tmp)
+
+      ncstat = nf_inq_varnatts(nc_infile_id, NF_GLOBAL, nglobatts)
+      call netcdf_error_handler(ncstat)
+!-----------------------------------------------------------------------
+
+      end subroutine read_input
+
+!***********************************************************************
+
+      end module read_input_file
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+

+ 163 - 0
interpolation/scrip_sources/remap.f

@@ -0,0 +1,163 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+!     this routine performs a remapping based on addresses and weights
+!     computed in a setup phase
+!
+!-----------------------------------------------------------------------
+!
+!     CVS:$Id: remap.f,v 1.5 2000/04/19 21:56:25 pwjones Exp $
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!***********************************************************************
+
+      module remap_mod

+!-----------------------------------------------------------------------
+!
+!     this module contains the routines for performing the actual
+!     remappings
+!
+!-----------------------------------------------------------------------

+      use kinds_mod    ! defines common data types
+      use constants    ! defines common constants

+      implicit none

+!***********************************************************************

+      contains

+!***********************************************************************

+      subroutine remap(dst_array, map_wts, dst_add, src_add, 
+     &                 src_array, src_grad1, src_grad2, src_grad3)

+!-----------------------------------------------------------------------
+!
+!     performs the remapping based on weights computed elsewhere.
+!     each link n maps source cell src_add(n) to destination cell
+!     dst_add(n) with the weight(s) map_wts(:,n).  dst_array is
+!     zeroed here and then accumulated link by link, so repeated
+!     destination addresses sum correctly.  the optional gradient
+!     arrays select the higher-order (second-order) form.
+!
+-----------------------------------------------------------------------

+!-----------------------------------------------------------------------
+!
+!     input arrays
+!
+!-----------------------------------------------------------------------

+      integer (kind=int_kind), dimension(:), intent(in) ::
+     &     dst_add,     ! destination address for each link
+     &     src_add      ! source      address for each link

+      real (kind=dbl_kind), dimension(:,:), intent(in) ::
+     &     map_wts      ! remapping weights for each link

+      real (kind=dbl_kind), dimension(:), intent(in) ::
+     &     src_array    ! array with source field to be remapped

+      real (kind=dbl_kind), dimension(:), intent(in), optional ::
+     &     src_grad1    ! gradient arrays on source grid necessary for
+     &,    src_grad2    ! higher-order remappings
+     &,    src_grad3

+!-----------------------------------------------------------------------
+!
+!     output variables
+!
+!-----------------------------------------------------------------------

+      real (kind=dbl_kind), dimension(:), intent(inout) ::
+     &     dst_array    ! array for remapped field on destination grid
+                        ! (contents are overwritten, not accumulated
+                        ! across calls: zeroed before the link loop)

+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------

+      integer (kind=int_kind) :: n, iorder

+!-----------------------------------------------------------------------
+!
+!     check the order of the interpolation: presence of the first
+!     gradient array is the sole selector of the higher-order form
+!
+!-----------------------------------------------------------------------

+      if (present(src_grad1)) then
+        iorder = 2
+      else
+        iorder = 1
+      endif

+!-----------------------------------------------------------------------
+!
+!     first order remapping
+!
+!-----------------------------------------------------------------------

+      dst_array = zero

+      select case (iorder)
+      case(1)
+        do n=1,size(dst_add)
+          dst_array(dst_add(n)) = dst_array(dst_add(n)) + 
+     &                            src_array(src_add(n))*map_wts(1,n)
+        end do
+!-----------------------------------------------------------------------
+!
+!     second order remapping
+!
+!-----------------------------------------------------------------------

+      case(2)

+        if (size(map_wts,DIM=1) == 3) then
+          do n=1,size(dst_add)
+            dst_array(dst_add(n)) = dst_array(dst_add(n)) +
+     &                              src_array(src_add(n))*map_wts(1,n) +
+     &                              src_grad1(src_add(n))*map_wts(2,n) +
+     &                              src_grad2(src_add(n))*map_wts(3,n)
+          end do
+        else if (size(map_wts,DIM=1) == 4) then
+          do n=1,size(dst_add)
+            dst_array(dst_add(n)) = dst_array(dst_add(n)) +
+     &                              src_array(src_add(n))*map_wts(1,n) +
+     &                              src_grad1(src_add(n))*map_wts(2,n) +
+     &                              src_grad2(src_add(n))*map_wts(3,n) +
+     &                              src_grad3(src_add(n))*map_wts(4,n)
+          end do
+        else
+          !*** previously this case fell through silently, leaving
+          !*** dst_array all zero; fail loudly instead
+          stop 'remap: bad number of weights for 2nd-order remap'
+        endif

+      end select

+!-----------------------------------------------------------------------

+      end subroutine remap

+!***********************************************************************

+      end module remap_mod
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

+ 844 - 0
interpolation/scrip_sources/remap_bicubic.f

@@ -0,0 +1,844 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+!     this module contains necessary routines for performing an 
+!     bicubic interpolation.
+!
+!-----------------------------------------------------------------------
+!
+!     CVS:$Id: remap_bicubic.f,v 1.5 2001/08/22 18:20:41 pwjones Exp $
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!***********************************************************************
+
+      module remap_bicubic

+!-----------------------------------------------------------------------

+      use kinds_mod     ! defines common data types
+      use constants     ! defines common constants
+      use grids         ! module containing grid info
+      use remap_vars    ! module containing remap info

+      implicit none

+!-----------------------------------------------------------------------
+!
+!     controls for the Newton iteration used in remap_bicub to find
+!     the local (i,j) coordinates of each destination point
+!
+!-----------------------------------------------------------------------

+      integer (kind=int_kind), parameter ::
+     &    max_iter = 100   ! max iteration count for i,j iteration

+      real (kind=dbl_kind), parameter ::
+     &     converge = 1.e-10_dbl_kind  ! convergence criterion
+
+!***********************************************************************
+
+      contains
+
+!***********************************************************************
+
+      subroutine remap_bicub

+!-----------------------------------------------------------------------
+!
+!     this routine computes the weights for a bicubic interpolation.
+!     for every unmasked destination point it locates the enclosing
+!     four-point box on the source grid, iterates for local (i,j)
+!     coordinates, evaluates the 4x4 bicubic basis weights and stores
+!     them via store_link_bicub.  points with no enclosing box use a
+!     distance-weighted average of the nearest source points instead.
+!     a reverse (grid2 -> grid1) map is built when num_maps > 1.
+!
+!-----------------------------------------------------------------------

+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------

+      integer (kind=int_kind) :: n,icount,
+     &     dst_add,        ! destination address
+     &     iter,           ! iteration counter
+     &     nmap            ! index of current map being computed

+      integer (kind=int_kind), dimension(4) :: 
+     &     src_add         ! address for the four source points

+      real (kind=dbl_kind), dimension(4)  ::
+     &     src_lats,       ! latitudes  of four bilinear corners
+     &     src_lons        ! longitudes of four bilinear corners

+      real (kind=dbl_kind), dimension(4,4)  ::
+     &     wgts            ! bicubic weights for four corners

+      real (kind=dbl_kind) ::
+     &     plat, plon,       ! lat/lon coords of destination point
+     &     iguess, jguess,   ! current guess for bilinear coordinate
+     &     thguess, phguess, ! current guess for lat/lon coordinate
+     &     deli, delj,       ! corrections to i,j
+     &     dth1, dth2, dth3, ! some latitude  differences
+     &     dph1, dph2, dph3, ! some longitude differences
+     &     dthp, dphp,       ! difference between point and sw corner
+     &     mat1, mat2, mat3, mat4, ! matrix elements
+     &     determinant,      ! matrix determinant
+     &     sum_wgts,         ! sum of weights for normalization
+     &     w1,w2,w3,w4,w5,w6,w7,w8, ! 16 bicubic weight functions
+     &     w9,w10,w11,w12,w13,w14,w15,w16
+      ! note: thguess/phguess and w1..w16 are declared but never
+      ! used in this version of the routine

+!-----------------------------------------------------------------------
+!
+!     compute mappings from grid1 to grid2
+!
+!-----------------------------------------------------------------------

+      nmap = 1
+      if (grid1_rank /= 2) then
+        stop 'Can not do bicubic interpolation when grid_rank /= 2'
+      endif

+      !***
+      !*** loop over destination grid 
+      !***

+      grid_loop1: do dst_add = 1, grid2_size

+        if (.not. grid2_mask(dst_add)) cycle grid_loop1

+        plat = grid2_center_lat(dst_add)
+        plon = grid2_center_lon(dst_add)

+!-----------------------------------------------------------------------
+!
+!       find nearest square of grid points on source grid
+!
+!-----------------------------------------------------------------------

+        call grid_search_bicub(src_add, src_lats, src_lons, 
+     &                         plat, plon, grid1_dims,
+     &                         grid1_center_lat, grid1_center_lon,
+     &                         grid1_bound_box, bin_addr1, bin_addr2)

+        !***
+        !*** check to see if points are land points
+        !*** (any masked corner invalidates the whole box)
+        !***

+        if (src_add(1) > 0) then
+          do n=1,4
+            if (.not. grid1_mask(src_add(n))) src_add(1) = 0
+          end do
+        endif

+!-----------------------------------------------------------------------
+!
+!       if point found, find local i,j coordinates for weights
+!
+!-----------------------------------------------------------------------

+        if (src_add(1) > 0) then

+          grid2_frac(dst_add) = one

+          !***
+          !*** iterate to find i,j for bicubic approximation
+          !*** (2x2 Newton solve on the bilinear coordinate map)
+          !***

+          dth1 = src_lats(2) - src_lats(1)
+          dth2 = src_lats(4) - src_lats(1)
+          dth3 = src_lats(3) - src_lats(2) - dth2

+          dph1 = src_lons(2) - src_lons(1)
+          dph2 = src_lons(4) - src_lons(1)
+          dph3 = src_lons(3) - src_lons(2)

+          ! fold longitude differences back into one 2pi interval
+          if (dph1 >  three*pih) dph1 = dph1 - pi2
+          if (dph2 >  three*pih) dph2 = dph2 - pi2
+          if (dph3 >  three*pih) dph3 = dph3 - pi2
+          if (dph1 < -three*pih) dph1 = dph1 + pi2
+          if (dph2 < -three*pih) dph2 = dph2 + pi2
+          if (dph3 < -three*pih) dph3 = dph3 + pi2

+          dph3 = dph3 - dph2

+          iguess = half
+          jguess = half

+          iter_loop1: do iter=1,max_iter

+            dthp = plat - src_lats(1) - dth1*iguess -
+     &                    dth2*jguess - dth3*iguess*jguess
+            dphp = plon - src_lons(1)

+            if (dphp >  three*pih) dphp = dphp - pi2
+            if (dphp < -three*pih) dphp = dphp + pi2

+            dphp = dphp - dph1*iguess - dph2*jguess - 
+     &                    dph3*iguess*jguess

+            mat1 = dth1 + dth3*jguess
+            mat2 = dth2 + dth3*iguess
+            mat3 = dph1 + dph3*jguess
+            mat4 = dph2 + dph3*iguess

+            determinant = mat1*mat4 - mat2*mat3

+            deli = (dthp*mat4 - mat2*dphp)/determinant
+            delj = (mat1*dphp - dthp*mat3)/determinant

+            if (abs(deli) < converge .and. 
+     &          abs(delj) < converge) exit iter_loop1

+            iguess = iguess + deli
+            jguess = jguess + delj

+          end do iter_loop1

+          if (iter <= max_iter) then

+!-----------------------------------------------------------------------
+!
+!           successfully found i,j - compute weights
+!           (Hermite cubic basis functions in i and j; rows of wgts
+!           hold value/d-i/d-j/cross terms for the four corners)
+!
+!-----------------------------------------------------------------------

+            wgts(1,1) = (one - jguess**2*(three-two*jguess))*
+     &                  (one - iguess**2*(three-two*iguess))
+            wgts(1,2) = (one - jguess**2*(three-two*jguess))*
+     &                         iguess**2*(three-two*iguess)
+            wgts(1,3) =        jguess**2*(three-two*jguess)*
+     &                         iguess**2*(three-two*iguess)
+            wgts(1,4) =        jguess**2*(three-two*jguess)*
+     &                  (one - iguess**2*(three-two*iguess))
+            wgts(2,1) = (one - jguess**2*(three-two*jguess))*
+     &                         iguess*(iguess-one)**2
+            wgts(2,2) = (one - jguess**2*(three-two*jguess))*
+     &                         iguess**2*(iguess-one)
+            wgts(2,3) =        jguess**2*(three-two*jguess)*
+     &                         iguess**2*(iguess-one)
+            wgts(2,4) =        jguess**2*(three-two*jguess)*
+     &                         iguess*(iguess-one)**2
+            wgts(3,1) =        jguess*(jguess-one)**2*
+     &                  (one - iguess**2*(three-two*iguess))
+            wgts(3,2) =        jguess*(jguess-one)**2*
+     &                         iguess**2*(three-two*iguess)
+            wgts(3,3) =        jguess**2*(jguess-one)*
+     &                         iguess**2*(three-two*iguess)
+            wgts(3,4) =        jguess**2*(jguess-one)*
+     &                  (one - iguess**2*(three-two*iguess))
+            wgts(4,1) =        iguess*(iguess-one)**2*
+     &                         jguess*(jguess-one)**2
+            wgts(4,2) =        iguess**2*(iguess-one)*
+     &                         jguess*(jguess-one)**2
+            wgts(4,3) =        iguess**2*(iguess-one)*
+     &                         jguess**2*(jguess-one)
+            wgts(4,4) =        iguess*(iguess-one)**2*
+     &                         jguess**2*(jguess-one)

+            call store_link_bicub(dst_add, src_add, wgts, nmap)

+          else
+            stop 'Iteration for i,j exceed max iteration count'
+          endif

+!-----------------------------------------------------------------------
+!
+!       search for bilinear failed - use a distance-weighted
+!       average instead (this is typically near the pole)
+!
+!-----------------------------------------------------------------------

+        else if (src_add(1) < 0) then

+          ! negative addresses signal fallback; src_lats already
+          ! holds the normalized inverse-distance weights
+          src_add = abs(src_add)

+          icount = 0
+          do n=1,4
+            if (grid1_mask(src_add(n))) then
+              icount = icount + 1
+            else
+              src_lats(n) = zero
+            endif
+          end do

+          if (icount > 0) then
+            !*** renormalize weights

+            sum_wgts = sum(src_lats)
+            wgts(1,1) = src_lats(1)/sum_wgts
+            wgts(1,2) = src_lats(2)/sum_wgts
+            wgts(1,3) = src_lats(3)/sum_wgts
+            wgts(1,4) = src_lats(4)/sum_wgts
+            wgts(2:4,:) = zero

+            grid2_frac(dst_add) = one
+            call store_link_bicub(dst_add, src_add, wgts, nmap)
+          endif

+        endif
+      end do grid_loop1

+!-----------------------------------------------------------------------
+!
+!     compute mappings from grid2 to grid1 if necessary
+!
+!-----------------------------------------------------------------------

+      if (num_maps > 1) then

+      nmap = 2
+      if (grid2_rank /= 2) then
+        stop 'Can not do bicubic interpolation when grid_rank /= 2'
+      endif

+      !***
+      !*** loop over destination grid 
+      !***

+      grid_loop2: do dst_add = 1, grid1_size

+        if (.not. grid1_mask(dst_add)) cycle grid_loop2

+        plat = grid1_center_lat(dst_add)
+        plon = grid1_center_lon(dst_add)

+        !***
+        !*** find nearest square of grid points on source grid
+        !***

+        call grid_search_bicub(src_add, src_lats, src_lons, 
+     &                         plat, plon, grid2_dims,
+     &                         grid2_center_lat, grid2_center_lon,
+     &                         grid2_bound_box, bin_addr2, bin_addr1)

+        !***
+        !*** check to see if points are land points
+        !***

+        if (src_add(1) > 0) then
+          do n=1,4
+            if (.not. grid2_mask(src_add(n))) src_add(1) = 0
+          end do
+        endif

+        !***
+        !*** if point found, find i,j coordinates for weights
+        !***

+        if (src_add(1) > 0) then

+          grid1_frac(dst_add) = one

+          !***
+          !*** iterate to find i,j for bilinear approximation
+          !***

+          dth1 = src_lats(2) - src_lats(1)
+          dth2 = src_lats(4) - src_lats(1)
+          dth3 = src_lats(3) - src_lats(2) - dth2

+          dph1 = src_lons(2) - src_lons(1)
+          dph2 = src_lons(4) - src_lons(1)
+          dph3 = src_lons(3) - src_lons(2)

+          if (dph1 >  pi) dph1 = dph1 - pi2
+          if (dph2 >  pi) dph2 = dph2 - pi2
+          if (dph3 >  pi) dph3 = dph3 - pi2
+          if (dph1 < -pi) dph1 = dph1 + pi2
+          if (dph2 < -pi) dph2 = dph2 + pi2
+          if (dph3 < -pi) dph3 = dph3 + pi2

+          dph3 = dph3 - dph2

+          ! NOTE(review): this reverse map starts the iteration at
+          ! (0,0) and wraps longitudes at +/-pi, while the forward
+          ! map uses (half,half) and +/-3*pih -- presumably
+          ! historical; confirm the asymmetry is intended
+          iguess = zero
+          jguess = zero

+          iter_loop2: do iter=1,max_iter

+            dthp = plat - src_lats(1) - dth1*iguess -
+     &                    dth2*jguess - dth3*iguess*jguess
+            dphp = plon - src_lons(1)

+            if (dphp >  pi) dphp = dphp - pi2
+            if (dphp < -pi) dphp = dphp + pi2

+            dphp = dphp - dph1*iguess - dph2*jguess - 
+     &                    dph3*iguess*jguess

+            mat1 = dth1 + dth3*jguess
+            mat2 = dth2 + dth3*iguess
+            mat3 = dph1 + dph3*jguess
+            mat4 = dph2 + dph3*iguess

+            determinant = mat1*mat4 - mat2*mat3

+            deli = (dthp*mat4 - mat2*dphp)/determinant
+            delj = (mat1*dphp - dthp*mat3)/determinant

+            if (abs(deli) < converge .and. 
+     &          abs(delj) < converge) exit iter_loop2

+            iguess = iguess + deli
+            jguess = jguess + delj

+          end do iter_loop2

+          if (iter <= max_iter) then

+            !***
+            !*** successfully found i,j - compute weights
+            !***

+            wgts(1,1) = (one - jguess**2*(three-two*jguess))*
+     &                  (one - iguess**2*(three-two*iguess))
+            wgts(1,2) = (one - jguess**2*(three-two*jguess))*
+     &                         iguess**2*(three-two*iguess)
+            wgts(1,3) =        jguess**2*(three-two*jguess)*
+     &                         iguess**2*(three-two*iguess)
+            wgts(1,4) =        jguess**2*(three-two*jguess)*
+     &                  (one - iguess**2*(three-two*iguess))
+            wgts(2,1) = (one - jguess**2*(three-two*jguess))*
+     &                         iguess*(iguess-one)**2
+            wgts(2,2) = (one - jguess**2*(three-two*jguess))*
+     &                         iguess**2*(iguess-one)
+            wgts(2,3) =        jguess**2*(three-two*jguess)*
+     &                         iguess**2*(iguess-one)
+            wgts(2,4) =        jguess**2*(three-two*jguess)*
+     &                         iguess*(iguess-one)**2
+            wgts(3,1) =        jguess*(jguess-one)**2*
+     &                  (one - iguess**2*(three-two*iguess))
+            wgts(3,2) =        jguess*(jguess-one)**2*
+     &                         iguess**2*(three-two*iguess)
+            wgts(3,3) =        jguess**2*(jguess-one)*
+     &                         iguess**2*(three-two*iguess)
+            wgts(3,4) =        jguess**2*(jguess-one)*
+     &                  (one - iguess**2*(three-two*iguess))
+            wgts(4,1) =        iguess*(iguess-one)**2*
+     &                         jguess*(jguess-one)**2
+            wgts(4,2) =        iguess**2*(iguess-one)*
+     &                         jguess*(jguess-one)**2
+            wgts(4,3) =        iguess**2*(iguess-one)*
+     &                         jguess**2*(jguess-one)
+            wgts(4,4) =        iguess*(iguess-one)**2*
+     &                         jguess**2*(jguess-one)

+            call store_link_bicub(dst_add, src_add, wgts, nmap)

+          else
+            stop 'Iteration for i,j exceed max iteration count'
+          endif

+        !***
+        !*** search for bilinear failed - use a distance-weighted
+        !*** average instead
+        !***

+        else if (src_add(1) < 0) then

+          src_add = abs(src_add)

+          icount = 0
+          do n=1,4
+            if (grid2_mask(src_add(n))) then
+              icount = icount + 1
+            else
+              src_lats(n) = zero
+            endif
+          end do

+          if (icount > 0) then
+            !*** renormalize weights

+            sum_wgts = sum(src_lats)
+            wgts(1,1) = src_lats(1)/sum_wgts
+            wgts(1,2) = src_lats(2)/sum_wgts
+            wgts(1,3) = src_lats(3)/sum_wgts
+            wgts(1,4) = src_lats(4)/sum_wgts
+            wgts(2:4,:) = zero

+            grid1_frac(dst_add) = one
+            call store_link_bicub(dst_add, src_add, wgts, nmap)
+          endif

+        endif
+      end do grid_loop2

+      endif ! nmap=2

+!-----------------------------------------------------------------------

+      end subroutine remap_bicub
+
+!***********************************************************************
+
+      subroutine grid_search_bicub(src_add, src_lats, src_lons, 
+     &                             plat, plon, src_grid_dims,
+     &                             src_center_lat, src_center_lon,
+     &                             src_bound_box,
+     &                             src_bin_add, dst_bin_add)

+!-----------------------------------------------------------------------
+!
+!     this routine finds the location of the search point plat, plon
+!     in the source grid and returns the corners needed for a bicubic
+!     interpolation.
+!     on success, src_add holds the four (positive) corner addresses.
+!     if no enclosing box is found, src_add returns the NEGATED
+!     addresses of the nearest points and src_lats their normalized
+!     inverse-distance weights, signalling the caller to fall back
+!     to a distance-weighted average.
+!
+!-----------------------------------------------------------------------

+!-----------------------------------------------------------------------
+!
+!     output variables
+!
+!-----------------------------------------------------------------------

+      integer (kind=int_kind), dimension(4), intent(out) ::
+     &        src_add  ! address of each corner point enclosing P

+      real (kind=dbl_kind), dimension(4), intent(out) ::
+     &        src_lats, ! latitudes  of the four corner points
+     &        src_lons  ! longitudes of the four corner points

+!-----------------------------------------------------------------------
+!
+!     input variables
+!
+!-----------------------------------------------------------------------

+      real (kind=dbl_kind), intent(in) ::
+     &        plat,   ! latitude  of the search point
+     &        plon    ! longitude of the search point

+      integer (kind=int_kind), dimension(2), intent(in) ::
+     &        src_grid_dims  ! size of each src grid dimension

+      real (kind=dbl_kind), dimension(:), intent(in) ::
+     &        src_center_lat, ! latitude  of each src grid center 
+     &        src_center_lon  ! longitude of each src grid center

+      real (kind=dbl_kind), dimension(:,:), intent(in) ::
+     &        src_bound_box   ! bounding box for src grid search

+      integer (kind=int_kind), dimension(:,:), intent(in) ::
+     &        src_bin_add,    ! search bins for restricting
+     &        dst_bin_add     ! searches

+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------

+      integer (kind=int_kind) :: n, next_n, srch_add,   ! dummy indices
+     &    nx, ny,            ! dimensions of src grid
+     &    min_add, max_add,  ! addresses for restricting search
+     &    i, j, jp1, ip1, n_add, e_add, ne_add  ! addresses

+      real (kind=dbl_kind) ::  ! vectors for cross-product check
+     &      vec1_lat, vec1_lon,
+     &      vec2_lat, vec2_lon, cross_product, cross_product_last,
+     &      coslat_dst, sinlat_dst, coslon_dst, sinlon_dst,
+     &      dist_min, distance ! for computing dist-weighted avg

+!-----------------------------------------------------------------------
+!
+!     restrict search first using search bins. 
+!
+!-----------------------------------------------------------------------

+      src_add = 0

+      min_add = size(src_center_lat)
+      max_add = 1
+      do n=1,num_srch_bins
+        if (plat >= bin_lats(1,n) .and. plat <= bin_lats(2,n) .and.
+     &      plon >= bin_lons(1,n) .and. plon <= bin_lons(2,n)) then
+          min_add = min(min_add, src_bin_add(1,n))
+          max_add = max(max_add, src_bin_add(2,n))
+        endif
+      end do
+ 
+!-----------------------------------------------------------------------
+!
+!     now perform a more detailed search 
+!
+!-----------------------------------------------------------------------

+      nx = src_grid_dims(1)
+      ny = src_grid_dims(2)

+      srch_loop: do srch_add = min_add,max_add

+        if (plat <= src_bound_box(2,srch_add) .and. 
+     &      plat >= src_bound_box(1,srch_add) .and.
+     &      plon <= src_bound_box(4,srch_add) .and. 
+     &      plon >= src_bound_box(3,srch_add)) then

+          !***
+          !*** we are within bounding box so get really serious
+          !***

+          !*** find N, E and NE points to this grid point
+          !*** (neighbors wrap around in both i and j)

+          j = (srch_add - 1)/nx +1
+          i = srch_add - (j-1)*nx

+          if (i < nx) then
+            ip1 = i + 1
+          else
+            ip1 = 1
+          endif

+          if (j < ny) then
+            jp1 = j+1
+          else
+            jp1 = 1
+          endif

+          n_add = (jp1 - 1)*nx + i
+          e_add = (j - 1)*nx + ip1
+          ne_add = (jp1 - 1)*nx + ip1

+          !***
+          !*** find N, E and NE lat/lon coords and check bounding box
+          !***

+          src_lats(1) = src_center_lat(srch_add)
+          src_lats(2) = src_center_lat(e_add)
+          src_lats(3) = src_center_lat(ne_add)
+          src_lats(4) = src_center_lat(n_add)

+          src_lons(1) = src_center_lon(srch_add)
+          src_lons(2) = src_center_lon(e_add)
+          src_lons(3) = src_center_lon(ne_add)
+          src_lons(4) = src_center_lon(n_add)

+          !***
+          !*** for consistency, we must make sure all lons are in
+          !*** same 2pi interval
+          !***

+          vec1_lon = src_lons(1) - plon
+          if (vec1_lon > pi) then
+            src_lons(1) = src_lons(1) - pi2
+          else if (vec1_lon < -pi) then
+            src_lons(1) = src_lons(1) + pi2
+          endif
+          do n=2,4
+            vec1_lon = src_lons(n) - src_lons(1)
+            if (vec1_lon > pi) then
+              src_lons(n) = src_lons(n) - pi2
+            else if (vec1_lon < -pi) then
+              src_lons(n) = src_lons(n) + pi2
+            endif
+          end do

+          corner_loop: do n=1,4
+            next_n = MOD(n,4) + 1

+            !***
+            !*** here we take the cross product of the vector making 
+            !*** up each box side with the vector formed by the vertex
+            !*** and search point.  if all the cross products are 
+            !*** same sign, the point is contained in the box.
+            !***

+            vec1_lat = src_lats(next_n) - src_lats(n)
+            vec1_lon = src_lons(next_n) - src_lons(n)
+            vec2_lat = plat - src_lats(n)
+            vec2_lon = plon - src_lons(n)

+            !***
+            !*** check for 0,2pi crossings
+            !***

+            if (vec1_lon >  three*pih) then
+              vec1_lon = vec1_lon - pi2
+            else if (vec1_lon < -three*pih) then
+              vec1_lon = vec1_lon + pi2
+            endif
+            if (vec2_lon >  three*pih) then
+              vec2_lon = vec2_lon - pi2
+            else if (vec2_lon < -three*pih) then
+              vec2_lon = vec2_lon + pi2
+            endif

+            cross_product = vec1_lon*vec2_lat - vec2_lon*vec1_lat

+            !***
+            !*** if cross product is less than zero, this cell
+            !*** doesn't work
+            !***

+            if (n==1) cross_product_last = cross_product
+            if (cross_product*cross_product_last < zero) then
+              exit corner_loop
+            else
+              cross_product_last = cross_product
+            endif

+          end do corner_loop

+          !***
+          !*** if cross products all positive, we found the location
+          !*** (n > 4 means corner_loop completed without an exit)
+          !***

+          if (n > 4) then
+            src_add(1) = srch_add
+            src_add(2) = e_add
+            src_add(3) = ne_add
+            src_add(4) = n_add

+            return
+          endif

+          !***
+          !*** otherwise move on to next cell
+          !***

+        endif !bounding box check
+      end do srch_loop

+      !***
+      !*** if no cell found, point is likely either in a box that
+      !*** straddles either pole or is outside the grid.  fall back
+      !*** to a distance-weighted average of the four closest
+      !*** points.  go ahead and compute weights here, but store
+      !*** in src_lats and return -add to prevent the parent
+      !*** routine from computing bilinear weights
+      !***

+      coslat_dst = cos(plat)
+      sinlat_dst = sin(plat)
+      coslon_dst = cos(plon)
+      sinlon_dst = sin(plon)

+      !*** insertion sort of the four smallest great-circle
+      !*** distances within the binned address range

+      dist_min = bignum
+      src_lats = bignum
+      do srch_add = min_add,max_add
+        distance = acos(coslat_dst*cos(src_center_lat(srch_add))*
+     &                 (coslon_dst*cos(src_center_lon(srch_add)) +
+     &                  sinlon_dst*sin(src_center_lon(srch_add)))+
+     &                  sinlat_dst*sin(src_center_lat(srch_add)))

+        if (distance < dist_min) then
+          sort_loop: do n=1,4
+            if (distance < src_lats(n)) then
+              do i=4,n+1,-1
+                src_add (i) = src_add (i-1)
+                src_lats(i) = src_lats(i-1)
+              end do
+              src_add (n) = -srch_add
+              src_lats(n) = distance
+              dist_min = src_lats(4)
+              exit sort_loop
+            endif
+          end do sort_loop
+        endif
+      end do

+      !*** tiny guards against division by zero when the search
+      !*** point coincides with a grid center
+      src_lons = one/(src_lats + tiny)
+      distance = sum(src_lons)
+      src_lats = src_lons/distance

+!-----------------------------------------------------------------------

+      end subroutine grid_search_bicub 
+
+!***********************************************************************
+
+      subroutine store_link_bicub(dst_add, src_add, weights, nmap)

+!-----------------------------------------------------------------------
+!
+!     adds the four links (one per source corner) belonging to a
+!     single destination point to the address and weight tables of
+!     map nmap, growing those tables first whenever the new links
+!     would not fit.
+!
+!-----------------------------------------------------------------------

+!-----------------------------------------------------------------------
+!
+!     input variables
+!
+!-----------------------------------------------------------------------

+      integer (kind=int_kind), intent(in) ::
+     &        dst_add,  ! address on destination grid
+     &        nmap      ! identifies which direction for mapping

+      integer (kind=int_kind), dimension(4), intent(in) ::
+     &        src_add   ! addresses on source grid

+      real (kind=dbl_kind), dimension(4,4), intent(in) ::
+     &        weights ! array of remapping weights for these links

+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------

+      integer (kind=int_kind) :: link, ! loop index over the 4 links
+     &       first_link                ! link count before this call

+!-----------------------------------------------------------------------
+!
+!     bump the link counter for the requested map, resize the remap
+!     arrays if needed, then copy addresses and weights into the
+!     table slots first_link+1 .. first_link+4.
+!
+!-----------------------------------------------------------------------

+      select case (nmap)
+      case(1)

+        first_link     = num_links_map1
+        num_links_map1 = first_link + 4

+        if (num_links_map1 > max_links_map1) 
+     &     call resize_remap_vars(1,resize_increment)

+        do link=1,4
+          grid1_add_map1(first_link+link) = src_add(link)
+          grid2_add_map1(first_link+link) = dst_add
+          wts_map1    (:,first_link+link) = weights(:,link)
+        end do

+      case(2)

+        first_link     = num_links_map2
+        num_links_map2 = first_link + 4

+        if (num_links_map2 > max_links_map2) 
+     &     call resize_remap_vars(2,resize_increment)

+        do link=1,4
+          grid1_add_map2(first_link+link) = dst_add
+          grid2_add_map2(first_link+link) = src_add(link)
+          wts_map2    (:,first_link+link) = weights(:,link)
+        end do

+      end select

+!-----------------------------------------------------------------------

+      end subroutine store_link_bicub
+
+!***********************************************************************
+
+      end module remap_bicubic
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

+ 781 - 0
interpolation/scrip_sources/remap_bilinear.f

@@ -0,0 +1,781 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+!     this module contains necessary routines for performing an 
+!     bilinear interpolation.
+!
+!-----------------------------------------------------------------------
+!
+!     CVS:$Id: remap_bilinear.f,v 1.6 2001/08/22 18:20:40 pwjones Exp $
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!***********************************************************************
+
+      module remap_bilinear
+
+!-----------------------------------------------------------------------
+
+      use kinds_mod     ! defines common data types
+      use constants     ! defines common constants
+      use grids         ! module containing grid info
+      use remap_vars    ! module containing remap info
+
+      implicit none
+
+!-----------------------------------------------------------------------
+
+!     parameters controlling the Newton iteration used to invert the
+!     bilinear mapping for each destination point (see remap_bilin)
+
+      integer (kind=int_kind), parameter ::
+     &    max_iter = 100   ! max iteration count for i,j iteration
+
+      real (kind=dbl_kind), parameter ::
+     &     converge = 1.e-10_dbl_kind  ! convergence criterion
+
+!***********************************************************************
+
+      contains
+
+!***********************************************************************
+
+      subroutine remap_bilin
+
+!-----------------------------------------------------------------------
+!
+!     this routine computes the weights for a bilinear interpolation.
+!
+!     for each masked destination point the four enclosing source
+!     points are located (grid_search_bilin) and the local (i,j)
+!     cell coordinates are found by Newton iteration; the four
+!     bilinear weights follow from (i,j).  when the search returns
+!     negative addresses (no enclosing cell found, typically near a
+!     pole), a normalized distance-weighted average of the four
+!     nearest unmasked source points is stored instead.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind) :: n,icount,
+     &     dst_add,        ! destination address
+     &     iter,           ! iteration counter
+     &     nmap            ! index of current map being computed
+
+      integer (kind=int_kind), dimension(4) :: 
+     &     src_add         ! address for the four source points
+
+      real (kind=dbl_kind), dimension(4)  ::
+     &     src_lats,       ! latitudes  of four bilinear corners
+     &     src_lons,       ! longitudes of four bilinear corners
+     &     wgts            ! bilinear weights for four corners
+
+      real (kind=dbl_kind) ::
+     &     plat, plon,       ! lat/lon coords of destination point
+     &     iguess, jguess,   ! current guess for bilinear coordinate
+     &     thguess, phguess, ! current guess for lat/lon (unused here)
+     &     deli, delj,       ! corrections to i,j
+     &     dth1, dth2, dth3, ! some latitude  differences
+     &     dph1, dph2, dph3, ! some longitude differences
+     &     dthp, dphp,       ! difference between point and sw corner
+     &     mat1, mat2, mat3, mat4, ! matrix elements
+     &     determinant,      ! matrix determinant
+     &     sum_wgts          ! sum of weights for normalization
+
+!-----------------------------------------------------------------------
+!
+!     compute mappings from grid1 to grid2
+!
+!-----------------------------------------------------------------------
+
+      nmap = 1
+      if (grid1_rank /= 2) then
+        stop 'Can not do bilinear interpolation when grid_rank /= 2'
+      endif
+
+      !***
+      !*** loop over destination grid 
+      !***
+
+      grid_loop1: do dst_add = 1, grid2_size
+
+        if (.not. grid2_mask(dst_add)) cycle grid_loop1
+
+        plat = grid2_center_lat(dst_add)
+        plon = grid2_center_lon(dst_add)
+
+        !***
+        !*** find nearest square of grid points on source grid
+        !***
+
+        call grid_search_bilin(src_add, src_lats, src_lons, 
+     &                         plat, plon, grid1_dims,
+     &                         grid1_center_lat, grid1_center_lon,
+     &                         grid1_bound_box, bin_addr1, bin_addr2)
+
+        !***
+        !*** check to see if points are land points
+        !*** (any masked corner invalidates the whole cell)
+        !***
+
+        if (src_add(1) > 0) then
+          do n=1,4
+            if (.not. grid1_mask(src_add(n))) src_add(1) = 0
+          end do
+        endif
+
+        !***
+        !*** if point found, find local i,j coordinates for weights
+        !***
+
+        if (src_add(1) > 0) then
+
+          grid2_frac(dst_add) = one
+
+          !***
+          !*** iterate to find i,j for bilinear approximation
+          !***
+
+          dth1 = src_lats(2) - src_lats(1)
+          dth2 = src_lats(4) - src_lats(1)
+          dth3 = src_lats(3) - src_lats(2) - dth2
+
+          dph1 = src_lons(2) - src_lons(1)
+          dph2 = src_lons(4) - src_lons(1)
+          dph3 = src_lons(3) - src_lons(2)
+
+          !*** keep longitude differences within one 2*pi branch
+
+          if (dph1 >  three*pih) dph1 = dph1 - pi2
+          if (dph2 >  three*pih) dph2 = dph2 - pi2
+          if (dph3 >  three*pih) dph3 = dph3 - pi2
+          if (dph1 < -three*pih) dph1 = dph1 + pi2
+          if (dph2 < -three*pih) dph2 = dph2 + pi2
+          if (dph3 < -three*pih) dph3 = dph3 + pi2
+
+          dph3 = dph3 - dph2
+
+          !*** NOTE(review): this loop starts the Newton iteration at
+          !*** (half,half) while the grid2->grid1 loop below starts at
+          !*** (zero,zero), and the longitude wrap threshold here is
+          !*** 3*pi/2 versus pi below -- the asymmetry looks
+          !*** unintentional; confirm before changing either.
+
+          iguess = half
+          jguess = half
+
+          iter_loop1: do iter=1,max_iter
+
+            dthp = plat - src_lats(1) - dth1*iguess -
+     &                    dth2*jguess - dth3*iguess*jguess
+            dphp = plon - src_lons(1)
+
+            if (dphp >  three*pih) dphp = dphp - pi2
+            if (dphp < -three*pih) dphp = dphp + pi2
+
+            dphp = dphp - dph1*iguess - dph2*jguess - 
+     &                    dph3*iguess*jguess
+
+            !*** 2x2 jacobian of (lat,lon) w.r.t. (i,j)
+
+            mat1 = dth1 + dth3*jguess
+            mat2 = dth2 + dth3*iguess
+            mat3 = dph1 + dph3*jguess
+            mat4 = dph2 + dph3*iguess
+
+            !*** NOTE(review): no guard against a singular (zero
+            !*** determinant) jacobian; a degenerate cell would yield
+            !*** Inf/NaN corrections here.
+
+            determinant = mat1*mat4 - mat2*mat3
+
+            deli = (dthp*mat4 - mat2*dphp)/determinant
+            delj = (mat1*dphp - dthp*mat3)/determinant
+
+            if (abs(deli) < converge .and. 
+     &          abs(delj) < converge) exit iter_loop1
+
+            iguess = iguess + deli
+            jguess = jguess + delj
+
+          end do iter_loop1
+
+          if (iter <= max_iter) then
+
+            !***
+            !*** successfully found i,j - compute weights
+            !***
+
+            wgts(1) = (one-iguess)*(one-jguess)
+            wgts(2) = iguess*(one-jguess)
+            wgts(3) = iguess*jguess
+            wgts(4) = (one-iguess)*jguess
+
+            call store_link_bilin(dst_add, src_add, wgts, nmap)
+
+          else
+            !*** NOTE(review): the labels below say 'Dest grid' but
+            !*** the values printed are the source-grid corner data
+            print *,'Point coords: ',plat,plon
+            print *,'Dest grid lats: ',src_lats
+            print *,'Dest grid lons: ',src_lons
+            print *,'Dest grid addresses: ',src_add
+            print *,'Current i,j : ',iguess, jguess
+            stop 'Iteration for i,j exceed max iteration count'
+          endif
+
+        !***
+        !*** search for bilinear failed - use a distance-weighted
+        !*** average instead (this is typically near the pole)
+        !*** (src_lats holds inverse-distance weights on entry here)
+        !***
+
+        else if (src_add(1) < 0) then
+
+          src_add = abs(src_add)
+          icount = 0
+          do n=1,4
+            if (grid1_mask(src_add(n))) then
+              icount = icount + 1
+            else
+              src_lats(n) = zero
+            endif
+          end do
+
+          if (icount > 0) then
+            !*** renormalize weights over the unmasked points
+
+            sum_wgts = sum(src_lats)
+            wgts(1) = src_lats(1)/sum_wgts
+            wgts(2) = src_lats(2)/sum_wgts
+            wgts(3) = src_lats(3)/sum_wgts
+            wgts(4) = src_lats(4)/sum_wgts
+
+            grid2_frac(dst_add) = one
+            call store_link_bilin(dst_add, src_add, wgts, nmap)
+          endif
+
+        endif
+      end do grid_loop1
+
+!-----------------------------------------------------------------------
+!
+!     compute mappings from grid2 to grid1 if necessary
+!
+!-----------------------------------------------------------------------
+
+      if (num_maps > 1) then
+
+      nmap = 2
+      if (grid2_rank /= 2) then
+        stop 'Can not do bilinear interpolation when grid_rank /= 2'
+      endif
+
+      !***
+      !*** loop over destination grid 
+      !***
+
+      grid_loop2: do dst_add = 1, grid1_size
+
+        if (.not. grid1_mask(dst_add)) cycle grid_loop2
+
+        plat = grid1_center_lat(dst_add)
+        plon = grid1_center_lon(dst_add)
+
+        !***
+        !*** find nearest square of grid points on source grid
+        !***
+
+        call grid_search_bilin(src_add, src_lats, src_lons, 
+     &                         plat, plon, grid2_dims,
+     &                         grid2_center_lat, grid2_center_lon,
+     &                         grid2_bound_box, bin_addr2, bin_addr1)
+
+        !***
+        !*** check to see if points are land points
+        !***
+
+        if (src_add(1) > 0) then
+          do n=1,4
+            if (.not. grid2_mask(src_add(n))) src_add(1) = 0
+          end do
+        endif
+
+        !***
+        !*** if point found, find i,j coordinates for weights
+        !***
+
+        if (src_add(1) > 0) then
+
+          grid1_frac(dst_add) = one
+
+          !***
+          !*** iterate to find i,j for bilinear approximation
+          !***
+
+          dth1 = src_lats(2) - src_lats(1)
+          dth2 = src_lats(4) - src_lats(1)
+          dth3 = src_lats(3) - src_lats(2) - dth2
+
+          dph1 = src_lons(2) - src_lons(1)
+          dph2 = src_lons(4) - src_lons(1)
+          dph3 = src_lons(3) - src_lons(2)
+
+          if (dph1 >  pi) dph1 = dph1 - pi2
+          if (dph2 >  pi) dph2 = dph2 - pi2
+          if (dph3 >  pi) dph3 = dph3 - pi2
+          if (dph1 < -pi) dph1 = dph1 + pi2
+          if (dph2 < -pi) dph2 = dph2 + pi2
+          if (dph3 < -pi) dph3 = dph3 + pi2
+
+          dph3 = dph3 - dph2
+
+          iguess = zero
+          jguess = zero
+
+          iter_loop2: do iter=1,max_iter
+
+            dthp = plat - src_lats(1) - dth1*iguess -
+     &                    dth2*jguess - dth3*iguess*jguess
+            dphp = plon - src_lons(1)
+
+            if (dphp >  pi) dphp = dphp - pi2
+            if (dphp < -pi) dphp = dphp + pi2
+
+            dphp = dphp - dph1*iguess - dph2*jguess - 
+     &                    dph3*iguess*jguess
+
+            mat1 = dth1 + dth3*jguess
+            mat2 = dth2 + dth3*iguess
+            mat3 = dph1 + dph3*jguess
+            mat4 = dph2 + dph3*iguess
+
+            determinant = mat1*mat4 - mat2*mat3
+
+            deli = (dthp*mat4 - mat2*dphp)/determinant
+            delj = (mat1*dphp - dthp*mat3)/determinant
+
+            if (abs(deli) < converge .and. 
+     &          abs(delj) < converge) exit iter_loop2
+
+            iguess = iguess + deli
+            jguess = jguess + delj
+
+          end do iter_loop2
+
+          if (iter <= max_iter) then
+
+            !***
+            !*** successfully found i,j - compute weights
+            !***
+
+            wgts(1) = (one-iguess)*(one-jguess)
+            wgts(2) = iguess*(one-jguess)
+            wgts(3) = iguess*jguess
+            wgts(4) = (one-iguess)*jguess
+
+            call store_link_bilin(dst_add, src_add, wgts, nmap)
+
+          else
+            !*** NOTE(review): as in the loop above, these labels say
+            !*** 'Dest grid' but the printed values are source-grid
+            !*** corner data
+            print *,'Point coords: ',plat,plon
+            print *,'Dest grid lats: ',src_lats
+            print *,'Dest grid lons: ',src_lons
+            print *,'Dest grid addresses: ',src_add
+            print *,'Current i,j : ',iguess, jguess
+            stop 'Iteration for i,j exceed max iteration count'
+          endif
+
+        !***
+        !*** search for bilinear failed - use a distance-weighted
+        !*** average instead
+        !***
+
+        else if (src_add(1) < 0) then
+
+          src_add = abs(src_add)
+          icount = 0
+          do n=1,4
+            if (grid2_mask(src_add(n))) then
+              icount = icount + 1
+            else
+              src_lats(n) = zero
+            endif
+          end do
+
+          if (icount > 0) then
+            !*** renormalize weights
+
+            sum_wgts = sum(src_lats)
+            wgts(1) = src_lats(1)/sum_wgts
+            wgts(2) = src_lats(2)/sum_wgts
+            wgts(3) = src_lats(3)/sum_wgts
+            wgts(4) = src_lats(4)/sum_wgts
+
+            grid1_frac(dst_add) = one
+            call store_link_bilin(dst_add, src_add, wgts, nmap)
+          endif
+
+        endif
+      end do grid_loop2
+
+      endif ! nmap=2
+
+!-----------------------------------------------------------------------
+
+      end subroutine remap_bilin
+
+!***********************************************************************
+
+      subroutine grid_search_bilin(src_add, src_lats, src_lons, 
+     &                             plat, plon, src_grid_dims,
+     &                             src_center_lat, src_center_lon,
+     &                             src_grid_bound_box,
+     &                             src_bin_add, dst_bin_add)
+
+!-----------------------------------------------------------------------
+!
+!     this routine finds the location of the search point plat, plon
+!     in the source grid and returns the corners needed for a bilinear
+!     interpolation.
+!
+!     on success, src_add returns the four (positive) corner
+!     addresses and src_lats/src_lons their coordinates.  if no
+!     enclosing cell is found, src_add returns the NEGATED addresses
+!     of the four nearest source points and src_lats their normalized
+!     inverse-distance weights, which tells the caller to use a
+!     distance-weighted average instead of bilinear weights.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     output variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), dimension(4), intent(out) ::
+     &        src_add  ! address of each corner point enclosing P
+
+      real (kind=dbl_kind), dimension(4), intent(out) ::
+     &        src_lats, ! latitudes  of the four corner points
+     &        src_lons  ! longitudes of the four corner points
+
+!-----------------------------------------------------------------------
+!
+!     input variables
+!
+!-----------------------------------------------------------------------
+
+      real (kind=dbl_kind), intent(in) ::
+     &        plat,   ! latitude  of the search point
+     &        plon    ! longitude of the search point
+
+      integer (kind=int_kind), dimension(2), intent(in) ::
+     &        src_grid_dims  ! size of each src grid dimension
+
+      real (kind=dbl_kind), dimension(:), intent(in) ::
+     &        src_center_lat, ! latitude  of each src grid center 
+     &        src_center_lon  ! longitude of each src grid center
+
+      real (kind=dbl_kind), dimension(:,:), intent(in) ::
+     &        src_grid_bound_box ! bound box for source grid
+
+      integer (kind=int_kind), dimension(:,:), intent(in) ::
+     &        src_bin_add,    ! latitude bins for restricting
+     &        dst_bin_add     ! searches (dst_bin_add unused here)
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind) :: n, next_n, srch_add,   ! dummy indices
+     &    nx, ny,            ! dimensions of src grid
+     &    min_add, max_add,  ! addresses for restricting search
+     &    i, j, jp1, ip1, n_add, e_add, ne_add  ! addresses
+
+      real (kind=dbl_kind) ::  ! vectors for cross-product check
+     &      vec1_lat, vec1_lon,
+     &      vec2_lat, vec2_lon, cross_product, cross_product_last,
+     &      coslat_dst, sinlat_dst, coslon_dst, sinlon_dst,
+     &      dist_min, distance ! for computing dist-weighted avg
+
+!-----------------------------------------------------------------------
+!
+!     restrict search first using bins
+!     (take the union of the address ranges of every latitude bin
+!     containing the search point)
+!
+!-----------------------------------------------------------------------
+
+      src_add = 0
+
+      min_add = size(src_center_lat)
+      max_add = 1
+      do n=1,num_srch_bins
+        if (plat >= bin_lats(1,n) .and. plat <= bin_lats(2,n) .and.
+     &      plon >= bin_lons(1,n) .and. plon <= bin_lons(2,n)) then
+          min_add = min(min_add, src_bin_add(1,n))
+          max_add = max(max_add, src_bin_add(2,n))
+        endif
+      end do
+ 
+!-----------------------------------------------------------------------
+!
+!     now perform a more detailed search 
+!
+!-----------------------------------------------------------------------
+
+      nx = src_grid_dims(1)
+      ny = src_grid_dims(2)
+
+      srch_loop: do srch_add = min_add,max_add
+
+        !*** first check bounding box
+
+        if (plat <= src_grid_bound_box(2,srch_add) .and. 
+     &      plat >= src_grid_bound_box(1,srch_add) .and.
+     &      plon <= src_grid_bound_box(4,srch_add) .and. 
+     &      plon >= src_grid_bound_box(3,srch_add)) then
+
+          !***
+          !*** we are within bounding box so get really serious
+          !***
+
+          !*** determine neighbor addresses
+
+          j = (srch_add - 1)/nx +1
+          i = srch_add - (j-1)*nx
+
+          !*** neighbors wrap cyclically at the grid edges; this
+          !*** assumes a periodic logically-rectangular grid --
+          !*** TODO confirm for limited-area grids
+
+          if (i < nx) then
+            ip1 = i + 1
+          else
+            ip1 = 1
+          endif
+
+          if (j < ny) then
+            jp1 = j+1
+          else
+            jp1 = 1
+          endif
+
+          n_add = (jp1 - 1)*nx + i
+          e_add = (j - 1)*nx + ip1
+          ne_add = (jp1 - 1)*nx + ip1
+
+          src_lats(1) = src_center_lat(srch_add)
+          src_lats(2) = src_center_lat(e_add)
+          src_lats(3) = src_center_lat(ne_add)
+          src_lats(4) = src_center_lat(n_add)
+
+          src_lons(1) = src_center_lon(srch_add)
+          src_lons(2) = src_center_lon(e_add)
+          src_lons(3) = src_center_lon(ne_add)
+          src_lons(4) = src_center_lon(n_add)
+
+          !***
+          !*** for consistency, we must make sure all lons are in
+          !*** same 2pi interval
+          !***
+
+          vec1_lon = src_lons(1) - plon
+          if (vec1_lon >  pi) then
+            src_lons(1) = src_lons(1) - pi2
+          else if (vec1_lon < -pi) then
+            src_lons(1) = src_lons(1) + pi2
+          endif
+          do n=2,4
+            vec1_lon = src_lons(n) - src_lons(1)
+            if (vec1_lon >  pi) then
+              src_lons(n) = src_lons(n) - pi2
+            else if (vec1_lon < -pi) then
+              src_lons(n) = src_lons(n) + pi2
+            endif
+          end do
+
+          corner_loop: do n=1,4
+            next_n = MOD(n,4) + 1
+
+            !***
+            !*** here we take the cross product of the vector making 
+            !*** up each box side with the vector formed by the vertex
+            !*** and search point.  if all the cross products are 
+            !*** positive, the point is contained in the box.
+            !***
+
+            vec1_lat = src_lats(next_n) - src_lats(n)
+            vec1_lon = src_lons(next_n) - src_lons(n)
+            vec2_lat = plat - src_lats(n)
+            vec2_lon = plon - src_lons(n)
+
+            !***
+            !*** check for 0,2pi crossings
+            !***
+
+            if (vec1_lon >  three*pih) then
+              vec1_lon = vec1_lon - pi2
+            else if (vec1_lon < -three*pih) then
+              vec1_lon = vec1_lon + pi2
+            endif
+            if (vec2_lon >  three*pih) then
+              vec2_lon = vec2_lon - pi2
+            else if (vec2_lon < -three*pih) then
+              vec2_lon = vec2_lon + pi2
+            endif
+
+            cross_product = vec1_lon*vec2_lat - vec2_lon*vec1_lat
+
+            !***
+            !*** if cross product is less than zero, this cell
+            !*** doesn't work
+            !*** (the sign of the first cross product fixes the
+            !*** orientation; a sign change on any later side means
+            !*** the point lies outside this cell)
+            !***
+
+            if (n == 1) cross_product_last = cross_product
+            if (cross_product*cross_product_last < zero) 
+     &          exit corner_loop
+            cross_product_last = cross_product
+
+          end do corner_loop
+
+          !***
+          !*** if cross products all same sign, we found the location
+          !*** (n > 4 means corner_loop completed without the early
+          !*** exit above)
+          !***
+
+          if (n > 4) then
+            src_add(1) = srch_add
+            src_add(2) = e_add
+            src_add(3) = ne_add
+            src_add(4) = n_add
+
+            return
+          endif
+
+          !***
+          !*** otherwise move on to next cell
+          !***
+
+        endif !bounding box check
+      end do srch_loop
+
+      !***
+      !*** if no cell found, point is likely either in a box that
+      !*** straddles either pole or is outside the grid.  fall back
+      !*** to a distance-weighted average of the four closest
+      !*** points.  go ahead and compute weights here, but store
+      !*** in src_lats and return -add to prevent the parent
+      !*** routine from computing bilinear weights
+      !***
+
+      !print *,'Could not find location for ',plat,plon
+      !print *,'Using nearest-neighbor average for this point'
+
+      coslat_dst = cos(plat)
+      sinlat_dst = sin(plat)
+      coslon_dst = cos(plon)
+      sinlon_dst = sin(plon)
+
+      !*** insertion sort of the four smallest great-circle
+      !*** distances over the binned address range
+
+      dist_min = bignum
+      src_lats = bignum
+      do srch_add = min_add,max_add
+        !*** NOTE(review): roundoff can push the acos argument just
+        !*** outside [-1,1] for near-coincident points -- confirm
+        !*** this cannot yield a NaN distance here
+        distance = acos(coslat_dst*cos(src_center_lat(srch_add))*
+     &                 (coslon_dst*cos(src_center_lon(srch_add)) +
+     &                  sinlon_dst*sin(src_center_lon(srch_add)))+
+     &                  sinlat_dst*sin(src_center_lat(srch_add)))
+
+        if (distance < dist_min) then
+          sort_loop: do n=1,4
+            if (distance < src_lats(n)) then
+              do i=4,n+1,-1
+                src_add (i) = src_add (i-1)
+                src_lats(i) = src_lats(i-1)
+              end do
+              src_add (n) = -srch_add
+              src_lats(n) = distance
+              dist_min = src_lats(4)
+              exit sort_loop
+            endif
+          end do sort_loop
+        endif
+      end do
+
+      !*** convert the four shortest distances into normalized
+      !*** inverse-distance weights, returned via src_lats
+
+      src_lons = one/(src_lats + tiny)
+      distance = sum(src_lons)
+      src_lats = src_lons/distance
+
+!-----------------------------------------------------------------------
+
+      end subroutine grid_search_bilin 
+
+!***********************************************************************
+
+      subroutine store_link_bilin(dst_add, src_add, weights, nmap)
+
+!-----------------------------------------------------------------------
+!
+!     stores the four source addresses and bilinear weights belonging
+!     to one destination point in the remap address/weight arrays,
+!     growing those arrays first whenever the new links would
+!     overflow the space currently allocated.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     input variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), intent(in) ::
+     &        dst_add,  ! address on destination grid
+     &        nmap      ! identifies which direction for mapping
+
+      integer (kind=int_kind), dimension(4), intent(in) ::
+     &        src_add   ! addresses on source grid
+
+      real (kind=dbl_kind), dimension(4), intent(in) ::
+     &        weights ! array of remapping weights for these links
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind) ::
+     &       first,     ! index of the first new link
+     &       last       ! index of the last new link
+
+!-----------------------------------------------------------------------
+!
+!     reserve room for four new links, resizing the remap arrays if
+!     required to accommodate them, then store the links with array
+!     section assignments.
+!
+!-----------------------------------------------------------------------
+
+      select case (nmap)
+      case(1)
+
+        first          = num_links_map1 + 1
+        last           = num_links_map1 + 4
+        num_links_map1 = last
+
+        if (last > max_links_map1)
+     &     call resize_remap_vars(1,resize_increment)
+
+        grid1_add_map1(first:last) = src_add
+        grid2_add_map1(first:last) = dst_add
+        wts_map1    (1,first:last) = weights
+
+      case(2)
+
+        first          = num_links_map2 + 1
+        last           = num_links_map2 + 4
+        num_links_map2 = last
+
+        if (last > max_links_map2)
+     &     call resize_remap_vars(2,resize_increment)
+
+        grid1_add_map2(first:last) = dst_add
+        grid2_add_map2(first:last) = src_add
+        wts_map2    (1,first:last) = weights
+
+      end select
+
+!-----------------------------------------------------------------------
+
+      end subroutine store_link_bilin
+
+!***********************************************************************
+
+      end module remap_bilinear
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

+ 2197 - 0
interpolation/scrip_sources/remap_conserv.f

@@ -0,0 +1,2197 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+!     this module contains necessary routines for computing addresses
+!     and weights for a conservative interpolation  between any two 
+!     grids on a sphere.  the weights are computed by performing line 
+!     integrals around all overlap regions of the two grids.  see 
+!     Dukowicz and Kodis, SIAM J. Sci. Stat. Comput. 8, 305 (1987) and
+!     Jones, P.W. Monthly Weather Review (submitted).
+!
+!-----------------------------------------------------------------------
+!
+!     CVS:$Id: remap_conserv.f,v 1.10 2001/08/21 21:05:13 pwjones Exp $
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!***********************************************************************
+
+      module remap_conservative
+
+!-----------------------------------------------------------------------
+
+      use kinds_mod    ! defines common data types
+      use constants    ! defines common constants
+      use timers       ! module for timing
+      use grids        ! module containing grid information
+      use remap_vars   ! module containing remap information
+
+      implicit none
+
+!-----------------------------------------------------------------------
+!
+!     module variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), save :: 
+     &        num_srch_cells ! num cells in restricted search arrays
+
+      integer (kind=int_kind), dimension(:), allocatable, save :: 
+     &        srch_add       ! global address of cells in srch arrays
+
+      real (kind=dbl_kind), parameter :: 
+     &     north_thresh = 1.45_dbl_kind, ! threshold for coord transf.
+     &     south_thresh =-2.00_dbl_kind  ! threshold for coord transf.
+
+      real (kind=dbl_kind), dimension(:,:), allocatable, save ::
+     &     srch_corner_lat,  ! lat of each corner of srch cells
+     &     srch_corner_lon   ! lon of each corner of srch cells
+
+!***********************************************************************
+
+      contains
+
+!***********************************************************************
+
+      subroutine remap_conserv
+
+!-----------------------------------------------------------------------
+!
+!     this routine traces the perimeters of every grid cell on each
+!     grid checking for intersections with the other grid and computing
+!     line integrals for each subsegment.  links and weights are
+!     accumulated via store_link_cnsrv; cell areas, fractions and
+!     centroids are updated as each subsegment is integrated.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), parameter :: 
+     &        max_subseg = 10000 ! max number of subsegments per segment
+                                 ! to prevent infinite loop
+
+      integer (kind=int_kind) :: 
+     &        grid1_add,  ! current linear address for grid1 cell
+     &        grid2_add,  ! current linear address for grid2 cell
+     &        min_add,    ! addresses for restricting search of
+     &        max_add,    !   destination grid
+     &        n, nwgt,    ! generic counters
+     &        corner,     ! corner of cell that segment starts from
+     &        next_corn,  ! corner of cell that segment ends on
+     &        num_subseg  ! number of subsegments 
+
+      logical (kind=log_kind) :: 
+     &        lcoinc,  ! flag for coincident segments
+     &        lrevers, ! flag for reversing direction of segment
+     &        lbegin   ! flag for first integration of a segment
+
+      logical (kind=log_kind), dimension(:), allocatable ::
+     &        srch_mask   ! mask for restricting searches
+
+      real (kind=dbl_kind) ::
+     &     intrsct_lat, intrsct_lon,       ! lat/lon of next intersect
+     &     beglat, endlat, beglon, endlon, ! endpoints of current seg.
+     &     norm_factor                     ! factor for normalizing wts
+
+      real (kind=dbl_kind), dimension(:), allocatable ::
+     &       grid2_centroid_lat, grid2_centroid_lon, ! centroid coords
+     &       grid1_centroid_lat, grid1_centroid_lon  ! on each grid
+
+      real (kind=dbl_kind), dimension(2) :: begseg ! begin lat/lon for
+                                                   ! full segment
+
+      real (kind=dbl_kind), dimension(6) :: weights ! local wgt array
+
+!-----------------------------------------------------------------------
+!
+!     initialize centroid arrays
+!
+!-----------------------------------------------------------------------
+
+      allocate( grid1_centroid_lat(grid1_size),
+     &          grid1_centroid_lon(grid1_size),
+     &          grid2_centroid_lat(grid2_size),
+     &          grid2_centroid_lon(grid2_size))
+
+      grid1_centroid_lat = zero
+      grid1_centroid_lon = zero
+      grid2_centroid_lat = zero
+      grid2_centroid_lon = zero
+
+!-----------------------------------------------------------------------
+!
+!     integrate around each cell on grid1
+!
+!-----------------------------------------------------------------------
+
+      allocate(srch_mask(grid2_size))
+
+      print *,'grid1 sweep '
+      do grid1_add = 1,grid1_size
+
+        !***
+        !*** restrict searches first using search bins
+        !***
+
+        call timer_start(1)
+        min_add = grid2_size
+        max_add = 1
+        do n=1,num_srch_bins
+          if (grid1_add >= bin_addr1(1,n) .and.
+     &        grid1_add <= bin_addr1(2,n)) then
+            min_add = min(min_add, bin_addr2(1,n))
+            max_add = max(max_add, bin_addr2(2,n))
+          endif
+        end do
+
+        !***
+        !*** further restrict searches using bounding boxes
+        !***
+
+        num_srch_cells = 0
+        do grid2_add = min_add,max_add
+          srch_mask(grid2_add) = (grid2_bound_box(1,grid2_add) <= 
+     &                            grid1_bound_box(2,grid1_add)) .and.
+     &                           (grid2_bound_box(2,grid2_add) >= 
+     &                            grid1_bound_box(1,grid1_add)) .and.
+     &                           (grid2_bound_box(3,grid2_add) <= 
+     &                            grid1_bound_box(4,grid1_add)) .and.
+     &                           (grid2_bound_box(4,grid2_add) >= 
+     &                            grid1_bound_box(3,grid1_add))
+
+          if (srch_mask(grid2_add)) num_srch_cells = num_srch_cells+1
+        end do
+
+        !***
+        !*** create search arrays
+        !***
+
+        allocate(srch_add(num_srch_cells),
+     &           srch_corner_lat(grid2_corners,num_srch_cells),
+     &           srch_corner_lon(grid2_corners,num_srch_cells))
+
+        n = 0
+        gather1: do grid2_add = min_add,max_add
+          if (srch_mask(grid2_add)) then
+            n = n+1
+            srch_add(n) = grid2_add
+            srch_corner_lat(:,n) = grid2_corner_lat(:,grid2_add)
+            srch_corner_lon(:,n) = grid2_corner_lon(:,grid2_add)
+          endif
+        end do gather1
+        call timer_stop(1)
+
+        !***
+        !*** integrate around this cell
+        !***
+
+        do corner = 1,grid1_corners
+          next_corn = mod(corner,grid1_corners) + 1
+
+          !***
+          !*** define endpoints of the current segment
+          !***
+
+          beglat = grid1_corner_lat(corner,grid1_add)
+          beglon = grid1_corner_lon(corner,grid1_add)
+          endlat = grid1_corner_lat(next_corn,grid1_add)
+          endlon = grid1_corner_lon(next_corn,grid1_add)
+          lrevers = .false.
+
+          !***
+          !*** to ensure exact path taken during both
+          !*** sweeps, always integrate segments in the same 
+          !*** direction (SW to NE).
+          !***
+
+          if ((endlat < beglat) .or.
+     &        (endlat == beglat .and. endlon < beglon)) then 
+            beglat = grid1_corner_lat(next_corn,grid1_add)
+            beglon = grid1_corner_lon(next_corn,grid1_add)
+            endlat = grid1_corner_lat(corner,grid1_add)
+            endlon = grid1_corner_lon(corner,grid1_add)
+            lrevers = .true.
+          endif
+
+          begseg(1) = beglat
+          begseg(2) = beglon
+          lbegin = .true.
+          num_subseg = 0
+
+          !***
+          !*** if this is a constant-longitude segment, skip the rest 
+          !*** since the line integral contribution will be zero.
+          !***
+
+          if (endlon /= beglon) then
+
+          !***
+          !*** integrate along this segment, detecting intersections 
+          !*** and computing the line integral for each sub-segment
+          !***
+
+          do while (beglat /= endlat .or. beglon /= endlon)
+
+            !***
+            !*** prevent infinite loops if integration gets stuck
+            !*** near cell or threshold boundary
+            !***
+
+            num_subseg = num_subseg + 1
+            if (num_subseg > max_subseg) then
+              stop 'integration stalled: num_subseg exceeded limit'
+            endif
+
+            !***
+            !*** find next intersection of this segment with a grid
+            !*** line on grid 2.
+            !***
+
+            call timer_start(2)
+            call intersection(grid2_add,intrsct_lat,intrsct_lon,lcoinc,
+     &                        beglat, beglon, endlat, endlon, begseg, 
+     &                        lbegin, lrevers)
+            call timer_stop(2)
+            lbegin = .false.
+
+            !***
+            !*** compute line integral for this subsegment.
+            !***
+
+            call timer_start(3)
+            if (grid2_add /= 0) then
+              call line_integral(weights, num_wts,
+     &                         beglon, intrsct_lon, beglat, intrsct_lat,
+     &                         grid1_center_lat(grid1_add), 
+     &                         grid1_center_lon(grid1_add),
+     &                         grid2_center_lat(grid2_add), 
+     &                         grid2_center_lon(grid2_add))
+            else
+              call line_integral(weights, num_wts,
+     &                         beglon, intrsct_lon, beglat, intrsct_lat,
+     &                         grid1_center_lat(grid1_add), 
+     &                         grid1_center_lon(grid1_add),
+     &                         grid1_center_lat(grid1_add), 
+     &                         grid1_center_lon(grid1_add))
+            endif
+            call timer_stop(3)
+
+            !***
+            !*** if integrating in reverse order, change
+            !*** sign of weights
+            !***
+
+            if (lrevers) then
+              weights = -weights
+            endif
+
+            !***
+            !*** store the appropriate addresses and weights. 
+            !*** also add contributions to cell areas and centroids.
+            !***
+
+            !if (grid1_add == 119247) then
+            !  print *,grid1_add,grid2_add,corner,weights(1)
+            !  print *,grid1_corner_lat(:,grid1_add)
+            !  print *,grid1_corner_lon(:,grid1_add)
+            !  print *,grid2_corner_lat(:,grid2_add)
+            !  print *,grid2_corner_lon(:,grid2_add)
+            !  print *,beglat,beglon,intrsct_lat,intrsct_lon
+            !endif
+
+            if (grid2_add /= 0) then
+              if (grid1_mask(grid1_add)) then
+                call timer_start(4)
+                call store_link_cnsrv(grid1_add, grid2_add, weights)
+                call timer_stop(4)
+                grid1_frac(grid1_add) = grid1_frac(grid1_add) + 
+     &                                  weights(1)
+                grid2_frac(grid2_add) = grid2_frac(grid2_add) + 
+     &                                  weights(num_wts+1)
+              endif
+
+            endif
+
+            grid1_area(grid1_add) = grid1_area(grid1_add) + weights(1)
+            grid1_centroid_lat(grid1_add) = 
+     &      grid1_centroid_lat(grid1_add) + weights(2)
+            grid1_centroid_lon(grid1_add) = 
+     &      grid1_centroid_lon(grid1_add) + weights(3)
+
+            !***
+            !*** reset beglat and beglon for next subsegment.
+            !***
+
+            beglat = intrsct_lat
+            beglon = intrsct_lon
+          end do
+
+          endif
+
+          !***
+          !*** end of segment
+          !***
+
+        end do
+
+        !***
+        !*** finished with this cell: deallocate search array and
+        !*** start on next cell
+
+        deallocate(srch_add, srch_corner_lat, srch_corner_lon)
+
+      end do
+
+      deallocate(srch_mask)
+
+!-----------------------------------------------------------------------
+!
+!     integrate around each cell on grid2
+!
+!-----------------------------------------------------------------------
+
+      allocate(srch_mask(grid1_size))
+
+      print *,'grid2 sweep '
+      do grid2_add = 1,grid2_size
+
+        !***
+        !*** restrict searches first using search bins
+        !***
+
+        call timer_start(5)
+        min_add = grid1_size
+        max_add = 1
+        do n=1,num_srch_bins
+          if (grid2_add >= bin_addr2(1,n) .and.
+     &        grid2_add <= bin_addr2(2,n)) then
+            min_add = min(min_add, bin_addr1(1,n))
+            max_add = max(max_add, bin_addr1(2,n))
+          endif
+        end do
+
+        !***
+        !*** further restrict searches using bounding boxes
+        !***
+
+        num_srch_cells = 0
+        do grid1_add = min_add, max_add
+          srch_mask(grid1_add) = (grid1_bound_box(1,grid1_add) <= 
+     &                            grid2_bound_box(2,grid2_add)) .and.
+     &                           (grid1_bound_box(2,grid1_add) >= 
+     &                            grid2_bound_box(1,grid2_add)) .and.
+     &                           (grid1_bound_box(3,grid1_add) <= 
+     &                            grid2_bound_box(4,grid2_add)) .and.
+     &                           (grid1_bound_box(4,grid1_add) >= 
+     &                            grid2_bound_box(3,grid2_add))
+
+          if (srch_mask(grid1_add)) num_srch_cells = num_srch_cells+1
+        end do
+
+        allocate(srch_add(num_srch_cells),
+     &           srch_corner_lat(grid1_corners,num_srch_cells),
+     &           srch_corner_lon(grid1_corners,num_srch_cells))
+
+        n = 0
+        gather2: do grid1_add = min_add,max_add
+          if (srch_mask(grid1_add)) then
+            n = n+1
+            srch_add(n) = grid1_add
+            srch_corner_lat(:,n) = grid1_corner_lat(:,grid1_add)
+            srch_corner_lon(:,n) = grid1_corner_lon(:,grid1_add)
+          endif
+        end do gather2
+        call timer_stop(5)
+
+        !***
+        !*** integrate around this cell
+        !***
+
+        do corner = 1,grid2_corners
+          next_corn = mod(corner,grid2_corners) + 1
+
+          beglat = grid2_corner_lat(corner,grid2_add)
+          beglon = grid2_corner_lon(corner,grid2_add)
+          endlat = grid2_corner_lat(next_corn,grid2_add)
+          endlon = grid2_corner_lon(next_corn,grid2_add)
+          lrevers = .false.
+
+          !***
+          !*** to ensure exact path taken during both
+          !*** sweeps, always integrate in the same direction
+          !***
+
+          if ((endlat < beglat) .or.
+     &        (endlat == beglat .and. endlon < beglon)) then 
+            beglat = grid2_corner_lat(next_corn,grid2_add)
+            beglon = grid2_corner_lon(next_corn,grid2_add)
+            endlat = grid2_corner_lat(corner,grid2_add)
+            endlon = grid2_corner_lon(corner,grid2_add)
+            lrevers = .true.
+          endif
+
+          begseg(1) = beglat
+          begseg(2) = beglon
+          lbegin = .true.
+
+          !***
+          !*** if this is a constant-longitude segment, skip the rest 
+          !*** since the line integral contribution will be zero.
+          !***
+
+          if (endlon /= beglon) then
+          num_subseg = 0
+
+          !***
+          !*** integrate along this segment, detecting intersections 
+          !*** and computing the line integral for each sub-segment
+          !***
+
+          do while (beglat /= endlat .or. beglon /= endlon)
+
+            !***
+            !*** prevent infinite loops if integration gets stuck
+            !*** near cell or threshold boundary
+            !***
+
+            num_subseg = num_subseg + 1
+            if (num_subseg > max_subseg) then
+              stop 'integration stalled: num_subseg exceeded limit'
+            endif
+
+            !***
+            !*** find next intersection of this segment with a line 
+            !*** on grid 1.  (this is the grid2 sweep, so the search
+            !*** cells loaded above belong to grid 1.)
+            !***
+
+            call timer_start(6)
+            call intersection(grid1_add,intrsct_lat,intrsct_lon,lcoinc,
+     &                        beglat, beglon, endlat, endlon, begseg,
+     &                        lbegin, lrevers)
+            call timer_stop(6)
+            lbegin = .false.
+
+            !***
+            !*** compute line integral for this subsegment.
+            !***
+
+            call timer_start(7)
+            if (grid1_add /= 0) then
+              call line_integral(weights, num_wts,
+     &                         beglon, intrsct_lon, beglat, intrsct_lat,
+     &                         grid1_center_lat(grid1_add), 
+     &                         grid1_center_lon(grid1_add),
+     &                         grid2_center_lat(grid2_add), 
+     &                         grid2_center_lon(grid2_add))
+            else
+              call line_integral(weights, num_wts,
+     &                         beglon, intrsct_lon, beglat, intrsct_lat,
+     &                         grid2_center_lat(grid2_add), 
+     &                         grid2_center_lon(grid2_add),
+     &                         grid2_center_lat(grid2_add), 
+     &                         grid2_center_lon(grid2_add))
+            endif
+            call timer_stop(7)
+
+            if (lrevers) then
+              weights = -weights
+            endif
+
+            !***
+            !*** store the appropriate addresses and weights. 
+            !*** also add contributions to cell areas and centroids.
+            !*** if there is a coincidence, do not store weights
+            !*** because they have been captured in the previous loop.
+            !*** the grid1 mask is the master mask
+            !***
+
+            !if (grid1_add == 119247) then
+            !  print *,grid1_add,grid2_add,corner,weights(1)
+            !  print *,grid1_corner_lat(:,grid1_add)
+            !  print *,grid1_corner_lon(:,grid1_add)
+            !  print *,grid2_corner_lat(:,grid2_add)
+            !  print *,grid2_corner_lon(:,grid2_add)
+            !  print *,beglat,beglon,intrsct_lat,intrsct_lon
+            !endif
+
+            if (.not. lcoinc .and. grid1_add /= 0) then
+              if (grid1_mask(grid1_add)) then
+                call timer_start(8)
+                call store_link_cnsrv(grid1_add, grid2_add, weights)
+                call timer_stop(8)
+                grid1_frac(grid1_add) = grid1_frac(grid1_add) + 
+     &                                  weights(1)
+                grid2_frac(grid2_add) = grid2_frac(grid2_add) + 
+     &                                  weights(num_wts+1)
+              endif
+
+            endif
+
+            grid2_area(grid2_add) = grid2_area(grid2_add) + 
+     &                                      weights(num_wts+1)
+            grid2_centroid_lat(grid2_add) = 
+     &      grid2_centroid_lat(grid2_add) + weights(num_wts+2)
+            grid2_centroid_lon(grid2_add) = 
+     &      grid2_centroid_lon(grid2_add) + weights(num_wts+3)
+
+            !***
+            !*** reset beglat and beglon for next subsegment.
+            !***
+
+            beglat = intrsct_lat
+            beglon = intrsct_lon
+          end do
+
+          endif
+
+          !***
+          !*** end of segment
+          !***
+
+        end do
+
+        !***
+        !*** finished with this cell: deallocate search array and
+        !*** start on next cell
+
+        deallocate(srch_add, srch_corner_lat, srch_corner_lon)
+
+      end do
+
+      deallocate(srch_mask)
+
+!-----------------------------------------------------------------------
+!
+!     correct for situations where N/S pole not explicitly included in
+!     grid (i.e. as a grid corner point). if pole is missing from only
+!     one grid, need to correct only the area and centroid of that 
+!     grid.  if missing from both, do complete weight calculation.
+!
+!-----------------------------------------------------------------------
+
+      !*** North Pole
+      weights(1) =  pi2
+      weights(2) =  pi*pi
+      weights(3) =  zero
+      weights(4) =  pi2
+      weights(5) =  pi*pi
+      weights(6) =  zero
+
+      grid1_add = 0
+      pole_loop1: do n=1,grid1_size
+        if (grid1_area(n) < -three*pih .and.
+     &      grid1_center_lat(n) > zero) then
+          grid1_add = n
+          exit pole_loop1
+        endif
+      end do pole_loop1
+
+      grid2_add = 0
+      pole_loop2: do n=1,grid2_size
+        if (grid2_area(n) < -three*pih .and.
+     &      grid2_center_lat(n) > zero) then
+          grid2_add = n
+          exit pole_loop2
+        endif
+      end do pole_loop2
+
+      if (grid1_add /=0) then
+        grid1_area(grid1_add) = grid1_area(grid1_add) + weights(1)
+        grid1_centroid_lat(grid1_add) = 
+     &  grid1_centroid_lat(grid1_add) + weights(2)
+        grid1_centroid_lon(grid1_add) =
+     &  grid1_centroid_lon(grid1_add) + weights(3)
+      endif
+
+      if (grid2_add /=0) then
+        grid2_area(grid2_add) = grid2_area(grid2_add) + 
+     &                                  weights(num_wts+1)
+        grid2_centroid_lat(grid2_add) = 
+     &  grid2_centroid_lat(grid2_add) + weights(num_wts+2)
+        grid2_centroid_lon(grid2_add) =
+     &  grid2_centroid_lon(grid2_add) + weights(num_wts+3)
+      endif
+
+      if (grid1_add /= 0 .and. grid2_add /=0) then
+        call store_link_cnsrv(grid1_add, grid2_add, weights)
+
+        grid1_frac(grid1_add) = grid1_frac(grid1_add) + 
+     &                          weights(1)
+        grid2_frac(grid2_add) = grid2_frac(grid2_add) + 
+     &                          weights(num_wts+1)
+      endif
+
+      !*** South Pole
+      weights(1) =  pi2
+      weights(2) = -pi*pi
+      weights(3) =  zero
+      weights(4) =  pi2
+      weights(5) = -pi*pi
+      weights(6) =  zero
+
+      grid1_add = 0
+      pole_loop3: do n=1,grid1_size
+        if (grid1_area(n) < -three*pih .and.
+     &      grid1_center_lat(n) < zero) then
+          grid1_add = n
+          exit pole_loop3
+        endif
+      end do pole_loop3
+
+      grid2_add = 0
+      pole_loop4: do n=1,grid2_size
+        if (grid2_area(n) < -three*pih .and.
+     &      grid2_center_lat(n) < zero) then
+          grid2_add = n
+          exit pole_loop4
+        endif
+      end do pole_loop4
+
+      if (grid1_add /=0) then
+        grid1_area(grid1_add) = grid1_area(grid1_add) + weights(1)
+        grid1_centroid_lat(grid1_add) = 
+     &  grid1_centroid_lat(grid1_add) + weights(2)
+        grid1_centroid_lon(grid1_add) =
+     &  grid1_centroid_lon(grid1_add) + weights(3)
+      endif
+
+      if (grid2_add /=0) then
+        grid2_area(grid2_add) = grid2_area(grid2_add) + 
+     &                                  weights(num_wts+1)
+        grid2_centroid_lat(grid2_add) = 
+     &  grid2_centroid_lat(grid2_add) + weights(num_wts+2)
+        grid2_centroid_lon(grid2_add) =
+     &  grid2_centroid_lon(grid2_add) + weights(num_wts+3)
+      endif
+
+      if (grid1_add /= 0 .and. grid2_add /=0) then
+        call store_link_cnsrv(grid1_add, grid2_add, weights)
+
+        grid1_frac(grid1_add) = grid1_frac(grid1_add) + 
+     &                          weights(1)
+        grid2_frac(grid2_add) = grid2_frac(grid2_add) + 
+     &                          weights(num_wts+1)
+      endif
+
+!-----------------------------------------------------------------------
+!
+!     finish centroid computation
+!
+!-----------------------------------------------------------------------
+
+      where (grid1_area /= zero)
+        grid1_centroid_lat = grid1_centroid_lat/grid1_area
+        grid1_centroid_lon = grid1_centroid_lon/grid1_area
+      end where
+
+      where (grid2_area /= zero)
+        grid2_centroid_lat = grid2_centroid_lat/grid2_area
+        grid2_centroid_lon = grid2_centroid_lon/grid2_area
+      end where
+
+!-----------------------------------------------------------------------
+!
+!     include centroids in weights and normalize using destination
+!     area if requested
+!
+!-----------------------------------------------------------------------
+
+      do n=1,num_links_map1
+        grid1_add = grid1_add_map1(n)
+        grid2_add = grid2_add_map1(n)
+        do nwgt=1,num_wts
+          weights(        nwgt) = wts_map1(nwgt,n)
+          if (num_maps > 1) then
+            weights(num_wts+nwgt) = wts_map2(nwgt,n)
+          endif
+        end do
+
+        select case(norm_opt)
+        case (norm_opt_dstarea)
+          if (grid2_area(grid2_add) /= zero) then
+            if (luse_grid2_area) then
+              norm_factor = one/grid2_area_in(grid2_add)
+            else
+              norm_factor = one/grid2_area(grid2_add)
+            endif
+          else
+            norm_factor = zero
+          endif
+        case (norm_opt_frcarea)
+          if (grid2_frac(grid2_add) /= zero) then
+            if (luse_grid2_area) then
+              norm_factor = grid2_area(grid2_add)/
+     &                     (grid2_frac(grid2_add)*
+     &                      grid2_area_in(grid2_add))
+            else
+              norm_factor = one/grid2_frac(grid2_add)
+            endif
+          else
+            norm_factor = zero
+          endif
+        case (norm_opt_none)
+          norm_factor = one
+        end select
+
+        wts_map1(1,n) =  weights(1)*norm_factor
+        wts_map1(2,n) = (weights(2) - weights(1)*
+     &                              grid1_centroid_lat(grid1_add))*
+     &                              norm_factor
+        wts_map1(3,n) = (weights(3) - weights(1)*
+     &                              grid1_centroid_lon(grid1_add))*
+     &                              norm_factor
+
+        if (num_maps > 1) then
+          select case(norm_opt)
+          case (norm_opt_dstarea)
+            if (grid1_area(grid1_add) /= zero) then
+              if (luse_grid1_area) then
+                norm_factor = one/grid1_area_in(grid1_add)
+              else
+                norm_factor = one/grid1_area(grid1_add)
+              endif
+            else
+              norm_factor = zero
+            endif
+          case (norm_opt_frcarea)
+            if (grid1_frac(grid1_add) /= zero) then
+              if (luse_grid1_area) then
+                norm_factor = grid1_area(grid1_add)/
+     &                       (grid1_frac(grid1_add)*
+     &                        grid1_area_in(grid1_add))
+              else
+                norm_factor = one/grid1_frac(grid1_add)
+              endif
+            else
+              norm_factor = zero
+            endif
+          case (norm_opt_none)
+            norm_factor = one
+          end select
+
+          wts_map2(1,n) =  weights(num_wts+1)*norm_factor
+          wts_map2(2,n) = (weights(num_wts+2) - weights(num_wts+1)*
+     &                                grid2_centroid_lat(grid2_add))*
+     &                                norm_factor
+          wts_map2(3,n) = (weights(num_wts+3) - weights(num_wts+1)*
+     &                                grid2_centroid_lon(grid2_add))*
+     &                                norm_factor
+        endif
+
+      end do
+
+      print *, 'Total number of links = ',num_links_map1
+
+      where (grid1_area /= zero) grid1_frac = grid1_frac/grid1_area
+      where (grid2_area /= zero) grid2_frac = grid2_frac/grid2_area
+
+!-----------------------------------------------------------------------
+!
+!     perform some error checking on final weights
+!
+!-----------------------------------------------------------------------
+
+      grid2_centroid_lat = zero
+      grid2_centroid_lon = zero
+
+      do n=1,grid1_size
+        if (grid1_area(n) < -.01) then
+          print *,'Grid 1 area error: ',n,grid1_area(n)
+        endif
+        if (grid1_centroid_lat(n) < -pih-.01 .or.
+     &      grid1_centroid_lat(n) >  pih+.01) then
+          print *,'Grid 1 centroid lat error: ',n,grid1_centroid_lat(n)
+        endif
+        grid1_centroid_lat(n) = zero
+        grid1_centroid_lon(n) = zero
+      end do
+
+      do n=1,grid2_size
+        if (grid2_area(n) < -.01) then
+          print *,'Grid 2 area error: ',n,grid2_area(n)
+        endif
+        if (grid2_centroid_lat(n) < -pih-.01 .or.
+     &      grid2_centroid_lat(n) >  pih+.01) then
+          print *,'Grid 2 centroid lat error: ',n,grid2_centroid_lat(n)
+        endif
+        grid2_centroid_lat(n) = zero
+        grid2_centroid_lon(n) = zero
+      end do
+
+      !*** accumulate per-cell weight sums (centroid arrays are reused
+      !*** as scratch space here)
+      do n=1,num_links_map1
+        grid1_add = grid1_add_map1(n)
+        grid2_add = grid2_add_map1(n)
+
+        if (wts_map1(1,n) < -.01) then
+          print *,'Map 1 weight < 0 ',grid1_add,grid2_add,wts_map1(1,n)
+        endif
+        if (norm_opt /= norm_opt_none .and. wts_map1(1,n) > 1.01) then
+          print *,'Map 1 weight > 1 ',grid1_add,grid2_add,wts_map1(1,n)
+        endif
+        grid2_centroid_lat(grid2_add) = 
+     &  grid2_centroid_lat(grid2_add) + wts_map1(1,n)
+
+        if (num_maps > 1) then
+          if (wts_map2(1,n) < -.01) then
+            print *,'Map 2 weight < 0 ',grid1_add,grid2_add,
+     &                                  wts_map2(1,n)
+          endif
+          if (norm_opt /= norm_opt_none .and. wts_map2(1,n) > 1.01) then
+            !*** fixed: this branch detects weights > 1; the message
+            !*** previously (and wrongly) said "< 0"
+            print *,'Map 2 weight > 1 ',grid1_add,grid2_add,
+     &                                  wts_map2(1,n)
+          endif
+          grid1_centroid_lat(grid1_add) = 
+     &    grid1_centroid_lat(grid1_add) + wts_map2(1,n)
+        endif
+      end do
+
+      !*** fixed: this loop previously indexed every array with
+      !*** grid2_add, which was left stale from the link loop above,
+      !*** so only a single cell was ever checked.  index with the
+      !*** loop variable n so every destination cell is verified.
+      do n=1,grid2_size
+        select case(norm_opt)
+        case (norm_opt_dstarea)
+          norm_factor = grid2_frac(n)
+        case (norm_opt_frcarea)
+          norm_factor = one
+        case (norm_opt_none)
+          if (luse_grid2_area) then
+            norm_factor = grid2_area_in(n)
+          else
+            norm_factor = grid2_area(n)
+          endif
+        end select
+        if (abs(grid2_centroid_lat(n)-norm_factor) > .01) then
+          print *,'Error: sum of wts for map1 ',n,
+     &            grid2_centroid_lat(n),norm_factor
+        endif
+      end do
+
+      if (num_maps > 1) then
+        !*** fixed: same stale-index problem as above, but with
+        !*** grid1_add; use the loop variable n instead.
+        do n=1,grid1_size
+          select case(norm_opt)
+          case (norm_opt_dstarea)
+            norm_factor = grid1_frac(n)
+          case (norm_opt_frcarea)
+            norm_factor = one
+          case (norm_opt_none)
+            if (luse_grid1_area) then
+              norm_factor = grid1_area_in(n)
+            else
+              norm_factor = grid1_area(n)
+            endif
+          end select
+          if (abs(grid1_centroid_lat(n)-norm_factor) > .01) then
+            print *,'Error: sum of wts for map2 ',n,
+     &              grid1_centroid_lat(n),norm_factor
+          endif
+        end do
+      endif
+!-----------------------------------------------------------------------
+
+      end subroutine remap_conserv
+
+!***********************************************************************
+
+      subroutine intersection(location,intrsct_lat,intrsct_lon,lcoinc,
+     &                        beglat, beglon, endlat, endlon, begseg,
+     &                        lbegin, lrevers)
+
+!-----------------------------------------------------------------------
+!
+!     this routine finds the next intersection of a destination grid 
+!     line with the line segment given by beglon, endlon, etc.
+!     a coincidence flag is returned if the segment is entirely 
+!     coincident with an ocean grid line.  the cells in which to search
+!     for an intersection must have already been restricted in the
+!     calling routine.
+!
+!     state saved between calls (last_loc, lthresh, intrsct_*_off)
+!     carries the search across successive sub-segments of one full
+!     segment, so this routine is not reentrant.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     intent(in): 
+!
+!-----------------------------------------------------------------------
+
+      logical (kind=log_kind), intent(in) ::
+     &     lbegin, ! flag for first integration along this segment
+     &     lrevers ! flag whether segment integrated in reverse
+
+      real (kind=dbl_kind), intent(in) :: 
+     &     beglat, beglon,  ! beginning lat/lon endpoints for segment
+     &     endlat, endlon   ! ending    lat/lon endpoints for segment
+
+      real (kind=dbl_kind), dimension(2), intent(inout) :: 
+     &     begseg ! begin lat/lon of full segment
+
+!-----------------------------------------------------------------------
+!
+!     intent(out): 
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), intent(out) ::
+     &        location  ! address in destination array containing this
+                        ! segment
+
+      logical (kind=log_kind), intent(out) ::
+     &        lcoinc    ! flag segments which are entirely coincident
+                        ! with a grid line
+
+      real (kind=dbl_kind), intent(out) ::
+     &     intrsct_lat, intrsct_lon ! lat/lon coords of next intersect.
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind) :: n, next_n, cell, srch_corners, pole_loc
+
+      integer (kind=int_kind), save :: 
+     &     last_loc  ! save location when crossing threshold
+
+      logical (kind=log_kind) :: 
+     &     loutside  ! flags points outside grid
+
+      logical (kind=log_kind), save :: 
+     &     lthresh = .false.  ! flags segments crossing threshold bndy
+
+      real (kind=dbl_kind) ::
+     &     lon1, lon2,       ! local longitude variables for segment
+     &     lat1, lat2,       ! local latitude  variables for segment
+     &     grdlon1, grdlon2, ! local longitude variables for grid cell
+     &     grdlat1, grdlat2, ! local latitude  variables for grid cell
+     &     vec1_lat, vec1_lon, ! vectors and cross products used
+     &     vec2_lat, vec2_lon, ! during grid search
+     &     cross_product, 
+     &     eps, offset,        ! small offset away from intersect
+     &     s1, s2, determ,     ! variables used for linear solve to
+     &     mat1, mat2, mat3, mat4, rhs1, rhs2  ! find intersection
+
+      real (kind=dbl_kind), save ::
+     &     intrsct_lat_off, intrsct_lon_off ! lat/lon coords offset 
+                                            ! for next search
+
+!-----------------------------------------------------------------------
+!
+!     initialize defaults, flags, etc.
+!
+!-----------------------------------------------------------------------
+
+      location = 0
+      lcoinc = .false.
+      intrsct_lat = endlat
+      intrsct_lon = endlon
+
+      if (num_srch_cells == 0) return
+
+      ! near the poles, delegate to the coordinate-transform version
+
+      if (beglat > north_thresh .or. beglat < south_thresh) then
+
+        if (lthresh) location = last_loc
+        call pole_intersection(location,
+     &               intrsct_lat,intrsct_lon,lcoinc,lthresh,
+     &               beglat, beglon, endlat, endlon, begseg, lrevers)
+        if (lthresh) then
+          last_loc = location
+          intrsct_lat_off = intrsct_lat
+          intrsct_lon_off = intrsct_lon
+        endif
+        return
+
+      endif
+
+      loutside = .false.
+      if (lbegin) then
+        lat1 = beglat
+        lon1 = beglon
+      else
+        lat1 = intrsct_lat_off
+        lon1 = intrsct_lon_off
+      endif
+      lat2 = endlat
+      lon2 = endlon
+      ! keep the segment on the same side of the 0/2pi branch cut
+      if ((lon2-lon1) > three*pih) then
+        lon2 = lon2 - pi2
+      else if ((lon2-lon1) < -three*pih) then
+        lon2 = lon2 + pi2
+      endif
+      s1 = zero
+
+!-----------------------------------------------------------------------
+!
+!     search for location of this segment in ocean grid using cross
+!     product method to determine whether a point is enclosed by a cell
+!
+!-----------------------------------------------------------------------
+
+      call timer_start(12)
+      srch_corners = size(srch_corner_lat,DIM=1)
+      srch_loop: do
+
+        !***
+        !*** if last segment crossed threshold, use that location
+        !***
+
+        if (lthresh) then
+          do cell=1,num_srch_cells
+            if (srch_add(cell) == last_loc) then
+              location = last_loc
+              eps = tiny
+              exit srch_loop
+            endif
+          end do
+        endif
+
+        !***
+        !*** otherwise normal search algorithm
+        !***
+
+        cell_loop: do cell=1,num_srch_cells
+          corner_loop: do n=1,srch_corners
+            next_n = MOD(n,srch_corners) + 1
+
+            !***
+            !*** here we take the cross product of the vector making 
+            !*** up each cell side with the vector formed by the vertex
+            !*** and search point.  if all the cross products are 
+            !*** positive, the point is contained in the cell.
+            !***
+
+            vec1_lat = srch_corner_lat(next_n,cell) - 
+     &                 srch_corner_lat(n     ,cell)
+            vec1_lon = srch_corner_lon(next_n,cell) - 
+     &                 srch_corner_lon(n     ,cell)
+            vec2_lat = lat1 - srch_corner_lat(n,cell)
+            vec2_lon = lon1 - srch_corner_lon(n,cell)
+
+            !***
+            !*** if endpoint coincident with vertex, offset
+            !*** the endpoint
+            !***
+
+            if (vec2_lat == 0 .and. vec2_lon == 0) then
+              lat1 = lat1 + 1.d-10*(lat2-lat1)
+              lon1 = lon1 + 1.d-10*(lon2-lon1)
+              vec2_lat = lat1 - srch_corner_lat(n,cell)
+              vec2_lon = lon1 - srch_corner_lon(n,cell)
+            endif
+
+            !***
+            !*** check for 0,2pi crossings
+            !***
+
+            if (vec1_lon >  pi) then
+              vec1_lon = vec1_lon - pi2
+            else if (vec1_lon < -pi) then
+              vec1_lon = vec1_lon + pi2
+            endif
+            if (vec2_lon >  pi) then
+              vec2_lon = vec2_lon - pi2
+            else if (vec2_lon < -pi) then
+              vec2_lon = vec2_lon + pi2
+            endif
+
+            cross_product = vec1_lon*vec2_lat - vec2_lon*vec1_lat
+
+            !***
+            !*** if the cross product for a side is zero, the point 
+            !***   lies exactly on the side or the side is degenerate
+            !***   (zero length).  if degenerate, set the cross 
+            !***   product to a positive number.  otherwise perform 
+            !***   another cross product between the side and the 
+            !***   segment itself. 
+            !*** if this cross product is also zero, the line is 
+            !***   coincident with the cell boundary - perform the 
+            !***   dot product and only choose the cell if the dot 
+            !***   product is positive (parallel vs anti-parallel).
+            !***
+
+            if (cross_product == zero) then
+              if (vec1_lat /= zero .or. vec1_lon /= zero) then
+                vec2_lat = lat2 - lat1
+                vec2_lon = lon2 - lon1
+
+                if (vec2_lon >  pi) then
+                  vec2_lon = vec2_lon - pi2
+                else if (vec2_lon < -pi) then
+                  vec2_lon = vec2_lon + pi2
+                endif
+
+                cross_product = vec1_lon*vec2_lat - vec2_lon*vec1_lat
+              else
+                cross_product = one
+              endif
+
+              if (cross_product == zero) then
+                lcoinc = .true.
+                cross_product = vec1_lon*vec2_lon + vec1_lat*vec2_lat
+                if (lrevers) cross_product = -cross_product
+              endif
+            endif
+
+            !***
+            !*** if cross product is less than zero, this cell
+            !*** doesn't work
+            !***
+
+            if (cross_product < zero) exit corner_loop
+
+          end do corner_loop
+
+          !***
+          !*** if cross products all positive, we found the location
+          !***
+
+          if (n > srch_corners) then
+            location = srch_add(cell)
+
+            !***
+            !*** if the beginning of this segment was outside the
+            !*** grid, invert the segment so the intersection found
+            !*** will be the first intersection with the grid
+            !***
+
+            if (loutside) then
+              lat2 = beglat
+              lon2 = beglon
+              location = 0
+              eps  = -tiny
+            else
+              eps  = tiny
+            endif
+
+            exit srch_loop
+          endif
+
+          !***
+          !*** otherwise move on to next cell
+          !***
+
+        end do cell_loop
+
+        !***
+        !*** if still no cell found, the point lies outside the grid.
+        !***   take some baby steps along the segment to see if any
+        !***   part of the segment lies inside the grid.  
+        !***
+
+        loutside = .true.
+        s1 = s1 + 0.001_dbl_kind
+        lat1 = beglat + s1*(endlat - beglat)
+        lon1 = beglon + s1*(lon2   - beglon)
+
+        !***
+        !*** reached the end of the segment and still outside the grid
+        !*** return no intersection.  stop timer 12 before the early
+        !*** return so it stays balanced with timer_start above.
+        !***
+
+        if (s1 >= one) then
+          call timer_stop(12)
+          return
+        endif
+
+      end do srch_loop
+      call timer_stop(12)
+
+!-----------------------------------------------------------------------
+!
+!     now that a cell is found, search for the next intersection.
+!     loop over sides of the cell to find intersection with side
+!     must check all sides for coincidences or intersections
+!
+!-----------------------------------------------------------------------
+
+      call timer_start(13)
+      intrsct_loop: do n=1,srch_corners
+        next_n = mod(n,srch_corners) + 1
+
+        grdlon1 = srch_corner_lon(n     ,cell)
+        grdlon2 = srch_corner_lon(next_n,cell)
+        grdlat1 = srch_corner_lat(n     ,cell)
+        grdlat2 = srch_corner_lat(next_n,cell)
+
+        !***
+        !*** set up linear system to solve for intersection
+        !***
+
+        mat1 = lat2 - lat1
+        mat2 = grdlat1 - grdlat2
+        mat3 = lon2 - lon1
+        mat4 = grdlon1 - grdlon2
+        rhs1 = grdlat1 - lat1
+        rhs2 = grdlon1 - lon1
+
+        if (mat3 >  pi) then
+          mat3 = mat3 - pi2
+        else if (mat3 < -pi) then
+          mat3 = mat3 + pi2
+        endif
+        if (mat4 >  pi) then
+          mat4 = mat4 - pi2
+        else if (mat4 < -pi) then
+          mat4 = mat4 + pi2
+        endif
+        if (rhs2 >  pi) then
+          rhs2 = rhs2 - pi2
+        else if (rhs2 < -pi) then
+          rhs2 = rhs2 + pi2
+        endif
+
+        determ = mat1*mat4 - mat2*mat3
+
+        !***
+        !*** if the determinant is zero, the segments are either 
+        !***   parallel or coincident.  coincidences were detected 
+        !***   above so do nothing.
+        !*** if the determinant is non-zero, solve for the linear 
+        !***   parameters s for the intersection point on each line 
+        !***   segment.
+        !*** if 0<s1,s2<1 then the segment intersects with this side.
+        !***   return the point of intersection (adding a small
+        !***   number so the intersection is off the grid line).
+        !***
+
+        if (abs(determ) > 1.e-30) then
+
+          s1 = (rhs1*mat4 - mat2*rhs2)/determ
+          s2 = (mat1*rhs2 - rhs1*mat3)/determ
+
+          if (s2 >= zero .and. s2 <= one .and.
+     &        s1 >  zero .and. s1 <= one) then
+
+            !***
+            !*** recompute intersection based on full segment
+            !*** so intersections are consistent for both sweeps
+            !***
+
+            if (.not. loutside) then
+              mat1 = lat2 - begseg(1)
+              mat3 = lon2 - begseg(2)
+              rhs1 = grdlat1 - begseg(1)
+              rhs2 = grdlon1 - begseg(2)
+            else
+              mat1 = begseg(1) - endlat
+              mat3 = begseg(2) - endlon
+              rhs1 = grdlat1 - endlat
+              rhs2 = grdlon1 - endlon
+            endif
+
+            if (mat3 >  pi) then
+              mat3 = mat3 - pi2
+            else if (mat3 < -pi) then
+              mat3 = mat3 + pi2
+            endif
+            if (rhs2 >  pi) then
+              rhs2 = rhs2 - pi2
+            else if (rhs2 < -pi) then
+              rhs2 = rhs2 + pi2
+            endif
+
+            determ = mat1*mat4 - mat2*mat3
+
+            !***
+            !*** sometimes due to roundoff, the previous 
+            !*** determinant is non-zero, but the lines
+            !*** are actually coincident.  if this is the
+            !*** case, skip the rest.
+            !***
+
+            if (determ /= zero) then
+              s1 = (rhs1*mat4 - mat2*rhs2)/determ
+              s2 = (mat1*rhs2 - rhs1*mat3)/determ
+
+              offset = s1 + eps/determ
+              if (offset > one) offset = one
+
+              if (.not. loutside) then
+                intrsct_lat = begseg(1) + mat1*s1
+                intrsct_lon = begseg(2) + mat3*s1
+                intrsct_lat_off = begseg(1) + mat1*offset
+                intrsct_lon_off = begseg(2) + mat3*offset
+              else
+                intrsct_lat = endlat + mat1*s1
+                intrsct_lon = endlon + mat3*s1
+                intrsct_lat_off = endlat + mat1*offset
+                intrsct_lon_off = endlon + mat3*offset
+              endif
+              exit intrsct_loop
+            endif
+
+          endif
+        endif
+
+        !***
+        !*** no intersection this side, move on to next side
+        !***
+
+      end do intrsct_loop
+      call timer_stop(13)
+
+!-----------------------------------------------------------------------
+!
+!     if the segment crosses a pole threshold, reset the intersection
+!     to be the threshold latitude.  only check if this was not a
+!     threshold segment since sometimes coordinate transform can end
+!     up on other side of threshold again.
+!
+!-----------------------------------------------------------------------
+
+      ! NOTE(review): assuming north_thresh > south_thresh, the .or.
+      ! test below is satisfied by every latitude, so lthresh is
+      ! always cleared here; the analogous test in pole_intersection
+      ! uses the opposite comparisons.  left unchanged pending
+      ! confirmation against the reference SCRIP algorithm.
+      if (lthresh) then
+        if (intrsct_lat < north_thresh .or. intrsct_lat > south_thresh)
+     &      lthresh = .false.
+      else if (lat1 > zero .and. intrsct_lat > north_thresh) then
+        intrsct_lat = north_thresh + tiny
+        intrsct_lat_off = north_thresh + eps*mat1
+        s1 = (intrsct_lat - begseg(1))/mat1
+        intrsct_lon     = begseg(2) + s1*mat3
+        intrsct_lon_off = begseg(2) + (s1+eps)*mat3
+        last_loc = location
+        lthresh = .true.
+      else if (lat1 < zero .and. intrsct_lat < south_thresh) then
+        intrsct_lat = south_thresh - tiny
+        intrsct_lat_off = south_thresh + eps*mat1
+        s1 = (intrsct_lat - begseg(1))/mat1
+        intrsct_lon     = begseg(2) + s1*mat3
+        intrsct_lon_off = begseg(2) + (s1+eps)*mat3
+        last_loc = location
+        lthresh = .true.
+      endif
+
+!-----------------------------------------------------------------------
+
+      end subroutine intersection
+
+!***********************************************************************
+
+      subroutine pole_intersection(location,
+     &                 intrsct_lat,intrsct_lon,lcoinc,lthresh,
+     &                 beglat, beglon, endlat, endlon, begseg, lrevers)
+
+!-----------------------------------------------------------------------
+!
+!     this routine is identical to the intersection routine except
+!     that a coordinate transformation (using a Lambert azimuthal
+!     equivalent projection) is performed to treat polar cells more
+!     accurately.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     intent(in): 
+!
+!-----------------------------------------------------------------------
+
+      real (kind=dbl_kind), intent(in) :: 
+     &     beglat, beglon,  ! beginning lat/lon endpoints for segment
+     &     endlat, endlon   ! ending    lat/lon endpoints for segment
+
+      real (kind=dbl_kind), dimension(2), intent(inout) :: 
+     &     begseg ! begin lat/lon of full segment
+
+      logical (kind=log_kind), intent(in) ::
+     &        lrevers   ! flag true if segment integrated in reverse
+
+!-----------------------------------------------------------------------
+!
+!     intent(out): 
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), intent(inout) ::
+     &        location  ! address in destination array containing this
+                        ! segment -- also may contain last location on
+                        ! entry
+
+      logical (kind=log_kind), intent(out) ::
+     &        lcoinc    ! flag segment coincident with grid line
+
+      logical (kind=log_kind), intent(inout) ::
+     &        lthresh   ! flag segment crossing threshold boundary
+
+      real (kind=dbl_kind), intent(out) ::
+     &     intrsct_lat, intrsct_lon ! lat/lon coords of next intersect.
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind) :: n, next_n, cell, srch_corners, pole_loc
+
+      logical (kind=log_kind) :: loutside ! flags points outside grid
+
+      real (kind=dbl_kind) :: pi4, rns, ! north/south conversion
+     &     x1, x2,       ! local x variables for segment
+     &     y1, y2,       ! local y variables for segment
+     &     begx, begy,   ! beginning x,y variables for segment
+     &     endx, endy,   ! beginning x,y variables for segment
+     &     begsegx, begsegy,   ! beginning x,y variables for segment
+     &     grdx1, grdx2, ! local x variables for grid cell
+     &     grdy1, grdy2, ! local y variables for grid cell
+     &     vec1_y, vec1_x, ! vectors and cross products used
+     &     vec2_y, vec2_x, ! during grid search
+     &     cross_product, eps, ! eps=small offset away from intersect
+     &     s1, s2, determ,     ! variables used for linear solve to
+     &     mat1, mat2, mat3, mat4, rhs1, rhs2  ! find intersection
+
+      real (kind=dbl_kind), dimension(:,:), allocatable ::
+     &     srch_corner_x,  ! x of each corner of srch cells
+     &     srch_corner_y   ! y of each corner of srch cells
+
+      !***
+      !*** save last intersection to avoid roundoff during coord
+      !*** transformation
+      !***
+
+      logical (kind=log_kind), save :: luse_last = .false.
+
+      real (kind=dbl_kind), save :: 
+     &     intrsct_x, intrsct_y  ! x,y for intersection
+
+      !***
+      !*** variables necessary if segment manages to hit pole
+      !***
+
+      integer (kind=int_kind), save :: 
+     &     avoid_pole_count = 0  ! count attempts to avoid pole
+
+      real (kind=dbl_kind), save :: 
+     &     avoid_pole_offset = tiny  ! endpoint offset to avoid pole
+
+!-----------------------------------------------------------------------
+!
+!     initialize defaults, flags, etc.
+!
+!-----------------------------------------------------------------------
+
+      if (.not. lthresh) location = 0
+      lcoinc = .false.
+      intrsct_lat = endlat
+      intrsct_lon = endlon
+
+      loutside = .false.
+      s1 = zero
+
+!-----------------------------------------------------------------------
+!
+!     convert coordinates
+!
+!-----------------------------------------------------------------------
+
+      allocate(srch_corner_x(size(srch_corner_lat,DIM=1),
+     &                       size(srch_corner_lat,DIM=2)),
+     &         srch_corner_y(size(srch_corner_lat,DIM=1),
+     &                       size(srch_corner_lat,DIM=2)))
+
+      if (beglat > zero) then
+        pi4 = quart*pi
+        rns = one
+      else
+        pi4 = -quart*pi
+        rns = -one
+      endif
+
+      if (luse_last) then
+        x1 = intrsct_x
+        y1 = intrsct_y
+      else
+        x1 = rns*two*sin(pi4 - half*beglat)*cos(beglon)
+        y1 =     two*sin(pi4 - half*beglat)*sin(beglon)
+        luse_last = .true.
+      endif
+      x2 = rns*two*sin(pi4 - half*endlat)*cos(endlon)
+      y2 =     two*sin(pi4 - half*endlat)*sin(endlon)
+      srch_corner_x = rns*two*sin(pi4 - half*srch_corner_lat)*
+     &                        cos(srch_corner_lon)
+      srch_corner_y =     two*sin(pi4 - half*srch_corner_lat)*
+     &                        sin(srch_corner_lon)
+
+      begx = x1
+      begy = y1
+      endx = x2
+      endy = y2
+      begsegx = rns*two*sin(pi4 - half*begseg(1))*cos(begseg(2))
+      begsegy =     two*sin(pi4 - half*begseg(1))*sin(begseg(2))
+      intrsct_x = endx
+      intrsct_y = endy
+
+!-----------------------------------------------------------------------
+!
+!     search for location of this segment in ocean grid using cross
+!     product method to determine whether a point is enclosed by a cell
+!
+!-----------------------------------------------------------------------
+
+      call timer_start(12)
+      srch_corners = size(srch_corner_lat,DIM=1)
+      srch_loop: do
+
+        !***
+        !*** if last segment crossed threshold, use that location
+        !***
+
+        if (lthresh) then
+          do cell=1,num_srch_cells
+            if (srch_add(cell) == location) then
+              eps = tiny
+              exit srch_loop
+            endif
+          end do
+        endif
+
+        !***
+        !*** otherwise normal search algorithm
+        !***
+
+        cell_loop: do cell=1,num_srch_cells
+          corner_loop: do n=1,srch_corners
+            next_n = MOD(n,srch_corners) + 1
+
+            !***
+            !*** here we take the cross product of the vector making 
+            !*** up each cell side with the vector formed by the vertex
+            !*** and search point.  if all the cross products are 
+            !*** positive, the point is contained in the cell.
+            !***
+
+            vec1_x = srch_corner_x(next_n,cell) - 
+     &               srch_corner_x(n     ,cell)
+            vec1_y = srch_corner_y(next_n,cell) - 
+     &               srch_corner_y(n     ,cell)
+            vec2_x = x1 - srch_corner_x(n,cell)
+            vec2_y = y1 - srch_corner_y(n,cell)
+
+            !***
+            !*** if endpoint coincident with vertex, offset
+            !*** the endpoint
+            !***
+
+            if (vec2_x == 0 .and. vec2_y == 0) then
+              x1 = x1 + 1.d-10*(x2-x1)
+              y1 = y1 + 1.d-10*(y2-y1)
+              vec2_x = x1 - srch_corner_x(n,cell)
+              vec2_y = y1 - srch_corner_y(n,cell)
+            endif
+
+            cross_product = vec1_x*vec2_y - vec2_x*vec1_y
+
+            !***
+            !*** if the cross product for a side is zero, the point 
+            !***   lies exactly on the side or the length of a side
+            !***   is zero.  if the length is zero set det > 0.
+            !***   otherwise, perform another cross 
+            !***   product between the side and the segment itself. 
+            !*** if this cross product is also zero, the line is 
+            !***   coincident with the cell boundary - perform the 
+            !***   dot product and only choose the cell if the dot 
+            !***   product is positive (parallel vs anti-parallel).
+            !***
+
+            if (cross_product == zero) then
+              if (vec1_x /= zero .or. vec1_y /= 0) then
+                vec2_x = x2 - x1
+                vec2_y = y2 - y1
+                cross_product = vec1_x*vec2_y - vec2_x*vec1_y
+              else
+                cross_product = one
+              endif
+
+              if (cross_product == zero) then
+                lcoinc = .true.
+                cross_product = vec1_x*vec2_x + vec1_y*vec2_y
+                if (lrevers) cross_product = -cross_product
+              endif
+            endif
+
+            !***
+            !*** if cross product is less than zero, this cell
+            !*** doesn't work
+            !***
+
+            if (cross_product < zero) exit corner_loop
+
+          end do corner_loop
+
+          !***
+          !*** if cross products all positive, we found the location
+          !***
+
+          if (n > srch_corners) then
+            location = srch_add(cell)
+
+            !***
+            !*** if the beginning of this segment was outside the
+            !*** grid, invert the segment so the intersection found
+            !*** will be the first intersection with the grid
+            !***
+
+            if (loutside) then
+              x2 = begx
+              y2 = begy
+              location = 0
+              eps  = -tiny
+            else
+              eps  = tiny
+            endif
+
+            exit srch_loop
+          endif
+
+          !***
+          !*** otherwise move on to next cell
+          !***
+
+        end do cell_loop
+
+        !***
+        !*** if no cell found, the point lies outside the grid.
+        !***   take some baby steps along the segment to see if any
+        !***   part of the segment lies inside the grid.  
+        !***
+
+        loutside = .true.
+        s1 = s1 + 0.001_dbl_kind
+        x1 = begx + s1*(x2 - begx)
+        y1 = begy + s1*(y2 - begy)
+
+        !***
+        !*** reached the end of the segment and still outside the grid
+        !*** return no intersection
+        !***
+
+        if (s1 >= one) then
+          deallocate(srch_corner_x, srch_corner_y)
+          luse_last = .false.
+          return
+        endif
+
+      end do srch_loop
+      call timer_stop(12)
+
+!-----------------------------------------------------------------------
+!
+!     now that a cell is found, search for the next intersection.
+!     loop over sides of the cell to find intersection with side
+!     must check all sides for coincidences or intersections
+!
+!-----------------------------------------------------------------------
+
+      call timer_start(13)
+      intrsct_loop: do n=1,srch_corners
+        next_n = mod(n,srch_corners) + 1
+
+        grdy1 = srch_corner_y(n     ,cell)
+        grdy2 = srch_corner_y(next_n,cell)
+        grdx1 = srch_corner_x(n     ,cell)
+        grdx2 = srch_corner_x(next_n,cell)
+
+        !***
+        !*** set up linear system to solve for intersection
+        !***
+
+        mat1 = x2 - x1
+        mat2 = grdx1 - grdx2
+        mat3 = y2 - y1
+        mat4 = grdy1 - grdy2
+        rhs1 = grdx1 - x1
+        rhs2 = grdy1 - y1
+
+        determ = mat1*mat4 - mat2*mat3
+
+        !***
+        !*** if the determinant is zero, the segments are either 
+        !***   parallel or coincident or one segment has zero length.  
+        !***   coincidences were detected above so do nothing.
+        !*** if the determinant is non-zero, solve for the linear 
+        !***   parameters s for the intersection point on each line 
+        !***   segment.
+        !*** if 0<s1,s2<1 then the segment intersects with this side.
+        !***   return the point of intersection (adding a small
+        !***   number so the intersection is off the grid line).
+        !***
+
+        if (abs(determ) > 1.e-30) then
+
+          s1 = (rhs1*mat4 - mat2*rhs2)/determ
+          s2 = (mat1*rhs2 - rhs1*mat3)/determ
+
+          if (s2 >= zero .and. s2 <= one .and.
+     &        s1 >  zero. and. s1 <= one) then
+
+            !***
+            !*** recompute intersection using entire segment
+            !*** for consistency between sweeps
+            !***
+
+            if (.not. loutside) then
+              mat1 = x2 - begsegx
+              mat3 = y2 - begsegy
+              rhs1 = grdx1 - begsegx
+              rhs2 = grdy1 - begsegy
+            else 
+              mat1 = x2 - endx
+              mat3 = y2 - endy
+              rhs1 = grdx1 - endx
+              rhs2 = grdy1 - endy
+            endif
+
+            determ = mat1*mat4 - mat2*mat3
+
+            !***
+            !*** sometimes due to roundoff, the previous 
+            !*** determinant is non-zero, but the lines
+            !*** are actually coincident.  if this is the
+            !*** case, skip the rest.
+            !***
+
+            if (determ /= zero) then
+              s1 = (rhs1*mat4 - mat2*rhs2)/determ
+              s2 = (mat1*rhs2 - rhs1*mat3)/determ
+
+              if (.not. loutside) then
+                intrsct_x = begsegx + s1*mat1
+                intrsct_y = begsegy + s1*mat3
+              else 
+                intrsct_x = endx + s1*mat1
+                intrsct_y = endy + s1*mat3
+              endif
+
+              !***
+              !*** convert back to lat/lon coordinates
+              !***
+
+              intrsct_lon = rns*atan2(intrsct_y,intrsct_x)
+              if (intrsct_lon < zero) 
+     &          intrsct_lon = intrsct_lon + pi2
+
+              if (abs(intrsct_x) > 1.d-10) then
+                intrsct_lat = (pi4 - 
+     &            asin(rns*half*intrsct_x/cos(intrsct_lon)))*two
+              else if (abs(intrsct_y) > 1.d-10) then
+                intrsct_lat = (pi4 - 
+     &            asin(half*intrsct_y/sin(intrsct_lon)))*two
+              else
+                intrsct_lat = two*pi4
+              endif
+
+              !***
+              !*** add offset in transformed space for next pass.
+              !***
+
+              if (s1 - eps/determ < one) then
+                intrsct_x = intrsct_x - mat1*(eps/determ)
+                intrsct_y = intrsct_y - mat3*(eps/determ)
+              else
+                if (.not. loutside) then
+                  intrsct_x = endx
+                  intrsct_y = endy
+                  intrsct_lat = endlat
+                  intrsct_lon = endlon
+                else 
+                  intrsct_x = begsegx
+                  intrsct_y = begsegy
+                  intrsct_lat = begseg(1)
+                  intrsct_lon = begseg(2)
+                endif
+              endif
+
+              exit intrsct_loop
+            endif
+          endif
+        endif
+
+        !***
+        !*** no intersection this side, move on to next side
+        !***
+
+      end do intrsct_loop
+      call timer_stop(13)
+
+      deallocate(srch_corner_x, srch_corner_y)
+
+!-----------------------------------------------------------------------
+!
+!     if segment manages to cross over pole, shift the beginning 
+!     endpoint in order to avoid hitting pole directly
+!     (it is ok for endpoint to be pole point)
+!
+!-----------------------------------------------------------------------
+
+      if (abs(intrsct_x) < 1.e-10 .and. abs(intrsct_y) < 1.e-10 .and.
+     &    (endx /= zero .and. endy /=0)) then
+        if (avoid_pole_count > 2) then
+           avoid_pole_count = 0
+           avoid_pole_offset = 10.*avoid_pole_offset
+        endif
+
+        cross_product = begsegx*(endy-begsegy) - begsegy*(endx-begsegx)
+        intrsct_lat = begseg(1)
+        if (cross_product*intrsct_lat > zero) then
+          intrsct_lon = beglon    + avoid_pole_offset
+          begseg(2)   = begseg(2) + avoid_pole_offset
+        else
+          intrsct_lon = beglon    - avoid_pole_offset
+          begseg(2)   = begseg(2) - avoid_pole_offset
+        endif
+
+        avoid_pole_count = avoid_pole_count + 1
+        luse_last = .false.
+      else
+        avoid_pole_count = 0
+        avoid_pole_offset = tiny
+      endif
+
+!-----------------------------------------------------------------------
+!
+!     if the segment crosses a pole threshold, reset the intersection
+!     to be the threshold latitude and do not reuse x,y intersect
+!     on next entry.  only check if did not cross threshold last
+!     time - sometimes the coordinate transformation can place a
+!     segment on the other side of the threshold again
+!
+!-----------------------------------------------------------------------
+
+      if (lthresh) then
+        if (intrsct_lat > north_thresh .or. intrsct_lat < south_thresh)
+     &    lthresh = .false.
+      else if (beglat > zero .and. intrsct_lat < north_thresh) then
+        mat4 = endlat - begseg(1)
+        mat3 = endlon - begseg(2)
+        if (mat3 >  pi) mat3 = mat3 - pi2
+        if (mat3 < -pi) mat3 = mat3 + pi2
+        intrsct_lat = north_thresh - tiny
+        s1 = (north_thresh - begseg(1))/mat4
+        intrsct_lon = begseg(2) + s1*mat3
+        luse_last = .false.
+        lthresh = .true.
+      else if (beglat < zero .and. intrsct_lat > south_thresh) then
+        mat4 = endlat - begseg(1)
+        mat3 = endlon - begseg(2)
+        if (mat3 >  pi) mat3 = mat3 - pi2
+        if (mat3 < -pi) mat3 = mat3 + pi2
+        intrsct_lat = south_thresh + tiny
+        s1 = (south_thresh - begseg(1))/mat4
+        intrsct_lon = begseg(2) + s1*mat3
+        luse_last = .false.
+        lthresh = .true.
+      endif
+
+      !***
+      !*** if reached end of segment, do not use x,y intersect 
+      !*** on next entry
+      !***
+
+      if (intrsct_lat == endlat .and. intrsct_lon == endlon) then
+        luse_last = .false.
+      endif
+
+!-----------------------------------------------------------------------
+
+      end subroutine pole_intersection
+
+!***********************************************************************
+
+      subroutine line_integral(weights, num_wts, 
+     &                       in_phi1, in_phi2, theta1, theta2,
+     &                       grid1_lat, grid1_lon, grid2_lat, grid2_lon)
+
+!-----------------------------------------------------------------------
+!
+!     this routine computes the line integral of the flux function 
+!     that results in the interpolation weights.  the line is defined
+!     by the input lat/lon of the endpoints.
+!
+!     weights(1:num_wts) hold the contribution measured relative to
+!     the grid1 reference point; weights(num_wts+1:2*num_wts) hold the
+!     same contribution relative to the grid2 reference point.  the
+!     caller accumulates these over all segments bounding a cell.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     intent(in):
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), intent(in) ::
+     &        num_wts  ! number of weights to compute
+
+      real (kind=dbl_kind), intent(in) :: 
+     &     in_phi1, in_phi2,     ! longitude endpoints for the segment
+     &     theta1, theta2,       ! latitude  endpoints for the segment
+     &     grid1_lat, grid1_lon, ! reference coordinates for each
+     &     grid2_lat, grid2_lon  ! grid (to ensure correct 0,2pi interv.)
+
+!-----------------------------------------------------------------------
+!
+!     intent(out):
+!
+!-----------------------------------------------------------------------
+
+      real (kind=dbl_kind), dimension(2*num_wts), intent(out) ::
+     &     weights   ! line integral contribution to weights
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      ! (unused locals phidiff1, phidiff2, sinint removed)
+      real (kind=dbl_kind) :: dphi, sinth1, sinth2, costh1, costh2, fac,
+     &                        phi1, phi2
+      real (kind=dbl_kind) :: f1, f2, fint
+
+!-----------------------------------------------------------------------
+!
+!     weights for the general case based on a trapezoidal approx to
+!     the integrals.
+!
+!-----------------------------------------------------------------------
+
+      sinth1 = SIN(theta1)
+      sinth2 = SIN(theta2)
+      costh1 = COS(theta1)
+      costh2 = COS(theta2)
+
+      !*** signed longitude extent of the segment, wrapped into
+      !*** (-pi,pi) so segments crossing the 0/2pi cut behave
+
+      dphi = in_phi1 - in_phi2
+      if (dphi >  pi) then
+        dphi = dphi - pi2
+      else if (dphi < -pi) then
+        dphi = dphi + pi2
+      endif
+      dphi = half*dphi
+
+!-----------------------------------------------------------------------
+!
+!     the first weight is the area overlap integral. the second and
+!     fourth are second-order latitude gradient weights.
+!
+!-----------------------------------------------------------------------
+
+      weights(        1) = dphi*(sinth1 + sinth2)
+      weights(num_wts+1) = dphi*(sinth1 + sinth2)
+      weights(        2) = dphi*(costh1 + costh2 + (theta1*sinth1 +
+     &                                              theta2*sinth2))
+      weights(num_wts+2) = dphi*(costh1 + costh2 + (theta1*sinth1 +
+     &                                              theta2*sinth2))
+
+!-----------------------------------------------------------------------
+!
+!     the third and fifth weights are for the second-order phi gradient
+!     component.  must be careful of longitude range.
+!
+!-----------------------------------------------------------------------
+
+      f1 = half*(costh1*sinth1 + theta1)
+      f2 = half*(costh2*sinth2 + theta2)
+
+      phi1 = in_phi1 - grid1_lon
+      if (phi1 >  pi) then
+        phi1 = phi1 - pi2
+      else if (phi1 < -pi) then
+        phi1 = phi1 + pi2
+      endif
+
+      phi2 = in_phi2 - grid1_lon
+      if (phi2 >  pi) then
+        phi2 = phi2 - pi2
+      else if (phi2 < -pi) then
+        phi2 = phi2 + pi2
+      endif
+
+      !*** if the two endpoints end up on opposite sides of the cut,
+      !*** split the integral at +/-pi using interpolated fint
+
+      if ((phi2-phi1) <  pi .and. (phi2-phi1) > -pi) then
+        weights(3) = dphi*(phi1*f1 + phi2*f2)
+      else
+        if (phi1 > zero) then
+          fac = pi
+        else
+          fac = -pi
+        endif
+        fint = f1 + (f2-f1)*(fac-phi1)/abs(dphi)
+        weights(3) = half*phi1*(phi1-fac)*f1 -
+     &               half*phi2*(phi2+fac)*f2 +
+     &               half*fac*(phi1+phi2)*fint
+      endif
+
+      phi1 = in_phi1 - grid2_lon
+      if (phi1 >  pi) then
+        phi1 = phi1 - pi2
+      else if (phi1 < -pi) then
+        phi1 = phi1 + pi2
+      endif
+
+      phi2 = in_phi2 - grid2_lon
+      if (phi2 >  pi) then
+        phi2 = phi2 - pi2
+      else if (phi2 < -pi) then
+        phi2 = phi2 + pi2
+      endif
+
+      if ((phi2-phi1) <  pi .and. (phi2-phi1) > -pi) then
+        weights(num_wts+3) = dphi*(phi1*f1 + phi2*f2)
+      else
+        if (phi1 > zero) then
+          fac = pi
+        else
+          fac = -pi
+        endif
+        fint = f1 + (f2-f1)*(fac-phi1)/abs(dphi)
+        weights(num_wts+3) = half*phi1*(phi1-fac)*f1 -
+     &                       half*phi2*(phi2+fac)*f2 +
+     &                       half*fac*(phi1+phi2)*fint
+      endif
+
+!-----------------------------------------------------------------------
+
+      end subroutine line_integral
+
+!***********************************************************************
+
+      subroutine store_link_cnsrv(add1, add2, weights)
+
+!-----------------------------------------------------------------------
+!
+!     this routine stores the address and weight for this link in
+!     the appropriate address and weight arrays and resizes those
+!     arrays if necessary.
+!
+!     if a link between add1 and add2 already exists, the new weights
+!     are accumulated into that entry rather than stored as a
+!     duplicate.  the save'd link_add1/link_add2 arrays record, per
+!     cell, the first and last link index involving that cell so the
+!     duplicate search only scans a small window of the link list.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     input variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), intent(in) ::
+     &        add1,  ! address on grid1
+     &        add2   ! address on grid2
+
+      real (kind=dbl_kind), dimension(:), intent(in) ::
+     &        weights ! array of remapping weights for this link
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind) :: nlink, min_link, max_link ! link index
+
+      ! NOTE(review): these persist across calls and are sized from
+      ! grid1_size/grid2_size on the first call only -- assumes grid
+      ! sizes do not change within a run; they are never deallocated
+      integer (kind=int_kind), dimension(:,:), allocatable, save ::
+     &        link_add1,  ! min,max link add to restrict search
+     &        link_add2   ! min,max link add to restrict search
+
+      logical (kind=log_kind), save :: first_call = .true.
+
+!-----------------------------------------------------------------------
+!
+!     if all weights are zero, do not bother storing the link
+!
+!-----------------------------------------------------------------------
+
+      if (all(weights == zero)) return
+
+!-----------------------------------------------------------------------
+!
+!     restrict the range of links to search for existing links
+!
+!-----------------------------------------------------------------------
+
+      if (first_call) then
+        allocate(link_add1(2,grid1_size), link_add2(2,grid2_size))
+        link_add1 = 0
+        link_add2 = 0
+        first_call = .false.
+        ! empty window: no links exist yet, skip the duplicate search
+        min_link = 1
+        max_link = 0
+      else
+        min_link = min(link_add1(1,add1),link_add2(1,add2))
+        max_link = max(link_add1(2,add1),link_add2(2,add2))
+        ! zero means neither cell has appeared in any link yet
+        if (min_link == 0) then
+          min_link = 1
+          max_link = 0
+        endif
+      endif
+
+!-----------------------------------------------------------------------
+!
+!     if the link already exists, add the weight to the current weight
+!     arrays
+!
+!-----------------------------------------------------------------------
+
+      ! nested ifs: a link matches only when BOTH addresses agree
+      do nlink=min_link,max_link
+        if (add1 == grid1_add_map1(nlink)) then
+        if (add2 == grid2_add_map1(nlink)) then
+
+          wts_map1(:,nlink) = wts_map1(:,nlink) + weights(1:num_wts)
+          if (num_maps == 2) then
+            wts_map2(:,nlink) = wts_map2(:,nlink) + 
+     &                                  weights(num_wts+1:2*num_wts)
+          endif
+          return
+
+        endif
+        endif
+      end do
+
+!-----------------------------------------------------------------------
+!
+!     if the link does not yet exist, increment number of links and 
+!     check to see if remap arrays need to be increased to accomodate 
+!     the new link.  then store the link.
+!
+!-----------------------------------------------------------------------
+
+      num_links_map1  = num_links_map1 + 1
+      if (num_links_map1 > max_links_map1) 
+     &   call resize_remap_vars(1,resize_increment)
+
+      grid1_add_map1(num_links_map1) = add1
+      grid2_add_map1(num_links_map1) = add2
+      wts_map1    (:,num_links_map1) = weights(1:num_wts)
+
+      if (num_maps > 1) then
+        num_links_map2  = num_links_map2 + 1
+        if (num_links_map2 > max_links_map2) 
+     &     call resize_remap_vars(2,resize_increment)
+
+        grid1_add_map2(num_links_map2) = add1
+        grid2_add_map2(num_links_map2) = add2
+        wts_map2    (:,num_links_map2) = weights(num_wts+1:2*num_wts)
+      endif
+
+      ! update the per-cell search window (map1 indices are used for
+      ! both grids since the duplicate search scans the map1 lists)
+      if (link_add1(1,add1) == 0) link_add1(1,add1) = num_links_map1
+      if (link_add2(1,add2) == 0) link_add2(1,add2) = num_links_map1
+      link_add1(2,add1) = num_links_map1
+      link_add2(2,add2) = num_links_map1
+
+!-----------------------------------------------------------------------
+
+      end subroutine store_link_cnsrv
+
+!***********************************************************************
+
+      end module remap_conservative
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

+ 499 - 0
interpolation/scrip_sources/remap_distwgt.f

@@ -0,0 +1,499 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+!     this module contains necessary routines for performing an 
+!     interpolation using a distance-weighted average of n nearest
+!     neighbors.
+!
+!-----------------------------------------------------------------------
+!
+!     CVS:$Id: remap_distwgt.f,v 1.3 2000/04/19 21:56:26 pwjones Exp $
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!***********************************************************************
+
+      module remap_distance_weight
+
+!-----------------------------------------------------------------------
+
+      use kinds_mod     ! defines common data types
+      use constants     ! defines common constants
+      use grids         ! module containing grid info
+      use remap_vars    ! module containing remap info
+
+      implicit none
+
+!-----------------------------------------------------------------------
+!
+!     module variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), parameter :: 
+     &     num_neighbors=4  ! num nearest neighbors to interpolate from
+
+      ! work arrays shared between remap_distwgt and grid_search_nbr;
+      ! allocated and deallocated inside remap_distwgt for each pass
+      real (kind=dbl_kind), dimension(:), allocatable, save ::
+     &     coslat, sinlat, ! cosine, sine of grid lats (for distance)
+     &     coslon, sinlon, ! cosine, sine of grid lons (for distance)
+     &     wgtstmp         ! an array to hold the link weight
+
+!***********************************************************************
+
+      contains
+
+!***********************************************************************
+
+      subroutine remap_distwgt
+
+!-----------------------------------------------------------------------
+!
+!     this routine computes the inverse-distance weights for a
+!     nearest-neighbor interpolation.
+!
+!     for each unmasked destination point, the num_neighbors closest
+!     source points are located and every masked-in neighbor receives
+!     a weight proportional to the inverse of its angular distance,
+!     normalized so the stored weights sum to one.  a second pass
+!     computes the grid2 -> grid1 map when num_maps > 1.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      logical (kind=log_kind), dimension(num_neighbors) ::
+     &     nbr_mask        ! mask at nearest neighbors
+
+      integer (kind=int_kind) :: n,
+     &     dst_add,        ! destination address
+     &     nmap            ! index of current map being computed
+
+      integer (kind=int_kind), dimension(num_neighbors) ::
+     &     nbr_add         ! source address at nearest neighbors
+
+      real (kind=dbl_kind), dimension(num_neighbors) ::
+     &     nbr_dist        ! angular distance four nearest neighbors
+
+      real (kind=dbl_kind) ::
+     &     coslat_dst,     ! cos(lat) of destination grid point
+     &     coslon_dst,     ! cos(lon) of destination grid point
+     &     sinlat_dst,     ! sin(lat) of destination grid point
+     &     sinlon_dst,     ! sin(lon) of destination grid point
+     &     dist_tot        ! sum of neighbor distances (for normalizing)
+
+!-----------------------------------------------------------------------
+!
+!     compute mappings from grid1 to grid2
+!
+!-----------------------------------------------------------------------
+
+      nmap = 1
+
+      !***
+      !*** allocate wgtstmp to be consistent with store_link interface
+      !***
+
+      allocate (wgtstmp(num_wts))
+
+      !***
+      !*** compute cos, sin of lat/lon on source grid for distance
+      !*** calculations
+      !***
+
+      allocate (coslat(grid1_size), coslon(grid1_size),
+     &          sinlat(grid1_size), sinlon(grid1_size))
+
+      coslat = cos(grid1_center_lat)
+      coslon = cos(grid1_center_lon)
+      sinlat = sin(grid1_center_lat)
+      sinlon = sin(grid1_center_lon)
+
+      !***
+      !*** loop over destination grid 
+      !***
+
+      grid_loop1: do dst_add = 1, grid2_size
+
+        ! masked-out destination points get no links at all
+        if (.not. grid2_mask(dst_add)) cycle grid_loop1
+
+        coslat_dst = cos(grid2_center_lat(dst_add))
+        coslon_dst = cos(grid2_center_lon(dst_add))
+        sinlat_dst = sin(grid2_center_lat(dst_add))
+        sinlon_dst = sin(grid2_center_lon(dst_add))
+
+        !***
+        !*** find nearest grid points on source grid and
+        !*** distances to each point
+        !***
+
+        call grid_search_nbr(nbr_add, nbr_dist, 
+     &                       grid2_center_lat(dst_add),
+     &                       grid2_center_lon(dst_add),
+     &                       coslat_dst, coslon_dst, 
+     &                       sinlat_dst, sinlon_dst,
+     &                       bin_addr1, bin_addr2)
+
+        !***
+        !*** compute weights based on inverse distance
+        !*** if mask is false, eliminate those points
+        !***
+
+        ! NOTE(review): assumes grid_search_nbr filled every entry of
+        ! nbr_add; an unfilled entry would be 0 and index grid1_mask
+        ! out of bounds -- confirm source grid has >= num_neighbors
+        ! points.  nbr_dist is inverted in place here.
+        dist_tot = zero
+        do n=1,num_neighbors
+          if (grid1_mask(nbr_add(n))) then
+            nbr_dist(n) = one/nbr_dist(n)
+            dist_tot = dist_tot + nbr_dist(n)
+            nbr_mask(n) = .true.
+          else
+            nbr_mask(n) = .false.
+          endif
+        end do
+
+        !***
+        !*** normalize weights and store the link
+        !***
+
+        do n=1,num_neighbors
+          if (nbr_mask(n)) then
+            wgtstmp(1) = nbr_dist(n)/dist_tot
+            call store_link_nbr(nbr_add(n), dst_add, wgtstmp, nmap)
+            grid2_frac(dst_add) = one
+          endif
+        end do
+
+      end do grid_loop1
+
+      deallocate (coslat, coslon, sinlat, sinlon)
+
+!-----------------------------------------------------------------------
+!
+!     compute mappings from grid2 to grid1 if necessary
+!
+!-----------------------------------------------------------------------
+
+      if (num_maps > 1) then
+
+      nmap = 2
+
+      !***
+      !*** compute cos, sin of lat/lon on source grid for distance
+      !*** calculations (grid2 is the source for this pass)
+      !***
+
+      allocate (coslat(grid2_size), coslon(grid2_size),
+     &          sinlat(grid2_size), sinlon(grid2_size))
+
+      coslat = cos(grid2_center_lat)
+      coslon = cos(grid2_center_lon)
+      sinlat = sin(grid2_center_lat)
+      sinlon = sin(grid2_center_lon)
+
+      !***
+      !*** loop over destination grid 
+      !***
+
+      grid_loop2: do dst_add = 1, grid1_size
+
+        if (.not. grid1_mask(dst_add)) cycle grid_loop2
+
+        coslat_dst = cos(grid1_center_lat(dst_add))
+        coslon_dst = cos(grid1_center_lon(dst_add))
+        sinlat_dst = sin(grid1_center_lat(dst_add))
+        sinlon_dst = sin(grid1_center_lon(dst_add))
+
+        !***
+        !*** find four nearest grid points on source grid and
+        !*** distances to each point
+        !***
+
+        ! note the bin arrays are swapped relative to the first pass
+        call grid_search_nbr(nbr_add, nbr_dist,
+     &                       grid1_center_lat(dst_add),
+     &                       grid1_center_lon(dst_add),
+     &                       coslat_dst, coslon_dst, 
+     &                       sinlat_dst, sinlon_dst,
+     &                       bin_addr2, bin_addr1)
+
+        !***
+        !*** compute weights based on inverse distance
+        !*** if mask is false, eliminate those points
+        !***
+
+        dist_tot = zero
+        do n=1,num_neighbors
+          if (grid2_mask(nbr_add(n))) then
+            nbr_dist(n) = one/nbr_dist(n)
+            dist_tot = dist_tot + nbr_dist(n)
+            nbr_mask(n) = .true.
+          else
+            nbr_mask(n) = .false.
+          endif
+        end do
+
+        !***
+        !*** normalize weights and store the link
+        !***
+
+        do n=1,num_neighbors
+          if (nbr_mask(n)) then
+            wgtstmp(1) = nbr_dist(n)/dist_tot
+            call store_link_nbr(dst_add, nbr_add(n), wgtstmp, nmap)
+            grid1_frac(dst_add) = one
+          endif
+        end do
+
+      end do grid_loop2
+
+      deallocate (coslat, coslon, sinlat, sinlon)
+
+      endif
+
+      deallocate(wgtstmp)
+
+!-----------------------------------------------------------------------
+
+      end subroutine remap_distwgt
+
+!***********************************************************************
+
+      subroutine grid_search_nbr(nbr_add, nbr_dist, plat, plon, 
+     &               coslat_dst, coslon_dst, sinlat_dst, sinlon_dst,
+     &               src_bin_add, dst_bin_add)
+
+!-----------------------------------------------------------------------
+!
+!     this routine finds the closest num_neighbor points to a search 
+!     point and computes a distance to each of the neighbors.
+!
+!     the search is restricted to the address range of the bin(s)
+!     containing the point, expanded by the adjacent bins; if the
+!     point matches no bin, the entire source grid is searched.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     output variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), dimension(num_neighbors), intent(out) ::
+     &        nbr_add  ! address of each of the closest points
+
+      real (kind=dbl_kind), dimension(num_neighbors), intent(out) ::
+     &        nbr_dist ! distance to each of the closest points
+
+!-----------------------------------------------------------------------
+!
+!     input variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), dimension(:,:), intent(in) ::
+     &        src_bin_add, ! search bins for restricting search
+     &        dst_bin_add  ! currently unused; kept for interface symm.
+
+      real (kind=dbl_kind), intent(in) ::
+     &        plat,         ! latitude  of the search point
+     &        plon,         ! longitude of the search point
+     &        coslat_dst,   ! cos(lat)  of the search point
+     &        coslon_dst,   ! cos(lon)  of the search point
+     &        sinlat_dst,   ! sin(lat)  of the search point
+     &        sinlon_dst    ! sin(lon)  of the search point
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind) :: n, nmax, nadd, nchk, ! dummy indices
+     &        min_add, max_add, nm1, np1, i, j, ip1, im1, jp1, jm1
+
+      real (kind=dbl_kind) ::
+     &        distance      ! angular distance
+
+!-----------------------------------------------------------------------
+!
+!     loop over source grid and find nearest neighbors
+!
+!-----------------------------------------------------------------------
+
+      !***
+      !*** restrict the search using search bins
+      !*** expand the bins to catch neighbors
+      !***
+
+      !*** default to searching the whole source grid so that a point
+      !*** matching no bin (possible exactly at bin edges) does not
+      !*** leave min_add/max_add undefined
+
+      min_add = minval(src_bin_add(1,:))
+      max_add = maxval(src_bin_add(2,:))
+
+      select case (restrict_type)
+      case('latitude')
+
+        do n=1,num_srch_bins
+          if (plat >= bin_lats(1,n) .and. plat <= bin_lats(2,n)) then
+            min_add = src_bin_add(1,n)
+            max_add = src_bin_add(2,n)
+
+            nm1 = max(n-1,1)
+            np1 = min(n+1,num_srch_bins)
+
+            min_add = min(min_add,src_bin_add(1,nm1))
+            max_add = max(max_add,src_bin_add(2,nm1))
+            min_add = min(min_add,src_bin_add(1,np1))
+            max_add = max(max_add,src_bin_add(2,np1))
+          endif
+        end do
+
+      case('latlon')
+
+        n = 0
+        nmax = nint(sqrt(real(num_srch_bins)))
+        do j=1,nmax
+        jp1 = min(j+1,nmax)
+        jm1 = max(j-1,1)
+        do i=1,nmax
+          ip1 = min(i+1,nmax)
+          im1 = max(i-1,1)
+
+          n = n+1
+          ! NOTE(review): index 3 into bin_lons assumes the bins store
+          ! more than a (min,max) pair -- confirm against grids.f
+          if (plat >= bin_lats(1,n) .and. plat <= bin_lats(2,n) .and.
+     &        plon >= bin_lons(1,n) .and. plon <= bin_lons(3,n)) then
+            min_add = src_bin_add(1,n)
+            max_add = src_bin_add(2,n)
+
+            nm1 = (jm1-1)*nmax + im1
+            np1 = (jp1-1)*nmax + ip1
+            nm1 = max(nm1,1)
+            np1 = min(np1,num_srch_bins)
+
+            min_add = min(min_add,src_bin_add(1,nm1))
+            max_add = max(max_add,src_bin_add(2,nm1))
+            min_add = min(min_add,src_bin_add(1,np1))
+            max_add = max(max_add,src_bin_add(2,np1))
+          endif
+        end do
+        end do
+
+      end select
+
+      !***
+      !*** initialize distance and address arrays
+      !***
+
+      nbr_add = 0
+      nbr_dist = bignum
+
+      do nadd=min_add,max_add
+
+        !***
+        !*** find distance to this point
+        !***
+
+        distance = acos(sinlat_dst*sinlat(nadd) +
+     &                  coslat_dst*coslat(nadd)*
+     &                 (coslon_dst*coslon(nadd) +
+     &                  sinlon_dst*sinlon(nadd)) )
+        ! clamp to avoid a zero distance (caller divides by it)
+        ! NOTE(review): literal is single precision; consider a
+        ! dbl_kind constant for kind consistency
+        distance = max(distance,1e-5)
+
+        !***
+        !*** store the address and distance if this is one of the
+        !*** smallest four so far
+        !***
+
+        ! insertion into the sorted nbr_dist list, shifting the
+        ! larger entries down by one
+        check_loop: do nchk=1,num_neighbors
+          if (distance .lt. nbr_dist(nchk)) then
+            do n=num_neighbors,nchk+1,-1
+              nbr_add(n) = nbr_add(n-1)
+              nbr_dist(n) = nbr_dist(n-1)
+            end do
+            nbr_add(nchk) = nadd
+            nbr_dist(nchk) = distance
+            exit check_loop
+          endif
+        end do check_loop
+
+      end do
+
+!-----------------------------------------------------------------------
+
+      end subroutine grid_search_nbr 
+
+!***********************************************************************
+
+      subroutine store_link_nbr(add1, add2, weights, nmap)
+
+!-----------------------------------------------------------------------
+!
+!     record one remapping link (source/destination address pair plus
+!     its weights) in the arrays for the map selected by nmap,
+!     enlarging those arrays whenever capacity is exceeded.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     input variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), intent(in) ::
+     &        add1,  ! address on grid1
+     &        add2,  ! address on grid2
+     &        nmap   ! identifies which direction for mapping
+
+      real (kind=dbl_kind), dimension(:), intent(in) ::
+     &        weights ! array of remapping weights for this link
+
+!-----------------------------------------------------------------------
+!
+!     bump the link counter for the requested map, resize the remap
+!     arrays if the new link would overflow them, then store the link
+!
+!-----------------------------------------------------------------------
+
+      if (nmap == 1) then
+
+        num_links_map1 = num_links_map1 + 1
+
+        if (num_links_map1 > max_links_map1)
+     &     call resize_remap_vars(1,resize_increment)
+
+        grid1_add_map1(num_links_map1) = add1
+        grid2_add_map1(num_links_map1) = add2
+        wts_map1    (:,num_links_map1) = weights
+
+      else if (nmap == 2) then
+
+        num_links_map2 = num_links_map2 + 1
+
+        if (num_links_map2 > max_links_map2)
+     &     call resize_remap_vars(2,resize_increment)
+
+        grid1_add_map2(num_links_map2) = add1
+        grid2_add_map2(num_links_map2) = add2
+        wts_map2    (:,num_links_map2) = weights
+
+      endif
+
+!-----------------------------------------------------------------------
+
+      end subroutine store_link_nbr
+
+!***********************************************************************
+
+      end module remap_distance_weight
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

+ 1027 - 0
interpolation/scrip_sources/remap_read.f

@@ -0,0 +1,1027 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+!     This routine reads remapping information from files written
+!     by remap_setup.  If remapping in both directions is required,
+!     two input files must be specified.
+!
+!-----------------------------------------------------------------------
+!
+!     CVS:$Id: remap_read.f,v 1.6 2000/04/19 21:56:26 pwjones Exp $
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!***********************************************************************
+
+      module remap_read
+
+!-----------------------------------------------------------------------
+!
+!     contains routines for reading a remap file
+!
+!-----------------------------------------------------------------------
+
+      use kinds_mod     ! defines common data types
+      use constants     ! defines useful constants
+      use grids         ! includes all grid information
+      use netcdf_mod    ! module with netcdf vars and utilities
+      use remap_vars    ! module for all required remapping variables
+
+      implicit none
+
+!-----------------------------------------------------------------------
+!
+!     module variables
+!
+!-----------------------------------------------------------------------
+!-----------------------------------------------------------------------
+!
+!     various netCDF ids for files variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), private :: ! netCDF ids
+     &         ncstat, nc_file_id,
+     &         nc_srcgrdsize_id, nc_dstgrdsize_id,
+     &         nc_srcgrdcorn_id, nc_dstgrdcorn_id,
+     &         nc_srcgrdrank_id, nc_dstgrdrank_id,
+     &         nc_srcgrddims_id, nc_dstgrddims_id,
+     &         nc_numlinks_id, nc_numwgts_id, 
+     &         nc_srcgrdimask_id, nc_dstgrdimask_id,
+     &         nc_srcgrdcntrlat_id, nc_srcgrdcntrlon_id,
+     &         nc_srcgrdcrnrlat_id, nc_srcgrdcrnrlon_id,
+     &         nc_srcgrdarea_id, nc_srcgrdfrac_id,
+     &         nc_dstgrdcntrlat_id, nc_dstgrdcntrlon_id,
+     &         nc_dstgrdcrnrlat_id, nc_dstgrdcrnrlon_id,
+     &         nc_dstgrdarea_id, nc_dstgrdfrac_id,
+     &         nc_srcgrdadd_id, nc_dstgrdadd_id, nc_rmpmatrix_id
+
+!***********************************************************************
+
+      contains
+
+!***********************************************************************
+
+      subroutine read_remap(map_name, interp_file)
+
+!-----------------------------------------------------------------------
+!
+!     this driver routine reads some global attributes and then
+!     calls a specific read routine based on file conventions
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     input variables
+!
+!-----------------------------------------------------------------------
+
+      character(char_len), intent(in) ::
+     &  interp_file        ! filename for remap data
+
+!-----------------------------------------------------------------------
+!
+!     output variables
+!
+!-----------------------------------------------------------------------
+
+      character(char_len), intent(out) ::
+     &  map_name            ! name for mapping
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      character(char_len) :: 
+     &   map_method       ! character string for map_type
+     &,  normalize_opt    ! character string for normalization option
+     &,  convention       ! character string for output convention
+
+!-----------------------------------------------------------------------
+!
+!     open file and read some global information
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_open(interp_file, NF_NOWRITE, nc_file_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** map name
+      !***
+      map_name = ' '
+      ncstat = nf_get_att_text(nc_file_id, NF_GLOBAL, 'title',
+     &                         map_name)
+      call netcdf_error_handler(ncstat)
+
+      print *,'Reading remapping:',trim(map_name)
+      print *,'From file:',trim(interp_file)
+
+      !***
+      !*** normalization option
+      !***
+      normalize_opt = ' '
+      ncstat = nf_get_att_text(nc_file_id, NF_GLOBAL, 'normalization',
+     &                         normalize_opt)
+      call netcdf_error_handler(ncstat)
+
+      select case(normalize_opt)
+      case ('none')
+        norm_opt = norm_opt_none
+      case ('fracarea')
+        norm_opt = norm_opt_frcarea
+      case ('destarea')
+        norm_opt = norm_opt_dstarea
+      case default
+        print *,'normalize_opt = ',normalize_opt
+        stop 'Invalid normalization option'
+      end select
+
+      !***
+      !*** map method
+      !***
+      map_method = ' '
+      ncstat = nf_get_att_text (nc_file_id, NF_GLOBAL, 'map_method',
+     &                          map_method)
+      call netcdf_error_handler(ncstat)
+
+      select case(map_method)
+      case('Conservative remapping')
+        map_type = map_type_conserv
+      case('Bilinear remapping')
+        map_type = map_type_bilinear
+      case('Distance weighted avg of nearest neighbors')
+        map_type = map_type_distwgt
+      case('Bicubic remapping')
+        map_type = map_type_bicubic
+      case default
+        print *,'map_type = ',map_method
+        stop 'Invalid Map Type'
+      end select
+
+      !***
+      !*** file convention
+      !***
+      convention = ' '
+      ncstat = nf_get_att_text (nc_file_id, NF_GLOBAL, 'conventions',
+     &                          convention)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     call appropriate read routine based on output convention
+!
+!-----------------------------------------------------------------------
+
+      select case(convention)
+      case ('SCRIP')
+        call read_remap_scrip
+      case ('NCAR-CSM')
+        call read_remap_csm
+      case default
+        print *,'convention = ',convention
+        stop 'unknown output file convention'
+      end select
+
+!-----------------------------------------------------------------------
+
+      end subroutine read_remap
+
+!***********************************************************************
+
+      subroutine read_remap_scrip
+
+!-----------------------------------------------------------------------
+!
+!     the routine reads a netCDF file to extract remapping info
+!     in SCRIP format
+!
+!-----------------------------------------------------------------------
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      character (char_len) ::
+     &  grid1_name           ! grid name for source grid
+     &, grid2_name           ! grid name for dest   grid
+
+      integer (kind=int_kind) ::  
+     &  n                    ! dummy index
+
+      integer (kind=int_kind), dimension(:), allocatable ::
+     &  grid1_mask_int,      ! integer masks to determine
+     &  grid2_mask_int       ! cells that participate in map
+
+!-----------------------------------------------------------------------
+!
+!     read some additional global attributes
+!
+!-----------------------------------------------------------------------
+
+      !***
+      !*** source and destination grid names
+      !***
+
+      grid1_name = ' '
+      grid2_name = ' '
+      ncstat = nf_get_att_text (nc_file_id, NF_GLOBAL, 'source_grid',
+     &                          grid1_name)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_att_text (nc_file_id, NF_GLOBAL, 'dest_grid',
+     &                          grid2_name)
+      call netcdf_error_handler(ncstat)
+
+      print *,' '
+      print *,'Remapping between:',trim(grid1_name)
+      print *,'and ',trim(grid2_name)
+      print *,' '
+
+!-----------------------------------------------------------------------
+!
+!     read dimension information
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_inq_dimid(nc_file_id, 'src_grid_size', 
+     &                      nc_srcgrdsize_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_file_id, nc_srcgrdsize_id, grid1_size)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_file_id, 'dst_grid_size', 
+     &                      nc_dstgrdsize_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_file_id, nc_dstgrdsize_id, grid2_size)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_file_id, 'src_grid_corners', 
+     &                      nc_srcgrdcorn_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_file_id, nc_srcgrdcorn_id, 
+     &                       grid1_corners)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_file_id, 'dst_grid_corners', 
+     &                      nc_dstgrdcorn_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_file_id, nc_dstgrdcorn_id, 
+     &                       grid2_corners)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_file_id, 'src_grid_rank', 
+     &                      nc_srcgrdrank_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_file_id, nc_srcgrdrank_id, 
+     &                       grid1_rank)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_file_id, 'dst_grid_rank', 
+     &                      nc_dstgrdrank_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_file_id, nc_dstgrdrank_id, 
+     &                       grid2_rank)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_file_id, 'num_links', 
+     &                      nc_numlinks_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_file_id, nc_numlinks_id, 
+     &                       num_links_map1)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_file_id, 'num_wgts', 
+     &                      nc_numwgts_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_file_id, nc_numwgts_id, num_wts)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     allocate arrays
+!
+!-----------------------------------------------------------------------
+
+      allocate( grid1_dims      (grid1_rank),
+     &          grid1_center_lat(grid1_size), 
+     &          grid1_center_lon(grid1_size),
+     &          grid1_area      (grid1_size),
+     &          grid1_frac      (grid1_size),
+     &          grid1_mask      (grid1_size),
+     &          grid1_mask_int  (grid1_size),
+     &          grid1_corner_lat(grid1_corners, grid1_size),
+     &          grid1_corner_lon(grid1_corners, grid1_size) )
+
+      allocate( grid2_dims      (grid2_rank),
+     &          grid2_center_lat(grid2_size), 
+     &          grid2_center_lon(grid2_size),
+     &          grid2_area      (grid2_size),
+     &          grid2_frac      (grid2_size),
+     &          grid2_mask      (grid2_size),
+     &          grid2_mask_int  (grid2_size),
+     &          grid2_corner_lat(grid2_corners, grid2_size),
+     &          grid2_corner_lon(grid2_corners, grid2_size) )
+
+      allocate( grid1_add_map1(num_links_map1),
+     &          grid2_add_map1(num_links_map1),
+     &          wts_map1(num_wts,num_links_map1) )
+
+!-----------------------------------------------------------------------
+!
+!     get variable ids
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_inq_varid(nc_file_id, 'src_grid_dims', 
+     &                      nc_srcgrddims_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'src_grid_imask', 
+     &                      nc_srcgrdimask_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'src_grid_center_lat', 
+     &                                   nc_srcgrdcntrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'src_grid_center_lon', 
+     &                                   nc_srcgrdcntrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'src_grid_corner_lat', 
+     &                                   nc_srcgrdcrnrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'src_grid_corner_lon', 
+     &                                   nc_srcgrdcrnrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'src_grid_area', 
+     &                                   nc_srcgrdarea_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'src_grid_frac', 
+     &                                   nc_srcgrdfrac_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'dst_grid_dims', 
+     &                      nc_dstgrddims_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'dst_grid_imask', 
+     &                      nc_dstgrdimask_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'dst_grid_center_lat', 
+     &                                   nc_dstgrdcntrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'dst_grid_center_lon', 
+     &                                   nc_dstgrdcntrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'dst_grid_corner_lat', 
+     &                                   nc_dstgrdcrnrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'dst_grid_corner_lon', 
+     &                                   nc_dstgrdcrnrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'dst_grid_area', 
+     &                                   nc_dstgrdarea_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'dst_grid_frac', 
+     &                                   nc_dstgrdfrac_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'src_address', 
+     &                                   nc_srcgrdadd_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'dst_address', 
+     &                                   nc_dstgrdadd_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'remap_matrix', 
+     &                                   nc_rmpmatrix_id)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     read all variables
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_get_var_int(nc_file_id, nc_srcgrddims_id, 
+     &                        grid1_dims)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_file_id, nc_srcgrdimask_id, 
+     &                        grid1_mask_int)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_srcgrdcntrlat_id, 
+     &                                       grid1_center_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_srcgrdcntrlon_id, 
+     &                                       grid1_center_lon)
+      call netcdf_error_handler(ncstat)
+
+      grid1_units = ' '
+      ncstat = nf_get_att_text(nc_file_id, nc_srcgrdcntrlat_id, 'units',
+     &                         grid1_units)
+      call netcdf_error_handler(ncstat)
+
+      select case (grid1_units(1:7))
+      case ('degrees')
+        grid1_center_lat = grid1_center_lat*deg2rad
+        grid1_center_lon = grid1_center_lon*deg2rad
+      case ('radians')
+        !*** no conversion necessary
+      case default
+        print *,'unknown units supplied for grid1 center lat/lon: '
+        print *,'proceeding assuming radians'
+      end select
+
+      ncstat = nf_get_var_double(nc_file_id, nc_srcgrdcrnrlat_id, 
+     &                                       grid1_corner_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_srcgrdcrnrlon_id, 
+     &                                       grid1_corner_lon)
+      call netcdf_error_handler(ncstat)
+
+      grid1_units = ' '
+      ncstat = nf_get_att_text(nc_file_id, nc_srcgrdcrnrlat_id, 'units',
+     &                         grid1_units)
+      call netcdf_error_handler(ncstat)
+
+      select case (grid1_units(1:7))
+      case ('degrees')
+        grid1_corner_lat = grid1_corner_lat*deg2rad
+        grid1_corner_lon = grid1_corner_lon*deg2rad
+      case ('radians')
+        !*** no conversion necessary
+      case default
+        print *,'unknown units supplied for grid1 corner lat/lon: '
+        print *,'proceeding assuming radians'
+      end select
+
+      ncstat = nf_get_var_double(nc_file_id, nc_srcgrdarea_id, 
+     &                                       grid1_area)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_srcgrdfrac_id, 
+     &                                       grid1_frac)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_file_id, nc_dstgrddims_id, 
+     &                        grid2_dims)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_file_id, nc_dstgrdimask_id, 
+     &                        grid2_mask_int)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_dstgrdcntrlat_id, 
+     &                                       grid2_center_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_dstgrdcntrlon_id, 
+     &                                       grid2_center_lon)
+      call netcdf_error_handler(ncstat)
+
+      grid2_units = ' '
+      ncstat = nf_get_att_text(nc_file_id, nc_dstgrdcntrlat_id, 'units',
+     &                         grid2_units)
+      call netcdf_error_handler(ncstat)
+
+      select case (grid2_units(1:7))
+      case ('degrees')
+        grid2_center_lat = grid2_center_lat*deg2rad
+        grid2_center_lon = grid2_center_lon*deg2rad
+      case ('radians')
+        !*** no conversion necessary
+      case default
+        print *,'unknown units supplied for grid2 center lat/lon: '
+        print *,'proceeding assuming radians'
+      end select
+
+      ncstat = nf_get_var_double(nc_file_id, nc_dstgrdcrnrlat_id, 
+     &                                       grid2_corner_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_dstgrdcrnrlon_id, 
+     &                                       grid2_corner_lon)
+      call netcdf_error_handler(ncstat)
+
+      grid2_units = ' '
+      ncstat = nf_get_att_text(nc_file_id, nc_dstgrdcrnrlat_id, 'units',
+     &                         grid2_units)
+      call netcdf_error_handler(ncstat)
+
+      select case (grid2_units(1:7))
+      case ('degrees')
+        grid2_corner_lat = grid2_corner_lat*deg2rad
+        grid2_corner_lon = grid2_corner_lon*deg2rad
+      case ('radians')
+        !*** no conversion necessary
+      case default
+        print *,'unknown units supplied for grid2 corner lat/lon: '
+        print *,'proceeding assuming radians'
+      end select
+
+      ncstat = nf_get_var_double(nc_file_id, nc_dstgrdarea_id, 
+     &                                       grid2_area)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_dstgrdfrac_id, 
+     &                                       grid2_frac)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_file_id, nc_srcgrdadd_id, 
+     &                        grid1_add_map1)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_file_id, nc_dstgrdadd_id, 
+     &                        grid2_add_map1)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_rmpmatrix_id, 
+     &                                       wts_map1)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     initialize logical mask 
+!
+!-----------------------------------------------------------------------
+
+      where (grid1_mask_int == 1)
+        grid1_mask = .true.
+      elsewhere
+        grid1_mask = .false.
+      endwhere
+      where (grid2_mask_int == 1)
+        grid2_mask = .true.
+      elsewhere
+        grid2_mask = .false.
+      endwhere
+      deallocate(grid1_mask_int, grid2_mask_int)
+
+!-----------------------------------------------------------------------
+!
+!     close input file
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_close(nc_file_id)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+
+      end subroutine read_remap_scrip
+
+!***********************************************************************
+
+      subroutine read_remap_csm
+
+!-----------------------------------------------------------------------
+!
+!     the routine reads a netCDF file to extract remapping info
+!     in NCAR-CSM format
+!
+!-----------------------------------------------------------------------
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      character (char_len) ::
+     &  grid1_name           ! grid name for source grid
+     &, grid2_name           ! grid name for dest   grid
+
+      integer (kind=int_kind) ::
+     &  nc_numwgts1_id    ! extra netCDF id for num_wgts > 1 
+     &, nc_rmpmatrix2_id  ! extra netCDF id for high-order remap matrix
+
+      real (kind=dbl_kind), dimension(:),allocatable ::
+     &  wts1              ! CSM wants single array for 1st-order wts
+
+      real (kind=dbl_kind), dimension(:,:),allocatable ::
+     &  wts2              ! write remaining weights in different array
+
+      integer (kind=int_kind) ::  
+     &  n                    ! dummy index
+
+      integer (kind=int_kind), dimension(:), allocatable ::
+     &  grid1_mask_int,      ! integer masks to determine
+     &  grid2_mask_int       ! cells that participate in map
+
+!-----------------------------------------------------------------------
+!
+!     read some additional global attributes
+!
+!-----------------------------------------------------------------------
+
+      !***
+      !*** source and destination grid names
+      !***
+
+      grid1_name = ' '
+      grid2_name = ' '
+      ncstat = nf_get_att_text (nc_file_id, NF_GLOBAL, 'domain_a',
+     &                          grid1_name)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_att_text (nc_file_id, NF_GLOBAL, 'domain_b',
+     &                          grid2_name)
+      call netcdf_error_handler(ncstat)
+
+      print *,' '
+      print *,'Remapping between:',trim(grid1_name)
+      print *,'and ',trim(grid2_name)
+      print *,' '
+
+!-----------------------------------------------------------------------
+!
+!     read dimension information
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_inq_dimid(nc_file_id, 'n_a', nc_srcgrdsize_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_file_id, nc_srcgrdsize_id, grid1_size)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_file_id, 'n_b', nc_dstgrdsize_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_file_id, nc_dstgrdsize_id, grid2_size)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_file_id, 'nv_a', nc_srcgrdcorn_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_file_id, nc_srcgrdcorn_id, 
+     &                       grid1_corners)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_file_id, 'nv_b', nc_dstgrdcorn_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_file_id, nc_dstgrdcorn_id, 
+     &                       grid2_corners)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_file_id, 'src_grid_rank', 
+     &                      nc_srcgrdrank_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_file_id, nc_srcgrdrank_id, 
+     &                       grid1_rank)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_file_id, 'dst_grid_rank', 
+     &                      nc_dstgrdrank_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_file_id, nc_dstgrdrank_id, 
+     &                       grid2_rank)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_file_id, 'n_s', 
+     &                      nc_numlinks_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_file_id, nc_numlinks_id, 
+     &                       num_links_map1)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_dimid(nc_file_id, 'num_wgts', 
+     &                      nc_numwgts_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_inq_dimlen(nc_file_id, nc_numwgts_id, num_wts)
+      call netcdf_error_handler(ncstat)
+
+      if (num_wts > 1) then
+        ncstat = nf_inq_dimid(nc_file_id, 'num_wgts1', 
+     &                        nc_numwgts1_id)
+        call netcdf_error_handler(ncstat)
+      endif
+
+!-----------------------------------------------------------------------
+!
+!     allocate arrays
+!
+!-----------------------------------------------------------------------
+
+      allocate( grid1_dims      (grid1_rank),
+     &          grid1_center_lat(grid1_size), 
+     &          grid1_center_lon(grid1_size),
+     &          grid1_area      (grid1_size),
+     &          grid1_frac      (grid1_size),
+     &          grid1_mask      (grid1_size),
+     &          grid1_mask_int  (grid1_size),
+     &          grid1_corner_lat(grid1_corners, grid1_size),
+     &          grid1_corner_lon(grid1_corners, grid1_size) )
+
+      allocate( grid2_dims      (grid2_rank),
+     &          grid2_center_lat(grid2_size), 
+     &          grid2_center_lon(grid2_size),
+     &          grid2_area      (grid2_size),
+     &          grid2_frac      (grid2_size),
+     &          grid2_mask      (grid2_size),
+     &          grid2_mask_int  (grid2_size),
+     &          grid2_corner_lat(grid2_corners, grid2_size),
+     &          grid2_corner_lon(grid2_corners, grid2_size) )
+
+      allocate( grid1_add_map1(num_links_map1),
+     &          grid2_add_map1(num_links_map1),
+     &          wts_map1(num_wts,num_links_map1),
+     &          wts1(num_links_map1),
+     &          wts2(num_wts-1,num_links_map1) )
+
+!-----------------------------------------------------------------------
+!
+!     get variable ids
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_inq_varid(nc_file_id, 'src_grid_dims', 
+     &                      nc_srcgrddims_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'mask_a', 
+     &                      nc_srcgrdimask_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'yc_a', nc_srcgrdcntrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'xc_a', nc_srcgrdcntrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'yv_a', nc_srcgrdcrnrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'xv_a', nc_srcgrdcrnrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'area_a', nc_srcgrdarea_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'frac_a', nc_srcgrdfrac_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'dst_grid_dims', 
+     &                      nc_dstgrddims_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'mask_b', 
+     &                      nc_dstgrdimask_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'yc_b', nc_dstgrdcntrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'xc_b', nc_dstgrdcntrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'yv_b', nc_dstgrdcrnrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'xv_b', nc_dstgrdcrnrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'area_b', nc_dstgrdarea_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'frac_b', nc_dstgrdfrac_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'col', nc_srcgrdadd_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'row', nc_dstgrdadd_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_inq_varid(nc_file_id, 'S', nc_rmpmatrix_id)
+      call netcdf_error_handler(ncstat)
+
+      if (num_wts > 1) then
+        ncstat = nf_inq_varid(nc_file_id, 'S2', nc_rmpmatrix2_id)
+        call netcdf_error_handler(ncstat)
+      endif
+
+!-----------------------------------------------------------------------
+!
+!     read all variables
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_get_var_int(nc_file_id, nc_srcgrddims_id, 
+     &                        grid1_dims)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_file_id, nc_srcgrdimask_id, 
+     &                        grid1_mask_int)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_srcgrdcntrlat_id, 
+     &                                       grid1_center_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_srcgrdcntrlon_id, 
+     &                                       grid1_center_lon)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_att_text(nc_file_id, nc_srcgrdcntrlat_id, 'units',
+     &                         grid1_units)
+      call netcdf_error_handler(ncstat)
+
+      select case (grid1_units(1:7))
+      case ('degrees')
+        grid1_center_lat = grid1_center_lat*deg2rad
+        grid1_center_lon = grid1_center_lon*deg2rad
+      case ('radians')
+        !*** no conversion necessary
+      case default
+        print *,'unknown units supplied for grid1 center lat/lon: '
+        print *,'proceeding assuming radians'
+      end select
+
+      ncstat = nf_get_var_double(nc_file_id, nc_srcgrdcrnrlat_id, 
+     &                                       grid1_corner_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_srcgrdcrnrlon_id, 
+     &                                       grid1_corner_lon)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_att_text(nc_file_id, nc_srcgrdcrnrlat_id, 'units',
+     &                         grid1_units)
+      call netcdf_error_handler(ncstat)
+
+      select case (grid1_units(1:7))
+      case ('degrees')
+        grid1_corner_lat = grid1_corner_lat*deg2rad
+        grid1_corner_lon = grid1_corner_lon*deg2rad
+      case ('radians')
+        !*** no conversion necessary
+      case default
+        print *,'unknown units supplied for grid1 corner lat/lon: '
+        print *,'proceeding assuming radians'
+      end select
+
+      ncstat = nf_get_var_double(nc_file_id, nc_srcgrdarea_id, 
+     &                                       grid1_area)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_srcgrdfrac_id, 
+     &                                       grid1_frac)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_file_id, nc_dstgrddims_id, 
+     &                        grid2_dims)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_file_id, nc_dstgrdimask_id, 
+     &                        grid2_mask_int)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_dstgrdcntrlat_id, 
+     &                                       grid2_center_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_dstgrdcntrlon_id, 
+     &                                       grid2_center_lon)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_att_text(nc_file_id, nc_dstgrdcntrlat_id, 'units',
+     &                         grid2_units)
+      call netcdf_error_handler(ncstat)
+
+      select case (grid2_units(1:7))
+      case ('degrees')
+        grid2_center_lat = grid2_center_lat*deg2rad
+        grid2_center_lon = grid2_center_lon*deg2rad
+      case ('radians')
+        !*** no conversion necessary
+      case default
+        print *,'unknown units supplied for grid2 center lat/lon: '
+        print *,'proceeding assuming radians'
+      end select
+
+      ncstat = nf_get_var_double(nc_file_id, nc_dstgrdcrnrlat_id, 
+     &                                       grid2_corner_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_dstgrdcrnrlon_id, 
+     &                                       grid2_corner_lon)
+      call netcdf_error_handler(ncstat)
+
+
+      ncstat = nf_get_att_text(nc_file_id, nc_dstgrdcrnrlat_id, 'units',
+     &                         grid2_units)
+      call netcdf_error_handler(ncstat)
+
+      select case (grid2_units(1:7))
+      case ('degrees')
+        grid2_corner_lat = grid2_corner_lat*deg2rad
+        grid2_corner_lon = grid2_corner_lon*deg2rad
+      case ('radians')
+        !*** no conversion necessary
+      case default
+        print *,'unknown units supplied for grid2 corner lat/lon: '
+        print *,'proceeding assuming radians'
+      end select
+
+      ncstat = nf_get_var_double(nc_file_id, nc_dstgrdarea_id, 
+     &                                       grid2_area)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_dstgrdfrac_id, 
+     &                                       grid2_frac)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_file_id, nc_srcgrdadd_id, 
+     &                        grid1_add_map1)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_int(nc_file_id, nc_dstgrdadd_id, 
+     &                        grid2_add_map1)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_get_var_double(nc_file_id, nc_rmpmatrix_id, 
+     &                                       wts1)
+      wts_map1(1,:) = wts1
+      deallocate(wts1)
+
+      if (num_wts > 1) then
+        ncstat = nf_get_var_double(nc_file_id, nc_rmpmatrix2_id, 
+     &                                         wts2)
+        wts_map1(2:,:) = wts2
+        deallocate(wts2)
+      endif
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     initialize logical mask 
+!
+!-----------------------------------------------------------------------
+
+      where (grid1_mask_int == 1)
+        grid1_mask = .true.
+      elsewhere
+        grid1_mask = .false.
+      endwhere
+      where (grid2_mask_int == 1)
+        grid2_mask = .true.
+      elsewhere
+        grid2_mask = .false.
+      endwhere
+      deallocate(grid1_mask_int, grid2_mask_int)
+
+!-----------------------------------------------------------------------
+!
+!     close input file
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_close(nc_file_id)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+
+      end subroutine read_remap_csm
+
+!***********************************************************************
+
+      end module remap_read
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

+ 302 - 0
interpolation/scrip_sources/remap_vars.f

@@ -0,0 +1,302 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+!     this module contains necessary variables for remapping between
+!     two grids.  also routines for resizing and initializing these
+!     variables.
+!
+!-----------------------------------------------------------------------
+!
+!     CVS:$Id: remap_vars.f,v 1.5 2000/04/19 21:56:26 pwjones Exp $
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!***********************************************************************
+
+      module remap_vars
+
+      use kinds_mod
+      use constants
+      use grids
+
+      implicit none
+
+!-----------------------------------------------------------------------
+!
+!     module variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), parameter ::
+     &      norm_opt_none    = 1
+     &,     norm_opt_dstarea = 2
+     &,     norm_opt_frcarea = 3
+
+      integer (kind=int_kind), parameter ::
+     &      map_type_conserv  = 1
+     &,     map_type_bilinear = 2
+     &,     map_type_bicubic  = 3
+     &,     map_type_distwgt  = 4
+
+      integer (kind=int_kind), save :: 
+     &      max_links_map1  ! current size of link arrays
+     &,     num_links_map1  ! actual number of links for remapping
+     &,     max_links_map2  ! current size of link arrays
+     &,     num_links_map2  ! actual number of links for remapping
+     &,     num_maps        ! num of remappings for this grid pair
+     &,     num_wts         ! num of weights used in remapping
+     &,     map_type        ! identifier for remapping method
+     &,     norm_opt        ! option for normalization (conserv only)
+     &,     resize_increment ! default amount to increase array size
+
+      integer (kind=int_kind), dimension(:), allocatable, save ::
+     &      grid1_add_map1, ! grid1 address for each link in mapping 1
+     &      grid2_add_map1, ! grid2 address for each link in mapping 1
+     &      grid1_add_map2, ! grid1 address for each link in mapping 2
+     &      grid2_add_map2  ! grid2 address for each link in mapping 2
+
+      real (kind=dbl_kind), dimension(:,:), allocatable, save ::
+     &      wts_map1, ! map weights for each link (num_wts,max_links)
+     &      wts_map2  ! map weights for each link (num_wts,max_links)
+
+!***********************************************************************
+
+      contains
+
+!***********************************************************************
+
+      subroutine init_remap_vars
+
+!-----------------------------------------------------------------------
+!
+!     this routine initializes some variables and provides an initial
+!     allocation of arrays (fairly large so frequent resizing 
+!     unnecessary).
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind) ::
+     &   ierr      ! error flag for allocations
+
+!-----------------------------------------------------------------------
+!
+!     determine the number of weights
+!
+!-----------------------------------------------------------------------
+
+      select case (map_type)
+      case(map_type_conserv)
+        num_wts = 3
+      case(map_type_bilinear)
+        num_wts = 1
+      case(map_type_bicubic)
+        num_wts = 4
+      case(map_type_distwgt)
+        num_wts = 1
+      case default
+        !*** without this guard num_wts would be left undefined
+        print *,'unknown map_type in init_remap_vars: ',map_type
+        stop
+      end select
+
+!-----------------------------------------------------------------------
+!
+!     initialize num_links and set max_links to four times the largest 
+!     of the destination grid sizes initially (can be changed later).
+!     set a default resize increment to increase the size of link
+!     arrays if the number of links exceeds the initial size
+!   
+!-----------------------------------------------------------------------
+
+      num_links_map1 = 0
+      max_links_map1 = 4*grid2_size
+      if (num_maps > 1) then
+        num_links_map2 = 0
+        max_links_map1 = max(4*grid1_size,4*grid2_size)
+        max_links_map2 = max_links_map1
+      endif
+
+      !*** real expression truncated to integer: ten percent of the
+      !*** larger grid size
+      resize_increment = 0.1*max(grid1_size,grid2_size)
+
+!-----------------------------------------------------------------------
+!
+!     allocate address and weight arrays for mapping 1
+!   
+!-----------------------------------------------------------------------
+
+      allocate (grid1_add_map1(max_links_map1),
+     &          grid2_add_map1(max_links_map1),
+     &          wts_map1(num_wts, max_links_map1),stat=ierr)
+      if (ierr .ne. 0) then
+        print *,'error allocating map 1 arrays in init: ',ierr
+        stop
+      endif
+
+!-----------------------------------------------------------------------
+!
+!     allocate address and weight arrays for mapping 2 if necessary 
+!   
+!-----------------------------------------------------------------------
+
+      if (num_maps > 1) then
+        allocate (grid1_add_map2(max_links_map2),
+     &            grid2_add_map2(max_links_map2),
+     &            wts_map2(num_wts, max_links_map2),stat=ierr)
+        if (ierr .ne. 0) then
+          print *,'error allocating map 2 arrays in init: ',ierr
+          stop
+        endif
+      endif
+
+!-----------------------------------------------------------------------
+
+      end subroutine init_remap_vars
+
+!***********************************************************************
+
+      subroutine resize_remap_vars(nmap, increment)
+
+!-----------------------------------------------------------------------
+!
+!     this routine resizes remapping arrays by increasing(decreasing)
+!     the max_links by increment
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     input variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), intent(in) ::
+     &     nmap,      ! identifies which mapping array to resize
+     &     increment  ! the number of links to add(subtract) to arrays
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind) ::
+     &   ierr,     ! error flag
+     &   mxlinks   ! size of link arrays
+
+      integer (kind=int_kind), dimension(:), allocatable ::
+     &   add1_tmp, ! temp array for resizing address arrays
+     &   add2_tmp  ! temp array for resizing address arrays
+
+      real (kind=dbl_kind), dimension(:,:), allocatable ::
+     &   wts_tmp   ! temp array for resizing weight arrays
+
+!-----------------------------------------------------------------------
+!
+!     resize map 1 arrays if required.
+!
+!-----------------------------------------------------------------------
+
+      select case (nmap)
+      case(1)
+
+        !***
+        !*** allocate temporaries to hold original values
+        !*** (stat checks mirror those of the map 2 branch)
+        !***
+
+        mxlinks = size(grid1_add_map1)
+        allocate (add1_tmp(mxlinks), add2_tmp(mxlinks), 
+     &            wts_tmp(num_wts,mxlinks),stat=ierr)
+        if (ierr .ne. 0) then
+          print *,'error allocating temps in resize: ',ierr
+          stop
+        endif
+
+        add1_tmp = grid1_add_map1
+        add2_tmp = grid2_add_map1
+        wts_tmp  = wts_map1
+        
+        !***
+        !*** deallocate originals and increment max_links then
+        !*** reallocate arrays at new size
+        !***
+
+        deallocate (grid1_add_map1, grid2_add_map1, wts_map1)
+        max_links_map1 = mxlinks + increment
+        allocate (grid1_add_map1(max_links_map1),
+     &            grid2_add_map1(max_links_map1),
+     &            wts_map1(num_wts,max_links_map1),stat=ierr)
+        if (ierr .ne. 0) then
+          print *,'error allocating new arrays in resize: ',ierr
+          stop
+        endif
+
+        !***
+        !*** restore original values from temp arrays and
+        !*** deallocate temps
+        !***
+
+        mxlinks = min(mxlinks, max_links_map1)
+        grid1_add_map1(1:mxlinks) = add1_tmp (1:mxlinks)
+        grid2_add_map1(1:mxlinks) = add2_tmp (1:mxlinks)
+        wts_map1    (:,1:mxlinks) = wts_tmp(:,1:mxlinks)
+        deallocate(add1_tmp, add2_tmp, wts_tmp)
+
+!-----------------------------------------------------------------------
+!
+!     resize map 2 arrays if required.
+!
+!-----------------------------------------------------------------------
+
+      case(2)
+
+        !***
+        !*** allocate temporaries to hold original values
+        !***
+
+        mxlinks = size(grid1_add_map2)
+        allocate (add1_tmp(mxlinks), add2_tmp(mxlinks), 
+     &            wts_tmp(num_wts,mxlinks),stat=ierr)
+        if (ierr .ne. 0) then
+          print *,'error allocating temps in resize: ',ierr
+          stop
+        endif
+
+        add1_tmp = grid1_add_map2
+        add2_tmp = grid2_add_map2
+        wts_tmp  = wts_map2
+        
+        !***
+        !*** deallocate originals and increment max_links then
+        !*** reallocate arrays at new size
+        !***
+
+        deallocate (grid1_add_map2, grid2_add_map2, wts_map2)
+        max_links_map2 = mxlinks + increment
+        allocate (grid1_add_map2(max_links_map2),
+     &            grid2_add_map2(max_links_map2),
+     &            wts_map2(num_wts,max_links_map2),stat=ierr)
+        if (ierr .ne. 0) then
+          print *,'error allocating new arrays in resize: ',ierr
+          stop
+        endif
+
+
+        !***
+        !*** restore original values from temp arrays and
+        !*** deallocate temps
+        !***
+
+        mxlinks = min(mxlinks, max_links_map2)
+        grid1_add_map2(1:mxlinks) = add1_tmp (1:mxlinks)
+        grid2_add_map2(1:mxlinks) = add2_tmp (1:mxlinks)
+        wts_map2    (:,1:mxlinks) = wts_tmp(:,1:mxlinks)
+        deallocate(add1_tmp, add2_tmp, wts_tmp)
+
+      end select
+
+!-----------------------------------------------------------------------
+
+      end subroutine resize_remap_vars
+
+!***********************************************************************
+
+      end module remap_vars
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

+ 1763 - 0
interpolation/scrip_sources/remap_write.f

@@ -0,0 +1,1763 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+!     This module contains routines for writing the remapping data to 
+!     a file.  Before writing the data for each mapping, the links are 
+!     sorted by destination grid address.
+!
+!-----------------------------------------------------------------------
+!
+!     CVS:$Id: remap_write.f,v 1.7 2001/08/21 21:06:42 pwjones Exp $
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!***********************************************************************
+
+      module remap_write
+
+!-----------------------------------------------------------------------
+
+      use kinds_mod     ! defines common data types
+      use constants     ! defines common scalar constants
+      use grids         ! module containing grid information
+      use remap_vars    ! module containing remap information
+      use netcdf_mod    ! module with netCDF stuff
+
+      implicit none
+
+!-----------------------------------------------------------------------
+!
+!     module variables
+!
+!-----------------------------------------------------------------------
+
+      character(char_len), private :: 
+     &   map_method       ! character string for map_type
+     &,  normalize_opt    ! character string for normalization option
+     &,  history          ! character string for history information
+     &,  convention       ! character string for output convention
+
+      character(8), private :: 
+     &   cdate            ! character date string
+
+      integer (kind=int_kind), dimension(:), allocatable, private ::
+     &   src_mask_int     ! integer masks to determine
+     &,  dst_mask_int     ! cells that participate in map
+
+!-----------------------------------------------------------------------
+!
+!     various netCDF identifiers used by output routines
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), private ::
+     &   ncstat               ! error flag for netCDF calls 
+     &,  nc_file_id           ! id for netCDF file
+     &,  nc_srcgrdsize_id     ! id for source grid size
+     &,  nc_dstgrdsize_id     ! id for destination grid size
+     &,  nc_srcgrdcorn_id     ! id for number of source grid corners
+     &,  nc_dstgrdcorn_id     ! id for number of dest grid corners
+     &,  nc_srcgrdrank_id     ! id for source grid rank
+     &,  nc_dstgrdrank_id     ! id for dest grid rank
+     &,  nc_numlinks_id       ! id for number of links in mapping
+     &,  nc_numwgts_id        ! id for number of weights for mapping
+     &,  nc_srcgrddims_id     ! id for source grid dimensions
+     &,  nc_dstgrddims_id     ! id for dest grid dimensions
+     &,  nc_srcgrdcntrlat_id  ! id for source grid center latitude
+     &,  nc_dstgrdcntrlat_id  ! id for dest grid center latitude
+     &,  nc_srcgrdcntrlon_id  ! id for source grid center longitude
+     &,  nc_dstgrdcntrlon_id  ! id for dest grid center longitude
+     &,  nc_srcgrdimask_id    ! id for source grid mask
+     &,  nc_dstgrdimask_id    ! id for dest grid mask
+     &,  nc_srcgrdcrnrlat_id  ! id for latitude of source grid corners
+     &,  nc_srcgrdcrnrlon_id  ! id for longitude of source grid corners
+     &,  nc_dstgrdcrnrlat_id  ! id for latitude of dest grid corners
+     &,  nc_dstgrdcrnrlon_id  ! id for longitude of dest grid corners
+     &,  nc_srcgrdarea_id     ! id for area of source grid cells
+     &,  nc_dstgrdarea_id     ! id for area of dest grid cells
+     &,  nc_srcgrdfrac_id     ! id for area fraction on source grid
+     &,  nc_dstgrdfrac_id     ! id for area fraction on dest grid
+     &,  nc_srcadd_id         ! id for map source address
+     &,  nc_dstadd_id         ! id for map destination address
+     &,  nc_rmpmatrix_id      ! id for remapping matrix
+
+      integer (kind=int_kind), dimension(2), private ::
+     &   nc_dims2_id  ! netCDF ids for 2d array dims
+
+!***********************************************************************
+
+      contains
+
+!***********************************************************************
+
+      subroutine write_remap(map1_name, map2_name, 
+     &                       interp_file1, interp_file2, output_opt)
+
+!-----------------------------------------------------------------------
+!
+!     calls correct output routine based on output format choice
+!
+!     reads module variables norm_opt, map_type, num_maps and the
+!     link/weight arrays; sets module strings normalize_opt,
+!     map_method and history used by the output routines.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     input variables
+!
+!-----------------------------------------------------------------------
+
+      character(char_len), intent(in) ::
+     &            map1_name,    ! name for mapping grid1 to grid2
+     &            map2_name,    ! name for mapping grid2 to grid1
+     &            interp_file1, ! filename for map1 remap data
+     &            interp_file2, ! filename for map2 remap data
+     &            output_opt    ! option for output conventions
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     define some common variables to be used in all routines
+!
+!-----------------------------------------------------------------------
+
+      !*** translate integer norm_opt code into the attribute string
+      !*** written to the output file
+      select case(norm_opt)
+      case (norm_opt_none)
+        normalize_opt = 'none'
+      case (norm_opt_frcarea)
+        normalize_opt = 'fracarea'
+      case (norm_opt_dstarea)
+        normalize_opt = 'destarea'
+      end select
+
+      !*** translate integer map_type code into the attribute string
+      !*** written to the output file; unknown types are fatal
+      select case(map_type)
+      case(map_type_conserv)
+        map_method = 'Conservative remapping'
+      case(map_type_bilinear)
+        map_method = 'Bilinear remapping'
+      case(map_type_distwgt)
+        map_method = 'Distance weighted avg of nearest neighbors'
+      case(map_type_bicubic)
+        map_method = 'Bicubic remapping'
+      case default
+        stop 'Invalid Map Type'
+      end select
+
+      !*** build history attribute 'Created: MM-DD-YYYY' from the
+      !*** ccyymmdd string returned by date_and_time
+      call date_and_time(date=cdate)
+      write (history,1000) cdate(5:6),cdate(7:8),cdate(1:4)
+ 1000 format('Created: ',a2,'-',a2,'-',a4)
+
+!-----------------------------------------------------------------------
+!
+!     sort address and weight arrays
+!
+!     links are sorted by destination grid address (first argument of
+!     sort_add) before writing
+!
+!-----------------------------------------------------------------------
+
+      call sort_add(grid2_add_map1, grid1_add_map1, wts_map1)
+      if (num_maps > 1) then
+        call sort_add(grid1_add_map2, grid2_add_map2, wts_map2)
+      endif
+
+!-----------------------------------------------------------------------
+!
+!     call appropriate output routine
+!
+!     only 'scrip' and 'ncar-csm' conventions are supported
+!
+!-----------------------------------------------------------------------
+
+      select case(output_opt)
+      case ('scrip')
+        call write_remap_scrip(map1_name, interp_file1, 1)
+      case ('ncar-csm')
+        call write_remap_csm  (map1_name, interp_file1, 1)
+      case default
+        stop 'unknown output file convention'
+      end select
+
+!-----------------------------------------------------------------------
+!
+!     call appropriate output routine for second mapping if required
+!
+!-----------------------------------------------------------------------
+
+      if (num_maps > 1) then
+        select case(output_opt)
+        case ('scrip')
+          call write_remap_scrip(map2_name, interp_file2, 2)
+        case ('ncar-csm')
+          call write_remap_csm  (map2_name, interp_file2, 2)
+        case default
+          stop 'unknown output file convention'
+        end select
+      endif
+
+!-----------------------------------------------------------------------
+
+      end subroutine write_remap
+
+!***********************************************************************
+
+      subroutine write_remap_scrip(map_name, interp_file, direction)
+
+!-----------------------------------------------------------------------
+!
+!     writes remap data to a netCDF file using SCRIP conventions
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     input variables
+!
+!-----------------------------------------------------------------------
+
+      character(char_len), intent(in) ::
+     &            map_name     ! name for mapping 
+     &,           interp_file  ! filename for remap data
+
+      integer (kind=int_kind), intent(in) ::
+     &  direction              ! direction of map (1=grid1 to grid2
+                               !                   2=grid2 to grid1)
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      character(char_len) ::
+     &  grid1_ctmp        ! character temp for grid1 names
+     &, grid2_ctmp        ! character temp for grid2 names
+
+      integer (kind=int_kind) ::
+     &  itmp1             ! integer temp
+     &, itmp2             ! integer temp
+     &, itmp3             ! integer temp
+     &, itmp4             ! integer temp
+
+!-----------------------------------------------------------------------
+!
+!     create netCDF file for mapping and define some global attributes
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_create (interp_file, NF_CLOBBER, nc_file_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** map name
+      !***
+      ncstat = nf_put_att_text (nc_file_id, NF_GLOBAL, 'title',
+     &                          len_trim(map_name), map_name)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** normalization option
+      !***
+      ncstat = nf_put_att_text(nc_file_id, NF_GLOBAL, 'normalization',
+     &                         len_trim(normalize_opt), normalize_opt)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** map method
+      !***
+      ncstat = nf_put_att_text (nc_file_id, NF_GLOBAL, 'map_method',
+     &                          len_trim(map_method), map_method)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** history
+      !***
+      ncstat = nf_put_att_text (nc_file_id, NF_GLOBAL, 'history',
+     &                          len_trim(history), history)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** file convention
+      !***
+      convention = 'SCRIP'
+      ncstat = nf_put_att_text (nc_file_id, NF_GLOBAL, 'conventions',
+     &                          len_trim(convention), convention)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** source and destination grid names
+      !***
+
+      if (direction == 1) then
+        grid1_ctmp = 'source_grid'
+        grid2_ctmp = 'dest_grid'
+      else
+        grid1_ctmp = 'dest_grid'
+        grid2_ctmp = 'source_grid'
+      endif
+
+      ncstat = nf_put_att_text (nc_file_id, NF_GLOBAL, trim(grid1_ctmp),
+     &                          len_trim(grid1_name), grid1_name)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, NF_GLOBAL, trim(grid2_ctmp),
+     &                          len_trim(grid2_name), grid2_name)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     prepare netCDF dimension info
+!
+!-----------------------------------------------------------------------
+
+      !***
+      !*** define grid size dimensions
+      !***
+
+      if (direction == 1) then
+        itmp1 = grid1_size
+        itmp2 = grid2_size
+      else
+        itmp1 = grid2_size
+        itmp2 = grid1_size
+      endif
+
+      ncstat = nf_def_dim (nc_file_id, 'src_grid_size', itmp1, 
+     &                     nc_srcgrdsize_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_dim (nc_file_id, 'dst_grid_size', itmp2, 
+     &                     nc_dstgrdsize_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid corner dimension
+      !***
+
+      if (direction == 1) then
+        itmp1 = grid1_corners
+        itmp2 = grid2_corners
+      else
+        itmp1 = grid2_corners
+        itmp2 = grid1_corners
+      endif
+
+      ncstat = nf_def_dim (nc_file_id, 'src_grid_corners', 
+     &                     itmp1, nc_srcgrdcorn_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_dim (nc_file_id, 'dst_grid_corners', 
+     &                     itmp2, nc_dstgrdcorn_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid rank dimension
+      !***
+
+      if (direction == 1) then
+        itmp1 = grid1_rank
+        itmp2 = grid2_rank
+      else
+        itmp1 = grid2_rank
+        itmp2 = grid1_rank
+      endif
+
+      ncstat = nf_def_dim (nc_file_id, 'src_grid_rank', 
+     &                     itmp1, nc_srcgrdrank_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_dim (nc_file_id, 'dst_grid_rank', 
+     &                     itmp2, nc_dstgrdrank_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define map size dimensions
+      !***
+
+      if (direction == 1) then
+        itmp1 = num_links_map1
+      else
+        itmp1 = num_links_map2
+      endif
+
+      ncstat = nf_def_dim (nc_file_id, 'num_links', 
+     &                     itmp1, nc_numlinks_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_dim (nc_file_id, 'num_wgts', 
+     &                     num_wts, nc_numwgts_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid dimensions
+      !***
+
+      ncstat = nf_def_var (nc_file_id, 'src_grid_dims', NF_INT,
+     &                     1, nc_srcgrdrank_id, nc_srcgrddims_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'dst_grid_dims', NF_INT,
+     &                     1, nc_dstgrdrank_id, nc_dstgrddims_id)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     define all arrays for netCDF descriptors
+!
+!-----------------------------------------------------------------------
+
+      !***
+      !*** define grid center latitude array
+      !***
+
+      ncstat = nf_def_var (nc_file_id, 'src_grid_center_lat', 
+     &                     NF_DOUBLE, 1, nc_srcgrdsize_id, 
+     &                     nc_srcgrdcntrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'dst_grid_center_lat', 
+     &                     NF_DOUBLE, 1, nc_dstgrdsize_id, 
+     &                     nc_dstgrdcntrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid center longitude array
+      !***
+
+      ncstat = nf_def_var (nc_file_id, 'src_grid_center_lon', 
+     &                     NF_DOUBLE, 1, nc_srcgrdsize_id, 
+     &                     nc_srcgrdcntrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'dst_grid_center_lon', 
+     &                     NF_DOUBLE, 1, nc_dstgrdsize_id, 
+     &                     nc_dstgrdcntrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid corner lat/lon arrays
+      !***
+
+      nc_dims2_id(1) = nc_srcgrdcorn_id
+      nc_dims2_id(2) = nc_srcgrdsize_id
+
+      ncstat = nf_def_var (nc_file_id, 'src_grid_corner_lat', 
+     &                     NF_DOUBLE, 2, nc_dims2_id, 
+     &                     nc_srcgrdcrnrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'src_grid_corner_lon', 
+     &                     NF_DOUBLE, 2, nc_dims2_id, 
+     &                     nc_srcgrdcrnrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      nc_dims2_id(1) = nc_dstgrdcorn_id
+      nc_dims2_id(2) = nc_dstgrdsize_id
+
+      ncstat = nf_def_var (nc_file_id, 'dst_grid_corner_lat', 
+     &                     NF_DOUBLE, 2, nc_dims2_id, 
+     &                     nc_dstgrdcrnrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'dst_grid_corner_lon', 
+     &                     NF_DOUBLE, 2, nc_dims2_id, 
+     &                     nc_dstgrdcrnrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define units for all coordinate arrays
+      !***
+
+      if (direction == 1) then
+        grid1_ctmp = grid1_units
+        grid2_ctmp = grid2_units
+      else
+        grid1_ctmp = grid2_units
+        grid2_ctmp = grid1_units
+      endif
+
+      ncstat = nf_put_att_text (nc_file_id, nc_srcgrdcntrlat_id, 
+     &                          'units', 7, grid1_ctmp)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_dstgrdcntrlat_id, 
+     &                          'units', 7, grid2_ctmp)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_srcgrdcntrlon_id, 
+     &                          'units', 7, grid1_ctmp)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_dstgrdcntrlon_id, 
+     &                          'units', 7, grid2_ctmp)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_srcgrdcrnrlat_id, 
+     &                          'units', 7, grid1_ctmp)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_srcgrdcrnrlon_id, 
+     &                          'units', 7, grid1_ctmp)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_dstgrdcrnrlat_id, 
+     &                          'units', 7, grid2_ctmp)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_dstgrdcrnrlon_id, 
+     &                          'units', 7, grid2_ctmp)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid mask
+      !***
+
+      ncstat = nf_def_var (nc_file_id, 'src_grid_imask', NF_INT,
+     &                     1, nc_srcgrdsize_id, nc_srcgrdimask_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_srcgrdimask_id, 
+     &                          'units', 8, 'unitless')
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'dst_grid_imask', NF_INT,
+     &                     1, nc_dstgrdsize_id, nc_dstgrdimask_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_dstgrdimask_id, 
+     &                          'units', 8, 'unitless')
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid area arrays
+      !***
+
+      ncstat = nf_def_var (nc_file_id, 'src_grid_area', 
+     &                     NF_DOUBLE, 1, nc_srcgrdsize_id, 
+     &                     nc_srcgrdarea_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_srcgrdarea_id, 
+     &                          'units', 14, 'square radians')
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'dst_grid_area', 
+     &                     NF_DOUBLE, 1, nc_dstgrdsize_id, 
+     &                     nc_dstgrdarea_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_dstgrdarea_id, 
+     &                          'units', 14, 'square radians')
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid fraction arrays
+      !***
+
+      ncstat = nf_def_var (nc_file_id, 'src_grid_frac', 
+     &                     NF_DOUBLE, 1, nc_srcgrdsize_id, 
+     &                     nc_srcgrdfrac_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_srcgrdfrac_id, 
+     &                          'units', 8, 'unitless')
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'dst_grid_frac', 
+     &                     NF_DOUBLE, 1, nc_dstgrdsize_id, 
+     &                     nc_dstgrdfrac_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_dstgrdfrac_id, 
+     &                          'units', 8, 'unitless')
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define mapping arrays
+      !***
+
+      ncstat = nf_def_var (nc_file_id, 'src_address', 
+     &                     NF_INT, 1, nc_numlinks_id, 
+     &                     nc_srcadd_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'dst_address', 
+     &                     NF_INT, 1, nc_numlinks_id, 
+     &                     nc_dstadd_id)
+      call netcdf_error_handler(ncstat)
+
+      nc_dims2_id(1) = nc_numwgts_id
+      nc_dims2_id(2) = nc_numlinks_id
+
+      ncstat = nf_def_var (nc_file_id, 'remap_matrix', 
+     &                     NF_DOUBLE, 2, nc_dims2_id, 
+     &                     nc_rmpmatrix_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** end definition stage
+      !***
+
+      ncstat = nf_enddef(nc_file_id)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     compute integer masks
+!
+!-----------------------------------------------------------------------
+
+      if (direction == 1) then
+        allocate (src_mask_int(grid1_size),
+     &            dst_mask_int(grid2_size))
+
+        where (grid2_mask)
+          dst_mask_int = 1
+        elsewhere
+          dst_mask_int = 0
+        endwhere
+
+        where (grid1_mask)
+          src_mask_int = 1
+        elsewhere
+          src_mask_int = 0
+        endwhere
+      else
+        allocate (src_mask_int(grid2_size),
+     &            dst_mask_int(grid1_size))
+
+        where (grid1_mask)
+          dst_mask_int = 1
+        elsewhere
+          dst_mask_int = 0
+        endwhere
+
+        where (grid2_mask)
+          src_mask_int = 1
+        elsewhere
+          src_mask_int = 0
+        endwhere
+      endif
+
+!-----------------------------------------------------------------------
+!
+!     change units of lat/lon coordinates if input units different
+!     from radians
+!
+!-----------------------------------------------------------------------
+
+      if (grid1_units(1:7) == 'degrees' .and. direction == 1) then
+        grid1_center_lat = grid1_center_lat/deg2rad
+        grid1_center_lon = grid1_center_lon/deg2rad
+        grid1_corner_lat = grid1_corner_lat/deg2rad
+        grid1_corner_lon = grid1_corner_lon/deg2rad
+      endif
+
+      if (grid2_units(1:7) == 'degrees' .and. direction == 1) then
+        grid2_center_lat = grid2_center_lat/deg2rad
+        grid2_center_lon = grid2_center_lon/deg2rad
+        grid2_corner_lat = grid2_corner_lat/deg2rad
+        grid2_corner_lon = grid2_corner_lon/deg2rad
+      endif
+
+!-----------------------------------------------------------------------
+!
+!     write mapping data
+!
+!-----------------------------------------------------------------------
+
+      if (direction == 1) then
+        itmp1 = nc_srcgrddims_id
+        itmp2 = nc_dstgrddims_id
+      else
+        itmp2 = nc_srcgrddims_id
+        itmp1 = nc_dstgrddims_id
+      endif
+
+      ncstat = nf_put_var_int(nc_file_id, itmp1, grid1_dims)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_int(nc_file_id, itmp2, grid2_dims)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_int(nc_file_id, nc_srcgrdimask_id, 
+     &                        src_mask_int)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_int(nc_file_id, nc_dstgrdimask_id,
+     &                        dst_mask_int)
+      call netcdf_error_handler(ncstat)
+
+      deallocate(src_mask_int, dst_mask_int)
+
+      if (direction == 1) then
+        itmp1 = nc_srcgrdcntrlat_id
+        itmp2 = nc_srcgrdcntrlon_id
+        itmp3 = nc_srcgrdcrnrlat_id
+        itmp4 = nc_srcgrdcrnrlon_id
+      else
+        itmp1 = nc_dstgrdcntrlat_id
+        itmp2 = nc_dstgrdcntrlon_id
+        itmp3 = nc_dstgrdcrnrlat_id
+        itmp4 = nc_dstgrdcrnrlon_id
+      endif
+
+      ncstat = nf_put_var_double(nc_file_id, itmp1, grid1_center_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_file_id, itmp2, grid1_center_lon)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_file_id, itmp3, grid1_corner_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_file_id, itmp4, grid1_corner_lon)
+      call netcdf_error_handler(ncstat)
+
+      if (direction == 1) then
+        itmp1 = nc_dstgrdcntrlat_id
+        itmp2 = nc_dstgrdcntrlon_id
+        itmp3 = nc_dstgrdcrnrlat_id
+        itmp4 = nc_dstgrdcrnrlon_id
+      else
+        itmp1 = nc_srcgrdcntrlat_id
+        itmp2 = nc_srcgrdcntrlon_id
+        itmp3 = nc_srcgrdcrnrlat_id
+        itmp4 = nc_srcgrdcrnrlon_id
+      endif
+
+      ncstat = nf_put_var_double(nc_file_id, itmp1, grid2_center_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_file_id, itmp2, grid2_center_lon)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_file_id, itmp3, grid2_corner_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_file_id, itmp4, grid2_corner_lon)
+      call netcdf_error_handler(ncstat)
+
+      if (direction == 1) then
+        itmp1 = nc_srcgrdarea_id
+        itmp2 = nc_srcgrdfrac_id
+        itmp3 = nc_dstgrdarea_id
+        itmp4 = nc_dstgrdfrac_id
+      else
+        itmp1 = nc_dstgrdarea_id
+        itmp2 = nc_dstgrdfrac_id
+        itmp3 = nc_srcgrdarea_id
+        itmp4 = nc_srcgrdfrac_id
+      endif
+
+      if (luse_grid1_area) then
+        ncstat = nf_put_var_double(nc_file_id, itmp1, grid1_area_in)
+      else
+        ncstat = nf_put_var_double(nc_file_id, itmp1, grid1_area)
+      endif
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_file_id, itmp2, grid1_frac)
+      call netcdf_error_handler(ncstat)
+
+      if (luse_grid2_area) then
+        ncstat = nf_put_var_double(nc_file_id, itmp3, grid2_area_in)
+      else
+        ncstat = nf_put_var_double(nc_file_id, itmp3, grid2_area)
+      endif
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_file_id, itmp4, grid2_frac)
+      call netcdf_error_handler(ncstat)
+
+      if (direction == 1) then
+        ncstat = nf_put_var_int(nc_file_id, nc_srcadd_id, 
+     &                          grid1_add_map1)
+        call netcdf_error_handler(ncstat)
+
+        ncstat = nf_put_var_int(nc_file_id, nc_dstadd_id, 
+     &                          grid2_add_map1)
+        call netcdf_error_handler(ncstat)
+
+        ncstat = nf_put_var_double(nc_file_id, nc_rmpmatrix_id, 
+     &                             wts_map1)
+        call netcdf_error_handler(ncstat)
+      else
+        ncstat = nf_put_var_int(nc_file_id, nc_srcadd_id, 
+     &                          grid2_add_map2)
+        call netcdf_error_handler(ncstat)
+
+        ncstat = nf_put_var_int(nc_file_id, nc_dstadd_id, 
+     &                          grid1_add_map2)
+        call netcdf_error_handler(ncstat)
+
+        ncstat = nf_put_var_double(nc_file_id, nc_rmpmatrix_id, 
+     &                             wts_map2)
+        call netcdf_error_handler(ncstat)
+      endif
+
+      ncstat = nf_close(nc_file_id)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+
+      end subroutine write_remap_scrip
+
+!***********************************************************************
+
+      subroutine write_remap_csm(map_name, interp_file, direction)
+
+!-----------------------------------------------------------------------
+!
+!     writes remap data to a netCDF file using NCAR-CSM conventions
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     input variables
+!
+!-----------------------------------------------------------------------
+
+      character(char_len), intent(in) ::
+     &            map_name     ! name for mapping 
+     &,           interp_file  ! filename for remap data
+
+      integer (kind=int_kind), intent(in) ::
+     &  direction              ! direction of map (1=grid1 to grid2
+                               !                   2=grid2 to grid1)
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      character(char_len) ::
+     &  grid1_ctmp        ! character temp for grid1 names
+     &, grid2_ctmp        ! character temp for grid2 names
+
+      integer (kind=int_kind) ::
+     &  itmp1             ! integer temp
+     &, itmp2             ! integer temp
+     &, itmp3             ! integer temp
+     &, itmp4             ! integer temp
+     &, nc_numwgts1_id    ! extra netCDF id for additional weights
+     &, nc_src_isize_id   ! extra netCDF id for ni_a
+     &, nc_src_jsize_id   ! extra netCDF id for nj_a
+     &, nc_dst_isize_id   ! extra netCDF id for ni_b
+     &, nc_dst_jsize_id   ! extra netCDF id for nj_b
+     &, nc_rmpmatrix2_id  ! extra netCDF id for high-order remap matrix
+
+      real (kind=dbl_kind), dimension(:),allocatable ::
+     &  wts1              ! CSM wants single array for 1st-order wts
+
+      real (kind=dbl_kind), dimension(:,:),allocatable ::
+     &  wts2              ! write remaining weights in different array
+
+!-----------------------------------------------------------------------
+!
+!     create netCDF file for mapping and define some global attributes
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_create (interp_file, NF_CLOBBER, nc_file_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** map name
+      !***
+      ncstat = nf_put_att_text (nc_file_id, NF_GLOBAL, 'title',
+     &                          len_trim(map_name), map_name)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** normalization option
+      !***
+      ncstat = nf_put_att_text(nc_file_id, NF_GLOBAL, 'normalization',
+     &                         len_trim(normalize_opt), normalize_opt)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** map method
+      !***
+      ncstat = nf_put_att_text (nc_file_id, NF_GLOBAL, 'map_method',
+     &                          len_trim(map_method), map_method)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** history
+      !***
+      ncstat = nf_put_att_text (nc_file_id, NF_GLOBAL, 'history',
+     &                          len_trim(history), history)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** file convention
+      !***
+      convention = 'NCAR-CSM'
+      ncstat = nf_put_att_text (nc_file_id, NF_GLOBAL, 'conventions',
+     &                          len_trim(convention), convention)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** source and destination grid names
+      !***
+
+      if (direction == 1) then
+        grid1_ctmp = 'domain_a'
+        grid2_ctmp = 'domain_b'
+      else
+        grid1_ctmp = 'domain_b'
+        grid2_ctmp = 'domain_a'
+      endif
+
+      ncstat = nf_put_att_text (nc_file_id, NF_GLOBAL, trim(grid1_ctmp),
+     &                          len_trim(grid1_name), grid1_name)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, NF_GLOBAL, trim(grid2_ctmp),
+     &                          len_trim(grid2_name), grid2_name)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     prepare netCDF dimension info
+!
+!-----------------------------------------------------------------------
+
+      !***
+      !*** define grid size dimensions
+      !***
+
+      if (direction == 1) then
+        itmp1 = grid1_size
+        itmp2 = grid2_size
+      else
+        itmp1 = grid2_size
+        itmp2 = grid1_size
+      endif
+
+      ncstat = nf_def_dim (nc_file_id, 'n_a', itmp1, nc_srcgrdsize_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_dim (nc_file_id, 'n_b', itmp2, nc_dstgrdsize_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid corner dimension
+      !***
+
+      if (direction == 1) then
+        itmp1 = grid1_corners
+        itmp2 = grid2_corners
+      else
+        itmp1 = grid2_corners
+        itmp2 = grid1_corners
+      endif
+
+      ncstat = nf_def_dim (nc_file_id, 'nv_a', itmp1, nc_srcgrdcorn_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_dim (nc_file_id, 'nv_b', itmp2, nc_dstgrdcorn_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid rank dimension
+      !***
+
+      if (direction == 1) then
+        itmp1 = grid1_rank
+        itmp2 = grid2_rank
+      else
+        itmp1 = grid2_rank
+        itmp2 = grid1_rank
+      endif
+
+      ncstat = nf_def_dim (nc_file_id, 'src_grid_rank', 
+     &                     itmp1, nc_srcgrdrank_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_dim (nc_file_id, 'dst_grid_rank', 
+     &                     itmp2, nc_dstgrdrank_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define first two dims as if 2-d cartesian domain
+      !***
+
+      if (direction == 1) then
+        itmp1 = grid1_dims(1)
+        if (grid1_rank > 1) then
+          itmp2 = grid1_dims(2)
+        else
+          itmp2 = 0
+        endif
+        itmp3 = grid2_dims(1)
+        if (grid2_rank > 1) then
+          itmp4 = grid2_dims(2)
+        else
+          itmp4 = 0
+        endif
+      else
+        itmp1 = grid2_dims(1)
+        if (grid2_rank > 1) then
+          itmp2 = grid2_dims(2)
+        else
+          itmp2 = 0
+        endif
+        itmp3 = grid1_dims(1)
+        if (grid1_rank > 1) then
+          itmp4 = grid1_dims(2)
+        else
+          itmp4 = 0
+        endif
+      endif
+
+      ncstat = nf_def_dim (nc_file_id, 'ni_a', itmp1, nc_src_isize_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_dim (nc_file_id, 'nj_a', itmp2, nc_src_jsize_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_dim (nc_file_id, 'ni_b', itmp3, nc_dst_isize_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_dim (nc_file_id, 'nj_b', itmp4, nc_dst_jsize_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define map size dimensions
+      !***
+
+      if (direction == 1) then
+        itmp1 = num_links_map1
+      else
+        itmp1 = num_links_map2
+      endif
+
+      ncstat = nf_def_dim (nc_file_id, 'n_s', itmp1, nc_numlinks_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_dim (nc_file_id, 'num_wgts', 
+     &                     num_wts, nc_numwgts_id)
+      call netcdf_error_handler(ncstat)
+
+      if (num_wts > 1) then
+        ncstat = nf_def_dim (nc_file_id, 'num_wgts1', 
+     &                       num_wts-1, nc_numwgts1_id)
+        call netcdf_error_handler(ncstat)
+      endif
+
+      !***
+      !*** define grid dimensions
+      !***
+
+      ncstat = nf_def_var (nc_file_id, 'src_grid_dims', NF_INT,
+     &                     1, nc_srcgrdrank_id, nc_srcgrddims_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'dst_grid_dims', NF_INT,
+     &                     1, nc_dstgrdrank_id, nc_dstgrddims_id)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     define all arrays for netCDF descriptors
+!
+!-----------------------------------------------------------------------
+
+      !***
+      !*** define grid center latitude array
+      !***
+
+      ncstat = nf_def_var (nc_file_id, 'yc_a',
+     &                     NF_DOUBLE, 1, nc_srcgrdsize_id, 
+     &                     nc_srcgrdcntrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'yc_b', 
+     &                     NF_DOUBLE, 1, nc_dstgrdsize_id, 
+     &                     nc_dstgrdcntrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid center longitude array
+      !***
+
+      ncstat = nf_def_var (nc_file_id, 'xc_a', 
+     &                     NF_DOUBLE, 1, nc_srcgrdsize_id, 
+     &                     nc_srcgrdcntrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'xc_b', 
+     &                     NF_DOUBLE, 1, nc_dstgrdsize_id, 
+     &                     nc_dstgrdcntrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid corner lat/lon arrays
+      !***
+
+      nc_dims2_id(1) = nc_srcgrdcorn_id
+      nc_dims2_id(2) = nc_srcgrdsize_id
+
+      ncstat = nf_def_var (nc_file_id, 'yv_a', 
+     &                     NF_DOUBLE, 2, nc_dims2_id, 
+     &                     nc_srcgrdcrnrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'xv_a', 
+     &                     NF_DOUBLE, 2, nc_dims2_id, 
+     &                     nc_srcgrdcrnrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      nc_dims2_id(1) = nc_dstgrdcorn_id
+      nc_dims2_id(2) = nc_dstgrdsize_id
+
+      ncstat = nf_def_var (nc_file_id, 'yv_b', 
+     &                     NF_DOUBLE, 2, nc_dims2_id, 
+     &                     nc_dstgrdcrnrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'xv_b', 
+     &                     NF_DOUBLE, 2, nc_dims2_id, 
+     &                     nc_dstgrdcrnrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** CSM wants all in degrees
+      !***
+
+      grid1_units = 'degrees'
+      grid2_units = 'degrees'
+
+      if (direction == 1) then
+        grid1_ctmp = grid1_units
+        grid2_ctmp = grid2_units
+      else
+        grid1_ctmp = grid2_units
+        grid2_ctmp = grid1_units
+      endif
+
+      ncstat = nf_put_att_text (nc_file_id, nc_srcgrdcntrlat_id, 
+     &                          'units', 7, grid1_ctmp)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_dstgrdcntrlat_id, 
+     &                          'units', 7, grid2_ctmp)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_srcgrdcntrlon_id, 
+     &                          'units', 7, grid1_ctmp)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_dstgrdcntrlon_id, 
+     &                          'units', 7, grid2_ctmp)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_srcgrdcrnrlat_id, 
+     &                          'units', 7, grid1_ctmp)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_srcgrdcrnrlon_id, 
+     &                          'units', 7, grid1_ctmp)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_dstgrdcrnrlat_id, 
+     &                          'units', 7, grid2_ctmp)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_dstgrdcrnrlon_id, 
+     &                          'units', 7, grid2_ctmp)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid mask
+      !***
+
+      ncstat = nf_def_var (nc_file_id, 'mask_a', NF_INT,
+     &                     1, nc_srcgrdsize_id, nc_srcgrdimask_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_srcgrdimask_id, 
+     &                          'units', 8, 'unitless')
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'mask_b', NF_INT,
+     &                     1, nc_dstgrdsize_id, nc_dstgrdimask_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_dstgrdimask_id, 
+     &                          'units', 8, 'unitless')
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid area arrays
+      !***
+
+      ncstat = nf_def_var (nc_file_id, 'area_a', 
+     &                     NF_DOUBLE, 1, nc_srcgrdsize_id, 
+     &                     nc_srcgrdarea_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_srcgrdarea_id, 
+     &                          'units', 14, 'square radians')
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'area_b', 
+     &                     NF_DOUBLE, 1, nc_dstgrdsize_id, 
+     &                     nc_dstgrdarea_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_dstgrdarea_id, 
+     &                          'units', 14, 'square radians')
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid fraction arrays
+      !***
+
+      ncstat = nf_def_var (nc_file_id, 'frac_a', 
+     &                     NF_DOUBLE, 1, nc_srcgrdsize_id, 
+     &                     nc_srcgrdfrac_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_srcgrdfrac_id, 
+     &                          'units', 8, 'unitless')
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'frac_b', 
+     &                     NF_DOUBLE, 1, nc_dstgrdsize_id, 
+     &                     nc_dstgrdfrac_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_file_id, nc_dstgrdfrac_id, 
+     &                          'units', 8, 'unitless')
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define mapping arrays
+      !***
+
+      ncstat = nf_def_var (nc_file_id, 'col', 
+     &                     NF_INT, 1, nc_numlinks_id, 
+     &                     nc_srcadd_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'row', 
+     &                     NF_INT, 1, nc_numlinks_id, 
+     &                     nc_dstadd_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_file_id, 'S', 
+     &                     NF_DOUBLE, 1, nc_numlinks_id, 
+     &                     nc_rmpmatrix_id)
+      call netcdf_error_handler(ncstat)
+
+      if (num_wts > 1) then
+        nc_dims2_id(1) = nc_numwgts1_id
+        nc_dims2_id(2) = nc_numlinks_id
+
+        ncstat = nf_def_var (nc_file_id, 'S2', 
+     &                     NF_DOUBLE, 2, nc_dims2_id, 
+     &                     nc_rmpmatrix2_id)
+        call netcdf_error_handler(ncstat)
+      endif
+
+      !***
+      !*** end definition stage
+      !***
+
+      ncstat = nf_enddef(nc_file_id)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     compute integer masks
+!
+!-----------------------------------------------------------------------
+
+      if (direction == 1) then
+        allocate (src_mask_int(grid1_size),
+     &            dst_mask_int(grid2_size))
+
+        where (grid2_mask)
+          dst_mask_int = 1
+        elsewhere
+          dst_mask_int = 0
+        endwhere
+
+        where (grid1_mask)
+          src_mask_int = 1
+        elsewhere
+          src_mask_int = 0
+        endwhere
+      else
+        allocate (src_mask_int(grid2_size),
+     &            dst_mask_int(grid1_size))
+
+        where (grid1_mask)
+          dst_mask_int = 1
+        elsewhere
+          dst_mask_int = 0
+        endwhere
+
+        where (grid2_mask)
+          src_mask_int = 1
+        elsewhere
+          src_mask_int = 0
+        endwhere
+      endif
+
+!-----------------------------------------------------------------------
+!
+!     change units of lat/lon coordinates if input units different
+!     from radians. if this is the second mapping, the conversion has
+!     already been done.
+!
+!-----------------------------------------------------------------------
+
+      if (grid1_units(1:7) == 'degrees' .and. direction == 1) then
+        grid1_center_lat = grid1_center_lat/deg2rad
+        grid1_center_lon = grid1_center_lon/deg2rad
+        grid1_corner_lat = grid1_corner_lat/deg2rad
+        grid1_corner_lon = grid1_corner_lon/deg2rad
+      endif
+
+      if (grid2_units(1:7) == 'degrees' .and. direction == 1) then
+        grid2_center_lat = grid2_center_lat/deg2rad
+        grid2_center_lon = grid2_center_lon/deg2rad
+        grid2_corner_lat = grid2_corner_lat/deg2rad
+        grid2_corner_lon = grid2_corner_lon/deg2rad
+      endif
+
+!-----------------------------------------------------------------------
+!
+!     write mapping data
+!
+!-----------------------------------------------------------------------
+
+      if (direction == 1) then
+        itmp1 = nc_srcgrddims_id
+        itmp2 = nc_dstgrddims_id
+      else
+        itmp2 = nc_srcgrddims_id
+        itmp1 = nc_dstgrddims_id
+      endif
+
+      ncstat = nf_put_var_int(nc_file_id, itmp1, grid1_dims)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_int(nc_file_id, itmp2, grid2_dims)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_int(nc_file_id, nc_srcgrdimask_id, 
+     &                        src_mask_int)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_int(nc_file_id, nc_dstgrdimask_id,
+     &                        dst_mask_int)
+      call netcdf_error_handler(ncstat)
+
+      deallocate(src_mask_int, dst_mask_int)
+
+      if (direction == 1) then
+        itmp1 = nc_srcgrdcntrlat_id
+        itmp2 = nc_srcgrdcntrlon_id
+        itmp3 = nc_srcgrdcrnrlat_id
+        itmp4 = nc_srcgrdcrnrlon_id
+      else
+        itmp1 = nc_dstgrdcntrlat_id
+        itmp2 = nc_dstgrdcntrlon_id
+        itmp3 = nc_dstgrdcrnrlat_id
+        itmp4 = nc_dstgrdcrnrlon_id
+      endif
+
+      ncstat = nf_put_var_double(nc_file_id, itmp1, grid1_center_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_file_id, itmp2, grid1_center_lon)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_file_id, itmp3, grid1_corner_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_file_id, itmp4, grid1_corner_lon)
+      call netcdf_error_handler(ncstat)
+
+      if (direction == 1) then
+        itmp1 = nc_dstgrdcntrlat_id
+        itmp2 = nc_dstgrdcntrlon_id
+        itmp3 = nc_dstgrdcrnrlat_id
+        itmp4 = nc_dstgrdcrnrlon_id
+      else
+        itmp1 = nc_srcgrdcntrlat_id
+        itmp2 = nc_srcgrdcntrlon_id
+        itmp3 = nc_srcgrdcrnrlat_id
+        itmp4 = nc_srcgrdcrnrlon_id
+      endif
+
+      ncstat = nf_put_var_double(nc_file_id, itmp1, grid2_center_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_file_id, itmp2, grid2_center_lon)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_file_id, itmp3, grid2_corner_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_file_id, itmp4, grid2_corner_lon)
+      call netcdf_error_handler(ncstat)
+
+      if (direction == 1) then
+        itmp1 = nc_srcgrdarea_id
+        itmp2 = nc_srcgrdfrac_id
+        itmp3 = nc_dstgrdarea_id
+        itmp4 = nc_dstgrdfrac_id
+      else
+        itmp1 = nc_dstgrdarea_id
+        itmp2 = nc_dstgrdfrac_id
+        itmp3 = nc_srcgrdarea_id
+        itmp4 = nc_srcgrdfrac_id
+      endif
+
+      if (luse_grid1_area) then
+        ncstat = nf_put_var_double(nc_file_id, itmp1, grid1_area_in)
+      else
+        ncstat = nf_put_var_double(nc_file_id, itmp1, grid1_area)
+      endif
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_file_id, itmp2, grid1_frac)
+      call netcdf_error_handler(ncstat)
+
+      ! Bug fix: mirror the grid1 handling above -- honour the
+      ! user-supplied destination cell areas when luse_grid2_area is
+      ! set.  Previously both branches wrote the SCRIP-computed
+      ! grid2_area, silently ignoring the flag.
+      if (luse_grid2_area) then
+        ncstat = nf_put_var_double(nc_file_id, itmp3, grid2_area_in)
+      else
+        ncstat = nf_put_var_double(nc_file_id, itmp3, grid2_area)
+      endif
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_file_id, itmp4, grid2_frac)
+      call netcdf_error_handler(ncstat)
+
+      if (direction == 1) then
+        ncstat = nf_put_var_int(nc_file_id, nc_srcadd_id, 
+     &                          grid1_add_map1)
+        call netcdf_error_handler(ncstat)
+
+        ncstat = nf_put_var_int(nc_file_id, nc_dstadd_id, 
+     &                          grid2_add_map1)
+        call netcdf_error_handler(ncstat)
+
+        if (num_wts == 1) then
+          ncstat = nf_put_var_double(nc_file_id, nc_rmpmatrix_id, 
+     &                               wts_map1)
+          call netcdf_error_handler(ncstat)
+        else
+          allocate(wts1(num_links_map1),wts2(num_wts-1,num_links_map1))
+
+          wts1 = wts_map1(1,:)
+          wts2 = wts_map1(2:,:)
+
+          ncstat = nf_put_var_double(nc_file_id, nc_rmpmatrix_id, 
+     &                               wts1)
+          call netcdf_error_handler(ncstat)
+          ncstat = nf_put_var_double(nc_file_id, nc_rmpmatrix2_id, 
+     &                               wts2)
+          call netcdf_error_handler(ncstat)
+          deallocate(wts1,wts2)
+        endif
+      else
+        ncstat = nf_put_var_int(nc_file_id, nc_srcadd_id, 
+     &                          grid2_add_map2)
+        call netcdf_error_handler(ncstat)
+
+        ncstat = nf_put_var_int(nc_file_id, nc_dstadd_id, 
+     &                          grid1_add_map2)
+        call netcdf_error_handler(ncstat)
+
+        if (num_wts == 1) then
+          ncstat = nf_put_var_double(nc_file_id, nc_rmpmatrix_id, 
+     &                               wts_map2)
+          call netcdf_error_handler(ncstat)
+        else
+          allocate(wts1(num_links_map2),wts2(num_wts-1,num_links_map2))
+
+          wts1 = wts_map2(1,:)
+          wts2 = wts_map2(2:,:)
+
+          ncstat = nf_put_var_double(nc_file_id, nc_rmpmatrix_id, 
+     &                               wts1)
+          call netcdf_error_handler(ncstat)
+          ncstat = nf_put_var_double(nc_file_id, nc_rmpmatrix2_id, 
+     &                               wts2)
+          call netcdf_error_handler(ncstat)
+          deallocate(wts1,wts2)
+        endif
+      endif
+
+      ncstat = nf_close(nc_file_id)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+
+      end subroutine write_remap_csm
+
+!***********************************************************************
+
+      subroutine sort_add(add1, add2, weights)
+
+!-----------------------------------------------------------------------
+!
+!     this routine sorts address and weight arrays based on the
+!     destination address with the source address as a secondary
+!     sorting criterion.  the method is a standard heap sort.
+!
+!-----------------------------------------------------------------------
+
+      use kinds_mod     ! defines common data types
+      use constants     ! defines common scalar constants
+
+      implicit none
+
+!-----------------------------------------------------------------------
+!
+!     Input and Output arrays
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), intent(inout), dimension(:) ::
+     &        add1,       ! destination address array (num_links)
+     &        add2        ! source      address array
+
+      real (kind=dbl_kind), intent(inout), dimension(:,:) ::
+     &        weights     ! remapping weights (num_wts, num_links)
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind) ::
+     &          num_links,          ! num of links for this mapping
+     &          num_wts,            ! num of weights for this mapping
+     &          add1_tmp, add2_tmp, ! temp for addresses during swap
+     &          lvl, final_lvl,     ! level indexes for heap sort levels
+     &          chk_lvl1, chk_lvl2, max_lvl
+
+      real (kind=dbl_kind), dimension(SIZE(weights,DIM=1)) ::
+     &          wgttmp              ! temp for holding wts during swap
+
+!-----------------------------------------------------------------------
+!
+!     determine total number of links to sort and number of weights
+!
+!-----------------------------------------------------------------------
+
+      num_links = SIZE(add1)
+      num_wts   = SIZE(weights, DIM=1)
+
+      !***
+      !*** bug fix: with fewer than two links there is nothing to
+      !*** sort, and the unconditional final swap below would access
+      !*** element 2 out of bounds
+      !***
+
+      if (num_links < 2) return
+
+!-----------------------------------------------------------------------
+!
+!     start at the lowest level (N/2) of the tree and sift lower 
+!     values to the bottom of the tree, promoting the larger numbers
+!
+!-----------------------------------------------------------------------
+
+      do lvl=num_links/2,1,-1
+
+        final_lvl = lvl
+        add1_tmp = add1(lvl)
+        add2_tmp = add2(lvl)
+        wgttmp(:) = weights(:,lvl)
+
+        !***
+        !*** loop until proper level is found for this link, or reach
+        !*** bottom
+        !***
+
+        sift_loop1: do
+
+          !***
+          !*** find the largest of the two daughters
+          !***
+
+          chk_lvl1 = 2*final_lvl
+          chk_lvl2 = 2*final_lvl+1
+          if (chk_lvl1 .EQ. num_links) chk_lvl2 = chk_lvl1
+
+          if ((add1(chk_lvl1) >  add1(chk_lvl2)) .OR.
+     &       ((add1(chk_lvl1) == add1(chk_lvl2)) .AND.
+     &        (add2(chk_lvl1) >  add2(chk_lvl2)))) then
+            max_lvl = chk_lvl1
+          else 
+            max_lvl = chk_lvl2
+          endif
+
+          !***
+          !*** if the parent is greater than both daughters,
+          !*** the correct level has been found
+          !***
+
+          if ((add1_tmp .GT. add1(max_lvl)) .OR.
+     &       ((add1_tmp .EQ. add1(max_lvl)) .AND.
+     &        (add2_tmp .GT. add2(max_lvl)))) then
+            add1(final_lvl) = add1_tmp
+            add2(final_lvl) = add2_tmp
+            weights(:,final_lvl) = wgttmp(:)
+            exit sift_loop1
+
+          !***
+          !*** otherwise, promote the largest daughter and push
+          !*** down one level in the tree.  if haven't reached
+          !*** the end of the tree, repeat the process.  otherwise
+          !*** store last values and exit the loop
+          !***
+
+          else 
+            add1(final_lvl) = add1(max_lvl)
+            add2(final_lvl) = add2(max_lvl)
+            weights(:,final_lvl) = weights(:,max_lvl)
+
+            final_lvl = max_lvl
+            if (2*final_lvl > num_links) then
+              add1(final_lvl) = add1_tmp
+              add2(final_lvl) = add2_tmp
+              weights(:,final_lvl) = wgttmp(:)
+              exit sift_loop1
+            endif
+          endif
+        end do sift_loop1
+      end do
+
+!-----------------------------------------------------------------------
+!
+!     now that the heap has been sorted, strip off the top (largest)
+!     value and promote the values below
+!
+!-----------------------------------------------------------------------
+
+      do lvl=num_links,3,-1
+
+        !***
+        !*** move the top value and insert it into the correct place
+        !***
+
+        add1_tmp = add1(lvl)
+        add1(lvl) = add1(1)
+
+        add2_tmp = add2(lvl)
+        add2(lvl) = add2(1)
+
+        wgttmp(:) = weights(:,lvl)
+        weights(:,lvl) = weights(:,1)
+
+        !***
+        !*** as above this loop sifts the tmp values down until proper 
+        !*** level is reached
+        !***
+
+        final_lvl = 1
+
+        sift_loop2: do
+
+          !***
+          !*** find the largest of the two daughters
+          !***
+
+          chk_lvl1 = 2*final_lvl
+          chk_lvl2 = 2*final_lvl+1
+          if (chk_lvl2 >= lvl) chk_lvl2 = chk_lvl1
+
+          if ((add1(chk_lvl1) >  add1(chk_lvl2)) .OR.
+     &       ((add1(chk_lvl1) == add1(chk_lvl2)) .AND.
+     &        (add2(chk_lvl1) >  add2(chk_lvl2)))) then
+            max_lvl = chk_lvl1
+          else 
+            max_lvl = chk_lvl2
+          endif
+
+          !***
+          !*** if the parent is greater than both daughters,
+          !*** the correct level has been found
+          !***
+
+          if ((add1_tmp >  add1(max_lvl)) .OR.
+     &       ((add1_tmp == add1(max_lvl)) .AND.
+     &        (add2_tmp >  add2(max_lvl)))) then
+            add1(final_lvl) = add1_tmp
+            add2(final_lvl) = add2_tmp
+            weights(:,final_lvl) = wgttmp(:)
+            exit sift_loop2
+
+          !***
+          !*** otherwise, promote the largest daughter and push
+          !*** down one level in the tree.  if haven't reached
+          !*** the end of the tree, repeat the process.  otherwise
+          !*** store last values and exit the loop
+          !***
+
+          else 
+            add1(final_lvl) = add1(max_lvl)
+            add2(final_lvl) = add2(max_lvl)
+            weights(:,final_lvl) = weights(:,max_lvl)
+
+            final_lvl = max_lvl
+            if (2*final_lvl >= lvl) then
+              add1(final_lvl) = add1_tmp
+              add2(final_lvl) = add2_tmp
+              weights(:,final_lvl) = wgttmp(:)
+              exit sift_loop2
+            endif
+          endif
+        end do sift_loop2
+      end do
+
+      !***
+      !*** swap the last two entries
+      !***
+
+
+      add1_tmp = add1(2)
+      add1(2)  = add1(1)
+      add1(1)  = add1_tmp
+
+      add2_tmp = add2(2)
+      add2(2)  = add2(1)
+      add2(1)  = add2_tmp
+
+      wgttmp (:)   = weights(:,2)
+      weights(:,2) = weights(:,1)
+      weights(:,1) = wgttmp (:)
+
+!-----------------------------------------------------------------------
+
+      end subroutine sort_add
+
+!***********************************************************************
+
+      end module remap_write
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

+ 214 - 0
interpolation/scrip_sources/scrip.f

@@ -0,0 +1,214 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+!     This routine is the driver for computing the addresses and weights 
+!     for interpolating between two grids on a sphere.
+!
+!-----------------------------------------------------------------------
+!
+!     CVS:$Id: scrip.f,v 1.6 2001/08/21 21:06:44 pwjones Exp $
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!***********************************************************************
+
+      program scrip
+
+!-----------------------------------------------------------------------
+
+      use kinds_mod                  ! module defining data types
+      use constants                  ! module for common constants
+      use iounits                    ! I/O unit manager
+      use timers                     ! CPU timers
+      use grids                      ! module with grid information
+      use remap_vars                 ! common remapping variables
+      use remap_conservative         ! routines for conservative remap
+      use remap_distance_weight      ! routines for dist-weight remap
+      use remap_bilinear             ! routines for bilinear interp
+      use remap_bicubic              ! routines for bicubic  interp
+      use remap_write                ! routines for remap output
+
+      implicit none
+
+!-----------------------------------------------------------------------
+!
+!     input namelist variables
+!
+!-----------------------------------------------------------------------
+
+      character (char_len) :: 
+     &           grid1_file,   ! filename of grid file containing grid1
+     &           grid2_file,   ! filename of grid file containing grid2
+     &           interp_file1, ! filename for output remap data (map1)
+     &           interp_file2, ! filename for output remap data (map2)
+     &           map1_name,    ! name for mapping from grid1 to grid2
+     &           map2_name,    ! name for mapping from grid2 to grid1
+     &           map_method,   ! choice for mapping method
+     &           normalize_opt,! option for normalizing weights
+     &           output_opt    ! option for output conventions
+
+      ! NOTE(review): num_maps, map_type, norm_opt, luse_grid_centers,
+      ! luse_grid1_area, luse_grid2_area, restrict_type and
+      ! num_srch_bins are not declared locally -- presumably they are
+      ! host-associated from the used modules (remap_vars, grids);
+      ! confirm against those modules.  A dead local 'nmap' declaration
+      ! (declared, never referenced) was removed.
+      namelist /remap_inputs/ grid1_file, grid2_file, 
+     &                        interp_file1, interp_file2,
+     &                        map1_name, map2_name, num_maps,
+     &                        luse_grid1_area, luse_grid2_area,
+     &                        map_method, normalize_opt, output_opt,
+     &                        restrict_type, num_srch_bins
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind) :: n,     ! dummy counter
+     &                           iunit  ! unit number for namelist file
+
+!-----------------------------------------------------------------------
+!
+!     initialize timers
+!
+!-----------------------------------------------------------------------
+
+      call timers_init
+      do n=1,max_timers
+        call timer_clear(n)
+      end do
+
+!-----------------------------------------------------------------------
+!
+!     read input namelist (defaults first, then override from file)
+!
+!-----------------------------------------------------------------------
+
+      grid1_file    = 'unknown'
+      grid2_file    = 'unknown'
+      interp_file1  = 'unknown'
+      interp_file2  = 'unknown'
+      map1_name     = 'unknown'
+      map2_name     = 'unknown'
+      luse_grid1_area = .false.
+      luse_grid2_area = .false.
+      num_maps      = 2
+      map_type      = 1
+      normalize_opt = 'fracarea'
+      output_opt    = 'scrip'
+      restrict_type = 'latitude'
+      num_srch_bins = 900
+
+      call get_unit(iunit)
+      open(iunit, file='scrip_in', status='old', form='formatted')
+      read(iunit, nml=remap_inputs)
+      call release_unit(iunit)
+
+      ! translate the method string into the module's map_type code;
+      ! grid centers are only needed for the non-conservative methods
+      select case(map_method)
+      case ('conservative')
+        map_type = map_type_conserv
+        luse_grid_centers = .false.
+      case ('bilinear')
+        map_type = map_type_bilinear
+        luse_grid_centers = .true.
+      case ('bicubic')
+        map_type = map_type_bicubic
+        luse_grid_centers = .true.
+      case ('distwgt')
+        map_type = map_type_distwgt
+        luse_grid_centers = .true.
+      case default
+        stop 'unknown mapping method'
+      end select
+
+      ! only the first four characters of the normalization option are
+      ! significant ('none', 'frac...', 'dest...')
+      select case(normalize_opt(1:4))
+      case ('none')
+        norm_opt = norm_opt_none
+      case ('frac')
+        norm_opt = norm_opt_frcarea
+      case ('dest')
+        norm_opt = norm_opt_dstarea
+      case default
+        stop 'unknown normalization option'
+      end select
+
+!-----------------------------------------------------------------------
+!
+!     initialize grid information for both grids
+!
+!-----------------------------------------------------------------------
+
+      call grid_init(grid1_file, grid2_file)
+
+      write(stdout, *) ' Computing remappings between: ',grid1_name
+      write(stdout, *) '                          and  ',grid2_name
+
+!-----------------------------------------------------------------------
+!
+!     initialize some remapping variables.
+!
+!-----------------------------------------------------------------------
+
+      call init_remap_vars
+
+!-----------------------------------------------------------------------
+!
+!     call appropriate interpolation setup routine based on type of
+!     remapping requested.
+!
+!-----------------------------------------------------------------------
+
+      select case(map_type)
+      case(map_type_conserv)
+        call remap_conserv
+      case(map_type_bilinear)
+        call remap_bilin
+      case(map_type_distwgt)
+        call remap_distwgt
+      case(map_type_bicubic)
+        call remap_bicub
+      case default
+        stop 'Invalid Map Type'
+      end select
+
+!-----------------------------------------------------------------------
+!
+!     reduce size of remapping arrays and then write remapping info
+!     to a file.
+!
+!-----------------------------------------------------------------------
+
+      if (num_links_map1 /= max_links_map1) then
+        call resize_remap_vars(1, num_links_map1-max_links_map1)
+      endif
+      if ((num_maps > 1) .and. (num_links_map2 /= max_links_map2)) then
+        call resize_remap_vars(2, num_links_map2-max_links_map2)
+      endif
+
+      call write_remap(map1_name, map2_name, 
+     &                 interp_file1, interp_file2, output_opt)
+
+!-----------------------------------------------------------------------
+
+      end program scrip
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

+ 981 - 0
interpolation/scrip_sources/scrip_test.f

@@ -0,0 +1,981 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+!     this program is a short driver that tests the remappings using
+!     a simple analytic field.  the results are written in netCDF
+!     format.
+!
+!     CVS: $Id: scrip_test.f,v 1.6 2000/04/19 21:45:09 pwjones Exp $
+!
+!-----------------------------------------------------------------------
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!***********************************************************************
+
+      program remap_test
+
+!-----------------------------------------------------------------------
+
+      use kinds_mod    ! defines common data types
+      use constants    ! defines common constants
+      use iounits      ! I/O unit manager
+      use netcdf_mod   ! netcdf I/O stuff
+      use grids        ! module containing grid info
+      use remap_vars   ! module containing remapping info
+      use remap_mod    ! module containing remapping routines
+      use remap_read   ! routines for reading remap files
+
+      implicit none
+
+!-----------------------------------------------------------------------
+!
+!     input namelist variables
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind) ::
+     &  field_choice   ! choice of field to be interpolated
+
+      character (char_len) :: 
+     &  interp_file,   ! filename containing remap data (map1)
+     &  output_file    ! filename for test results
+
+      namelist /remap_inputs/ field_choice, interp_file, output_file
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      character (char_len) :: 
+     &        map_name      ! name for mapping from grid1 to grid2
+
+      integer (kind=int_kind) ::    ! netCDF ids for files and arrays
+     &        ncstat, nc_outfile_id, 
+     &        nc_srcgrdcntrlat_id, nc_srcgrdcntrlon_id,
+     &        nc_dstgrdcntrlat_id, nc_dstgrdcntrlon_id,
+     &        nc_srcgrdrank_id, nc_dstgrdrank_id,
+     &        nc_srcgrdimask_id, nc_dstgrdimask_id,
+     &        nc_srcgrdarea_id, nc_dstgrdarea_id,
+     &        nc_srcgrdfrac_id, nc_dstgrdfrac_id,
+     &        nc_srcarray_id, nc_srcgradlat_id, nc_srcgradlon_id,
+     &        nc_dstarray1_id, nc_dstarray1a_id, nc_dstarray2_id,
+     &        nc_dsterror1_id, nc_dsterror1a_id, nc_dsterror2_id
+
+      integer (kind=int_kind), dimension(:), allocatable ::
+     &        nc_grid1size_id, nc_grid2size_id
+
+!-----------------------------------------------------------------------
+
+      character (char_len) :: 
+     &          dim_name    ! netCDF dimension name
+
+      integer (kind=int_kind) :: i,j,n,imin,imax,idiff,
+     &    ip1,im1,jp1,jm1,nx,ny, ! for computing bicub gradients
+     &    in,is,ie,iw,ine,inw,ise,isw,
+     &    iunit                  ! unit number for namelist file
+
+      integer (kind=int_kind), dimension(:), allocatable ::
+     &    grid1_imask, grid2_imask, grid2_count
+
+      real (kind=dbl_kind) ::
+     &    delew, delns,     ! variables for computing bicub gradients
+     &    length            ! length scale for cosine hill test field
+
+      real (kind=dbl_kind), dimension(:), allocatable ::
+     &    grid1_array, 
+     &    grid1_tmp, 
+     &    grad1_lat, 
+     &    grad1_lon, 
+     &    grad1_latlon, 
+     &    grad1_lat_zero, 
+     &    grad1_lon_zero, 
+     &    grid2_array, 
+     &    grid2_err,
+     &    grid2_tmp
+
+!-----------------------------------------------------------------------
+!
+!     read namelist for file and mapping info
+!
+!-----------------------------------------------------------------------
+
+      call get_unit(iunit)
+      open(iunit, file='scrip_test_in', status='old', form='formatted')
+      read(iunit, nml=remap_inputs)
+      call release_unit(iunit)
+      write(*,nml=remap_inputs)
+
+!-----------------------------------------------------------------------
+!
+!     read remapping data
+!
+!-----------------------------------------------------------------------
+
+      call read_remap(map_name, interp_file)
+
+!-----------------------------------------------------------------------
+!
+!     allocate arrays
+!
+!-----------------------------------------------------------------------
+
+      allocate (grid1_array    (grid1_size), 
+     &          grid1_tmp      (grid1_size),
+     &          grad1_lat      (grid1_size), 
+     &          grad1_lon      (grid1_size), 
+     &          grad1_lat_zero (grid1_size), 
+     &          grad1_lon_zero (grid1_size), 
+     &          grid1_imask    (grid1_size),
+     &          grid2_array    (grid2_size), 
+     &          grid2_err      (grid2_size),
+     &          grid2_tmp      (grid2_size),
+     &          grid2_imask    (grid2_size),
+     &          grid2_count    (grid2_size))
+
+      where (grid1_mask)
+        grid1_imask = 1
+      elsewhere
+        grid1_imask = 0
+      endwhere
+      where (grid2_mask)
+        grid2_imask = 1
+      elsewhere
+        grid2_imask = 0
+      endwhere
+
+!-----------------------------------------------------------------------
+!
+!     setup a NetCDF file for output
+!
+!-----------------------------------------------------------------------
+
+      !***
+      !*** create netCDF dataset 
+      !***
+
+      ncstat = nf_create (output_file, NF_CLOBBER, nc_outfile_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_outfile_id, NF_GLOBAL, 'title',
+     &                          len_trim(map_name), map_name)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid size dimensions
+      !***
+
+      allocate( nc_grid1size_id(grid1_rank),
+     &          nc_grid2size_id(grid2_rank))
+
+      do n=1,grid1_rank
+        write(dim_name,1000) 'grid1_dim',n
+        ncstat = nf_def_dim (nc_outfile_id, dim_name, 
+     &                       grid1_dims(n), nc_grid1size_id(n))
+        call netcdf_error_handler(ncstat)
+      end do
+
+      do n=1,grid2_rank
+        write(dim_name,1000) 'grid2_dim',n
+        ncstat = nf_def_dim (nc_outfile_id, dim_name, 
+     &                       grid2_dims(n), nc_grid2size_id(n))
+        call netcdf_error_handler(ncstat)
+      end do
+ 1000 format(a9,i1)
+
+      !***
+      !*** define grid center latitude array
+      !***
+
+      ncstat = nf_def_var (nc_outfile_id, 'src_grid_center_lat', 
+     &                     NF_DOUBLE, grid1_rank, nc_grid1size_id, 
+     &                     nc_srcgrdcntrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_outfile_id, nc_srcgrdcntrlat_id, 
+     &                          'units', 7, 'radians')
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_outfile_id, 'dst_grid_center_lat', 
+     &                     NF_DOUBLE, grid2_rank, nc_grid2size_id, 
+     &                     nc_dstgrdcntrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_outfile_id, nc_dstgrdcntrlat_id, 
+     &                          'units', 7, 'radians')
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid center longitude array
+      !***
+
+      ncstat = nf_def_var (nc_outfile_id, 'src_grid_center_lon', 
+     &                     NF_DOUBLE, grid1_rank, nc_grid1size_id, 
+     &                     nc_srcgrdcntrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_outfile_id, nc_srcgrdcntrlon_id, 
+     &                          'units', 7, 'radians')
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_outfile_id, 'dst_grid_center_lon', 
+     &                     NF_DOUBLE, grid2_rank, nc_grid2size_id, 
+     &                     nc_dstgrdcntrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_outfile_id, nc_dstgrdcntrlon_id, 
+     &                          'units', 7, 'radians')
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid mask
+      !***
+
+      ncstat = nf_def_var (nc_outfile_id, 'src_grid_imask', NF_INT,
+     &               grid1_rank, nc_grid1size_id, nc_srcgrdimask_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_outfile_id, nc_srcgrdimask_id, 
+     &                          'units', 8, 'unitless')
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_outfile_id, 'dst_grid_imask', NF_INT,
+     &               grid2_rank, nc_grid2size_id, nc_dstgrdimask_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_outfile_id, nc_dstgrdimask_id, 
+     &                          'units', 8, 'unitless')
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid area arrays
+      !***
+
+      ncstat = nf_def_var (nc_outfile_id, 'src_grid_area', 
+     &                     NF_DOUBLE, grid1_rank, nc_grid1size_id, 
+     &                     nc_srcgrdarea_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_outfile_id, 'dst_grid_area', 
+     &                     NF_DOUBLE, grid2_rank, nc_grid2size_id, 
+     &                     nc_dstgrdarea_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid fraction arrays
+      !***
+
+      ncstat = nf_def_var (nc_outfile_id, 'src_grid_frac', 
+     &                     NF_DOUBLE, grid1_rank, nc_grid1size_id, 
+     &                     nc_srcgrdfrac_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_outfile_id, 'dst_grid_frac', 
+     &                     NF_DOUBLE, grid2_rank, nc_grid2size_id, 
+     &                     nc_dstgrdfrac_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define source array
+      !***
+
+      ncstat = nf_def_var (nc_outfile_id, 'src_array', 
+     &                     NF_DOUBLE, grid1_rank, nc_grid1size_id, 
+     &                     nc_srcarray_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define gradient arrays
+      !***
+
+      ncstat = nf_def_var (nc_outfile_id, 'src_grad_lat', 
+     &                     NF_DOUBLE, grid1_rank, nc_grid1size_id, 
+     &                     nc_srcgradlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_outfile_id, 'src_grad_lon', 
+     &                     NF_DOUBLE, grid1_rank, nc_grid1size_id, 
+     &                     nc_srcgradlon_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define destination arrays
+      !***
+
+      ncstat = nf_def_var (nc_outfile_id, 'dst_array1', 
+     &                     NF_DOUBLE, grid2_rank, nc_grid2size_id, 
+     &                     nc_dstarray1_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_outfile_id, 'dst_array1a', 
+     &                     NF_DOUBLE, grid2_rank, nc_grid2size_id, 
+     &                     nc_dstarray1a_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_outfile_id, 'dst_array2', 
+     &                     NF_DOUBLE, grid2_rank, nc_grid2size_id, 
+     &                     nc_dstarray2_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define error arrays
+      !***
+
+      ncstat = nf_def_var (nc_outfile_id, 'dst_error1', 
+     &                     NF_DOUBLE, grid2_rank, nc_grid2size_id, 
+     &                     nc_dsterror1_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_outfile_id, 'dst_error1a', 
+     &                     NF_DOUBLE, grid2_rank, nc_grid2size_id, 
+     &                     nc_dsterror1a_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_var (nc_outfile_id, 'dst_error2', 
+     &                     NF_DOUBLE, grid2_rank, nc_grid2size_id, 
+     &                     nc_dsterror2_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** end definition stage
+      !***
+
+      ncstat = nf_enddef(nc_outfile_id)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     write some grid info
+!
+!-----------------------------------------------------------------------
+
+      !***
+      !*** write grid center latitude array
+      !***
+
+      ncstat = nf_put_var_double(nc_outfile_id, nc_srcgrdcntrlat_id,
+     &                           grid1_center_lat)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_outfile_id, nc_dstgrdcntrlat_id,
+     &                           grid2_center_lat)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** write grid center longitude array
+      !***
+
+      ncstat = nf_put_var_double(nc_outfile_id, nc_srcgrdcntrlon_id,
+     &                           grid1_center_lon)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_outfile_id, nc_dstgrdcntrlon_id,
+     &                           grid2_center_lon)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** write grid mask
+      !***
+
+      ncstat = nf_put_var_int(nc_outfile_id, nc_srcgrdimask_id,
+     &                        grid1_imask)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_int(nc_outfile_id, nc_dstgrdimask_id,
+     &                        grid2_imask)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** write grid area arrays
+      !***
+
+      ncstat = nf_put_var_double(nc_outfile_id, nc_srcgrdarea_id,
+     &                           grid1_area)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_outfile_id, nc_dstgrdarea_id,
+     &                           grid2_area)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** write grid fraction arrays
+      !***
+
+      ncstat = nf_put_var_double(nc_outfile_id, nc_srcgrdfrac_id,
+     &                           grid1_frac)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_outfile_id, nc_dstgrdfrac_id,
+     &                           grid2_frac)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     set up fields for test cases based on user choice
+!
+!-----------------------------------------------------------------------
+
+      select case (field_choice)
+      case(1)  !*** cosine hill at lon=pi and lat=0
+
+        length = 0.1*pi2
+
+        grid1_array = cos(grid1_center_lat)*cos(grid1_center_lon)
+        grid2_array = cos(grid2_center_lat)*cos(grid2_center_lon)
+
+        grid1_tmp = acos(-grid1_array)/length
+        grid2_tmp = acos(-grid2_array)/length
+
+        where (grid1_tmp <= one)
+          grad1_lat   = (pi/length)*sin(pi*grid1_tmp)*
+     &                  sin(grid1_center_lat)*cos(grid1_center_lon)/
+     &                  sqrt(one-grid1_array**2)
+          grad1_lon   = (pi/length)*sin(pi*grid1_tmp)*
+     &                  sin(grid1_center_lon)/
+     &                  sqrt(one-grid1_array**2)
+          grid1_array = two + cos(pi*grid1_tmp)
+        elsewhere
+          grid1_array = one
+          grad1_lat   = zero
+          grad1_lon   = zero
+        endwhere
+        
+        where (grid2_tmp <= one)
+          grid2_array = two + cos(pi*grid2_tmp)
+        elsewhere
+          grid2_array = one
+        endwhere
+        
+        where (.not. grid1_mask)
+          grid1_array = zero
+          grad1_lat   = zero
+          grad1_lon   = zero
+        end where
+
+        where (grid2_frac < .001) grid2_array = zero
+
+      case(2)  !*** pseudo-spherical harmonic l=2,m=2
+
+        where (grid1_mask)
+          grid1_array = two + cos(grid1_center_lat)**2*
+     &                    cos(two*grid1_center_lon)
+          grad1_lat   = -sin(two*grid1_center_lat)*
+     &                   cos(two*grid1_center_lon)
+          grad1_lon   = -two*cos(grid1_center_lat)*
+     &                   sin(two*grid1_center_lon)
+        elsewhere
+          grid1_array = zero
+          grad1_lat   = zero
+          grad1_lon   = zero
+        end where
+
+        where (grid2_frac > .001) 
+          grid2_array = two + cos(grid2_center_lat)**2*
+     &                        cos(two*grid2_center_lon)
+        elsewhere
+          grid2_array = zero
+        end where
+
+      case(3)  !*** pseudo-spherical harmonic l=32, m=16
+
+        where (grid1_mask)
+          grid1_array = two + sin(two*grid1_center_lat)**16*
+     &                        cos(16.*grid1_center_lon)
+          grad1_lat   = 32.*sin(two*grid1_center_lat)**15*
+     &                      cos(two*grid1_center_lat)*
+     &                      cos(16.*grid1_center_lon)
+          grad1_lon   = -32.*sin(two*grid1_center_lat)**15*
+     &                       sin(grid1_center_lat)*
+     &                   sin(16.*grid1_center_lon)
+        elsewhere
+          grid1_array = zero
+          grad1_lat   = zero
+          grad1_lon   = zero
+        end where
+
+        where (grid2_frac > .001) 
+          grid2_array = two + sin(two*grid2_center_lat)**16*
+     &                        cos(16.*grid2_center_lon)
+        elsewhere
+          grid2_array = zero
+        end where
+
+      case default
+
+        stop 'Bad choice for field to interpolate'
+
+      end select
+
+!-----------------------------------------------------------------------
+!
+!     if bicubic, we need 3 gradients in logical space
+!
+!-----------------------------------------------------------------------
+
+      if (map_type == map_type_bicubic) then
+        allocate (grad1_latlon (grid1_size)) 
+
+        nx = grid1_dims(1)
+        ny = grid1_dims(2)
+
+        do n=1,grid1_size
+
+          grad1_lat(n) = zero
+          grad1_lon(n) = zero
+          grad1_latlon(n) = zero
+
+          if (grid1_mask(n)) then
+
+            delew = half
+            delns = half
+
+            j = (n-1)/nx + 1
+            i = n - (j-1)*nx
+
+            ip1 = i+1
+            im1 = i-1
+            jp1 = j+1
+            jm1 = j-1
+
+            if (ip1 > nx) ip1 = ip1 - nx
+            if (im1 < 1 ) im1 = nx
+            if (jp1 > ny) then
+              jp1 = j
+              delns = one
+            endif
+            if (jm1 < 1 ) then
+              jm1 = j
+              delns = one
+            endif
+
+            in  = (jp1-1)*nx + i
+            is  = (jm1-1)*nx + i
+            ie  = (j  -1)*nx + ip1
+            iw  = (j  -1)*nx + im1
+
+            ine = (jp1-1)*nx + ip1
+            inw = (jp1-1)*nx + im1
+            ise = (jm1-1)*nx + ip1
+            isw = (jm1-1)*nx + im1
+
+            !*** compute i-gradient
+
+            if (.not. grid1_mask(ie)) then
+              ie = n
+              delew = one
+            endif
+            if (.not. grid1_mask(iw)) then
+              iw = n
+              delew = one
+            endif
+ 
+            grad1_lat(n) = delew*(grid1_array(ie) - grid1_array(iw))
+
+            !*** compute j-gradient
+
+            if (.not. grid1_mask(in)) then
+              in = n
+              delns = one
+            endif
+            if (.not. grid1_mask(is)) then
+              is = n
+              delns = one
+            endif
+ 
+            grad1_lon(n) = delns*(grid1_array(in) - grid1_array(is))
+
+            !*** compute ij-gradient
+
+            delew = half
+            if (jp1 == j .or. jm1 == j) then
+              delns = one
+            else 
+              delns = half
+            endif
+
+            if (.not. grid1_mask(ine)) then
+              if (in /= n) then
+                ine = in
+                delew = one
+              else if (ie /= n) then
+                ine = ie
+                inw = iw
+                if (inw == n) delew = one
+                delns = one
+              else
+                ine = n
+                inw = iw
+                delew = one
+                delns = one
+              endif
+            endif
+
+            if (.not. grid1_mask(inw)) then
+              if (in /= n) then
+                inw = in
+                delew = one
+              else if (iw /= n) then
+                inw = iw
+                ine = ie
+                if (ie == n) delew = one
+                delns = one
+              else
+                inw = n
+                ine = ie
+                delew = one
+                delns = one
+              endif
+            endif
+
+            grad1_lat_zero(n) = delew*(grid1_array(ine) -
+     &                                 grid1_array(inw))
+
+            if (.not. grid1_mask(ise)) then
+              if (is /= n) then
+                ise = is
+                delew = one
+              else if (ie /= n) then
+                ise = ie
+                isw = iw
+                if (isw == n) delew = one
+                delns = one
+              else
+                ise = n
+                isw = iw
+                delew = one
+                delns = one
+              endif
+            endif
+
+            if (.not. grid1_mask(isw)) then
+              if (is /= n) then
+                isw = is
+                delew = one
+              else if (iw /= n) then
+                isw = iw
+                ise = ie
+                if (ie == n) delew = one
+                delns = one
+              else
+                isw = n
+                ise = ie
+                delew = one
+                delns = one
+              endif
+            endif
+
+            grad1_lon_zero(n) = delew*(grid1_array(ise) -
+     &                                 grid1_array(isw))
+
+            grad1_latlon(n) = delns*(grad1_lat_zero(n) -
+     &                               grad1_lon_zero(n))
+
+          endif
+        enddo
+      endif
+
+!-----------------------------------------------------------------------
+!
+!     test a first-order map from grid1 to grid2
+!
+!-----------------------------------------------------------------------
+
+      grad1_lat_zero = zero
+      grad1_lon_zero = zero
+
+      if (map_type /= map_type_bicubic) then
+        call remap(grid2_tmp, wts_map1, grid2_add_map1, grid1_add_map1,
+     &             grid1_array)
+      else
+        call remap(grid2_tmp, wts_map1, grid2_add_map1, grid1_add_map1,
+     &             grid1_array, src_grad1=grad1_lat,
+     &                          src_grad2=grad1_lon,
+     &                          src_grad3=grad1_latlon)
+      endif
+
+      if (map_type == map_type_conserv) then
+        select case (norm_opt)
+        case (norm_opt_none)
+          grid2_err = grid2_frac*grid2_area
+          where (grid2_err /= zero)
+            grid2_tmp = grid2_tmp/grid2_err
+          else where
+            grid2_tmp = zero
+          end where
+        case (norm_opt_frcarea)
+        case (norm_opt_dstarea)
+          where (grid2_frac /= zero)
+            grid2_tmp = grid2_tmp/grid2_frac
+          else where
+            grid2_tmp = zero
+          end where
+        end select
+      end if
+
+      where (grid2_frac > .999)
+        grid2_err = (grid2_tmp - grid2_array)/grid2_array
+      elsewhere 
+        grid2_err = zero
+      end where
+
+      print *,'First order mapping from grid1 to grid2:'
+      print *,'----------------------------------------'
+      print *,'Grid1 min,max: ',minval(grid1_array),maxval(grid1_array)
+      print *,'Grid2 min,max: ',minval(grid2_tmp  ),maxval(grid2_tmp  )
+      print *,' Err2 min,max: ',minval(grid2_err),maxval(grid2_err)
+      print *,' Err2    mean: ',sum(abs(grid2_err))/
+     &                          count(grid2_frac > .999)
+
+      !***
+      !*** Conservation Test
+      !***
+
+      print *,'Conservation:'
+      print *,'Grid1 Integral = ',sum(grid1_array*grid1_area*grid1_frac)
+      print *,'Grid2 Integral = ',sum(grid2_tmp  *grid2_area*grid2_frac)
+
+!-----------------------------------------------------------------------
+!
+!     write results to NetCDF file
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_put_var_double(nc_outfile_id, nc_srcarray_id,
+     &                           grid1_array)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_outfile_id, nc_dstarray1_id,
+     &                           grid2_tmp  )
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_var_double(nc_outfile_id, nc_dsterror1_id,
+     &                           grid2_err)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     for conservative mappings:
+!     test a second-order map from grid1 to grid2 with only lat grads
+!
+!-----------------------------------------------------------------------
+
+      if (map_type == map_type_conserv) then
+
+        call remap(grid2_tmp, wts_map1, grid2_add_map1, grid1_add_map1,
+     &             grid1_array, src_grad1=grad1_lat,
+     &                          src_grad2=grad1_lon_zero) 
+
+        select case (norm_opt)
+        case (norm_opt_none)
+          grid2_err = grid2_frac*grid2_area
+          where (grid2_err /= zero)
+            grid2_tmp = grid2_tmp/grid2_err
+          else where
+            grid2_tmp = zero
+          end where
+        case (norm_opt_frcarea)
+        case (norm_opt_dstarea)
+          where (grid2_frac /= zero)
+            grid2_tmp = grid2_tmp/grid2_frac
+          else where
+            grid2_tmp = zero
+          end where
+        end select
+
+        where (grid2_frac > .999)
+          grid2_err = (grid2_tmp - grid2_array)/grid2_array
+        elsewhere 
+          grid2_err = zero
+        end where
+
+        print *,'Second order mapping from grid1 to grid2 (lat only):'
+        print *,'----------------------------------------'
+        print *,'Grid1 min,max: ',minval(grid1_array),
+     &                            maxval(grid1_array)
+        print *,'Grid2 min,max: ',minval(grid2_tmp  ),
+     &                            maxval(grid2_tmp  )
+        print *,' Err2 min,max: ',minval(grid2_err),maxval(grid2_err)
+        print *,' Err2    mean: ',sum(abs(grid2_err))/
+     &                            count(grid2_frac > .999)
+
+        !***
+        !*** Conservation Test
+        !***
+
+        print *,'Conservation:'
+        print *,'Grid1 Integral = ',
+     &          sum(grid1_array*grid1_area*grid1_frac)
+        print *,'Grid2 Integral = ',
+     &          sum(grid2_tmp  *grid2_area*grid2_frac)
+
+!-----------------------------------------------------------------------
+!
+!       write results to NetCDF file
+!
+!-----------------------------------------------------------------------
+
+        ncstat = nf_put_var_double(nc_outfile_id, nc_srcgradlat_id,
+     &                             grad1_lat)
+        call netcdf_error_handler(ncstat)
+
+        ncstat = nf_put_var_double(nc_outfile_id, nc_dstarray1a_id,
+     &                             grid2_tmp  )
+        call netcdf_error_handler(ncstat)
+
+        ncstat = nf_put_var_double(nc_outfile_id, nc_dsterror1a_id,
+     &                             grid2_err)
+        call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     for conservative mappings:
+!     test a second-order map from grid1 to grid2
+!
+!-----------------------------------------------------------------------
+
+        call remap(grid2_tmp,wts_map1,grid2_add_map1,grid1_add_map1,
+     &             grid1_array, src_grad1=grad1_lat,
+     &                          src_grad2=grad1_lon) 
+
+        select case (norm_opt)
+        case (norm_opt_none)
+          grid2_err = grid2_frac*grid2_area
+          where (grid2_err /= zero)
+            grid2_tmp = grid2_tmp/grid2_err
+          else where
+            grid2_tmp = zero
+          end where
+        case (norm_opt_frcarea)
+        case (norm_opt_dstarea)
+          where (grid2_frac /= zero)
+            grid2_tmp = grid2_tmp/grid2_frac
+          else where
+            grid2_tmp = zero
+          end where
+        end select
+
+        where (grid2_frac > .999)
+          grid2_err = (grid2_tmp - grid2_array)/grid2_array
+        elsewhere 
+          grid2_err = zero
+        end where
+
+        print *,'Second order mapping from grid1 to grid2:'
+        print *,'-----------------------------------------'
+        print *,'Grid1 min,max: ',minval(grid1_array),
+     &                            maxval(grid1_array)
+        print *,'Grid2 min,max: ',minval(grid2_tmp  ),
+     &                            maxval(grid2_tmp  )
+        print *,' Err2 min,max: ',minval(grid2_err),maxval(grid2_err)
+        print *,' Err2    mean: ',sum(abs(grid2_err))/
+     &                            count(grid2_frac > .999)
+
+        !***
+        !*** Conservation Test
+        !***
+
+        print *,'Conservation:'
+        print *,'Grid1 Integral = ',
+     &           sum(grid1_array*grid1_area*grid1_frac)
+        print *,'Grid2 Integral = ',
+     &           sum(grid2_tmp  *grid2_area*grid2_frac)
+
+!-----------------------------------------------------------------------
+!
+!       write results to NetCDF file
+!
+!-----------------------------------------------------------------------
+
+        ncstat = nf_put_var_double(nc_outfile_id, nc_srcgradlon_id,
+     &                             grad1_lon)
+        call netcdf_error_handler(ncstat)
+
+        ncstat = nf_put_var_double(nc_outfile_id, nc_dstarray2_id,
+     &                             grid2_tmp  )
+        call netcdf_error_handler(ncstat)
+
+        ncstat = nf_put_var_double(nc_outfile_id, nc_dsterror2_id,
+     &                             grid2_err)
+        call netcdf_error_handler(ncstat)
+
+      endif
+
+!-----------------------------------------------------------------------
+!
+!     close netCDF file
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_close(nc_outfile_id)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     calculate some statistics
+!
+!-----------------------------------------------------------------------
+
+      grid2_count = zero
+      grid2_tmp   = zero
+      grid2_err   = zero
+
+      print *,'number of sparse matrix entries ',num_links_map1
+      do n=1,num_links_map1
+        grid2_count(grid2_add_map1(n)) = 
+     &  grid2_count(grid2_add_map1(n)) + 1
+        if (wts_map1(1,n) > one .or. wts_map1(1,n) < zero) then
+          grid2_tmp(grid2_add_map1(n)) = 
+     &    grid2_tmp(grid2_add_map1(n)) + 1
+          grid2_err(grid2_add_map1(n)) = max(abs(wts_map1(1,n)),
+     &    grid2_err(grid2_add_map1(n)) )
+        endif
+      end do
+
+      do n=1,grid2_size
+        if (grid2_tmp(n) > zero) print *,n,grid2_err(n)
+      end do
+
+      imin = minval(grid2_count, mask=(grid2_count > 0))
+      imax = maxval(grid2_count)
+      idiff =  (imax - imin)/10 + 1
+      print *,'total number of dest cells ',grid2_size
+      print *,'number of cells participating in remap ',
+     &   count(grid2_count > zero)
+      print *,'min no of entries/row = ',imin
+      print *,'max no of entries/row = ',imax
+
+      imax = imin + idiff
+      do n=1,10
+        print *,'num of rows with entries between ',imin,' - ',imax-1,
+     &     count(grid2_count >= imin .and. grid2_count < imax)
+        imin = imin + idiff
+        imax = imax + idiff
+      end do
+
+!-----------------------------------------------------------------------
+
+      end program remap_test
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

+ 612 - 0
interpolation/scrip_sources/scrip_use.f

@@ -0,0 +1,612 @@
+!-----------------------------------------------------------------------
+!
+! This program interpolates a (time, latitude, longitude) netCDF file
+! using the interpolation weights computed by SCRIP :
+! Spherical Coordinate Remapping and Interpolation Package
+!
+! The arguments are passed through a namelist named scrip_use_in:
+!&remap_inputs
+!    remap_wgt   = 'Weights from SCRIP'
+!    infile      = 'input netcdf file'
+!    invertlat   = TRUE/FALSE : should the latitude order be reversed ?
+!    var         = 'netcdf variable name'
+!    fromregular = TRUE/FALSE : is the input grid regular ?
+!    outfile     = 'output netcdf file'
+!/
+!
+! History : Virginie Guemas - Initial version                   - 2011 
+!-----------------------------------------------------------------------
+
+      program scrip_use
+
+!-----------------------------------------------------------------------
+
+      use kinds_mod    ! defines common data types
+      use constants    ! defines common constants
+      use iounits      ! I/O unit manager
+      use netcdf_mod   ! netcdf I/O stuff
+      use grids        ! module containing grid info
+      use remap_vars   ! module containing remapping info
+      use remap_mod    ! module containing remapping routines
+      use remap_read   ! routines for reading remap files
+      use read_input_file ! routines to read file to interpolate
+
+      implicit none
+
+!-----------------------------------------------------------------------
+!
+!     input namelist variables
+!
+!-----------------------------------------------------------------------
+
+      character (char_len) :: 
+     &  remap_wgt,     ! filename containing remap data (map1)
+     &  infile,        ! filename containing input field
+     &  var,           ! var name in infile
+     &  outfile        ! filename to output interpolated field
+    
+      logical :: fromregular
+
+      namelist /remap_inputs/ remap_wgt, infile, invertlat, var,
+     &                        fromregular, outfile
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      character (char_len) :: 
+     &        map_name      ! name for mapping from grid1 to grid2
+
+      integer (kind=int_kind) ::    ! netCDF ids for files and arrays
+     &        ncstat, nc_outfile_id, 
+     &        nc_dstgrdcntrlat_id, nc_dstgrdcntrlon_id,
+     &        nc_dstarray1_id, nc_dstarray1a_id, nc_dstarray2_id,
+     &        nc_vartime_id, nc_srcarray_id
+
+      integer (kind=int_kind), dimension(:), allocatable ::
+     &        nc_grid2size_id, nc_grid1size_id
+
+!-----------------------------------------------------------------------
+
+      character (char_len) :: 
+     &          dim_name, attname    ! netCDF dimension name
+
+      integer (kind=int_kind) :: i,j,n,imin,imax,idiff,
+     &    ip1,im1,jp1,jm1,nx,ny, ! for computing bicub gradients
+     &    in,is,ie,iw,ine,inw,ise,isw,jatt,
+     &    iunit,jtime            ! unit number for namelist file
+
+      integer (kind=int_kind), dimension(:), allocatable ::
+     &    grid1_imask, grid2_imask, grid2_count
+
+      real (kind=dbl_kind) ::
+     &    delew, delns,     ! variables for computing bicub gradients
+     &    length            ! length scale for cosine hill test field
+
+      real (kind=dbl_kind), dimension(:), allocatable ::
+     &    grid1_tmp, 
+     &    grad1_lat, 
+     &    grad1_lon, 
+     &    grad1_latlon, 
+     &    grad1_lat_zero, 
+     &    grad1_lon_zero, 
+     &    grid2_array, 
+     &    grid2_err,
+     &    grid2_tmp,
+     &    grid1_mask_grid2
+
+!-----------------------------------------------------------------------
+!
+!     read namelist for file and mapping info
+!
+!-----------------------------------------------------------------------
+
+      call get_unit(iunit)
+      open(iunit, file='scrip_use_in', status='old', form='formatted')
+      read(iunit, nml=remap_inputs)
+      call release_unit(iunit)
+      write(*,nml=remap_inputs)
+
+!-----------------------------------------------------------------------
+!
+!     read remapping data
+!
+!-----------------------------------------------------------------------
+    
+      call read_remap(map_name, remap_wgt)
+
+!-----------------------------------------------------------------------
+!
+!     read input file
+!
+!-----------------------------------------------------------------------
+      
+      call read_input(infile, var)
+
+!-----------------------------------------------------------------------
+!
+!     allocate arrays
+!
+!-----------------------------------------------------------------------
+
+      allocate (grid1_tmp       (grid1_size),
+     &          grad1_lat       (grid1_size), 
+     &          grad1_lon       (grid1_size), 
+     &          grad1_latlon    (grid1_size),
+     &          grad1_lat_zero  (grid1_size), 
+     &          grad1_lon_zero  (grid1_size), 
+     &          grid1_imask     (grid1_size),
+     &          grid2_array     (grid2_size), 
+     &          grid2_err       (grid2_size),
+     &          grid2_tmp       (grid2_size),
+     &          grid2_imask     (grid2_size),
+     &          grid2_count     (grid2_size),
+     &          grid1_mask_grid2(grid2_size))
+
+      where (grid1_mask)
+        grid1_imask = 1
+      elsewhere
+        grid1_imask = 0
+      endwhere
+      where (grid2_mask)
+        grid2_imask = 1
+      elsewhere
+        grid2_imask = 0
+      endwhere
+
+!-----------------------------------------------------------------------
+!
+!     setup a NetCDF file for output
+!
+!-----------------------------------------------------------------------
+
+      !***
+      !*** create netCDF dataset 
+      !***
+
+      ncstat = nf_create (outfile, NF_CLOBBER, nc_outfile_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_outfile_id, NF_GLOBAL, 'title',
+     &                          len_trim(map_name), map_name)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid size dimensions
+      !***
+
+      allocate( nc_grid2size_id(grid2_rank+1))
+      allocate( nc_grid1size_id(grid1_rank+1))
+    
+      ncstat = nf_def_dim (nc_outfile_id, 'x',
+     &                       grid2_dims(1), nc_grid2size_id(1))
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_dim (nc_outfile_id, 'y',
+     &                       grid2_dims(2), nc_grid2size_id(2))
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_dim (nc_outfile_id, 'inx',
+     &                       grid1_dims(1), nc_grid1size_id(1))
+      call netcdf_error_handler(ncstat)
+
+      if ( grid1_dims(2) > 0 ) then
+        ncstat = nf_def_dim (nc_outfile_id, 'iny',
+     &                       grid1_dims(2), nc_grid1size_id(2))
+        call netcdf_error_handler(ncstat)
+      endif
+
+      !***
+      !*** Create time dimension
+      !***
+
+      ncstat = nf_def_dim (nc_outfile_id, 'time',
+     &                       NF_UNLIMITED,  nc_grid2size_id(3))
+      call netcdf_error_handler(ncstat)
+      nc_grid1size_id(grid1_rank+1)=nc_grid2size_id(3)
+      
+      ncstat = nf_def_var (nc_outfile_id, 'time',
+     &           NF_DOUBLE, 1, nc_grid2size_id(3), nc_vartime_id)
+      call netcdf_error_handler(ncstat)
+
+      if ( nc_time_id > -1 ) then
+        if ( natts >= 1 ) then 
+          do jatt = 1,natts
+            ncstat = nf_inq_attname(nc_infile_id, nc_time_id, jatt,
+     &                              attname)
+            call netcdf_error_handler(ncstat)
+
+            ncstat = nf_copy_att(nc_infile_id, nc_time_id, attname,
+     &                       nc_outfile_id, nc_vartime_id)
+          enddo
+       endif
+      endif
+  
+
+      !***
+      !*** define grid center latitude array
+      !***
+  
+      ncstat = nf_def_var (nc_outfile_id, 'latitude', 
+     &                     NF_DOUBLE, grid2_rank, nc_grid2size_id 
+     &                     (1:grid2_rank), nc_dstgrdcntrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_outfile_id, nc_dstgrdcntrlat_id, 
+     &                          'units', 7, 'degrees')
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid center longitude array
+      !***
+
+      ncstat = nf_def_var (nc_outfile_id, 'longitude', 
+     &                     NF_DOUBLE, grid2_rank, nc_grid2size_id 
+     &                     (1:grid2_rank), nc_dstgrdcntrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_outfile_id, nc_dstgrdcntrlon_id, 
+     &                          'units', 7, 'degrees')
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define source array
+      !***
+
+      ncstat = nf_def_var (nc_outfile_id, 'input',
+     &                     NF_DOUBLE, (grid1_rank+1), nc_grid1size_id,
+     &                     nc_srcarray_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_put_att_double (nc_outfile_id, nc_srcarray_id,
+     &                     'missing_value', NF_DOUBLE,1,dble(1e20))
+      call netcdf_error_handler(ncstat)
+      !***
+      !*** define destination arrays
+      !***
+
+      ncstat = nf_def_var (nc_outfile_id, var, 
+     &                     NF_DOUBLE, ( grid2_rank + 1 ), 
+     &                     nc_grid2size_id, nc_dstarray1_id )
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_put_att_double (nc_outfile_id, nc_dstarray1_id,
+     &                     'missing_value', NF_DOUBLE,1,dble(1e20))
+      call netcdf_error_handler(ncstat)
+
+      if ( nvaratts >= 1 ) then
+        do jatt = 1,nvaratts
+          ncstat = nf_inq_attname(nc_infile_id, nc_invar_id, jatt,
+     &                            attname)
+          call netcdf_error_handler(ncstat)
+          
+          if ((attname .ne. '_FillValue') .and. (attname .ne.
+     &        'missing_value') ) then 
+            ncstat = nf_copy_att(nc_infile_id, nc_invar_id, attname,
+     &                         nc_outfile_id, nc_dstarray1_id)
+            call netcdf_error_handler(ncstat)
+          endif
+        enddo
+      endif
+
+      if ( nglobatts >= 1 ) then
+        do jatt = 1,nglobatts
+          ncstat = nf_inq_attname(nc_infile_id, NF_GLOBAL, jatt,
+     &                            attname)
+          call netcdf_error_handler(ncstat)
+
+          ncstat = nf_copy_att(nc_infile_id, NF_GLOBAL, attname,
+     &                       nc_outfile_id, NF_GLOBAL)
+          call netcdf_error_handler(ncstat)
+        enddo
+      endif
+
+
+      ncstat = nf_close(nc_infile_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** end definition stage
+      !***
+
+      ncstat = nf_enddef(nc_outfile_id)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     write some grid info
+!
+!-----------------------------------------------------------------------
+
+      !***
+      !*** write grid center latitude array
+      !***
+
+      ncstat = nf_put_var_double(nc_outfile_id, nc_dstgrdcntrlat_id,
+     &                           grid2_center_lat*180./pi)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** write grid center longitude array
+      !***
+
+      ncstat = nf_put_var_double(nc_outfile_id, nc_dstgrdcntrlon_id,
+     &                           grid2_center_lon*180./pi)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     Interpolate the input mask
+!
+!-----------------------------------------------------------------------
+      call remap(grid1_mask_grid2, wts_map1, grid2_add_map1,
+     &           grid1_add_map1, real(grid1_imask,kind=dbl_kind))
+
+!-----------------------------------------------------------------------
+!
+!     Write time dimension
+!
+!-----------------------------------------------------------------------
+
+      do jtime = 1,ntime
+
+        ncstat = nf_put_vara_double(nc_outfile_id, nc_vartime_id,
+     &                              jtime, 1, time(jtime))
+        call netcdf_error_handler(ncstat)
+        
+!-----------------------------------------------------------------------
+!
+!     if bicubic or 2nd-order conservative, 3 gradients needed in space
+!
+!-----------------------------------------------------------------------
+
+      if ( fromregular .and. (map_type == map_type_bicubic .or.  
+     &    map_type == map_type_conserv) ) then
+
+        nx = grid1_dims(1)
+        ny = grid1_dims(2)
+
+        do n=1,grid1_size
+
+          grad1_lat(n) = zero
+          grad1_lon(n) = zero
+          grad1_latlon(n) = zero
+
+          if (grid1_mask(n)) then
+
+            delew = half
+            delns = half
+
+            j = (n-1)/nx + 1
+            i = n - (j-1)*nx
+
+            ip1 = i+1
+            im1 = i-1
+            jp1 = j+1
+            jm1 = j-1
+
+            if (ip1 > nx) ip1 = ip1 - nx
+            if (im1 < 1 ) im1 = nx
+            if (jp1 > ny) then
+              jp1 = j
+              delns = one
+            endif
+            if (jm1 < 1 ) then
+              jm1 = j
+              delns = one
+            endif
+
+            in  = (jp1-1)*nx + i
+            is  = (jm1-1)*nx + i
+            ie  = (j  -1)*nx + ip1
+            iw  = (j  -1)*nx + im1
+
+            ine = (jp1-1)*nx + ip1
+            inw = (jp1-1)*nx + im1
+            ise = (jm1-1)*nx + ip1
+            isw = (jm1-1)*nx + im1
+
+            !*** compute i-gradient
+
+            if (.not. grid1_mask(ie)) then
+              ie = n
+              delew = one
+            endif
+            if (.not. grid1_mask(iw)) then
+              iw = n
+              delew = one
+            endif
+ 
+            grad1_lat(n) = delew*(grid1_array(ie,jtime) - 
+     &                            grid1_array(iw,jtime))
+
+            !*** compute j-gradient
+
+            if (.not. grid1_mask(in)) then
+              in = n
+              delns = one
+            endif
+            if (.not. grid1_mask(is)) then
+              is = n
+              delns = one
+            endif
+ 
+            grad1_lon(n) = delns*(grid1_array(in,jtime) -
+     &                            grid1_array(is,jtime))
+
+            !*** compute ij-gradient
+
+            delew = half
+            if (jp1 == j .or. jm1 == j) then
+              delns = one
+            else 
+              delns = half
+            endif
+
+            if (.not. grid1_mask(ine)) then
+              if (in /= n) then
+                ine = in
+                delew = one
+              else if (ie /= n) then
+                ine = ie
+                inw = iw
+                if (inw == n) delew = one
+                delns = one
+              else
+                ine = n
+                inw = iw
+                delew = one
+                delns = one
+              endif
+            endif
+
+            if (.not. grid1_mask(inw)) then
+              if (in /= n) then
+                inw = in
+                delew = one
+              else if (iw /= n) then
+                inw = iw
+                ine = ie
+                if (ie == n) delew = one
+                delns = one
+              else
+                inw = n
+                ine = ie
+                delew = one
+                delns = one
+              endif
+            endif
+
+            grad1_lat_zero(n) = delew*(grid1_array(ine,jtime) -
+     &                                 grid1_array(inw,jtime))
+
+            if (.not. grid1_mask(ise)) then
+              if (is /= n) then
+                ise = is
+                delew = one
+              else if (ie /= n) then
+                ise = ie
+                isw = iw
+                if (isw == n) delew = one
+                delns = one
+              else
+                ise = n
+                isw = iw
+                delew = one
+                delns = one
+              endif
+            endif
+
+            if (.not. grid1_mask(isw)) then
+              if (is /= n) then
+                isw = is
+                delew = one
+              else if (iw /= n) then
+                isw = iw
+                ise = ie
+                if (ie == n) delew = one
+                delns = one
+              else
+                isw = n
+                ise = ie
+                delew = one
+                delns = one
+              endif
+            endif
+
+            grad1_lon_zero(n) = delew*(grid1_array(ise,jtime) -
+     &                                 grid1_array(isw,jtime))
+
+            grad1_latlon(n) = delns*(grad1_lat_zero(n) -
+     &                               grad1_lon_zero(n))
+
+          endif
+        enddo
+      endif
+
+!-----------------------------------------------------------------------
+!
+!     remapping from grid1 to grid2
+!
+!-----------------------------------------------------------------------
+
+      grad1_lat_zero = zero
+      grad1_lon_zero = zero
+
+      if (map_type == map_type_bicubic) then   
+        if (fromregular) then 
+          call remap(grid2_tmp, wts_map1, grid2_add_map1, 
+     &               grid1_add_map1, grid1_array(:,jtime), 
+     &                          src_grad1=grad1_lat,
+     &                          src_grad2=grad1_lon,
+     &                          src_grad3=grad1_latlon)
+        else 
+          print*,"Input grid is not regular, bicubic interpolation "
+          stop"              is not possible : We stop "
+        endif 
+      elseif (map_type == map_type_conserv .and. fromregular ) then
+        call remap(grid2_tmp,wts_map1,grid2_add_map1,grid1_add_map1,
+     &             grid1_array(:,jtime), src_grad1=grad1_lat,
+     &                          src_grad2=grad1_lon)
+      else    
+        call remap(grid2_tmp, wts_map1, grid2_add_map1, grid1_add_map1,
+     &             grid1_array(:,jtime))
+      endif
+
+      if (map_type == map_type_conserv) then
+        select case (norm_opt)
+        case (norm_opt_none)
+          grid2_err = grid2_frac*grid2_area
+          where (grid2_err /= zero)
+            grid2_tmp = grid2_tmp/grid2_err
+          else where
+            grid2_tmp = zero
+          end where
+        case (norm_opt_frcarea)
+        case (norm_opt_dstarea)
+          where (grid2_frac /= zero)
+            grid2_tmp = grid2_tmp/grid2_frac
+          else where
+            grid2_tmp = zero
+          end where
+        end select
+      end if
+
+!-----------------------------------------------------------------------
+!
+!     write results to NetCDF file
+!
+!-----------------------------------------------------------------------
+
+      where (grid2_imask<0.5 .or. grid1_mask_grid2 == 0. )
+        grid2_tmp=1e20
+      end where
+
+      ncstat = nf_put_vara_double(nc_outfile_id, nc_dstarray1_id,
+     &         (/1, 1, jtime/), (/grid2_dims, 1/), grid2_tmp  )
+      call netcdf_error_handler(ncstat)
+
+      where (grid1_imask<0.5)
+        grid1_array(:,jtime)=1e20
+      end where
+
+      ncstat = nf_put_vara_double(nc_outfile_id, nc_srcarray_id,
+     &         (/1, 1, jtime/), (/grid1_dims, 1/), grid1_array(:,jtime))
+      call netcdf_error_handler(ncstat)
+
+      enddo
+
+!-----------------------------------------------------------------------
+!
+!     close netCDF file
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_close(nc_outfile_id)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+
+      end program scrip_use
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

+ 682 - 0
interpolation/scrip_sources/scrip_use_extrap.f

@@ -0,0 +1,682 @@
+!-----------------------------------------------------------------------
+!
+! This script interpolates a (time, latitude, longitude) netcdf file 
+! using the interpolation weights computed by SCRIP :
+! Spherical Coordinate Remapping and Interpolation Package
+! and then extrapolates using the nearest neighbour method
+!
+! The arguments are passed through a namelist named scrip_use_in:
+!&remap_inputs
+!    remap_wgt   = 'Weights from SCRIP'
+!    infile      = 'input netcdf file'
+!    invertlat   = TRUE/FALSE : should the latitudes be reverted ?
+!    var         = 'netcdf variable name'
+!    fromregular = TRUE/FALSE : is the input grid regular ?
+!    outfile     = 'output netcdf file'
+!/
+!
+! History : Virginie Guemas - Initial version                   - 2011 
+!-----------------------------------------------------------------------
+      program scrip_use_extrap
+
+!-----------------------------------------------------------------------
+
+      use kinds_mod    ! defines common data types
+      use constants    ! defines common constants
+      use iounits      ! I/O unit manager
+      use netcdf_mod   ! netcdf I/O stuff
+      use grids        ! module containing grid info
+      use remap_vars   ! module containing remapping info
+      use remap_mod    ! module containing remapping routines
+      use remap_read   ! routines for reading remap files
+      use read_input_file ! routines to read file to interpolate
+
+      implicit none
+
+!-----------------------------------------------------------------------
+!
+!     input namelist variables
+!
+!-----------------------------------------------------------------------
+
+      character (char_len) ::
+     &  remap_wgt,     ! filename containing remap data (map1)
+     &  infile,        ! filename containing input field
+     &  var,           ! var name in infile
+     &  outfile        ! filename to output interpolated field
+
+      logical :: fromregular
+
+      namelist /remap_inputs/ remap_wgt, infile, invertlat, var,
+     &                        fromregular, outfile
+
+!-----------------------------------------------------------------------
+!
+!     local variables
+!
+!-----------------------------------------------------------------------
+
+      character (char_len) ::
+     &        map_name      ! name for mapping from grid1 to grid2
+
+      integer (kind=int_kind) ::    ! netCDF ids for files and arrays
+     &        ncstat, nc_outfile_id,
+     &        nc_dstgrdcntrlat_id, nc_dstgrdcntrlon_id,
+     &        nc_dstarray1_id, nc_dstarray1a_id, nc_dstarray2_id,
+     &        nc_vartime_id, nc_srcarray_id
+
+      integer (kind=int_kind), dimension(:), allocatable ::
+     &        nc_grid2size_id, nc_grid1size_id
+
+!-----------------------------------------------------------------------
+
+      character (char_len) ::
+     &          dim_name, attname    ! netCDF dimension name
+
+      integer (kind=int_kind) :: i,j,n,imin,imax,idiff,
+     &    ip1,im1,jp1,jm1,nx,ny, ! for computing bicub gradients
+     &    in,is,ie,iw,ine,inw,ise,isw,jatt,
+     &    iunit,jtime            ! unit number for namelist file
+
+      integer (kind=int_kind), dimension(:), allocatable ::
+     &    grid1_imask, grid2_imask, grid2_count,neighbour
+
+      real (kind=dbl_kind) ::
+     &    delew, delns,     ! variables for computing bicub gradients
+     &    length,           ! length scale for cosine hill test field
+     &    dist_min,distance ! Extrapolation
+
+      real (kind=dbl_kind), dimension(:), allocatable ::
+     &    grid1_tmp,
+     &    grad1_lat,
+     &    grad1_lon,
+     &    grad1_latlon,
+     &    grad1_lat_zero,
+     &    grad1_lon_zero,
+     &    grid2_array,
+     &    grid2_err,
+     &    grid2_tmp,
+     &    grid1_mask_grid2,
+     &    dstcoslon,
+     &    dstcoslat,
+     &    srccoslon,
+     &    srccoslat,
+     &    dstsinlon,
+     &    dstsinlat,
+     &    srcsinlon,
+     &    srcsinlat
+
+!-----------------------------------------------------------------------
+!
+!     read namelist for file and mapping info
+!
+!-----------------------------------------------------------------------
+
+      call get_unit(iunit)
+      open(iunit, file='scrip_use_in', status='old', form='formatted')
+      read(iunit, nml=remap_inputs)
+      call release_unit(iunit)
+      write(*,nml=remap_inputs)
+
+!-----------------------------------------------------------------------
+!
+!     read remapping data
+!
+!-----------------------------------------------------------------------
+
+      call read_remap(map_name, remap_wgt)
+
+!-----------------------------------------------------------------------
+!
+!     read input file
+!
+!-----------------------------------------------------------------------
+
+      call read_input(infile, var)
+
+!-----------------------------------------------------------------------
+!
+!     allocate arrays
+!
+!-----------------------------------------------------------------------
+
+      allocate (grid1_tmp       (grid1_size),
+     &          grad1_lat       (grid1_size),
+     &          grad1_lon       (grid1_size),
+     &          grad1_latlon    (grid1_size),
+     &          grad1_lat_zero  (grid1_size),
+     &          grad1_lon_zero  (grid1_size),
+     &          grid1_imask     (grid1_size),
+     &          grid2_array     (grid2_size),
+     &          grid2_err       (grid2_size),
+     &          grid2_tmp       (grid2_size),
+     &          grid2_imask     (grid2_size),
+     &          grid2_count     (grid2_size),
+     &          grid1_mask_grid2(grid2_size))
+
+      where (grid1_mask)
+        grid1_imask = 1
+      elsewhere
+        grid1_imask = 0
+      endwhere
+      where (grid2_mask)
+        grid2_imask = 1
+      elsewhere
+        grid2_imask = 0
+      endwhere
+
+      allocate (srccoslon       (grid1_size),
+     &          srcsinlon       (grid1_size),
+     &          srccoslat       (grid1_size),
+     &          srcsinlat       (grid1_size),
+     &          dstcoslon       (grid2_size),
+     &          dstsinlon       (grid2_size),
+     &          dstcoslat       (grid2_size),
+     &          dstsinlat       (grid2_size),
+     &          neighbour       (grid2_size))
+
+      !***
+      !*** precompute trig terms for the spherical law-of-cosines
+      !*** distance used by the nearest-neighbour search below.
+      !*** NOTE(fix): grid center coordinates are stored in radians
+      !*** internally (they are converted with *180./pi when written
+      !*** out below), so they must NOT be rescaled by pi/180 here.
+      !***
+
+      dstcoslon = cos(grid2_center_lon)
+      dstsinlon = sin(grid2_center_lon)
+      srccoslon = cos(grid1_center_lon)
+      srcsinlon = sin(grid1_center_lon)
+
+      dstcoslat = cos(grid2_center_lat)
+      dstsinlat = sin(grid2_center_lat)
+      srccoslat = cos(grid1_center_lat)
+      srcsinlat = sin(grid1_center_lat)
+
+!-----------------------------------------------------------------------
+!
+!     setup a NetCDF file for output
+!
+!-----------------------------------------------------------------------
+
+      !***
+      !*** create netCDF dataset
+      !***
+
+      ncstat = nf_create (outfile, NF_CLOBBER, nc_outfile_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_outfile_id, NF_GLOBAL, 'title',
+     &                          len_trim(map_name), map_name)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid size dimensions
+      !***
+
+      allocate( nc_grid2size_id(grid2_rank+1))
+      allocate( nc_grid1size_id(grid1_rank+1))
+
+      ncstat = nf_def_dim (nc_outfile_id, 'x',
+     &                       grid2_dims(1), nc_grid2size_id(1))
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_dim (nc_outfile_id, 'y',
+     &                       grid2_dims(2), nc_grid2size_id(2))
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_dim (nc_outfile_id, 'inx',
+     &                       grid1_dims(1), nc_grid1size_id(1))
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_def_dim (nc_outfile_id, 'iny',
+     &                       grid1_dims(2), nc_grid1size_id(2))
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** Create time dimension
+      !***
+
+      ncstat = nf_def_dim (nc_outfile_id, 'time',
+     &                       NF_UNLIMITED,  nc_grid2size_id(3))
+      call netcdf_error_handler(ncstat)
+      nc_grid1size_id(3)=nc_grid2size_id(3)
+
+      ncstat = nf_def_var (nc_outfile_id, 'time',
+     &           NF_DOUBLE, 1, nc_grid2size_id(3), nc_vartime_id)
+      call netcdf_error_handler(ncstat)
+
+      if ( nc_time_id > -1 ) then
+        if ( natts >= 1 ) then
+          do jatt = 1,natts
+            ncstat = nf_inq_attname(nc_infile_id, nc_time_id, jatt,
+     &                              attname)
+            call netcdf_error_handler(ncstat)
+
+            ncstat = nf_copy_att(nc_infile_id, nc_time_id, attname,
+     &                       nc_outfile_id, nc_vartime_id)
+            !*** check the copy status like every other netCDF call
+            !*** (was previously unchecked)
+            call netcdf_error_handler(ncstat)
+          enddo
+       endif
+      endif
+
+      !***
+      !*** define grid center latitude array
+      !***
+
+      ncstat = nf_def_var (nc_outfile_id, 'latitude',
+     &                     NF_DOUBLE, grid2_rank, nc_grid2size_id
+     &                     (1:grid2_rank), nc_dstgrdcntrlat_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_outfile_id, nc_dstgrdcntrlat_id,
+     &                          'units', 7, 'degrees')
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define grid center longitude array
+      !***
+
+      ncstat = nf_def_var (nc_outfile_id, 'longitude',
+     &                     NF_DOUBLE, grid2_rank, nc_grid2size_id
+     &                     (1:grid2_rank), nc_dstgrdcntrlon_id)
+      call netcdf_error_handler(ncstat)
+
+      ncstat = nf_put_att_text (nc_outfile_id, nc_dstgrdcntrlon_id,
+     &                          'units', 7, 'degrees')
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define source array
+      !***
+
+      ncstat = nf_def_var (nc_outfile_id, 'input',
+     &                     NF_DOUBLE, (grid1_rank+1), nc_grid1size_id,
+     &                     nc_srcarray_id)
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_put_att_double (nc_outfile_id, nc_srcarray_id,
+     &                     'missing_value', NF_DOUBLE,1,dble(1e20))
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** define destination arrays
+      !***
+
+      ncstat = nf_def_var (nc_outfile_id, var,
+     &                     NF_DOUBLE, ( grid2_rank + 1 ),
+     &                     nc_grid2size_id, nc_dstarray1_id )
+      call netcdf_error_handler(ncstat)
+      ncstat = nf_put_att_double (nc_outfile_id, nc_dstarray1_id,
+     &                     'missing_value', NF_DOUBLE,1,dble(1e20))
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** copy the input variable attributes, except the fill/missing
+      !*** values which are imposed above
+      !***
+
+      if ( nvaratts >= 1 ) then
+        do jatt = 1,nvaratts
+          ncstat = nf_inq_attname(nc_infile_id, nc_invar_id, jatt,
+     &                            attname)
+          call netcdf_error_handler(ncstat)
+
+          if ((attname .ne. '_FillValue') .and. (attname .ne.
+     &        'missing_value') ) then
+            ncstat = nf_copy_att(nc_infile_id, nc_invar_id, attname,
+     &                         nc_outfile_id, nc_dstarray1_id)
+            call netcdf_error_handler(ncstat)
+          endif
+        enddo
+      endif
+
+      !***
+      !*** copy the global attributes
+      !***
+
+      if ( nglobatts >= 1 ) then
+        do jatt = 1,nglobatts
+          ncstat = nf_inq_attname(nc_infile_id, NF_GLOBAL, jatt,
+     &                            attname)
+          call netcdf_error_handler(ncstat)
+
+          ncstat = nf_copy_att(nc_infile_id, NF_GLOBAL, attname,
+     &                       nc_outfile_id, NF_GLOBAL)
+          call netcdf_error_handler(ncstat)
+        enddo
+      endif
+
+
+      ncstat = nf_close(nc_infile_id)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** end definition stage
+      !***
+
+      ncstat = nf_enddef(nc_outfile_id)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     write some grid info
+!
+!-----------------------------------------------------------------------
+
+      !***
+      !*** write grid center latitude array (radians -> degrees)
+      !***
+
+      ncstat = nf_put_var_double(nc_outfile_id, nc_dstgrdcntrlat_id,
+     &                           grid2_center_lat*180./pi)
+      call netcdf_error_handler(ncstat)
+
+      !***
+      !*** write grid center longitude array (radians -> degrees)
+      !***
+
+      ncstat = nf_put_var_double(nc_outfile_id, nc_dstgrdcntrlon_id,
+     &                           grid2_center_lon*180./pi)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     Interpolate the input mask
+!
+!-----------------------------------------------------------------------
+
+      call remap(grid1_mask_grid2, wts_map1, grid2_add_map1,
+     &           grid1_add_map1, real(grid1_imask,kind=dbl_kind))
+
+!-----------------------------------------------------------------------
+!
+!     Prepare extrapolation
+!
+!     For every destination point that is masked out or receives no
+!     valid source contribution, find the index of the nearest
+!     unmasked source point (brute force: O(grid1_size*grid2_size)).
+!     neighbour(n) = 0 means no unmasked source point exists at all.
+!
+!-----------------------------------------------------------------------
+
+      neighbour = 0
+      do n = 1,grid2_size
+        if ( grid2_imask(n)<0.5 .or. grid1_mask_grid2(n) == 0. ) then
+          dist_min     = 1e20
+
+          do j = 1,grid1_size
+
+            if ( grid1_imask(j) == 1 ) then
+              !*** clamp to [-1,1]: rounding can push the dot product
+              !*** marginally outside the acos domain and yield NaN
+              distance  = acos(max(-one, min(one,
+     &          dstcoslat(n)*srccoslat(j)*
+     &          (dstcoslon(n)*srccoslon(j)+dstsinlon(n)*srcsinlon(j))
+     &          +dstsinlat(n)*srcsinlat(j))))
+              if ( distance < dist_min ) then
+                dist_min     = distance
+                neighbour(n) = j
+              end if
+            end if
+
+          end do
+
+        end if
+      end do
+
+!-----------------------------------------------------------------------
+!
+!     Write time dimension
+!
+!-----------------------------------------------------------------------
+
+      do jtime = 1,ntime
+
+        ncstat = nf_put_vara_double(nc_outfile_id, nc_vartime_id,
+     &                              jtime, 1, time(jtime))
+        call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+!
+!     if bicubic or 2nd-order conservative, 3 gradients needed in space
+!
+!-----------------------------------------------------------------------
+
+      if ( fromregular .and. (map_type == map_type_bicubic .or.
+     &    map_type == map_type_conserv) ) then
+
+        nx = grid1_dims(1)
+        ny = grid1_dims(2)
+
+        do n=1,grid1_size
+
+          grad1_lat(n) = zero
+          grad1_lon(n) = zero
+          grad1_latlon(n) = zero
+
+          if (grid1_mask(n)) then
+
+            delew = half
+            delns = half
+
+            !*** 1-d address -> (i,j); east-west wraps around,
+            !*** north-south is one-sided at the grid edges
+            j = (n-1)/nx + 1
+            i = n - (j-1)*nx
+
+            ip1 = i+1
+            im1 = i-1
+            jp1 = j+1
+            jm1 = j-1
+
+            if (ip1 > nx) ip1 = ip1 - nx
+            if (im1 < 1 ) im1 = nx
+            if (jp1 > ny) then
+              jp1 = j
+              delns = one
+            endif
+            if (jm1 < 1 ) then
+              jm1 = j
+              delns = one
+            endif
+
+            in  = (jp1-1)*nx + i
+            is  = (jm1-1)*nx + i
+            ie  = (j  -1)*nx + ip1
+            iw  = (j  -1)*nx + im1
+
+            ine = (jp1-1)*nx + ip1
+            inw = (jp1-1)*nx + im1
+            ise = (jm1-1)*nx + ip1
+            isw = (jm1-1)*nx + im1
+
+            !*** compute i-gradient
+
+            if (.not. grid1_mask(ie)) then
+              ie = n
+              delew = one
+            endif
+            if (.not. grid1_mask(iw)) then
+              iw = n
+              delew = one
+            endif
+
+            grad1_lat(n) = delew*(grid1_array(ie,jtime) -
+     &                            grid1_array(iw,jtime))
+
+            !*** compute j-gradient
+
+            if (.not. grid1_mask(in)) then
+              in = n
+              delns = one
+            endif
+            if (.not. grid1_mask(is)) then
+              is = n
+              delns = one
+            endif
+
+            grad1_lon(n) = delns*(grid1_array(in,jtime) -
+     &                            grid1_array(is,jtime))
+
+            !*** compute ij-gradient
+
+            delew = half
+            if (jp1 == j .or. jm1 == j) then
+              delns = one
+            else
+              delns = half
+            endif
+
+            !*** fall back to already-validated neighbours when a
+            !*** corner point is masked
+            if (.not. grid1_mask(ine)) then
+              if (in /= n) then
+                ine = in
+                delew = one
+              else if (ie /= n) then
+                ine = ie
+                inw = iw
+                if (inw == n) delew = one
+                delns = one
+              else
+                ine = n
+                inw = iw
+                delew = one
+                delns = one
+              endif
+            endif
+
+            if (.not. grid1_mask(inw)) then
+              if (in /= n) then
+                inw = in
+                delew = one
+              else if (iw /= n) then
+                inw = iw
+                ine = ie
+                if (ie == n) delew = one
+                delns = one
+              else
+                inw = n
+                ine = ie
+                delew = one
+                delns = one
+              endif
+            endif
+
+            grad1_lat_zero(n) = delew*(grid1_array(ine,jtime) -
+     &                                 grid1_array(inw,jtime))
+
+            if (.not. grid1_mask(ise)) then
+              if (is /= n) then
+                ise = is
+                delew = one
+              else if (ie /= n) then
+                ise = ie
+                isw = iw
+                if (isw == n) delew = one
+                delns = one
+              else
+                ise = n
+                isw = iw
+                delew = one
+                delns = one
+              endif
+            endif
+
+            if (.not. grid1_mask(isw)) then
+              if (is /= n) then
+                isw = is
+                delew = one
+              else if (iw /= n) then
+                isw = iw
+                ise = ie
+                if (ie == n) delew = one
+                delns = one
+              else
+                isw = n
+                ise = ie
+                delew = one
+                delns = one
+              endif
+            endif
+
+            grad1_lon_zero(n) = delew*(grid1_array(ise,jtime) -
+     &                                 grid1_array(isw,jtime))
+
+            grad1_latlon(n) = delns*(grad1_lat_zero(n) -
+     &                               grad1_lon_zero(n))
+
+          endif
+        enddo
+      endif
+
+!-----------------------------------------------------------------------
+!
+!     remapping from grid1 to grid2
+!
+!-----------------------------------------------------------------------
+
+      grad1_lat_zero = zero
+      grad1_lon_zero = zero
+
+      if (map_type == map_type_bicubic) then
+        if (fromregular) then
+          call remap(grid2_tmp, wts_map1, grid2_add_map1,
+     &               grid1_add_map1, grid1_array(:,jtime),
+     &                          src_grad1=grad1_lat,
+     &                          src_grad2=grad1_lon,
+     &                          src_grad3=grad1_latlon)
+        else
+          print*,"Input grid is not regular, bicubic interpolation "
+          stop"              is not possible : We stop "
+        endif
+      elseif (map_type == map_type_conserv .and. fromregular ) then
+        call remap(grid2_tmp,wts_map1,grid2_add_map1,grid1_add_map1,
+     &             grid1_array(:,jtime), src_grad1=grad1_lat,
+     &                          src_grad2=grad1_lon)
+      else
+        call remap(grid2_tmp, wts_map1, grid2_add_map1, grid1_add_map1,
+     &             grid1_array(:,jtime))
+      endif
+
+      !*** renormalize conservative remapping results according to the
+      !*** normalization option stored in the weights file
+      if (map_type == map_type_conserv) then
+        select case (norm_opt)
+        case (norm_opt_none)
+          grid2_err = grid2_frac*grid2_area
+          where (grid2_err /= zero)
+            grid2_tmp = grid2_tmp/grid2_err
+          else where
+            grid2_tmp = zero
+          end where
+        case (norm_opt_frcarea)
+        case (norm_opt_dstarea)
+          where (grid2_frac /= zero)
+            grid2_tmp = grid2_tmp/grid2_frac
+          else where
+            grid2_tmp = zero
+          end where
+        end select
+      end if
+
+!-----------------------------------------------------------------------
+!
+!     extrapolation
+!
+!     fill masked/uncovered destination points with the value of the
+!     nearest unmasked source point; 1e20 where none was found
+!
+!-----------------------------------------------------------------------
+
+      do n = 1,grid2_size
+        if ( grid2_imask(n)<0.5 .or. grid1_mask_grid2(n) == 0. ) then
+
+          if ( neighbour(n) > 0 ) then
+            grid2_tmp(n) = grid1_array(neighbour(n),jtime)
+          else
+            grid2_tmp(n) = 1e20
+          end if
+
+        end if
+      end do
+
+!-----------------------------------------------------------------------
+!
+!     write results to NetCDF file
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_put_vara_double(nc_outfile_id, nc_dstarray1_id,
+     &         (/1, 1, jtime/), (/grid2_dims, 1/), grid2_tmp  )
+      call netcdf_error_handler(ncstat)
+
+      where (grid1_imask<0.5)
+        grid1_array(:,jtime)=1e20
+      end where
+
+      ncstat = nf_put_vara_double(nc_outfile_id, nc_srcarray_id,
+     &         (/1, 1, jtime/), (/grid1_dims, 1/), grid1_array(:,jtime))
+      call netcdf_error_handler(ncstat)
+
+      enddo
+
+!-----------------------------------------------------------------------
+!
+!     close netCDF file
+!
+!-----------------------------------------------------------------------
+
+      ncstat = nf_close(nc_outfile_id)
+      call netcdf_error_handler(ncstat)
+
+!-----------------------------------------------------------------------
+
+      end program scrip_use_extrap
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

+ 343 - 0
interpolation/scrip_sources/timers.f

@@ -0,0 +1,343 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!
+!     This module uses F90 cpu time routines to allow setting of
+!     multiple CPU timers.
+!
+!-----------------------------------------------------------------------
+!
+!     CVS:$Id: timers.f,v 1.2 2000/04/19 21:56:26 pwjones Exp $
+!
+!     Copyright (c) 1997, 1998 the Regents of the University of 
+!       California.
+!
+!     This software and ancillary information (herein called software) 
+!     called SCRIP is made available under the terms described here.  
+!     The software has been approved for release with associated 
+!     LA-CC Number 98-45.
+!
+!     Unless otherwise indicated, this software has been authored
+!     by an employee or employees of the University of California,
+!     operator of the Los Alamos National Laboratory under Contract
+!     No. W-7405-ENG-36 with the U.S. Department of Energy.  The U.S.
+!     Government has rights to use, reproduce, and distribute this
+!     software.  The public may copy and use this software without
+!     charge, provided that this Notice and any statement of authorship
+!     are reproduced on all copies.  Neither the Government nor the
+!     University makes any warranty, express or implied, or assumes
+!     any liability or responsibility for the use of this software.
+!
+!     If software is modified to produce derivative works, such modified
+!     software should be clearly marked, so as not to confuse it with 
+!     the version available from Los Alamos National Laboratory.
+!
+!***********************************************************************
+
+      module timers
+
+!-----------------------------------------------------------------------
+
+      use kinds_mod
+
+      implicit none
+
+      integer (kind=int_kind), parameter ::  
+     &     max_timers = 99  ! max number of timers allowed
+
+      integer (kind=int_kind), save :: 
+     &     cycles_max       ! max value of clock allowed by system
+
+      integer (kind=int_kind), dimension(max_timers), save :: 
+     &     cycles1,         ! cycle number at start for each timer
+     &     cycles2          ! cycle number at stop  for each timer
+
+      real (kind=real_kind), save ::  
+     &     clock_rate       ! clock_rate in seconds for each cycle
+
+      real (kind=real_kind), dimension(max_timers), save ::  
+     &     cputime          ! accumulated cpu time in each timer
+
+      character (len=8), dimension(max_timers), save ::  
+     &     status           ! timer status string
+
+!***********************************************************************
+
+      contains
+
+!***********************************************************************
+
+      subroutine timer_check(timer)
+
+!-----------------------------------------------------------------------
+!
+!     This routine checks a given timer.  This is primarily used to
+!     periodically accumulate time in the timer to prevent timer cycles
+!     from wrapping around max_cycles.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     Input Variables:
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), intent(in) ::  
+     &    timer            ! timer number
+
+!-----------------------------------------------------------------------
+
+      ! Stopping and restarting a running timer folds the cycles elapsed
+      ! so far into cputime, so the system clock counter cannot wrap more
+      ! than once between two checks.  Stopped timers are left untouched.
+      if (status(timer) .eq. 'running') then
+        call timer_stop (timer)
+        call timer_start(timer)
+      endif
+
+!-----------------------------------------------------------------------
+
+      end subroutine timer_check
+
+!***********************************************************************
+
+      subroutine timer_clear(timer)
+
+!-----------------------------------------------------------------------
+!
+!     This routine resets a given timer.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     Input Variables:
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), intent(in) ::  
+     &    timer            ! timer number
+
+!-----------------------------------------------------------------------
+
+      ! Note: only the accumulated time is reset; the timer's
+      ! 'running'/'stopped' status is left unchanged, so a running
+      ! timer keeps running after being cleared.
+      cputime(timer) = 0.0_real_kind  ! clear the timer
+
+!-----------------------------------------------------------------------
+
+      end subroutine timer_clear
+
+!***********************************************************************
+
+      function timer_get(timer)
+
+!-----------------------------------------------------------------------
+!
+!     This routine returns the result of a given timer.  This can be
+!     called instead of timer_print so that the calling routine can 
+!     print it in desired format.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     Input Variables:
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), intent(in) ::  
+     &    timer            ! timer number
+
+!-----------------------------------------------------------------------
+!
+!     Output Variables:
+!
+!-----------------------------------------------------------------------
+
+      real (kind=real_kind) ::  
+     &     timer_get   ! accumulated cputime in given timer
+
+!-----------------------------------------------------------------------
+
+      ! For a running timer, stop and restart it so the cycles elapsed
+      ! so far are folded into cputime before it is reported.
+      if (status(timer) .eq. 'stopped') then
+        timer_get = cputime(timer)
+      else
+        call timer_stop(timer)
+        timer_get = cputime(timer)
+        call timer_start(timer)
+      endif
+
+!-----------------------------------------------------------------------
+
+      end function timer_get
+
+!***********************************************************************
+
+      subroutine timer_print(timer)
+
+!-----------------------------------------------------------------------
+!
+!     This routine prints the accumulated cpu time in given timer.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     Input Variables:
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), intent(in) ::  
+     &    timer            ! timer number
+
+!-----------------------------------------------------------------------
+
+      ! Output: writes " CPU time for timer NNN: <seconds>" to standard
+      ! output.  A running timer is stopped, printed, then restarted.
+
+      !--- 
+      !--- print the cputime accumulated for timer 
+      !--- make sure timer is stopped
+      !---
+
+      if (status(timer) .eq. 'stopped') then
+        write(*,"(' CPU time for timer',i3,':',1p,e16.8)")  
+     &       timer,cputime(timer)
+      else
+        call timer_stop(timer)
+        write(*,"(' CPU time for timer',i3,':',1p,e16.8)")  
+     &       timer,cputime(timer)
+        call timer_start(timer)
+      endif
+
+!-----------------------------------------------------------------------
+
+      end subroutine timer_print
+
+!***********************************************************************
+
+      subroutine timer_start(timer)
+
+!-----------------------------------------------------------------------
+!
+!     This routine starts a given timer.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     Input Variables:
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), intent(in) ::  
+     &    timer            ! timer number
+
+!-----------------------------------------------------------------------
+
+      !---
+      !--- Start the timer and change timer status.
+      !--- (starting an already-running timer is a no-op)
+      !---
+
+      if (status(timer) .eq. 'stopped') then
+        call system_clock(count=cycles1(timer))
+        status(timer) = 'running'
+      endif
+
+!-----------------------------------------------------------------------
+
+      end subroutine timer_start
+
+!***********************************************************************
+
+      subroutine timer_stop(timer)
+
+!-----------------------------------------------------------------------
+!
+!     This routine stops a given timer.
+!
+!-----------------------------------------------------------------------
+
+!-----------------------------------------------------------------------
+!
+!     Input Variables:
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind), intent(in) ::  
+     &    timer            ! timer number
+
+!-----------------------------------------------------------------------
+
+      if (status(timer) .eq. 'running') then
+
+        !---
+        !--- Stop the desired timer.
+        !---
+
+        call system_clock(count=cycles2(timer))
+
+        !---
+        !--- check and correct for cycle wrapping
+        !--- (assumes the counter wrapped at most once since the timer
+        !---  was started; timer_check exists to enforce this)
+        !---
+
+        if (cycles2(timer) .ge. cycles1(timer)) then
+          cputime(timer) = cputime(timer) + clock_rate*  
+     &                     (cycles2(timer) - cycles1(timer))
+        else
+          cputime(timer) = cputime(timer) + clock_rate*  
+     &                (cycles2(timer) - cycles1(timer) + cycles_max)
+        endif
+
+        !---
+        !--- Change timer status.
+        !---
+
+        status(timer)='stopped'
+
+      endif
+
+!-----------------------------------------------------------------------
+
+      end subroutine timer_stop
+
+!***********************************************************************
+
+      subroutine timers_init
+
+!-----------------------------------------------------------------------
+!
+!     This routine initializes some machine parameters necessary for
+!     computing cpu time from F90 intrinsics.
+!
+!-----------------------------------------------------------------------
+
+      integer (kind=int_kind) :: cycles ! count rate return by sys_clock
+
+!-----------------------------------------------------------------------
+
+      !---
+      !--- Initialize timer arrays and clock_rate.
+      !---
+
+      clock_rate = 0.0_real_kind
+      cycles1    = 0
+      cycles2    = 0
+      cputime    = 0.0_real_kind
+      status     = 'stopped'
+
+      !---
+      !--- Call F90 intrinsic system_clock to determine clock rate
+      !--- and maximum cycles.  If no clock available, print message.
+      !---
+
+      call system_clock(count_rate=cycles, count_max=cycles_max)
+
+      if (cycles /= 0) then
+        ! NOTE(review): real(cycles) converts at the default real kind;
+        ! real(cycles, real_kind) would be needed to guarantee full
+        ! real_kind precision -- confirm intended.
+        clock_rate = 1.0_real_kind/real(cycles)
+      else
+        clock_rate = 0.0_real_kind
+        print *, '--- No system clock available ---'
+      endif
+
+!-----------------------------------------------------------------------
+
+      end subroutine timers_init
+
+!***********************************************************************
+
+      end module timers
+
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

+ 69 - 0
interpolation/vertextrap.py

@@ -0,0 +1,69 @@
+# This function extrapolates vertically a (x,y,z) field extending
+# linearly the vertical slope of the profile.
+#
+# Usage   : python vertextrap.py <input file> <input variable name> 
+#    <input grid description file> <input depth in grid file>
+#    <output file>
+#
+# History : Virginie Guemas - Initial version     -  March 2014
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+import cdms2 as cdms
+import sys,string
+from cdms2 import MV
+import numpy as N
+from numpy import ma
+#
+# 1. Input arguments
+# ===================
+# 
+# Input file and var names :
+# ---------------------------
+fileIN=sys.argv[1]
+varIN=sys.argv[2]
+#
+# Meshmask file : 
+# ----------------
+fileM=sys.argv[3]
+varD=sys.argv[4]
+#
+# Output file name :
+# -------------------
+fileOUT=sys.argv[5]
+#
+# 2. Get the input files 
+# =======================
+#
+f=cdms.open(fileIN)
+var0_a=f(varIN,squeeze=1)
+f.close()
+# NOTE(review): assumes the variable carries a (z,y,x) mask array; if the
+# NetCDF variable has no missing values, .mask may be the scalar False and
+# the per-level slicing below would fail -- TODO confirm inputs.
+mask3d=var0_a.mask
+(lz0,ly0,lx0)=var0_a.shape
+#
+f=cdms.open(fileM)
+dep=f(varD,squeeze=1)
+f.close()
+#
+# Start from the input field where valid, zero elsewhere.
+varout=N.zeros((lz0,ly0,lx0))
+varout=varout.astype('d')
+varout=N.where(mask3d==False,var0_a,varout)
+maskout=N.zeros((lz0,ly0,lx0))
+
+# Fill each fully-masked level by extending the vertical slope of the two
+# levels above it (linear extrapolation in depth).
+# NOTE(review): assumes the two uppermost levels are never fully masked,
+# otherwise jz-1 / jz-2 wrap around to the bottom of the array -- confirm.
+for jz in N.arange(lz0) :
+  if mask3d[jz,:,:].mean() == 1 :
+    ratio=(dep[jz]-dep[jz-1])/(dep[jz-1]-dep[jz-2])
+    varout[jz,:,:]=varout[jz-1,:,:]+(varout[jz-1,:,:]-varout[jz-2,:,:])*ratio
+    maskout[jz,:,:]=maskout[jz-1,:,:]
+  else :
+    maskout[jz,:,:]=mask3d[jz,:,:]
+
+# Repack as a cdms variable: points still masked get the 1e20 fill value.
+varout=cdms.createVariable(varout,id=var0_a.id)
+varout=MV.where(maskout<0.5,varout,1e20)
+varout.getAxis(0)[:]=var0_a.getAxis(0)[:]
+varout.getAxis(0).id='z'
+varout.getAxis(1).id='y'
+varout.getAxis(2).id='x'
+varout.id=var0_a.id
+
+h=cdms.open(fileOUT,'w')
+h.write(varout)
+h.close()

+ 20 - 0
prep_nem_forcings/DFS5.2/README

@@ -0,0 +1,20 @@
+François Massonnet
+June 2016
+
+The three scripts
+
+1) preprocess.bash
+2) mkpert.R
+3) postprocess.bash
+
+can be used to generate as many perturbed versions of the Drakkar Forcing Set 5.2 (DFS5.2) as desired, respecting the temporal and spatial covariances of the eight variables of the forcing.
+
+The generation of perturbations is based on M. Meinvielle's thesis (http://www.theses.fr/2011GRENU053)
+https://hal.inria.fr/file/index/docid/681484/filename/24564_MEINVIELLE_2012_archivage.pdf
+
+See the wiki page:
+
+https://earth.bsc.es/wiki/doku.php?id=tools:perturbation_of_atmospheric_forcing
+
+for more information about the strategy to create the perturbations.
+

+ 188 - 0
prep_nem_forcings/DFS5.2/mkpert.R

@@ -0,0 +1,188 @@
+rm(list = ls())
+graphics.off()
+library(ncdf)
+
+# =============================================================================
+# Author - François Massonnet
+#
+# Date   - 02 February 2016, Candelaria! 
+#          https://es.wikipedia.org/wiki/Fiesta_de_la_Candelaria
+#        - 22 June 2016: enhancement to make consistent perturbations across
+#          several variables
+#        - 1st July 2016: Simplification. It is indeed not necessary to do 
+#          a SVD here. By multiplying the available perturbations by
+#          z / sqrt(N - 1) we create data with the same cov. matrix.
+#          See also M. Meinvielle's thesis:
+#          https://hal.inria.fr/file/index/docid/681484/filename/24564_MEINVIELLE_2012_archivage.pdf
+# Goal   - Create perturbed versions of the fields of DFS5.2. As many
+#          perturbations as the user wants can be created, but they will remain
+#          in the subspace spanned by the available perturbations (computed in
+#          the first script preprocess.bash)
+#          A seed is used to ensure reproducibility of perturbations.
+#
+# Input  - Files of perturbations must be available. They can be computed 
+#          using the script preprocess.bash
+# =============================================================================
+
+strvar  <- c('t2', 'qsw', 'qlw')     # Name of the variables for which perturbations
+                                     # have to be created. 
+fstrvar <- c('t2', 'radsw', 'radlw')     # Matching name of the variable as they
+                               # appear in the NetCDF
+                               # Normally equal to strvar, except 
+                               # qsw --> radsw and qlw --> radlw
+
+# The folder where the perturbations are (the folder was created during
+# the execution of preprocess.bash)
+# NOTE(review): entries must be listed in the same order as strvar above.
+pertdir <- c('/esnas/scratch/fmassonn/TMP/TMP_t2_15262', 
+             '/esnas/scratch/fmassonn/TMP/TMP_qsw_26904' , 
+             '/esnas/scratch/fmassonn/TMP/TMP_qlw_15266')
+
+yearbp <- 1979 # The years defining the time period used 
+               # to compute perturbations. That is, the reference period from
+               # which variability is estimated. 
+yearep <- 2015 # That period must be included in the period for which perturbations were
+               # created (in preprocess.bash)
+
+yearb  <- 1973 # The years for which a new perturbation has to be calculated
+yeare  <- 1973 # Can be outside the interval above 
+
+npert <-  25   # Number of perturbations that have to be created each year
+
+
+# ---------------------------------------------
+# Script starts -------------------------------
+# ---------------------------------------------
+
+nvar  <- length(strvar)
+
+nyearp <- yearep - yearbp + 1 # Number of years for the reference period
+nyear  <- yeare  - yearb  + 1 # Number of years for which to create perturbations
+
+nsamp <- nyearp - 1          # Number of perturbations samples available. 
+                             # Since we create 
+                             # the perturbations as successive year-to-year diffs,
+                             # we have one less than the number of years
+
+for (jvar in seq(1, nvar)) {
+  print(paste("Working on variable", strvar[jvar]))
+  # 1/ Recording the perturbations from the available sample
+  # --------------------------------------------------------
+  for (year in seq(yearbp + 1, yearep)) {
+    print(paste("  Reading data:", year, "-", year - 1))
+    filein <- paste(pertdir[jvar], "/", "diff_", strvar[jvar], "_DFS5.2_day_", year, 
+                  "-", year - 1, ".nc", sep = "")
+    f <- open.ncdf(filein, mode = 'r')
+    var <- get.var.ncdf(f, fstrvar[jvar])
+
+    if (year == yearbp + 1) {
+    # If we are in the first year
+    # let's also create the matrix that will receive the data
+
+      ny <- dim(var)[1]
+      nx <- dim(var)[2]
+      nt <- dim(var)[3]
+
+      ns <- ny * nx * nt # state vector dimension
+  
+      lat0 <- get.var.ncdf(f, 'lat0')
+      lon0 <- get.var.ncdf(f, 'lon0')
+  
+      print("    Creating X_raw, this has to be done only once")
+      X_raw <- matrix(NA, ns, nsamp)
+      print("    X_raw created")
+    } # if
+      # NOTE(review): the indentation of the lines below is misleading --
+      # they run for every year of the reading loop, not only in the
+      # first-year branch above (the braces, not the indent, govern).
+      close.ncdf(f)
+      var[is.na(var)] <- 0
+      # This last step is necessary, because the DFS5.2 forcing set has, sometimes,
+      # NaNs. (See e.g., year 2008, variable t2, time step 1776, in the Southern
+      # Hemisphere.)
+
+      X_raw[, year - (yearbp + 1) + 1] <- var[]
+
+      rm(list = "var")
+    } # year
+
+    # Centering X
+    # -----------
+    # (Executed once per variable, after the reading loop above.)
+    print("Centering the data")
+    myones <- matrix(1, nrow = nsamp, ncol = 1)
+
+    Xbar <- 1 / nsamp * X_raw %*% myones
+    X    <- X_raw - Xbar %*% t(myones)
+
+    # Clearing X_raw and Xbar, as they are taking resources!
+    rm(list = c("X_raw", "Xbar", "myones"))
+  
+    print("Data centered")
+
+
+    # Creating perturbations
+    # ----------------------
+    # We take a N(0,1) random vector z of size nsamp by 1
+    # so that the product X %*% z / sqrt(nsamp - 1)
+    # will give us a new perturbation
+    # with the desired properties
+
+    print("Creating perturbations")
+
+    for (year in seq(yearb, yeare)) {
+      print(paste("Recording perturbations for variable", strvar[jvar], "and year", year))
+  
+      for (jpert in seq(1, npert)) {
+        print(paste("  Recording perturbation for member", sprintf("%02d", jpert)))
+
+        # EXTREMELY IMPORTANT - this ensures reproducibility. DON'T CHANGE.
+        # if year 1987, and perturbation 302:
+        # then the seed is 1000 * 1987 + 302 = 1987302. 
+        # This is then a unique identifier. It won't be unique if we go beyond 999 
+        # members, but I might no longer be on this planet when that happens.
+        #
+        # In addition the value of z must be the same for all variables to have coherent
+        # perturbations
+        set.seed(year * 1000 + jpert)
+
+        z <- matrix(rnorm(nsamp))
+        sample <- array(X %*% z / sqrt(nsamp - 1), c(ny, nx, nt))
+        print("First and last elements of perturbation: ")
+        print(z[1])
+        print(z[nsamp])
+        rm(list = "z")
+
+        # Record in  NetCDF
+        # ----------------
+        # 1/ Create dimensions
+        # --------------------
+        # NOTE(review): dimy holds lon0 and dimx holds lat0 -- the variable
+        # names look swapped relative to their content, but the ordering is
+        # consistent with dim(var) of the input files -- confirm.
+        dimt <- dim.def.ncdf('time', 'time', seq(1, nt), unlim = TRUE)
+        dimy <- dim.def.ncdf('lon0', 'lon0', lon0)
+        dimx <- dim.def.ncdf('lat0', 'lat0', lat0)
+
+        # 2/ Define variables
+        # -------------------
+        mylist <- list()
+        mylist[[1]] <- var.def.ncdf(fstrvar[jvar], '-', list(dimy, dimx, dimt), -1e24)
+    
+        # 3/ Create NetCDF
+        # ----------------
+        fileout <- paste(pertdir[jvar], '/pert_', strvar[jvar], '_DFS5.2_', year, '_fc', sprintf("%02d", jpert), '_ref', yearbp, '-', yearep, '.nc', sep = "")
+        fo <- create.ncdf(fileout, mylist)
+        close.ncdf(fo)
+
+        # 4/ Open it
+        # ----------
+        fo <- open.ncdf(fileout, write = TRUE)
+
+        # 5/ Put variable
+        # ---------------
+        put.var.ncdf(fo, fstrvar[jvar], sample)
+
+        # 6/ Close it
+        # -----------
+        close.ncdf(fo)
+
+        print(paste(fileout, "created"))
+
+        # 7/ Clear workspace
+        # ------------------
+        rm(list = "sample")
+    } # jpert
+  } # year 
+} # var

+ 66 - 0
prep_nem_forcings/DFS5.2/plot.py

@@ -0,0 +1,66 @@
+#!/usr/bin/python
+
+from   netCDF4  import Dataset
+import numpy as np
+import matplotlib.pyplot as plt
+
+# Quick Python script to display time series
+# of some variable produced by perturbation
+#
+# Francois Massonnet
+# November 2016
+# francois.massonnet@bsc.es
+
+svar = 'qlw'	# Variable to show
+year = 1993	# Year to show
+jy = 20		# y-coordinate of grid-point to show
+jx = 20		# x- 
+repo = '/esarchive/releases/fg/ocean/DFS5.2' # where data is located
+nmemb=25        # Nb members to look for
+
+# ============
+# START SCRIPT
+# ============
+
+# Naming changes for two types of files (Asif decided so).
+fvar = svar
+if svar == 'qlw':
+  fvar = 'radlw'
+if svar == 'qsw':
+  fvar = 'radsw'
+
+fig = plt.figure(figsize = (10, 6))
+avg = 0
+javg = 1
+# Loop from member nmemb down to 0.  Member 0 (the unperturbed reference
+# forcing) is drawn last -- i.e. on top -- in a darker colour, and is
+# excluded from the ensemble mean computed below.
+for  m in np.arange(nmemb, -1, -1):
+  print(str(m) + '/' + str(nmemb))
+  filein = repo + '/' + svar + '_fc' + str(m).zfill(2) + '_DFS5.2_' + str(year) + '.nc'
+
+  f = Dataset(filein, mode = 'r')
+  var = f.variables[fvar][:, jy, jx]
+  units = f.variables[fvar].units
+  f.close()
+
+  if m == 0:
+    color = (0.2, 0.0, 0.0)
+    lw = 2
+  else:
+    color = (0, 0.5, 0)
+    lw = 1
+
+  plt.plot(var, color = color, lw = lw)
+
+  # Compute mean incrementally
+  # (running-mean update: avg_k = avg_{k-1} + (x_k - avg_{k-1}) / k)
+  if m != 0:
+    avg = avg + (var - avg) / javg
+    javg = javg + 1
+
+# Mean
+plt.plot(avg, color = (0, 0.2, 0), lw = 2)
+plt.xlabel('time')
+plt.ylabel(units)
+plt.title(fvar + ' ' + str(year) + ' ' + 'jy = ' + str(jy) + '; jx = ' + str(jx))
+fig.savefig('fig.png', dpi = 300)
+plt.close("all")
+
+

+ 165 - 0
prep_nem_forcings/DFS5.2/postprocess.bash

@@ -0,0 +1,165 @@
+#!/bin/bash
+#SBATCH -n 1
+#SBATCH -t 12:00:00
+#SBATCH -J post_perturbation
+#SBATCH -o slurm_post-%j.out
+#SBATCH -e slurm_post-%j.err
+
+
+set -o nounset
+set -o errexit
+set -x
+
+# -----------------------------------------------------------------------------
+# Author - F. Massonnet
+# Purpose- Process perturbations of the DFS5.2 forcing and add them
+#          to the actual forcing.
+# What the script does:
+#   1) Interpolate from daily to 3hourly
+#   2) Add 29 February if necessary
+#   3) Add to the true forcing
+#
+# Requires: cdo and the NCO tools (ncks, ncrcat, ncatted), plus the
+# perturbation files produced by preprocess.bash and mkpert.R in $workdir.
+#
+# Review fixes: error branches now exit with a non-zero status (a bare
+# 'exit' after echo returned 0, so SLURM/callers saw success); [[ ]] tests
+# and $( ) command substitution; quoted expansions.
+# -----------------------------------------------------------------------------
+
+# Which variable of the DFS5.2 forcing set has to be perturbed?
+# Possibilities: t2, q2, u10, v10, qlw, qsw, snow or precip
+var=qsw
+
+alpha=1.0 # VERY IMPORTANT: strength of the perturbation. 1 = same as interannual variability. 0.5 = half of it.
+
+# First and end years for which a perturbation has to be created. 
+yearb=1973
+yeare=1973
+
+yearbp=1979    # First and end years defining the reference period on which
+yearep=2015    # the perturbations were created (must match those in preprocess.bash and in mkpert.R)
+
+nmb=1   # members to loop over. nmb = first member; nme = last one.
+        # Usually the first member ("fc0") is the true forcing so it should *NOT* be perturbed.
+        # Hence put nmb=1 and nme=25 if you want 25 perturbed forcings
+nme=25
+
+workdir=/esnas/scratch/$USER/TMP/TMP_${var}_26904/  # Where all perturbations are recorded
+                                                # This folder was defined during the execution of preprocess.bash
+
+outtag=DFS5.2                                   # Name of the forcing after perturbations are created
+                                                # Can be the same (DFS5.2) or modified (perturbed-DFS5.2 for instance)
+outdir=/esarchive/releases/fg/ocean/$outtag/
+
+cd "$workdir"   # errexit aborts here if the folder is missing
+echo "Workdir is $workdir"
+
+# Per-variable physical bounds, native frequency, yearly time-step count,
+# and name of the variable inside the NetCDF files.
+case ${var} in
+  t2)
+    min=100.0 	# Min and max values allowed
+    max=400.0
+    freq=3hour  # Frequency of availability
+    ntim=2920   # Number of time steps in a year
+    fvar=${var} # Name of the variable in the NetCDF
+    ;;
+  q2)
+    min=0.0
+    max=0.1
+    freq=3hour
+    ntim=2920
+    fvar=${var}
+    ;;
+  u10)
+    min=-100.0
+    max=100.0
+    freq=3hour
+    ntim=2920
+    fvar=${var}
+    ;;
+  v10)
+    min=-100.0
+    max=100.0
+    freq=3hour
+    ntim=2920
+    fvar=${var}
+    ;;
+  qsw)
+    min=0.0
+    max=1000.0
+    freq=1day
+    ntim=365
+    fvar=radsw
+    ;;
+  qlw)
+    min=0.0
+    max=1000.0
+    freq=1day
+    ntim=365
+    fvar=radlw
+    ;;
+  snow)
+    min=0.0
+    max=0.001
+    freq=1day
+    ntim=365
+    fvar=${var}
+    ;;
+  precip)
+    min=0.0
+    max=0.01
+    freq=1day
+    ntim=365
+    fvar=${var}
+    ;;
+  *)
+    echo "Variable $var unknown" >&2
+    exit 1   # non-zero so SLURM and callers see the failure (was: plain 'exit')
+esac
+
+for year in $(seq "${yearb}" "${yeare}")
+do
+  for mm in $(seq "${nmb}" "${nme}")
+  do
+    if [[ "$mm" -eq 0 ]]
+    then
+      echo "WARNING!!!!"
+      echo "Usually, at BSC, member 0 is booked to point towards the true forcing"
+      echo "By creating a perturbed forcing named fc00, and then copying to the source"
+      echo "directory, you are going to erase a symbolic link that points to the true forcing"
+      echo "and thereby erase the true forcing!!"
+      echo "Since this is highly dangerous, this script is aborting."
+      echo "Contact francois.massonnet@bsc.es for further questions"
+
+      exit 1   # abort with an error status (was: plain 'exit', i.e. success)
+    fi
+    m=$(printf "%02d" "$mm")
+    # There is a small trick here. If we have 3 days of data and ask for 3-hourly interpolation, we will have only 17 points and not 24.
+    # We have in fact (ndays - 1) * 8 + 1. So the trick is to append the last day to the data twice and remove the last time frame.
+    # 
+    # Extract the last time frame
+    ncks -F -O -d time,365,365              pert_${var}_DFS5.2_${year}_fc${m}_ref${yearbp}-${yearep}.nc  tmp.${year}.${m}.nc
+    # Append it 
+    ncrcat -F -O                            pert_${var}_DFS5.2_${year}_fc${m}_ref${yearbp}-${yearep}.nc  tmp.${year}.${m}.nc pert_${var}_DFS5.2_${year}_fc${m}.nc.1
+    # Set time axis
+    cdo settaxis,${year}-01-01,00:00,1day   pert_${var}_DFS5.2_${year}_fc${m}.nc.1                       pert_${var}_DFS5.2_${year}_fc${m}.nc.2
+    # Interpolate in time
+    cdo inttime,${year}-01-01,00:00,${freq} pert_${var}_DFS5.2_${year}_fc${m}.nc.2                       pert_${var}_DFS5.2_${year}_fc${m}.nc.3
+    # Remove the last time frame
+    ncks -F -O -d time,1,${ntim}            pert_${var}_DFS5.2_${year}_fc${m}.nc.3                       pert_${var}_DFS5.2_${year}_fc${m}.nc.4
+
+    # Add the desired fraction "alpha" of the perturbation to the true forcing
+    cdo add -mulc,${alpha}                  pert_${var}_DFS5.2_${year}_fc${m}.nc.4   ${var}_DFS5.2_${year}.nc ${var}_fc${m}_DFS5.2_${year}.nc.0
+    
+    # Physical bounds: clip anything below min (resp. above max) to min (resp. max)
+    cdo setrtoc,-10000000000,${min},${min}  ${var}_fc${m}_DFS5.2_${year}.nc.0                            ${var}_fc${m}_DFS5.2_${year}.nc.1 
+    cdo setrtoc,${max},10000000000,${max}   ${var}_fc${m}_DFS5.2_${year}.nc.1                            ${var}_fc${m}_DFS5.2_${year}.nc.2
+
+    # Set time units to allow nice reading
+    cdo settunits,years                     ${var}_fc${m}_DFS5.2_${year}.nc.2                            ${outdir}/${var}_fc${m}_${outtag}_${year}.nc
+
+    # Add description in the header
+    ncatted -O -a description,${fvar},a,c,"Perturbed version of DFS5.2 variable ${fvar} for year ${year} (member fc${m}). Strength of perturbation is ${alpha} times the year-to-year differences estimated over the ${yearbp}-${yearep} reference period. For more details: francois.massonnet@bsc.es" ${outdir}/${var}_fc${m}_${outtag}_${year}.nc
+
+    chmod 777 ${outdir}/${var}_fc${m}_${outtag}_${year}.nc
+
+    rm -f tmp.${year}.${m}.nc pert_${var}_DFS5.2_${year}_fc${m}.nc.? ${var}_fc${m}_DFS5.2_${year}.nc.?
+  done
+done
+
+
+echo "SCRIPT POSTPROCESS.BASH FINISHED"

+ 151 - 0
prep_nem_forcings/DFS5.2/preprocess.bash

@@ -0,0 +1,151 @@
+#!/bin/bash
+#SBATCH -n 1
+#SBATCH -t 12:00:00
+#SBATCH -J pre_perturbation
+#SBATCH -o slurm_pre-%j.out
+#SBATCH -e slurm_pre-%j.err
+
+set -o nounset
+set -o errexit
+set -x
+
+# =============================================================================
+# Author - F. Massonnet
+# Purpose- Prepare DFS5.2 forcing set to create perturbations later
+# What the script does:
+#   1) Make daily means in the case atmospheric data is at higher frequency
+#   2) Remove leap days if any
+#   3) Computes year-to-year differences
+#
+# The year-to-year differences are then used to generate perturbations,
+# in a second script.
+#
+# Requires: cdo and ncatted (NCO).
+#
+# History - February 2016: creation for T2 perturbation
+#         - March    2016: sorting problems with values beyond bounds
+#         - April    2016: extension to other variables
+#
+# Review fixes: error branches now exit with a non-zero status (a bare
+# 'exit' after echo returned 0 -> SLURM/callers saw success); backticks
+# replaced by $( ); numeric [[ -eq ]] tests; quoted expansions.
+# =============================================================================
+
+# Which variable of the DFS5.2 forcing set has to be perturbed?
+# Possibilities: t2, q2, u10, v10, qlw, qsw, snow or precip
+var=qsw
+
+# Years defining the period considered. If perturbations have to be generated
+# for 1980-1990 but based on anomalies of the period 1975-1985, the longest
+# period has to be set: 1975-1990
+#
+# yearb = 1979 and yeare = 2015 are recommended choices since the forcing
+# spans these years and all variables are interannual
+
+yearb=1973
+yeare=1974
+
+# Directory of the source of the data (i.e., the original files)
+sourcedir=/esarchive/releases/fg/ocean/DFS5.2/
+
+# Working directory
+workdir=/esnas/scratch/$USER/TMP/TMP_${var}_26904
+
+# =============================================================================
+# Script Starts
+# =============================================================================
+
+mkdir -p "$workdir"
+cd    "$workdir"
+echo "Workdir is $workdir"
+
+# Per-variable physical bounds, native frequency and NetCDF variable name.
+case ${var} in 
+  t2)
+    min=100.0 	# Min and max values allowed
+    max=400.0
+    freq=3hour  # Frequency of availability
+    fvar=${var} # Name of the variable in the NetCDF
+    ;;
+  q2)
+    min=0.0
+    max=0.1
+    freq=3hour
+    fvar=${var}
+    ;;
+  u10)
+    min=-100.0
+    max=100.0
+    freq=3hour
+    fvar=${var}
+    ;;
+  v10)
+    min=-100.0
+    max=100.0
+    freq=3hour
+    fvar=${var}
+    ;;
+  qsw)
+    min=0.0
+    max=1000.0
+    freq=1day
+    fvar=radsw
+    ;;
+  qlw)
+    min=0.0
+    max=1000.0
+    freq=1day
+    fvar=radlw
+    ;;
+  snow)
+    min=0.0
+    max=0.001
+    freq=1day
+    fvar=${var}
+    ;;
+  precip)
+    min=0.0
+    max=0.01
+    freq=1day 
+    fvar=${var}
+    ;;
+  *)
+    echo "Variable $var unknown" >&2
+    exit 1   # non-zero so SLURM and callers see the failure (was: plain 'exit')
+esac
+
+for year in $(seq "${yearb}" "${yeare}")
+do
+  echo "Year $year / $yeare"
+
+  # Reset valid range. Otherwise, the CDO command settaxis messes up and several points are reported as 
+  # missing values. This then makes NEMO crash.
+  ncatted -O -a valid_range,${fvar},m,f,"${min}, ${max}" ${sourcedir}/${var}_DFS5.2_${year}.nc tmp.${year}.nc 
+
+  cdo settaxis,${year}-01-01,00:00,${freq} tmp.${year}.nc ${var}_DFS5.2_${year}.nc
+  if [[ "${freq}" = 3hour ]]
+  then
+    cdo daymean ${var}_DFS5.2_${year}.nc ${var}_DFS5.2_day_${year}.nc
+  else
+    cp ${var}_DFS5.2_${year}.nc ${var}_DFS5.2_day_${year}.nc
+  fi
+
+  # Cope with leap years: drop 29 February so every year has 365 days
+  ndays=$(cdo ntime ${var}_DFS5.2_day_${year}.nc)
+
+  if [[ "$ndays" -eq 366 ]]
+  then
+    cdo delete,month=2,day=29 ${var}_DFS5.2_day_${year}.nc ${var}_DFS5.2_day_${year}.nc.X
+    mv ${var}_DFS5.2_day_${year}.nc.X ${var}_DFS5.2_day_${year}.nc
+  elif [[ "$ndays" -ne 365 ]]
+  then
+    echo "Problem: nb of days is neither 365 nor 366" >&2
+    exit 1   # abort with an error status (was: plain 'exit', i.e. success)
+  fi
+
+  # Year-to-year difference (only possible from the second year onwards)
+  if [[ "$year" -ge $(( yearb + 1 )) ]]
+  then
+    cdo sub ${var}_DFS5.2_day_${year}.nc ${var}_DFS5.2_day_$(( year - 1 )).nc diff_${var}_DFS5.2_day_${year}-$(( year - 1 )).nc
+  fi
+
+  rm -f tmp.${year}.nc
+done
+
+
+echo "SCRIPT SUCCESSFULLY FINISHED"
+ 7 - 0
prep_nem_forcings/DFS5.2/script.bash

@@ -0,0 +1,7 @@
+#!/bin/bash
+#SBATCH -n 1
+#SBATCH -t 12:00:00
+#SBATCH -J perturbation
+#SBATCH -o slurm_R-%j.out
+#SBATCH -e slurm_R-%j.err
+Rscript mkpert.R

+ 60 - 0
prep_nem_forcings/eraint/orca1/README

@@ -0,0 +1,60 @@
+1) qsw + qlw :
+
+Download 1.5*1.5 daily fields from:
+http://apps.ecmwf.int/datasets/data/interim_full_daily/
+step 12
+Surface thermal radiation downwards
+Surface solar radiation downwards
+Download 00:00 --> output_step12_0h.nc
+Download 12:00 --> output_step12_12h.nc
+Run prep_eraint_qsw_qlwforcings.sh
+
+2) snow + precip:
+
+Download 1.5*1.5 synoptic monthly means from :
+http://apps.ecmwf.int/datasets/data/interim_full_mnth/
+step 12 
+Snowfall
+Total precipitation
+Download 00:00 --> output_step12_0h.nc
+Download 12:00 --> output_step12_12h.nc
+Run prep_eraint_prlr_prsn_forcings.sh
+
+3) t2 + q2
+
+Download 1.5*1.5 resolution 6h fields from :
+http://apps.ecmwf.int/datasets/data/interim_full_daily/
+step 0
+2 metre temperature
+2 metre dewpoint temperature
+Surface pressure
+Area : 90S 90N 0E 360E
+Download 00:00, 06:00, 12:00, 18:00 --> output_step0.nc
+Run prep_eraint_t2_q2_forcings.sh
+Run comp_q2.sh
+
+4) Interp qsw + qlw + snow + precip + t2 + q2
+Run interp_eraint.sh
+
+5) u10 + v10
+
+Download original resolution 6h fields :
+step 0
+10 metre U wind component 
+10 metre V wind component
+00:00, 06:00, 12:00, 18:00 --> output.grib
+by running download_u10v10.py
+If you use download_u10v10.py for the first time, you need to 
+install your API key as described here : 
+https://software.ecmwf.int/wiki/display/WEBAPI/Accessing+ECMWF+data+servers+in+batch
+Run prep_eraint_u10_v10_forcings.sh
+
+6) Surface wind perturbations:
+
+(To save time, you can go to nanna:/scratch/virginie/wndpert and copy all the data
+from this directory)
+Run wndpert.sh including for the year preceding and following the years you
+aim at treating
+Run rotate_perturb.sh
+Run addpert_eraint.sh
+

+ 55 - 0
prep_nem_forcings/eraint/orca1/addpert_eraint.sh

@@ -0,0 +1,55 @@
+#/bin/ksh
+set -evx
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# This script adds interpolated and rotated surface wind perturbations to the
+# interpolated and rotated surface wind forcing fields.
+#
+# History : Virginie Guemas - Initial version                         - 2012 
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#                                Arguments
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+year0=2013                   # First year for which to add perturbations
+yearf=2014                   # Last year for which to add perturbations
+mid='_eraint_'               # '_eraint_' / '_DFS4.3_'
+mode=2                       # 1/2 = Bound the perturbations/No bounding
+bound=10.0                   # Bounding value for the perturbations
+lstmbs=( 1 2 3 4 )           # List of members
+lstvars=( 'u10' 'v10' )      # List of variables
+pathpert='perturb1_era-dfs_mon'
+             # Directory from where to take the perturbations
+pathera='/cfu/scratch/vguemas/ERAint_ORCA1t'
+pathdfs='/cfu/scratch/vguemas/dfs4.3'
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+for ((year=${year0};year<=${yearf};year++)) ; do
+  for memb in ${lstmbs[@]} ; do
+    for var in ${lstvars[@]} ; do
+      cp ${pathera}/${var}${mid}${year}.nc ${var}_${year}.nc
+      fileout=${pathera}/${var}_fc${memb}${mid}${year}.nc
+      if [[ $mid == '_eraint_' ]] ; then
+        suf='_orca1t'
+      else
+        suf=''
+      fi
+      case $mode in 
+        1) 
+        cdo setrtomiss,$bound,100 ${pathpert}/${var}_fc${memb}${mid}${year}${suf}.nc toto1.nc
+        cdo setmisstoc,$bound toto1.nc toto2.nc
+        cdo setrtomiss,-100,-$bound toto2.nc toto1.nc
+        cdo setmisstoc,-$bound toto1.nc toto2.nc
+        mv toto2.nc ${var}_fc${memb}${mid}${year}${suf}.nc
+        rm -f toto1.nc toto2.nc
+        ;;
+        2)
+        cp ${pathpert}/${var}_fc${memb}${mid}${year}${suf}.nc .
+        ;;
+      esac
+      cdo add -selvar,$var ${var}_${year}.nc -selvar,$var ${var}_fc${memb}${mid}${year}${suf}.nc $fileout 
+      rm -f ${var}_${year}.nc ${var}_fc${memb}${mid}${year}${suf}.nc
+      if [[ $mid == '_DFS4.3_' ]] ; then
+        ncrename -d x,lon -d y,lat $fileout
+        ncks -A -v lon,lat ${pathera}/${var}${mid}${year}.nc $fileout
+      fi
+    done
+  done
+done

+ 51 - 0
prep_nem_forcings/eraint/orca1/comp_q2.sh

@@ -0,0 +1,51 @@
+#!/bin/ksh
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# This script computes the specific humidity from the dew point using :
+#  
+# 1) the approximation e<<P which gives q=0.622(e/P)=0.622(es(Td)/P)
+#
+# 2) And one of the three formulas :
+#
+# - the August-Roche-Magnus formula : es=C1 x exp( (A1 x T)/(B1 + T) )
+#         A1=17.625, B1=243.04°C, C1=6.1094 hPa
+#    whose error <= 0.4% in the range -40°C < T < 50°C. 
+#   from Alduchov and Eskridge (1996), Improved Magnus form approximation of
+#        saturation vapor pressure, J. Appl. Meteor., 35, 601-609.
+#
+# - the Lawrence formula : es=C2 x exp( -L/(R*(T+273.15)) )
+#         C2=2.53e9 hPa, L=2.501e6 J.kg-1, R=461.5 J.K-1.kg-1 
+#   from Lawrence (2005), BAMS, 86, 225-233
+# 
+# - the Tetens formula : es= (D1 + E1*P) x C1 x exp((A1 x T)/(B1 + T) )
+#         D1=1.0007 hPa, E1=3.46e-6, C1=6.1121 hPa, B1=240.97°C, A1=17.502      
+#   from  Buck, 1981, JAM, 20, 1527-1532
+#
+# Review  : Gibbins (1990) Ann. Geophys., 8, 859–885. 
+#
+# History : Virginie Guemas - Initial version                         - 2012 
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#                                Arguments
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+year0='2012'
+yearf='2014'
+equation='Lawrence'          # 'Magnus' / 'Lawrence' / 'Tetens'
+fileD='/cfu/scratch/vguemas/ERAint/d2m_eraint'  # Dew point
+fileP='/cfu/scratch/vguemas/ERAint/sp_eraint'   # Surface pressure
+fileout='q2_eraint'
+varD="d2m"
+varP='sp'
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+for ((year=${year0};year<=${yearf};year++)) ; do
+  # Promote both inputs to double, then convert the dew point from K to °C.
+  ncap -s ${varD}"=double("${varD}")" ${fileD}_${year}.nc tmp1_${year}.nc
+  cdo subc,273.15 tmp1_${year}.nc tmp_${year}.nc
+  ncap -s ${varP}"=double("${varP}")" ${fileP}_${year}.nc toto_${year}.nc
+  # Saturation vapour pressure esd (hPa) at the dew point, per chosen formula.
+  case $equation in 
+    'Magnus') cdo expr,'esd=6.1094*exp((17.625*'${varD}')/(243.04+'${varD}'));' tmp_${year}.nc tmp2_${year}.nc ;;
+    'Lawrence') cdo expr,'esd=2.53e9*exp(((d2m+273.15)^(-1))*(-2.501e6/461.5));' tmp_${year}.nc  tmp2_${year}.nc ;;
+    'Tetens') cdo expr,'esd=(1.0007+3.46e-6*1013)*6.1121*exp((17.502*d2m)/(240.97+d2m));' tmp_${year}.nc  tmp2_${year}.nc ;;
+    *) echo "Unknown equation: $equation" >&2 ; exit 1 ;;   # guard misconfiguration
+  esac
+  # q2 = 0.622 * esd / P, with the surface pressure converted from Pa to hPa.
+  ncks -A -v sp toto_${year}.nc tmp2_${year}.nc
+  cdo expr,'q2=0.622*esd/(sp/100)' tmp2_${year}.nc ${fileout}_${year}.nc
+  rm -f tmp_${year}.nc tmp1_${year}.nc tmp2_${year}.nc toto_${year}.nc
+done

+ 40 - 0
prep_nem_forcings/eraint/orca1/download_u10v10.py

@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# This script downloads the zonal and meridional 10m height wind components
+# from the ERA-interim reanalysis on the original reduced gaussian grid which
+# was used to perform ERA-interim.
+#
+# If you use this script for the first time, you need to install you API key
+# as described here : https://software.ecmwf.int/wiki/display/WEBAPI/Accessing+ECMWF+data+servers+in+batch
+#
+# History : Virginie Guemas - Initial version                     - May 2014 
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#                                Arguments
+#
+# date1 - YYYYMMDD : First date to be downloaded
+# date2 - YYYYMMDD : Last date to downloaded
+#
+# Example : ./download_u10v10.py 20140101 20140228
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+import sys
+date1=sys.argv[1]
+date2=sys.argv[2]
+
+from ecmwfapi import ECMWFDataServer
+  
+server = ECMWFDataServer()
+  
+server.retrieve({
+  'dataset' : "interim",
+  'stream'  : "oper",
+  'step'    : "0",
+  'levtype' : "sfc",
+  'class'   : "ei",
+  'date'    : date1+"/to/"+date2,
+  'time'    : "00:00:00/06:00:00/12:00:00/18:00:00",
+  'type'    : "an",
+  'param'   : "165.128/166.128",
+  'gaussian': "reduced",
+  'grid'    : "128",
+  'target'  : "output.grib"
+  })

+ 61 - 0
prep_nem_forcings/eraint/orca1/interp_eraint.sh

@@ -0,0 +1,61 @@
+#!/bin/bash
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# This script interpolates the erainterim forcing fields from the 1.5*1.5 grid
+# to the ORCA1 grid.
+# 
+# History : Virginie Guemas - Initial version                         - 2012 
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#                                Arguments
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+year0=2012
+yearf=2014
+lstvars=( 't2' 'q2' 'precip' 'snow' 'qsw' 'qlw' )
+lstmembs=( 0 )
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+set -evx
+
+cp /cfu/pub/scripts/interpolation/scrip_use_extrap .
+
+for ((year=${year0};year<=${yearf};year++)) ; do 
+
+for memb in ${lstmembs[@]} ; do
+
+for var in ${lstvars[@]} ; do
+
+  # Wind components are stored per member ('_fcN_'); other fields are not.
+  # Bug fix: the stray '$' in mid='_fc$'${memb}'_' produced the literal
+  # string '_fc$N_' instead of '_fcN_', so the cp below could never match.
+  case $var in
+    'u10'|'v10') mid='_fc'${memb}'_' ;;
+    *) mid='_' ;;
+  esac
+
+  cp /cfu/scratch/vguemas/ERAint/${var}${mid}eraint_${year}.nc ${var}_eraint_${year}.nc
+  # Unpack short-packed wind fields before interpolation.
+  case $var in 
+    'u10'|'v10') ncpdq -O -U ${var}_eraint_${year}.nc ${var}_eraint_${year}.nc ;;
+  esac
+  case $var in
+    't2'|'q2') nstep=1460 ; method='bicubic' ; filewgts='/cfu/pub/scripts/interpolation/weigths/rmp_ERAint_ocean_to_ORCA1t_v3.2_bicubic.nc' ;;
+    'precip'|'snow') nstep=12; method='bicubic' ; filewgts='/cfu/pub/scripts/interpolation/weigths/rmp_ERAint_ocean_to_ORCA1t_v3.2_bicubic.nc' ;;
+    'qsw'|'qlw') nstep=365; method='bicubic' ; filewgts='/cfu/pub/scripts/interpolation/weigths/rmp_ERAint_ocean_to_ORCA1t_v3.2_bicubic.nc' ;;
+  esac
+    
+  # Namelist consumed by scrip_use_extrap for this variable/year.
+  cat > scrip_use_in <<EOF
+&remap_inputs
+    remap_wgt   = '${filewgts}'
+    infile      = '${var}_eraint_${year}.nc'
+    invertlat   = TRUE 
+    var         = '${var}'
+    fromregular = TRUE
+    outfile     = '${var}_eraint_${year}_orca1.nc'
+/
+EOF
+./scrip_use_extrap
+
+rm -f ${var}_eraint_${year}.nc
+ncks -O -v ${var} ${var}_eraint_${year}_orca1.nc ${var}_eraint_${year}_orca1.nc
+
+done
+
+done
+
+done
+rm -f scrip_use_in scrip_use_extrap

+ 33 - 0
prep_nem_forcings/eraint/orca1/prep_eraint_prlr_prsn_forcings.sh

@@ -0,0 +1,33 @@
+#!/bin/bash
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# This script combines and formates the forcing solid and liquid 
+# precipitation fields. output_step12_0h.nc and output_step12_12h.nc as 
+# described in README should be in the working directory 
+#
+# History : Virginie Guemas - Initial version                         - 2012 
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#                                Arguments
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+year='2013'
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ncap2 -O -s "sf=double(sf)" output_step12_12h.nc output_step12_12h.nc
+ncap2 -O -s "tp=double(tp)" output_step12_12h.nc output_step12_12h.nc
+ncap2 -O -s "sf=double(sf)" output_step12_0h.nc output_step12_0h.nc
+ncap2 -O -s "tp=double(tp)" output_step12_0h.nc output_step12_0h.nc
+ncatted -O -a add_offset,sf,d,d, output_step12_0h.nc output_step12_0h.nc
+ncatted -O -a add_offset,tp,d,d, output_step12_0h.nc output_step12_0h.nc
+ncatted -O -a scale_factor,tp,d,d, output_step12_0h.nc output_step12_0h.nc
+ncatted -O -a scale_factor,sf,d,d, output_step12_0h.nc output_step12_0h.nc
+ncatted -O -a add_offset,sf,d,d, output_step12_12h.nc output_step12_12h.nc
+ncatted -O -a add_offset,tp,d,d, output_step12_12h.nc output_step12_12h.nc
+ncatted -O -a scale_factor,sf,d,d, output_step12_12h.nc output_step12_12h.nc
+ncatted -O -a scale_factor,tp,d,d, output_step12_12h.nc output_step12_12h.nc
+cdo add output_step12_12h.nc output_step12_0h.nc output_sf_tp_int.nc
+cdo divc,86.4 output_sf_tp_int.nc  output_sf_tp.nc
+ncks -v sf output_sf_tp.nc snow_eraint_int.nc
+ncrename -v sf,snow snow_eraint_int.nc
+ncks -v tp output_sf_tp.nc precip_eraint_int.nc
+ncrename -v tp,precip precip_eraint_int.nc
+cdo shifttime,14day precip_eraint_int.nc precip_eraint_${year}.nc
+cdo shifttime,14day snow_eraint_int.nc snow_eraint_${year}.nc
+rm -f output_step12_0h.nc output_step12_12h.nc output_sf_tp_int.nc output_sf_tp.nc snow_eraint_int.nc precip_eraint_int.nc

+ 33 - 0
prep_nem_forcings/eraint/orca1/prep_eraint_qsw_qlw_forcings.sh

@@ -0,0 +1,33 @@
+#!/bin/bash
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# This script combines and formates the forcing shortwave and longwave 
+# radiation fields. output_step12_0h.nc and output_step12_12h.nc as 
+# described in README should be in the working directory 
+#
+# History : Virginie Guemas - Initial version                         - 2012 
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#                                Arguments
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+year='2013'
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ncap2 -O -s "ssrd=double(ssrd)" output_step12_12h.nc output_step12_12h.nc
+ncap2 -O -s "strd=double(strd)" output_step12_12h.nc output_step12_12h.nc
+ncap2 -O -s "ssrd=double(ssrd)" output_step12_0h.nc output_step12_0h.nc
+ncap2 -O -s "strd=double(strd)" output_step12_0h.nc output_step12_0h.nc
+ncatted -O -a add_offset,ssrd,d,d, output_step12_0h.nc output_step12_0h.nc
+ncatted -O -a add_offset,strd,d,d, output_step12_0h.nc output_step12_0h.nc
+ncatted -O -a scale_factor,strd,d,d, output_step12_0h.nc output_step12_0h.nc
+ncatted -O -a scale_factor,ssrd,d,d, output_step12_0h.nc output_step12_0h.nc
+ncatted -O -a add_offset,ssrd,d,d, output_step12_12h.nc output_step12_12h.nc
+ncatted -O -a add_offset,strd,d,d, output_step12_12h.nc output_step12_12h.nc
+ncatted -O -a scale_factor,strd,d,d, output_step12_12h.nc output_step12_12h.nc
+ncatted -O -a scale_factor,ssrd,d,d, output_step12_12h.nc output_step12_12h.nc
+cdo add output_step12_12h.nc output_step12_0h.nc output_ssrd_strd_int.nc
+cdo divc,86400 output_ssrd_strd_int.nc  output_ssrd_strd.nc
+ncks -v ssrd output_ssrd_strd.nc qsw_eraint.nc
+ncrename -v ssrd,qsw qsw_eraint.nc
+ncks -v strd output_ssrd_strd.nc qlw_eraint.nc
+ncrename -v strd,qlw qlw_eraint.nc
+cdo shifttime,-12h qlw_eraint.nc qlw_eraint_${year}.nc
+cdo shifttime,-12h qsw_eraint.nc qsw_eraint_${year}.nc
+rm -f output_step12_0h.nc output_step12_12h.nc output_ssrd_strd_int.nc output_ssrd_strd.nc qsw_eraint.nc qlw_eraint.nc

+ 26 - 0
prep_nem_forcings/eraint/orca1/prep_eraint_t2_q2_forcings.sh

@@ -0,0 +1,26 @@
+#!/bin/bash
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# This script combines and formates the temperature, dew point and pressure  
+# fields to build the forcing temperature and humidity fields afterwards. 
+# output_step12_0h.nc and output_step12_12h.nc as described in README should 
+# be in the working directory 
+#
+# History : Virginie Guemas - Initial version                         - 2012 
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#                                Arguments
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+year='2013'
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ncap2 -O -s "d2m=double(d2m)" output_step0.nc output_step0.nc
+ncap2 -O -s "t2m=double(t2m)" output_step0.nc output_step0.nc
+ncap2 -O -s "sp=double(sp)" output_step0.nc output_step0.nc
+ncatted -O -a add_offset,d2m,d,d, output_step0.nc output_step0.nc
+ncatted -O -a add_offset,t2m,d,d, output_step0.nc output_step0.nc
+ncatted -O -a add_offset,sp,d,d, output_step0.nc output_step0.nc
+ncatted -O -a scale_factor,d2m,d,d, output_step0.nc output_step0.nc
+ncatted -O -a scale_factor,t2m,d,d, output_step0.nc output_step0.nc
+ncatted -O -a scale_factor,sp,d,d, output_step0.nc output_step0.nc
+ncks -v t2m output_step0.nc t2m_eraint_${year}.nc
+ncrename -v t2m,t2 t2m_eraint_${year}.nc t2_eraint_${year}.nc
+ncks -v d2m output_step0.nc d2m_eraint_${year}.nc
+ncks -v sp output_step0.nc sp_eraint_${year}.nc

+ 63 - 0
prep_nem_forcings/eraint/orca1/prep_eraint_u10_v10_forcings.sh

@@ -0,0 +1,63 @@
+#!/bin/bash
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# This script interpolates and rotates the 10m zonal and meridional wind
+# component to the ORCA1 grid from the output.grib coming from 
+# download_u10v10.py.
+#
+# History : Virginie Guemas - Initial version                         - 2012 
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#                                Arguments
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+year='2014'
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+cdo -r -f nc copy output.grib u10v10_eraint_T255l.nc
+ncecat -O u10v10_eraint_T255l.nc u10v10_eraint_T255l_bis.nc
+ncpdq -O -h -a time,record u10v10_eraint_T255l_bis.nc u10v10_eraint_T255l_bis.nc
+ncrename -d record,y -d rgrid,x u10v10_eraint_T255l_bis.nc
+ncrename -v var165,u10 -v var166,v10 u10v10_eraint_T255l_bis.nc 
+ncatted -a normalization,global,m,c,"none" /cfu/pub/scripts/interpolation/weights/remap_255l_2_mask_to_orca1_z42_nomask_T_L1.nc remap_255l_2_mask_to_orca1_z42_nomask_T_L1.nc
+ncatted -O -a map_method,global,m,c,"Distance weighted avg of nearest neighbors" remap_255l_2_mask_to_orca1_z42_nomask_T_L1.nc remap_255l_2_mask_to_orca1_z42_nomask_T_L1.nc
+ncatted -O -a conventions,global,m,c,"SCRIP" remap_255l_2_mask_to_orca1_z42_nomask_T_L1.nc remap_255l_2_mask_to_orca1_z42_nomask_T_L1.nc
+ncks -O -x -v src_grid_dims remap_255l_2_mask_to_orca1_z42_nomask_T_L1.nc remap_255l_2_mask_to_orca1_z42_nomask_T_L1.nc
+ncks -A -v src_grid_dims /cfu/pub/scripts/interpolation/weights/rmp_EC-Earth_T255l_to_T106_conserv.nc remap_255l_2_mask_to_orca1_z42_nomask_T_L1.nc
+
+cat > scrip_use_in <<EOF
+&remap_inputs
+    remap_wgt   = 'remap_255l_2_mask_to_orca1_z42_nomask_T_L1.nc'
+    infile      = 'u10v10_eraint_T255l_bis.nc'
+    invertlat   = FALSE
+    var         = 'u10'
+    fromregular = FALSE
+    outfile     = 'u10_eraint_orca1t_unrotated.nc'
+/
+EOF
+cp /cfu/pub/scripts/interpolation/scrip_use scrip_use
+./scrip_use
+
+cat > scrip_use_in <<EOF
+&remap_inputs
+    remap_wgt   = 'remap_255l_2_mask_to_orca1_z42_nomask_T_L1.nc'
+    infile      = 'u10v10_eraint_T255l_bis.nc'
+    invertlat   = FALSE
+    var         = 'v10'
+    fromregular = FALSE
+    outfile     = 'v10_eraint_orca1t_unrotated.nc'
+/
+EOF
+./scrip_use
+
+cat > namelist_rotateUVorca <<EOF
+&nam_rotUV
+  Ufilein   =  'u10_eraint_orca1t_unrotated.nc'
+  Uvarin    =  'u10'
+  Vfilein   =  'v10_eraint_orca1t_unrotated.nc'
+  Vvarin    =  'v10'
+  meshmask  =  '/cfu/autosubmit/con_files/mesh_mask_nemo.N3.2_O1L42.nc'
+  Ufileout   =  'u10_eraint_${year}.nc'
+  Vfileout   =  'v10_eraint_${year}.nc'
+/
+EOF
+cp /cfu/pub/scripts/interpolation/rotateUVorca_orca1 rotateUVorca 
+./rotateUVorca
+rm -f u10v10_eraint_T255l.nc u10v10_eraint_T255l_bis.nc scrip_use scrip_use_in remap_255l_2_mask_to_orca1_z42_nomask_T_L1.nc rotateUVorca namelist_rotateUVorca u10_eraint_orca1t_unrotated.nc v10_eraint_orca1t_unrotated.nc 

+ 58 - 0
prep_nem_forcings/eraint/orca1/rotate_perturb.sh

@@ -0,0 +1,58 @@
+set -evx
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# This script interpolates and rotates the 10m zonal and meridional wind
+# perturbations.
+#
+# History : Virginie Guemas - Initial version                         - 2012 
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#                                Arguments
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+year='2014'                 # Year for which to interpolate and rotate
+dir='perturb1_era-dfs_mon'  # Directory from where to take the perturbations
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+for ((jmemb=1;jmemb<=4;jmemb++)) ; do
+
+cat > scrip_use_in <<EOF
+&remap_inputs
+    remap_wgt   = '/cfu/pub/scripts/interpolation/weights/rmp_ERAint_to_ORCA1t_unmasked_bicubic.nc'
+    infile      = '${dir}/u10_fc${jmemb}_${year}.nc'
+    invertlat   = TRUE
+    var         = 'u10'
+    fromregular = TRUE
+    outfile     = '${dir}/u10_fc${jmemb}_eraint_${year}_orca1t_unrotated.nc'
+/
+EOF
+cp /cfu/pub/scripts/interpolation/scrip_use scrip_use
+./scrip_use
+
+cat > scrip_use_in <<EOF
+&remap_inputs
+    remap_wgt   = '/cfu/pub/scripts/interpolation/weights/rmp_ERAint_to_ORCA1t_unmasked_bicubic.nc'
+    infile      = '${dir}/v10_fc${jmemb}_${year}.nc'
+    invertlat   = TRUE
+    var         = 'v10'
+    fromregular = TRUE
+    outfile     = '${dir}/v10_fc${jmemb}_eraint_${year}_orca1t_unrotated.nc'
+/
+EOF
+./scrip_use
+
+cat > namelist_rotateUVorca <<EOF
+&nam_rotUV
+  Ufilein   =  '${dir}/u10_fc${jmemb}_eraint_${year}_orca1t_unrotated.nc'
+  Uvarin    =  'u10'
+  Vfilein   =  '${dir}/v10_fc${jmemb}_eraint_${year}_orca1t_unrotated.nc'
+  Vvarin    =  'v10'
+  meshmask  =  '/cfu/autosubmit/con_files/mesh_mask_nemo.N3.2_O1L42.nc'
+  Ufileout   =  '${dir}/u10_fc${jmemb}_eraint_${year}_orca1t.nc'
+  Vfileout   =  '${dir}/v10_fc${jmemb}_eraint_${year}_orca1t.nc'
+/
+EOF
+cp /cfu/pub/scripts/interpolation/rotateUVorca_orca1 rotateUVorca
+./rotateUVorca
+
+rm -f ${dir}/u10_fc${jmemb}_eraint_${year}_orca1t_unrotated.nc ${dir}/v10_fc${jmemb}_eraint_${year}_orca1t_unrotated.nc
+
+done
+rm -f scrip_use scrip_use_in rotateUVorca namelist_rotateUVorca

+ 329 - 0
prep_nem_forcings/eraint/orca1/wndpert.sh

@@ -0,0 +1,329 @@
+#!/bin/ksh
+# NOTE(review): shebang was '#/bin/ksh' (missing '!'); fixed so the script can
+# also be executed directly instead of only via 'ksh wndpert.sh'.
+set -evx
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# This script computes daily perturbation from any NEMO forcing variable 
+# ('u10', 'v10', 't2', 'q2', 'qsw', 'qlw', 'snow', 'precip') by picking up
+# randomly monthly differences between ERAinterim and DFS4.3 forcing fields
+# and interpolating these to a daily frequency (see more details in Guemas
+# et al, 2014, Climate Dynamics)
+#
+# This script chases a fly with a flame thrower because it has been written
+# while testing many different options. It works but it should be better 
+# rewritten. 
+# 
+# History : Virginie Guemas - Initial version                         - 2012 
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 
+#                                Arguments
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+lstvars=( 'u10' 'v10' )
+             # Variables for which to compute and pick up random perturbations
+lstmbs=( 1 2 3 4 )
+             # List of members for which to build perturbation files
+dfs2era='T'  # Interpolation from DFS4 to ERA (T) or from ERA to DFS (F) 
+             # to build differences and perturbations
+patherain='/cfu/scratch/vguemas/ERAint/' 
+             # Path where to find the ERAinterim forcing files
+pathdfsin='/cfu/scratch/vguemas/dfs4.3/'
+             # Path where to find the DFS4.3 forcing files 
+year0=1979   # Initial year of the reference period to compute perturbations
+yearf=2006   # Last year of the reference period to compute perturbations
+year1=2012   # First year for which to pick and build a perturbation file
+year2=2014   # Last year for which  to pick and build a perturbation file
+nstep=1460   # Number of time steps for a non-leap year : 1460 (6-hourly) or
+             # 365 (daily). Beware: overwritten to 12 further down when
+             # monmean=T and pickup1=T.
+raw='F'      # Compute the timestep differences between raw ERA and DFS fields
+             # If the script has already run once and the differences have
+             # been stored, this option can be set to F for the step of picking
+             # up random perturbations.
+ano='F'      # Compute anomalies relative to a monthly-smoothed daily climatology
+             # and the timestep differences between anomalies of ERA and DFS fields
+             # raw=T needs to have run before. If the script has already run once and 
+             # the timesteps differences have been stored, this option can be set to F. 
+reorg='F'    # Reorganize the differences timestep per timestep all the years together
+             # rather than year per year with all the timesteps. Works on anomalies
+             # if wih is set to 'ano_' and raw differences if wih is set to '_'
+             # Once the computation of differences and their reorganization has been 
+             # performed once (raw=T, ano=T, reorg=T), the results are stored in directories
+             # named according to the options set and raw, ano and reorg can be set
+             # to F for the picking up.
+wih='ano_'   # Which types needs to be picked for the perturbations
+monmean='T'  # Compute monthly mean differences and pickup perturbations from monthly mean
+             # differences rather than timestep differences 
+pickup1='T'  # Pickup random perturbations for each member, variable, year, either
+             # one per timestep or one per month
+pickup2='F'  # Pickup 2 random perturbations and average them for each member, variable,
+             # year, each timestep 
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+# Derive the difference tag (minuend-subtrahend) and the storage-directory
+# prefix from the chosen interpolation direction.
+if [[ $dfs2era == T ]] ; then
+  dif='eraint-dfs4.3'
+  dir0='era-dfs'
+else
+  dif='dfs4.3-eraint'
+  dir0='dfs-era'
+fi
+
+for var in ${lstvars[@]} ; do
+  # ---- Stage raw=T: differences between raw ERA-Interim and DFS4.3 ---------
+  # ERA files are unpacked (ncpdq -U), the Feb-29 timesteps are cut out of
+  # leap years so every year has $nstep steps, the DFS file is given ERA's
+  # time axis, and one dataset is bicubically remapped onto the other's grid
+  # before subtracting.
+  if [[ $raw == T ]] ; then
+    for ((year=$year0;year<=$yearf;year++)) ; do
+      cp ${patherain}/${var}_eraint_${year}.nc ${var}_eraint_${year}.nc
+      ncpdq -O -U ${var}_eraint_${year}.nc ${var}_eraint_${year}.nc
+      if [[ $nstep == 1460 ]] ; then
+        # Leap years: drop the four 6-hourly timesteps of Feb 29
+        # (indices 233-236, 0-based).
+        case $year in
+          1980|1984|1988|1992|1996|2000|2004|2008|2012|2016) 
+          ncks -O -d time,0,232 ${var}_eraint_${year}.nc ${var}_eraint_${year}_1.nc
+          ncks -O -d time,237, ${var}_eraint_${year}.nc ${var}_eraint_${year}_2.nc
+          ncrcat -O ${var}_eraint_${year}_1.nc ${var}_eraint_${year}_2.nc ${var}_eraint_${year}.nc
+          rm -f ${var}_eraint_${year}_1.nc ${var}_eraint_${year}_2.nc      
+          ;;
+        esac
+      fi
+      if [[ $nstep == 365 ]] ; then
+        # Leap years, daily data: drop the single Feb-29 timestep (index 59).
+        case $year in
+          1980|1984|1988|1992|1996|2000|2004|2008|2012|2016)
+          ncks -O -d time,0,58 ${var}_eraint_${year}.nc ${var}_eraint_${year}_1.nc
+          ncks -O -d time,60, ${var}_eraint_${year}.nc ${var}_eraint_${year}_2.nc
+          ncrcat -O ${var}_eraint_${year}_1.nc ${var}_eraint_${year}_2.nc ${var}_eraint_${year}.nc
+          rm -f ${var}_eraint_${year}_1.nc ${var}_eraint_${year}_2.nc
+          ;;
+        esac
+      fi 
+      cp ${pathdfsin}/${var}_DFS4.3_${year}.nc ${var}_dfs4.3_${year}.nc
+      ncks -A -v time ${var}_eraint_${year}.nc ${var}_dfs4.3_${year}.nc
+      # DFS4.3 turbulent variables live on a r320x161 grid, radiative and
+      # freshwater variables on r192x94; declare the grid so cdo can remap.
+      case $var in
+        'u10'|'v10'|'t2'|'q2') cdo setgrid,r320x161 ${var}_dfs4.3_${year}.nc ${var}_dfs4.3_${year}_bis.nc ;;
+        'qsw'|'qlw'|'snow'|'precip') cdo setgrid,r192x94 ${var}_dfs4.3_${year}.nc ${var}_dfs4.3_${year}_bis.nc ;;
+      esac
+      ncks -O -x -v lat_2,lon_2 ${var}_dfs4.3_${year}_bis.nc ${var}_dfs4.3_${year}.nc
+      rm -f ${var}_dfs4.3_${year}_bis.nc
+      if [[ $dfs2era == T ]] ; then
+        cdo remapbic,r240x121 ${var}_dfs4.3_${year}.nc ${var}_dfs4.3_${year}_grideraint.nc
+        cdo invertlat ${var}_dfs4.3_${year}_grideraint.nc ${var}_dfs4.3_${year}_grideraint_bis.nc
+        rm -f ${var}_dfs4.3_${year}.nc ${var}_dfs4.3_${year}_grideraint.nc 
+        mv ${var}_dfs4.3_${year}_grideraint_bis.nc ${var}_dfs4.3_gridera_${year}.nc 
+        cdo sub ${var}_eraint_${year}.nc ${var}_dfs4.3_gridera_${year}.nc ${var}_eraint-dfs4.3_${year}.nc
+      else
+        case $var in
+          'u10'|'v10'|'t2'|'q2') cdo remapbic,r320x161 ${var}_eraint_${year}.nc ${var}_eraint_griddfs_${year}.nc ;;
+          'qsw'|'qlw'|'snow'|'precip') cdo remapbic,r192x94 ${var}_eraint_${year}.nc ${var}_eraint_griddfs_${year}.nc ;;
+        esac
+        rm -f ${var}_eraint_${year}.nc 
+        cdo sub ${var}_dfs4.3_${year}.nc ${var}_eraint_griddfs_${year}.nc ${var}_dfs4.3-eraint_${year}.nc
+      fi
+    done
+  fi
+  # ---- Stage ano=T: anomalies w.r.t. a monthly-smoothed daily climatology --
+  # Requires the per-year files produced by raw=T. The climatology is smoothed
+  # with a $length-step running mean computed on a doubled (wrapped) year so
+  # the smoothing has no edge effect at Jan 1 / Dec 31, then differences of
+  # anomalies are stored.
+  if [[ $ano == T ]] ; then
+    length=30
+    if [[ $dfs2era == T ]] ; then 
+      ncea -O -n $((${yearf}-${year0}+1)),4,1 ${var}_dfs4.3_gridera_${year0}.nc ${var}_dfs4.3_gridera_${year0}-${yearf}_clim.nc
+      ncea -O -n $((${yearf}-${year0}+1)),4,1 ${var}_eraint_${year0}.nc ${var}_eraint_${year0}-${yearf}_clim.nc
+      ncrcat -O ${var}_dfs4.3_gridera_${year0}-${yearf}_clim.nc ${var}_dfs4.3_gridera_${year0}-${yearf}_clim.nc ${var}_dfs4.3_gridera_${year0}-${yearf}_clim_bis.nc
+      ncrcat -O ${var}_eraint_${year0}-${yearf}_clim.nc ${var}_eraint_${year0}-${yearf}_clim.nc ${var}_eraint_${year0}-${yearf}_clim_bis.nc
+      cdo runmean,${length} ${var}_dfs4.3_gridera_${year0}-${yearf}_clim_bis.nc ${var}_dfs4.3_gridera_${year0}-${yearf}_clim_smoothed.nc
+      ncks -O -d time,$(($nstep+1)),$(($nstep+$length/2)) ${var}_dfs4.3_gridera_${year0}-${yearf}_clim_smoothed.nc ${var}_dfs4.3_gridera_${year0}-${yearf}_clim_smoothed_1.nc
+      ncks -O -d time,$(($length/2+1)),$nstep ${var}_dfs4.3_gridera_${year0}-${yearf}_clim_smoothed.nc ${var}_dfs4.3_gridera_${year0}-${yearf}_clim_smoothed_2.nc
+      ncrcat -O ${var}_dfs4.3_gridera_${year0}-${yearf}_clim_smoothed_1.nc ${var}_dfs4.3_gridera_${year0}-${yearf}_clim_smoothed_2.nc ${var}_dfs4.3_gridera_${year0}-${yearf}_clim.nc
+      rm -f ${var}_dfs4.3_gridera_${year0}-${yearf}_clim_bis.nc ${var}_dfs4.3_gridera_${year0}-${yearf}_clim_smoothed.nc ${var}_dfs4.3_gridera_${year0}-${yearf}_clim_smoothed_1.nc ${var}_dfs4.3_gridera_${year0}-${yearf}_clim_smoothed_2.nc
+      cdo runmean,${length} ${var}_eraint_${year0}-${yearf}_clim_bis.nc ${var}_eraint_${year0}-${yearf}_clim_smoothed.nc
+      ncks -O -d time,$(($nstep+1)),$(($nstep+$length/2)) ${var}_eraint_${year0}-${yearf}_clim_smoothed.nc ${var}_eraint_${year0}-${yearf}_clim_smoothed_1.nc
+      ncks -O -d time,$(($length/2+1)),$nstep ${var}_eraint_${year0}-${yearf}_clim_smoothed.nc ${var}_eraint_${year0}-${yearf}_clim_smoothed_2.nc
+      ncrcat -O ${var}_eraint_${year0}-${yearf}_clim_smoothed_1.nc ${var}_eraint_${year0}-${yearf}_clim_smoothed_2.nc ${var}_eraint_${year0}-${yearf}_clim.nc
+      rm -f ${var}_eraint_${year0}-${yearf}_clim_bis.nc ${var}_eraint_${year0}-${yearf}_clim_smoothed.nc ${var}_eraint_${year0}-${yearf}_clim_smoothed_1.nc ${var}_eraint_${year0}-${yearf}_clim_smoothed_2.nc
+      for ((year=$year0;year<=$yearf;year++)) ; do
+        cdo sub ${var}_eraint_${year}.nc ${var}_eraint_${year0}-${yearf}_clim.nc ${var}_ano_eraint_${year}.nc
+        cdo sub ${var}_dfs4.3_gridera_${year}.nc ${var}_dfs4.3_gridera_${year0}-${yearf}_clim.nc ${var}_ano_dfs4.3_gridera_${year}.nc
+        cdo sub ${var}_ano_eraint_${year}.nc ${var}_ano_dfs4.3_gridera_${year}.nc ${var}_ano_eraint-dfs4.3_${year}.nc
+        rm -f ${var}_eraint_${year}.nc ${var}_dfs4.3_gridera_${year}.nc ${var}_ano_eraint_${year}.nc ${var}_ano_dfs4.3_gridera_${year}.nc
+      done
+    else
+      ncea -O -n $((${yearf}-${year0}+1)),4,1 ${var}_dfs4.3_${year0}.nc ${var}_dfs4.3_${year0}-${yearf}_clim.nc
+      ncea -O -n $((${yearf}-${year0}+1)),4,1 ${var}_eraint_griddfs_${year0}.nc ${var}_eraint_griddfs_${year0}-${yearf}_clim.nc
+      ncrcat -O ${var}_dfs4.3_${year0}-${yearf}_clim.nc ${var}_dfs4.3_${year0}-${yearf}_clim.nc ${var}_dfs4.3_${year0}-${yearf}_clim_bis.nc
+      ncrcat -O ${var}_eraint_griddfs_${year0}-${yearf}_clim.nc ${var}_eraint_griddfs_${year0}-${yearf}_clim.nc ${var}_eraint_griddfs_${year0}-${yearf}_clim_bis.nc
+      cdo runmean,${length} ${var}_dfs4.3_${year0}-${yearf}_clim_bis.nc ${var}_dfs4.3_${year0}-${yearf}_clim_smoothed.nc
+      ncks -O -d time,$(($nstep+1)),$(($nstep+$length/2)) ${var}_dfs4.3_${year0}-${yearf}_clim_smoothed.nc ${var}_dfs4.3_${year0}-${yearf}_clim_smoothed_1.nc
+      ncks -O -d time,$(($length/2+1)),$nstep ${var}_dfs4.3_${year0}-${yearf}_clim_smoothed.nc ${var}_dfs4.3_${year0}-${yearf}_clim_smoothed_2.nc
+      ncrcat -O ${var}_dfs4.3_${year0}-${yearf}_clim_smoothed_1.nc ${var}_dfs4.3_${year0}-${yearf}_clim_smoothed_2.nc ${var}_dfs4.3_${year0}-${yearf}_clim.nc
+      rm -f ${var}_dfs4.3_${year0}-${yearf}_clim_bis.nc ${var}_dfs4.3_${year0}-${yearf}_clim_smoothed.nc ${var}_dfs4.3_${year0}-${yearf}_clim_smoothed_1.nc ${var}_dfs4.3_${year0}-${yearf}_clim_smoothed_2.nc
+      cdo runmean,${length} ${var}_eraint_griddfs_${year0}-${yearf}_clim_bis.nc ${var}_eraint_griddfs_${year0}-${yearf}_clim_smoothed.nc
+      ncks -O -d time,$(($nstep+1)),$(($nstep+$length/2)) ${var}_eraint_griddfs_${year0}-${yearf}_clim_smoothed.nc ${var}_eraint_griddfs_${year0}-${yearf}_clim_smoothed_1.nc
+      ncks -O -d time,$(($length/2+1)),$nstep ${var}_eraint_griddfs_${year0}-${yearf}_clim_smoothed.nc ${var}_eraint_griddfs_${year0}-${yearf}_clim_smoothed_2.nc
+      ncrcat -O ${var}_eraint_griddfs_${year0}-${yearf}_clim_smoothed_1.nc ${var}_eraint_griddfs_${year0}-${yearf}_clim_smoothed_2.nc ${var}_eraint_griddfs_${year0}-${yearf}_clim.nc
+      rm -f ${var}_eraint_griddfs_${year0}-${yearf}_clim_bis.nc ${var}_eraint_griddfs_${year0}-${yearf}_clim_smoothed.nc ${var}_eraint_griddfs_${year0}-${yearf}_clim_smoothed_1.nc ${var}_eraint_griddfs_${year0}-${yearf}_clim_smoothed_2.nc
+      for ((year=$year0;year<=$yearf;year++)) ; do
+        cdo sub ${var}_eraint_griddfs_${year}.nc ${var}_eraint_griddfs_${year0}-${yearf}_clim.nc ${var}_ano_eraint_griddfs_${year}.nc
+        cdo sub ${var}_dfs4.3_${year}.nc ${var}_dfs4.3_${year0}-${yearf}_clim.nc ${var}_ano_dfs4.3_${year}.nc
+        cdo sub ${var}_ano_dfs4.3_${year}.nc ${var}_ano_eraint_griddfs_${year}.nc ${var}_ano_dfs4.3-eraint_${year}.nc
+        rm -f ${var}_eraint_griddfs_${year}.nc ${var}_dfs4.3_${year}.nc ${var}_ano_eraint_griddfs_${year}.nc ${var}_ano_dfs4.3_${year}.nc
+      done
+    fi
+  fi
+  # ---- Stage reorg=T: regroup the differences per timestep across years ----
+  # One file per timestep index jt, holding that timestep for all years of
+  # the reference period; results are archived in ${wih}${dir0}/.
+  if [[ $reorg == T ]] ; then
+    for ((jt=1;jt<=$nstep;jt++)) ; do
+      ncrcat -O  -d time,$((jt-1)),,$nstep -n $((yearf-year0+1)),4,1 ${var}_${wih}${dif}_${year0}.nc ${var}_${wih}${dif}_${year0}-${yearf}_$jt.nc
+    done
+    mkdir -p ${wih}${dir0} 
+    mv ${var}_${wih}${dif}_* ${wih}${dir0}/.
+  fi
+  # Clean up the per-year intermediate files when only raw differences were
+  # computed (ano=T removes them itself).
+  if [[ $raw == T && $ano == F ]] ; then
+    for ((year=$year0;year<=$yearf;year++)) ; do
+      if [[ $dfs2era == T ]] ; then
+        rm -f ${var}_eraint_${year}.nc ${var}_dfs4.3_gridera_${year}.nc
+      else
+        rm -f ${var}_eraint_griddfs_${year}.nc ${var}_dfs4.3_${year}.nc
+      fi
+    done
+  fi
+
+  # ---- Stage monmean=T: monthly means of the reorganized differences -------
+  # Averages the per-timestep files over each calendar month (Feb 29 never
+  # appears since leap days were removed above).
+  if [[ $monmean == T ]] ; then
+    jt0=1
+    for ((mon=1;mon<=12;mon++)) ; do
+      case $mon in 
+        1|3|5|7|8|10|12) ndays=31 ;;
+        4|6|9|11) ndays=30 ;;    
+        2) ndays=28 ;;
+      esac
+      lstfiles=""
+      case $nstep in 
+        1460) ntimes=$((ndays*4+jt0-1)) ;;
+        365) ntimes=$((ndays+jt0-1)) ;;
+        12) ntimes=1 ;;
+      esac
+      for ((jt=$jt0;jt<=$ntimes;jt++)) ;do
+        lstfiles=$lstfiles" "${wih}${dir0}/${var}_${wih}${dif}_${year0}-${yearf}_$jt.nc
+      done
+      ncea -O $lstfiles ${wih}${dir0}/${var}_${wih}${dif}_${year0}-${yearf}_mon$(printf "%02d" ${mon}).nc
+      case $nstep in
+        1460) jt0=$((${jt0}+ndays*4)) ;;
+        365) jt0=$((${jt0}+ndays)) ;;
+        12) jt0=$((${jt0}+1)) ;;
+      esac
+    done
+  fi
+done
+
+# ---- pickup1: one random perturbation per member/variable/year/timestep ----
+# The same random year index is reused across all variables of a given
+# (member, year, timestep) so that e.g. u10 and v10 stay physically
+# consistent with each other.
+if [[ $pickup1 == T ]] ; then
+  if [[ $monmean == T ]] ; then
+    dirpert=perturb1_${dir0}_mon
+    # WARNING: nstep is overwritten here; every section below this point
+    # (including pickup2 and the concatenation loops) sees nstep=12.
+    nstep=12
+  else
+    dirpert=perturb1_${dir0}
+  fi
+  mkdir -p $dirpert
+  RANGE=$((${yearf}-${year0}+1))
+  for ((jt=1;jt<=$nstep;jt++)) ; do
+    for memb in ${lstmbs[@]} ; do
+      for ((year=$year1;year<=$year2;year++)) ; do
+        # Random year index in [0, RANGE-1] of the reference period.
+        number=$((RANDOM%${RANGE}))
+        for var in ${lstvars[@]} ; do
+          if [[ $monmean == T ]] ; then
+            filein=${var}_${wih}${dif}_${year0}-${yearf}_mon$(printf "%02d" ${jt}).nc
+          else
+            filein=${var}_${wih}${dif}_${year0}-${yearf}_$jt.nc
+          fi
+          ncks -O -d time,$number ${wih}${dir0}/${filein} ${dirpert}/${var}_fc${memb}_${year}_$(printf "%04d" $jt).nc
+        done
+      done
+    done
+  done
+fi
+
+# ---- pickup2: average of two random perturbations per timestep -------------
+# NOTE(review): this branch has no monmean variant, and if pickup1 ran with
+# monmean=T above, nstep has already been overwritten to 12 — presumably
+# pickup1 and pickup2 are not meant to be enabled together; verify before
+# combining them.
+if [[ $pickup2 == T ]] ; then
+  mkdir -p perturb2_${dir0}
+  RANGE=$((${yearf}-${year0}+1))
+  for ((jt=1;jt<=$nstep;jt++)) ; do
+    for memb in ${lstmbs[@]} ; do
+      for ((year=$year1;year<=$year2;year++)) ; do
+        # Draw two independent random years, then average the two picks.
+        for ((jlaunch=1;jlaunch<=2;jlaunch++)) ; do
+          number=$((RANDOM%${RANGE}))
+          for var in ${lstvars[@]} ; do
+            ncks -O -d time,$number ${wih}${dir0}/${var}_${wih}${dif}_${year0}-${yearf}_$jt.nc perturb2_${dir0}/${var}_fc${memb}_${year}_$(printf "%04d" $jt)_$jlaunch.nc
+          done
+        done
+        for var in ${lstvars[@]} ; do
+          ncea -O perturb2_${dir0}/${var}_fc${memb}_${year}_$(printf "%04d" $jt)_1.nc perturb2_${dir0}/${var}_fc${memb}_${year}_$(printf "%04d" $jt)_2.nc perturb2_${dir0}/${var}_fc${memb}_${year}_$(printf "%04d" $jt).nc
+          rm -f perturb2_${dir0}/${var}_fc${memb}_${year}_$(printf "%04d" $jt)_1.nc perturb2_${dir0}/${var}_fc${memb}_${year}_$(printf "%04d" $jt)_2.nc
+        done
+      done
+    done
+  done
+fi
+
+# ---- Concatenate per-timestep picks into one yearly file per member --------
+# For non-monthly ERA-grid perturbations, leap years are re-expanded from
+# 365/1460 steps back to 366/1464 by duplicating the timesteps around Feb 29
+# (a perturbation for Feb 29 was never computed — leap days were removed when
+# building the differences).
+if [[ $pickup1 == T || $pickup2 == T ]] ; then
+  lstdirs=""
+  if [[ $pickup1 == T ]]  ; then
+    if [[ $monmean == T ]] ; then
+      lstdirs=$lstdirs" "perturb1_${dir0}_mon
+    else
+      lstdirs=$lstdirs" "perturb1_${dir0}
+    fi
+  fi
+  if [[ $pickup2 == T ]]  ; then
+    lstdirs=$lstdirs" "perturb2_${dir0}
+  fi
+  for dir in $lstdirs ; do
+  for memb in ${lstmbs[@]} ; do
+    for ((year=$year1;year<=$year2;year++)) ; do
+      for var in ${lstvars[@]} ; do
+        ncrcat -O -n $nstep,4,1 ${dir}/${var}_fc${memb}_${year}_0001.nc ${dir}/${var}_fc${memb}_${year}.nc
+        rm -f ${dir}/${var}_fc${memb}_${year}_*.nc
+        if [[ $monmean == T ]] ; then
+          # Give the 12 monthly picks a proper monthly time axis.
+          cdo settaxis,${year}-01-15,12:00,1mon ${dir}/${var}_fc${memb}_${year}.nc ${dir}/${var}_fc${memb}_${year}_bis.nc
+          rm ${dir}/${var}_fc${memb}_${year}.nc 
+          mv ${dir}/${var}_fc${memb}_${year}_bis.nc ${dir}/${var}_fc${memb}_${year}.nc
+        fi
+        if [[ $dfs2era == T && $monmean == F ]] ; then
+          case $year in 
+            1980|1984|1988|1992|1996|2000|2004|2008|2012|2016|2020)
+            case $nstep in
+             1460)
+              ncks -O -d time,0,232 ${dir}/${var}_fc${memb}_${year}.nc ${dir}/${var}_fc${memb}_${year}_1.nc
+              ncks -O -d time,229,232 ${dir}/${var}_fc${memb}_${year}.nc ${dir}/${var}_fc${memb}_${year}_2.nc
+              ncks -O -d time,233, ${dir}/${var}_fc${memb}_${year}.nc ${dir}/${var}_fc${memb}_${year}_3.nc
+              ;;
+             365)
+              ncks -O -d time,0,58 ${dir}/${var}_fc${memb}_${year}.nc ${dir}/${var}_fc${memb}_${year}_1.nc
+              ncks -O -d time,58 ${dir}/${var}_fc${memb}_${year}.nc ${dir}/${var}_fc${memb}_${year}_2.nc
+              ncks -O -d time,59, ${dir}/${var}_fc${memb}_${year}.nc ${dir}/${var}_fc${memb}_${year}_3.nc
+              ;;
+            esac
+            ncrcat -O ${dir}/${var}_fc${memb}_${year}_1.nc ${dir}/${var}_fc${memb}_${year}_2.nc ${dir}/${var}_fc${memb}_${year}_3.nc ${dir}/${var}_fc${memb}_${year}.nc
+            rm -f ${dir}/${var}_fc${memb}_${year}_1.nc ${dir}/${var}_fc${memb}_${year}_2.nc ${dir}/${var}_fc${memb}_${year}_3.nc
+          esac
+        fi
+      done
+    done
+  done
+  done
+fi
+     
+# ---- Interpolate monthly perturbations to a 6-hourly forcing frequency -----
+# All years of a member are concatenated, cdo inttime linearly interpolates
+# the monthly values to 6-hourly, the result is split back per year, and —
+# on the DFS grid only — leap years are re-expanded around Feb 29 as above.
+if [[ $monmean == T ]] ; then
+  for memb in ${lstmbs[@]} ; do
+    for var in ${lstvars[@]} ; do
+      ncrcat -O -n $((${year2}-${year1}+1)),4,1 perturb1_${dir0}_mon/${var}_fc${memb}_${year1}.nc perturb1_${dir0}_mon/${var}_fc${memb}_${year1}-${year2}.nc
+      cdo inttime,${year1}-01-15,12:00,6hour perturb1_${dir0}_mon/${var}_fc${memb}_${year1}-${year2}.nc perturb1_${dir0}_mon/${var}_fc${memb}_${year1}-${year2}_6hourly.nc
+      rm -f perturb1_${dir0}_mon/${var}_fc${memb}_${year1}-${year2}.nc
+      cdo splityear perturb1_${dir0}_mon/${var}_fc${memb}_${year1}-${year2}_6hourly.nc perturb1_${dir0}_mon/${var}_fc${memb}_
+      rm -f perturb1_${dir0}_mon/${var}_fc${memb}_${year1}-${year2}_6hourly.nc
+      if [[ $dfs2era == F ]] ; then
+        lstyears=( 1960 1964 1968 1972 1976 1980 1984 1988 1992 1996 2000 2004 2008 2012 2016 2020 )
+        for year in ${lstyears[@]} ; do
+          if [[ -e perturb1_${dir0}_mon/${var}_fc${memb}_${year}.nc ]] ; then 
+            case $nstep in
+             1460)
+              ncks -O -d time,0,232 perturb1_${dir0}_mon/${var}_fc${memb}_${year}.nc perturb1_${dir0}_mon/${var}_fc${memb}_${year}_1.nc
+              ncks -O -d time,237, perturb1_${dir0}_mon/${var}_fc${memb}_${year}.nc perturb1_${dir0}_mon/${var}_fc${memb}_${year}_2.nc
+              ;;
+             365)
+              ncks -O -d time,0,58 perturb1_${dir0}_mon/${var}_fc${memb}_${year}.nc perturb1_${dir0}_mon/${var}_fc${memb}_${year}_1.nc
+              ncks -O -d time,60, perturb1_${dir0}_mon/${var}_fc${memb}_${year}.nc perturb1_${dir0}_mon/${var}_fc${memb}_${year}_2.nc
+              ;;
+            esac
+            ncrcat -O perturb1_${dir0}_mon/${var}_fc${memb}_${year}_1.nc perturb1_${dir0}_mon/${var}_fc${memb}_${year}_2.nc perturb1_${dir0}_mon/${var}_fc${memb}_${year}.nc
+            rm -f perturb1_${dir0}_mon/${var}_fc${memb}_${year}_1.nc perturb1_${dir0}_mon/${var}_fc${memb}_${year}_2.nc  
+          fi
+        done
+      fi
+    done
+  done
+fi
+ 

+ 20 - 0
prep_restarts/README

@@ -0,0 +1,20 @@
+In this directory, you can find:
+
+1) a script to interpolate and extrapolate vertically 
+ocean restarts to a new grid and to fill empty seas
+with a climatology : 
+script_interp_vert_ocean_restart.bash 
+
+2) a script to build configuration files for the
+script_interp_vert_ocean_restart.bash script:
+build_Tofill.bash 
+(see instructions in script_interp_vert_ocean_restart.bash)
+
+3) a script to compute a climatology of restarts:
+script_clim_restart.sh
+
+4) a library of python tools used in 
+script_interp_vert_ocean_restart.bash
+
+5) an example script of how to use 
+script_interp_vert_ocean_restart.bash

+ 51 - 0
prep_restarts/build_Tofill.bash

@@ -0,0 +1,51 @@
+#!/bin/bash
+set -evx 
+# 
+# This script prepares the Tofill.nc file to be split afterwards between
+# Tofill.clim.nc and Tofill.extrap.nc
+#
+# History : Virginie Guemas - Initial version                     March 2014  
+#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#                                Arguments
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+restart=$1 # Output restart after skipping the extrapolation and filling of
+           # empty seas to be able to compute Tofill.nc
+meshmask=$2 # Mask of the grid that restart should have after extrapolation 
+            # and filling of empty seas
+Tofill=$3 # Name of the output Tofill.nc file
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+lstmasks=('tmask' 'umask' 'vmask' 'fmask')
+
+# Build a missing-value indicator from the restart: 1 where the restart field
+# is missing (value > 1e18 once missing values are set to 1e19), 0 elsewhere.
+cdo setmisstoc,1e19 "$restart" tmp1.nc
+cdo gtc,1e18 tmp1.nc tmp2.nc
+
+for mask in "${lstmasks[@]}" ; do
+  # Each grid mask is diagnosed from the prognostic variable on that grid.
+  case $mask in
+    'tmask' ) var='tn';;
+    'umask' ) var='un';;
+    'vmask' ) var='vn';;
+    'fmask' ) var='rotn';;
+  esac
+
+  cdo selvar,"$var" tmp2.nc "tmp2_${var}.nc"
+  # A point must be filled where the restart is missing (indicator = 1) AND
+  # the target mesh says ocean (mask = 1): indicator + mask > 1.5.
+  cdo add -chvar,"$var","$mask" "tmp2_${var}.nc" -selvar,"$mask" "$meshmask" sum.nc
+  cdo gtc,1.5 sum.nc "Tofill_${mask}.nc"
+  rm -f "tmp2_${var}.nc" sum.nc
+
+  # Append this mask to the output file if it exists, otherwise create it.
+  if [[ -e "$Tofill" ]] ; then
+    ncks -A "Tofill_${mask}.nc" "$Tofill"
+    rm -f "Tofill_${mask}.nc"
+  else
+    mv "Tofill_${mask}.nc" "$Tofill"
+  fi
+
+done
+rm -f tmp1.nc tmp2.nc
+
+echo "You have just built Tofill.nc : $Tofill"
+echo "Now you can use :"
+echo "    1) cdo setcindexbox,c,idx1,idx2,idy1,idy2 ifile ofile to set seas at 0"
+echo "       and build Tofill.extrap.nc"
+echo "    2) cdo sub to get Tofill.clim.nc from Tofill.nc and Tofill.extrap.nc"

+ 27 - 0
prep_restarts/comp_rhop/check_stat.f90

@@ -0,0 +1,27 @@
+SUBROUTINE check_stat(Kstat,Hmessage)
+!
+! ============================================================================
+! This routine checks whether a NetCDF library call returned an error and, in
+! case of error, prints the NetCDF error string followed by a caller-supplied
+! context message, then stops the program.
+!
+! Arguments :
+!   Kstat    - status code returned by a NetCDF F77 API call
+!   Hmessage - context message identifying the operation that failed
+!
+! Author  : Virginie Guemas
+! Created : January 2007
+! ============================================================================
+
+      IMPLICIT NONE 
+
+      INCLUDE'netcdf.inc'
+      INTEGER,INTENT(IN)      :: Kstat
+      ! Assumed-length dummy (LEN=*) instead of the former CHARACTER*60 :
+      ! callers pass character literals of various lengths, which does not
+      ! conform with a fixed length-60 dummy argument.
+      CHARACTER(LEN=*),INTENT(IN) :: Hmessage 
+!
+! ============================================================================
+!
+      IF ( Kstat /= NF_NOERR ) THEN
+        PRINT*,NF_STRERROR(Kstat)      
+        PRINT*,'ERROR in Netcdf: '//TRIM(Hmessage)
+        STOP
+      END IF    
+!
+! ============================================================================
+!
+END SUBROUTINE check_stat

+ 143 - 0
prep_restarts/comp_rhop/comp_rhop.f90

@@ -0,0 +1,143 @@
+program comp_rhop
+!
+! =============================================================================
+! 
+!  This program computes the potential density rhop based on the equation 
+!       used in NEMO. Beware that the configuration is hard-coded. LX, LY
+!       LZ, LT need to be changed for each new configuration and the program
+!       needs to be recompiled with the makefile provided.
+!
+!  Usage ./comp_rhop with a namelist called namelist_rhop:
+!              &density 
+!                input_file = ''
+!                input_Tvar = ''
+!                input_Svar = ''
+!                input_Dvar = ''
+!                outfile    = ''
+!              /
+! 
+!  History : Virginie Guemas - Initial version PhD tools       - February 2008
+!                            - Namelist and remove hard-coding - April    2014
+!  
+! =============================================================================
+!
+      USE eosbn2        ! Routine from NEMO to compute rhop
+      IMPLICIT NONE
+      INCLUDE 'netcdf.inc'
+
+      CHARACTER(80) :: &
+        & input_file, & ! Input netcdf file name
+        & input_Tvar, & ! Input netcdf potential temperature variable name
+        & input_Svar, & ! Input netcdf salinity variable name
+        & input_Dvar, & ! Input netcdf depth variable name
+        & outfile       ! Output netcdf file name
+
+      NAMELIST/density/input_file,input_Tvar,input_Svar,input_Dvar,outfile
+      
+      INTEGER,PARAMETER :: inam=1
+      INTEGER :: ncid, varid, istat
+      INTEGER,DIMENSION(4) :: dimoutids
+      INTEGER,PARAMETER :: LX=1442,LY=1021,LZ=46,LT=1
+        ! These dimensions need to be changed also in eosbn2.f90 for each new
+        ! configuration
+
+      DOUBLE PRECISION,DIMENSION(LX,LY,LZ,LT)   :: PT,PS
+      DOUBLE PRECISION,DIMENSION(LZ)            :: PD
+      DOUBLE PRECISION,DIMENSION(LX,LY,LZ,LT)   :: PRHO
+      DOUBLE PRECISION,DIMENSION(LX,LY,LZ)      :: MASK
+      DOUBLE PRECISION,DIMENSION(LX,LY,LZ,LT)   :: Zrho_smow,PO
+      INTEGER :: JI,JJ,JK,JT  
+      
+!
+! =============================================================================
+!
+! Read the namelist
+! ==================
+
+      OPEN(UNIT=inam,FILE='namelist_rhop',FORM='FORMATTED',ACCESS=&
+           &'SEQUENTIAL',STATUS='OLD',IOSTAT=istat)
+      IF (istat /=0 ) THEN
+        PRINT*,'IOSTAT = ',istat
+        STOP"Problem opening namelist_rhop"
+      END IF
+
+      REWIND(inam)
+      READ(UNIT=inam,NML=density)
+
+
+! Read input thermodynamic fields
+! =================================
+!
+      istat = NF_OPEN (input_file,0,ncid )
+      call check_stat(istat,'Opening input file')
+
+      istat = NF_INQ_VARID(ncid,input_Tvar,varid)
+      call check_stat(istat,'Finding input temperature variable')
+
+      istat = NF_GET_VAR_DOUBLE(ncid,varid,PT)
+      call check_stat(istat,'Reading input temperature')
+
+      istat = NF_INQ_VARID(ncid,input_Svar,varid)
+      call check_stat(istat,'Finding input salinity variable')
+
+      ! Bug fix: the error message previously said 'Reading input temperature'
+      ! here, which made diagnosing a failed salinity read misleading.
+      istat = NF_GET_VAR_DOUBLE(ncid,varid,PS)
+      call check_stat(istat,'Reading input salinity')
+
+      istat = NF_INQ_VARID(ncid,input_Dvar,varid)
+      call check_stat(istat,'Finding input depth variable')
+
+      istat = NF_GET_VAR_DOUBLE(ncid,varid,PD)
+      call check_stat(istat,'Reading input depth')
+
+      istat = NF_CLOSE ( ncid )
+      call check_stat(istat,'Closing input file')
+!
+! compute potential density
+!   
+      ! Land-sea mask diagnosed from the first time level of temperature:
+      ! points flagged with a missing value (>1e19 or <-9e30) are masked out.
+      MASK = 1.
+      WHERE(PT(:,:,:,1) > 1e19 .or. PT(:,:,:,1)< -9e30)
+        MASK = 0.
+      END WHERE
+     
+      DO JT = 1,LT
+        call eos_insitu_pot( PT(:,:,:,JT), PS(:,:,:,JT), PD, MASK, &
+        &PRHO(:,:,:,JT) )
+        ! Re-flag masked points with the output missing value.
+        WHERE (MASK < 0.5)
+          PRHO(:,:,:,JT) = 1e20
+        END WHERE
+      END DO
+!
+! Write outputs
+! ==============
+!
+      ! NF_NOCLOBBER: creation fails if outfile already exists.
+      istat = NF_CREATE(outfile,NF_NOCLOBBER,ncid)
+      call check_stat(istat,'Opening output file')
+
+      istat = NF_DEF_DIM(ncid,'x',LX,dimoutids(1))
+      call check_stat(istat,'Defining x dimension in output file')
+
+      istat = NF_DEF_DIM(ncid,'y',LY,dimoutids(2))
+      call check_stat(istat,'Defining y dimension in output file')
+
+      istat = NF_DEF_DIM(ncid,'z',LZ,dimoutids(3))
+      call check_stat(istat,'Defining z dimension in output file')
+
+      istat = NF_DEF_DIM(ncid,'t',LT,dimoutids(4))
+      call check_stat(istat,'Defining t dimension in output file')
+
+      istat = NF_DEF_VAR(ncid,'rhop',NF_DOUBLE,4,dimoutids,varid)
+      call check_stat(istat,'Defining rhop variable in output file')
+
+      istat = NF_PUT_ATT_DOUBLE(ncid,varid,'missing_value',NF_DOUBLE,1,DBLE(1e20))
+      call check_stat(istat,'Defining missing value for rhop')
+
+      istat = NF_ENDDEF(ncid)
+      call check_stat(istat,'Closing output file definition')
+
+      istat = NF_PUT_VAR_DOUBLE(ncid,varid,PRHO)
+      call check_stat(istat,'Writing rhop in output file')
+
+      istat = NF_CLOSE ( ncid )
+      call check_stat(istat,'Closing output file')
+
+end program comp_rhop

+ 185 - 0
prep_restarts/comp_rhop/eosbn2.f90

@@ -0,0 +1,185 @@
MODULE eosbn2
   !!==============================================================================
   !!                       ***  MODULE  eosbn2  ***
   !! Ocean diagnostic variable : equation of state - in situ and potential density
   !!                                               - Brunt-Vaisala frequency 
   !!==============================================================================
   !! History :  OPA  ! 1989-03  (O. Marti)  Original code
   !!            6.0  ! 1994-07  (G. Madec, M. Imbard)  add bn2
   !!            6.0  ! 1994-08  (G. Madec)  Add Jackett & McDougall eos
   !!            7.0  ! 1996-01  (G. Madec)  statement function for e3
   !!            8.1  ! 1997-07  (G. Madec)  density instead of volumic mass
   !!             -   ! 1999-02  (G. Madec, N. Grima) semi-implicit pressure gradient
   !!            8.2  ! 2001-09  (M. Ben Jelloul)  bugfix on linear eos
   !!   NEMO     1.0  ! 2002-10  (G. Madec)  add eos_init
   !!             -   ! 2002-11  (G. Madec, A. Bozec)  partial step, eos_insitu_2d
   !!             -   ! 2003-08  (G. Madec)  F90, free form
   !!            3.0  ! 2006-08  (G. Madec)  add tfreez function
   !!            3.3  ! 2010-05  (C. Ethe, G. Madec)  merge TRC-TRA
   !!             -   ! 2010-10  (G. Nurser, G. Madec)  add eos_alpbet used in ldfslp
   !!----------------------------------------------------------------------

   !!----------------------------------------------------------------------
   !!   eos            : generic interface of the equation of state
   !!   eos_insitu     : Compute the in situ density
   !!   eos_insitu_pot : Compute the insitu and surface referenced potential
   !!                    volumic mass
   !!   eos_insitu_2d  : Compute the in situ density for 2d fields
   !!   eos_bn2        : Compute the Brunt-Vaisala frequency
   !!   eos_alpbet     : calculates the in situ thermal and haline expansion coeff.
   !!   tfreez         : Compute the surface freezing temperature
   !!   eos_init       : set eos parameters (namelist)
   !!----------------------------------------------------------------------
   ! NOTE(review): this is a standalone extract of the NEMO module: only
   ! eos_insitu_pot is actually present below, and the NEMO infrastructure
   ! modules are therefore commented out.
!   USE dom_oce         ! ocean space and time domain
!   USE phycst          ! physical constants
!   USE zdfddm          ! vertical physics: double diffusion
!   USE in_out_manager  ! I/O manager
!   USE lib_mpp         ! MPP library
!   USE prtctl          ! Print control

   IMPLICIT NONE

   !                   !! * Interface 
   !                                          !!* Namelist (nameos) *
   INTEGER ::   nn_eos   = 0         !: = 0/1/2 type of eq. of state and Brunt-Vaisala frequ.
   DOUBLE PRECISION ::   rn_alpha = 2.0e-4    !: thermal expension coeff. (linear equation of state)
   DOUBLE PRECISION ::   rn_beta  = 7.7e-4    !: saline  expension coeff. (linear equation of state)

   DOUBLE PRECISION ::   ralpbet              !: alpha / beta ratio
   ! NOTE(review): grid sizes are hard-coded (1442 x 1021 horizontal points,
   ! 46 levels -- presumably an ORCA025 L46 configuration; confirm against
   ! the mesh of the files fed to comp_rhop before reusing this module).
   INTEGER, PARAMETER :: jpi=1442, jpj=1021, jpk=46, jpts=2
   DOUBLE PRECISION :: rau0 = 1035    ! reference volumic mass [kg/m3]
   
CONTAINS

   SUBROUTINE eos_insitu_pot( pt, ps, pd, pmask, prhop )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE eos_insitu_pot  ***
      !!           
      !! ** Purpose :   Compute the in situ density (ratio rho/rau0) and the
      !!      potential volumic mass (Kg/m3) from potential temperature and
      !!      salinity fields using an equation of state defined through the 
      !!     namelist parameter nn_eos.
      !!
      !! ** Method  :
      !!      nn_eos = 0 : Jackett and McDougall (1994) equation of state.
      !!         the in situ density is computed directly as a function of
      !!         potential temperature relative to the surface (the opa t
      !!         variable), salt and pressure (assuming no pressure variation
      !!         along geopotential surfaces, i.e. the pressure p in decibars
      !!         is approximated by the depth in meters.
      !!              prd(t,s,p) = ( rho(t,s,p) - rau0 ) / rau0
      !!              rhop(t,s)  = rho(t,s,0)
      !!         with pressure                      p        decibars
      !!              potential temperature         t        deg celsius
      !!              salinity                      s        psu
      !!              reference volumic mass        rau0     kg/m**3
      !!              in situ volumic mass          rho      kg/m**3
      !!              in situ density anomalie      prd      no units
      !!
      !!         Check value: rho = 1060.93298 kg/m**3 for p=10000 dbar,
      !!          t = 40 deg celcius, s=40 psu
      !!
      !!      nn_eos = 1 : linear equation of state function of temperature only
      !!              prd(t) = ( rho(t) - rau0 ) / rau0 = 0.028 - rn_alpha * t
      !!              rhop(t,s)  = rho(t,s)
      !!
      !!      nn_eos = 2 : linear equation of state function of temperature and
      !!               salinity
      !!              prd(t,s) = ( rho(t,s) - rau0 ) / rau0 
      !!                       = rn_beta * s - rn_alpha * tn - 1.
      !!              rhop(t,s)  = rho(t,s)
      !!      Note that no boundary condition problem occurs in this routine
      !!      as (tn,sn) or (ta,sa) are defined over the whole domain.
      !!
      !! ** Action  : - prd  , the in situ density (no units)
      !!              - prhop, the potential volumic mass (Kg/m3)
      !!
      !! References :   Jackett and McDougall, J. Atmos. Ocean. Tech., 1994
      !!                Brown and Campana, Mon. Weather Rev., 1978
      !!----------------------------------------------------------------------
      !!
      DOUBLE PRECISION, DIMENSION(jpi,jpj,jpk     ), INTENT(   in) ::   pt    !  potential temperature  [Celcius]
      DOUBLE PRECISION, DIMENSION(jpi,jpj,jpk     ), INTENT(   in) ::   ps    !  salinity               [psu]
      DOUBLE PRECISION, DIMENSION(jpi,jpj,jpk     ), INTENT(   in) ::   pmask ! land/sea mask (0 on land, 1 at sea)
      DOUBLE PRECISION, DIMENSION(jpk             ), INTENT(   in) ::   pd    ! depth of each level [m], used as pressure in dbar
      DOUBLE PRECISION, DIMENSION(jpi,jpj,jpk     ), INTENT(  out) ::   prhop  ! potential density (surface referenced)
      !
      INTEGER  ::   ji, jj, jk   ! dummy loop indices
      ! NOTE(review): zrd (the in situ density anomaly) is computed in every
      ! branch below but never returned -- only prhop is INTENT(out).  It is
      ! dead code kept from the original NEMO routine, where prd was an
      ! additional output.
      DOUBLE PRECISION, DIMENSION(jpi,jpj,jpk     ) ::   zrd    ! in situ density            [-]
      DOUBLE PRECISION, DIMENSION(jpi,jpj,jpk     ) :: zws      ! SQRT(ABS(salinity)), precomputed once
      DOUBLE PRECISION ::   zt, zs, zh, zsr, zr1, zr2, zr3, zr4, zrhop, ze, zbw   ! local scalars
      DOUBLE PRECISION ::   zb, zd, zc, zaw, za, zb1, za1, zkw, zk0, zrau0r       !   -      -
      !!----------------------------------------------------------------------

      SELECT CASE ( nn_eos )
      !
      CASE( 0 )                !==  Jackett and McDougall (1994) formulation  ==!
         zrau0r = 1.e0 / rau0
!CDIR NOVERRCHK
         zws(:,:,:) = SQRT( ABS( ps(:,:,:) ) )
         !  
         ! Loop over all levels except the last (bottom) one, which is left
         ! untouched in prhop.
         DO jk = 1, (jpk-1)
            DO jj = 1, jpj
               DO ji = 1, jpi
                  zt = pt    (ji,jj,jk)
                  zs = ps    (ji,jj,jk)
                  zh = pd    (jk)        ! depth
                  zsr= zws   (ji,jj,jk)        ! square root salinity
                  !
                  ! compute volumic mass pure water at atm pressure
                  zr1= ( ( ( ( 6.536332e-9*zt-1.120083e-6 )*zt+1.001685e-4 )*zt   &
                     &                          -9.095290e-3 )*zt+6.793952e-2 )*zt+999.842594
                  ! seawater volumic mass atm pressure
                  zr2= ( ( ( 5.3875e-9*zt-8.2467e-7 ) *zt+7.6438e-5 ) *zt   &
                     &                                         -4.0899e-3 ) *zt+0.824493
                  zr3= ( -1.6546e-6*zt+1.0227e-4 )    *zt-5.72466e-3
                  zr4= 4.8314e-4
                  !
                  ! potential volumic mass (reference to the surface)
                  zrhop= ( zr4*zs + zr3*zsr + zr2 ) *zs + zr1
                  !
                  ! save potential volumic mass
                  prhop(ji,jj,jk) = zrhop * pmask(ji,jj,jk)
                  !
                  ! add the compression terms
                  ze = ( -3.508914e-8*zt-1.248266e-8 ) *zt-2.595994e-6
                  zbw= (  1.296821e-6*zt-5.782165e-9 ) *zt+1.045941e-4
                  zb = zbw + ze * zs
                  !
                  zd = -2.042967e-2
                  zc =   (-7.267926e-5*zt+2.598241e-3 ) *zt+0.1571896
                  zaw= ( ( 5.939910e-6*zt+2.512549e-3 ) *zt-0.1028859 ) *zt - 4.721788
                  za = ( zd*zsr + zc ) *zs + zaw
                  !
                  zb1=   (  -0.1909078  *zt+7.390729    ) *zt-55.87545
                  za1= ( (   2.326469e-3*zt+1.553190    ) *zt-65.00517 ) *zt + 1044.077
                  zkw= ( ( (-1.361629e-4*zt-1.852732e-2 ) *zt-30.41638 ) *zt + 2098.925 ) *zt+190925.6
                  zk0= ( zb1*zsr + za1 )*zs + zkw
                  !
                  ! masked in situ density anomaly
                  zrd(ji,jj,jk) = (  zrhop / (  1.0 - zh / ( zk0 - zh * ( za - zh * zb ) )  )    &
                     &             - rau0  ) * zrau0r * pmask(ji,jj,jk)
               END DO
            END DO
         END DO
         !
      CASE( 1 )                !==  Linear formulation = F( temperature )  ==!
         DO jk = 1, (jpk-1)
            zrd  (:,:,jk) = ( 0.0285 - rn_alpha * pt (:,:,jk) )        * pmask(:,:,jk)
            prhop(:,:,jk) = ( 1.e0   +            zrd (:,:,jk)       ) * rau0 * pmask(:,:,jk)
         END DO
         !
      CASE( 2 )                !==  Linear formulation = F( temperature , salinity )  ==!
         DO jk = 1, (jpk-1)
            zrd  (:,:,jk) = ( rn_beta  * ps(:,:,jk) - rn_alpha * pt(:,:,jk) )        * pmask(:,:,jk)
            prhop(:,:,jk) = ( 1.e0  + zrd (:,:,jk) )                                       * rau0 * pmask(:,:,jk)
         END DO
         !
      END SELECT
      !
      !
   END SUBROUTINE eos_insitu_pot


   !!======================================================================
END MODULE eosbn2  

+ 25 - 0
prep_restarts/comp_rhop/makefile

@@ -0,0 +1,25 @@
#
# Makefile for potential density computation
# Virginie Guemas - 14 April 2014
#
# NOTE(review): the original first line was "#!/bin/csh"; a shebang has no
# meaning in a makefile and was misleading, so it has been dropped.
#
COMPILE = gfortran
FLAGS   = -O3 -mcmodel=medium -I/usr/include -L/usr/lib
LIB     = -lnetcdf -lnetcdff
INCLUDE =
EXEC    = comp_rhop
SRC     = eosbn2.f90 check_stat.f90 comp_rhop.f90
OBJ     = $(SRC:.f90=.o)

.SUFFIXES : .f90 .o .F90

# Compile rule: libraries are not needed at the compile (-c) stage,
# so $(LIB) is no longer passed here.
.f90.o:
	$(COMPILE) $(FLAGS) -c $<

all: $(EXEC) clean

# Link rule: object files must come BEFORE the libraries that resolve
# their symbols (the original put $(LIB) first, which fails with static
# netCDF libraries on single-pass linkers).
$(EXEC): $(OBJ)
	$(COMPILE) $(FLAGS) -o $@ $(OBJ) $(LIB)

clean:
	rm -f *.o *.mod

+ 113 - 0
prep_restarts/dispatch.bash

@@ -0,0 +1,113 @@
#!/bin/bash
#
# Given a tarred restart file, puts it on the local repository
#
# Author: F. Massonnet
# Date  : November 2016

set -o nounset
set -o errexit
set -x

if [ $# -eq 0 ]
then
  echo "dispatch.bash restart-file.tar"
  echo "EXAMPLE: "
  echo "dispatch.bash /esnas/exp/nemo/original_files/a05p/restart_files/a05p/19580101/fc00/restarts/RESTO_a05p_19580101_fc00_32_19890101-19891231.tar"
  echo ""
  # BUGFIX: exit with a non-zero status so callers see the failure
  exit 1
fi

rfile=$1

workdir=/scratch/Earth/$USER/TMP_24881

mkdir -p "$workdir"
echo "WORKDIR >>>>>      $workdir      <<<<<<"

if [ ! -f "$rfile" ]
then
  echo "$rfile not found" >&2
  exit 1
fi

# Obtain some information from the file name, expected to look like:
#   RESTO_<exp>_<sdate>_<memb>_<chunk>_<start>-<end>.tar
fbase=$(basename "$rfile")
exp=$(echo "$fbase"   | cut -d'_' -f 2)
sdate=$(echo "$fbase" | cut -d'_' -f 3)
memb=$(echo "$fbase"  | cut -d'_' -f 4)
chunk=$(echo "$fbase" | cut -d'_' -f 5)
runperiod=$(echo "$fbase" | cut -d'_' -f 6)
runperiod=${runperiod%.tar} # remove the .tar
endperiod=$(echo "$runperiod" | cut -d'-' -f 2)

# NOTE(review): the extraction was already commented out in the original;
# as written the workdir must have been populated beforehand or the checks
# below will abort -- confirm before re-enabling.
#tar xf "$rfile" -C "$workdir"


# Check whether the restarts are rebuilt or not. If not, stop -- not coded yet
nfiles_oce=$(ls "$workdir"/NEMO_Restart_${chunk}/${exp}_????????_restart_oce*.nc | wc -l)
nfiles_ice=$(ls "$workdir"/NEMO_Restart_${chunk}/${exp}_????????_restart_ice*.nc | wc -l)

if [ "$nfiles_oce" -ne 1 ] || [ "$nfiles_ice" -ne 1 ]
then
  echo "Either the ocean or the ice restart files are not rebuilt"
  echo "Aborting, for now rebuilding outside HPC is not possible"
  exit 1
fi

# Left unquoted on purpose below: the glob must expand (to exactly one file,
# as checked above) when these variables are used.
fileoce=$workdir/NEMO_Restart_${chunk}/${exp}_????????_restart_oce.nc
fileice=$workdir/NEMO_Restart_${chunk}/${exp}_????????_restart_ice.nc

# Get grid information 
nx=$(ncdump -h $fileoce | grep "x =" | awk '{print $3}')
ny=$(ncdump -h $fileoce | grep "y =" | awk '{print $3}')
nz=$(ncdump -h $fileoce | grep "z =" | awk '{print $3}')

if [ "$nx" = 362 ] && [ "$ny" = 292 ]
then
  grid=ORCA1
else
  echo "Not coded yet, to add"
  exit 1
fi

# Get sea ice model information. Looking for variable a_i_htc1 which is LIM3-indicative
if ncdump -h $fileice | grep -q a_i_htc1
then
  seaicemodel=LIM3
else
  # BUGFIX: the original only warned here and left seaicemodel unset, so the
  # script later died with a confusing "unbound variable" error -- fail early.
  echo "sea ice model unknown, or maybe LIM2?" >&2
  exit 1
fi


# Create output directory for ocean and ice
# BUGFIX: the original tested the hard-coded path .../ORCA1L75 although the
# message (and the mkdir below) refer to ${grid}L${nz}.
if [ ! -d /esnas/releases/ic/ocean/${grid}L${nz} ]
then
  echo "Configuration does not exist: ${grid}L${nz}"
  echo "There must be an error."
  exit 1
fi

# Do the ocean
mkdir -p /esnas/releases/ic/ocean/${grid}L${nz}/${exp}

fileout=/esnas/releases/ic/ocean/${grid}L${nz}/${exp}/${exp}_${memb}_${endperiod}_restart.nc
cp $fileoce "$fileout"
chmod 777 "$fileout"
gzip -v "$fileout"

# Do the ice
mkdir -p /esnas/releases/ic/ice/${grid}_${seaicemodel}/${exp}
fileout=/esnas/releases/ic/ice/${grid}_${seaicemodel}/${exp}/${exp}_${memb}_${endperiod}_restart_ice.nc
cp $fileice "$fileout"
chmod 777 "$fileout"
gzip -v "$fileout"

+ 49 - 0
prep_restarts/example/loop_outputs_oras4.sh

@@ -0,0 +1,49 @@
#!/bin/bash
 
#SBATCH -n 1
#SBATCH -t 60:00:00
#SBATCH -J oras4_O1L46
#SBATCH --mail-user <virginie.guemas@ic3.cat>
#SBATCH --mail-type FAIL
#SBATCH -o oras4_O1L46-%j.out
#SBATCH -e oras4_O1L46-%j.err

# Loops over ORAS4 monthly mean T-grid files: fetch, interpolate/extrapolate
# temperature and salinity onto the target configuration, format, and store.

set -evx

year0=1958
yearf=2013
fc0=4
fcf=4
config='O1L46'
TMPDIR=/scratch/vguemas/$$

mkdir -p "$TMPDIR"
cd "$TMPDIR"
cp /home/vguemas/CFU_tools_new/prep_restarts/script_interp_vert_ocean_outputs.bash .

for ((fc=fc0; fc<=fcf; fc++)) ; do
  for ((year=year0; year<=yearf; year++)) ; do
    for ((mon=1; mon<=12; mon++)) ; do
      mm=$(printf "%02d" "$mon")                 # month on two digits, computed once per iteration
      fin=s4_fc${fc}_${year}${mm}_grid_T.nc      # input (reanalysis) file
      fout=${config}_s4_fc${fc}_${year}${mm}_grid_T.nc  # interpolated output file
# Fetching the input file
      cp /cfu/releases/nudging/ocean/s4/ORCA1/fc${fc}/${fin}.gz .
      gunzip ${fin}.gz
# Vertical interpolation + horizontal and vertical extrapolation + filling empty seas 
      bash script_interp_vert_ocean_outputs.bash ${fin} votemper ${fout} ${mm}
      bash script_interp_vert_ocean_outputs.bash ${fin} vosaline vosaline.nc ${mm}
      ncks -A vosaline.nc ${fout}; rm -f vosaline.nc
# The output file is masked to be able to check the mask but EC-Earth does not accept masks
      cdo setmisstoc,0 ${fout} tmp.nc; rm -f ${fout}; mv tmp.nc ${fout}
# Formatting
      ncks -O -v vosaline,votemper ${fout} ${fout}
      ncecat -O -h ${fout} ${fout}
      ncrename -d record,time ${fout}
# Storage
      gzip ${fout}
      mv ${fout}.gz /cfu/releases/nudging/ocean/s4/ORCA1L46/fc${fc}/s4_fc${fc}_${year}${mm}_grid_T.nc.gz
      # BUGFIX: gunzip already removed the .gz; delete the decompressed input
      # so inputs do not accumulate in TMPDIR over the 56-year loop.
      rm -f ${fin}
    done
  done
done
rm -f script_interp_vert_ocean_outputs.bash
ls
cd /          # step out of the temporary directory before removing it
rm -rf "$TMPDIR"

+ 56 - 0
prep_restarts/example/loop_restarts_oras4.sh

@@ -0,0 +1,56 @@
#!/bin/bash
 
#SBATCH -n 1
#SBATCH -t 120:00:00
#SBATCH -J oras4_O1L46
#SBATCH --mail-user <virginie.guemas@ic3.cat>
#SBATCH --mail-type FAIL
#SBATCH -o oras4_O1L46-%j.out
#SBATCH -e oras4_O1L46-%j.err

# Loops over ORAS4 ocean restarts: fetch, vertically interpolate onto the
# target configuration, append the potential density (rhop) required by
# EC-Earth3, and store the result.

set -evx

year0=1992
yearf=2009
fc0=1
fcf=4
lstMM=( '1031' '0430' )
config='O1L46'
TMPDIR=/scratch/vguemas/$$

mkdir -p "$TMPDIR"
cd "$TMPDIR"
cp /cfu/pub/scripts/prep_restarts/library/comp_rhop_$config comp_rhop
cp /home/vguemas/CFU_tools_new/prep_restarts/script_interp_vert_ocean_restart.bash .
for ((year=year0; year<=yearf; year++)) ; do
 for ((fc=fc0; fc<=fcf; fc++)) ; do
  for MM in "${lstMM[@]}" ; do
   base=s4_fc${fc}_${year}${MM}      # common file-name stem for this member/date
   cp /cfu/releases/ic/ocean/ORCA1/s4/${base}_restart.nc.gz .
   gunzip ${base}_restart.nc.gz
   # Vertical interpolation + horizontal and vertical extrapolation + filling empty seas 
   bash script_interp_vert_ocean_restart.bash ${base}_restart ${base}_restart_${config} ${MM:0:2}
   # rhop needs to be added for EC-Earth3 ; computed from tn and sn
   cat > namelist_rhop <<EOF
&density
  input_file = '${base}_restart_${config}.nc'
  input_Tvar = 'tn'
  input_Svar = 'sn'
  input_Dvar = 'nav_lev'
  outfile    = 'rhop_fc${fc}_${year}${MM}.nc'
/
EOF
   ./comp_rhop
   ncwa -O -a t rhop_fc${fc}_${year}${MM}.nc rhop_fc${fc}_${year}${MM}.nc
   ncks -A rhop_fc${fc}_${year}${MM}.nc ${base}_restart_${config}.nc
   # The output restart is masked to be able to check the mask
   # but EC-Earth does not accept masks
   cdo setmisstoc,0 ${base}_restart_${config}.nc tmp.nc; rm -f ${base}_restart_${config}.nc; mv tmp.nc ${base}_restart_${config}.nc
   # Storage
   gzip ${base}_restart_${config}.nc
   mv ${base}_restart_${config}.nc.gz /cfu/releases/ic/ocean/ORCA1L46/s4/${base}_restart.nc.gz
   rm -f ${base}_restart.nc namelist_rhop rhop_fc${fc}_${year}${MM}.nc
  done
 done
done
rm -f comp_rhop script_interp_vert_ocean_restart.bash
cd /          # BUGFIX: step out of TMPDIR before removing it (the original
              # deleted the directory it was still standing in)
rm -rf "$TMPDIR"

+ 78 - 0
prep_restarts/gener_perturbation.bash

@@ -0,0 +1,78 @@
#!/bin/bash
#
# -- Author : François Massonnet, francois.massonnet@ic3.cat
# -- Date   : 30 Jan 2015
# -- At     : IC3, Barcelona
# -- Modified : 19 Jan 2016, omar.bellprat@bsc.es 
#
# -- Purpose: Generation of an arbitrary number of NEMO oceanic restarts that are copies of a reference, plus a perturbation
#
# -- Method : The reference file is duplicated in this script, then read by a R script. The perturbation is introduced and finally written to this latter file.
#             The script must be placed into the restart directory (e.g. NEMO_Restart_23). The generated restarts have to be renamed after generation. This 
#             script has been tested on MareNostrum3.
#
# -- Input  : NEMO ocean restart from an EC-Earth 3.1 run
# -- Output : N restarts with the same name,but with an index fc0, fc1, ... fcN-1 appended
#
# -- Limitations: Only the surface conditions are perturbed (level index: 1) but this can be changed in the R script

set -o errexit
set -o nounset
set -x

# BUGFIX: the script needs TWO arguments; the original only rejected zero,
# so a single argument crashed later with an opaque nounset error.
if [ $# -lt 2 ] ; then
  echo "gener_perturbation.bash ocean_restart_file Nmembers"
  exit 1
fi

filein=$1
nmemb=$2

# ---------------------------------------------------------

var=tn          # Variable to be perturbed
per=0.0001      # Standard deviation of gaussian perturbation to be applied,
                # in units of the variable (for tn: in K for example)

# Keep one pristine copy of the reference restart.  (The original re-created
# this identical backup on every loop iteration; once is enough, since
# $filein itself is never modified.)
cp "$filein" "${filein}.backup"

for jmemb in $(seq 0 $(( nmemb - 1 )))
do
  echo $jmemb
  # 1. Make a copy of the original file, with the new name
  filenew="${filein%.nc}_fc${jmemb}.nc"
  cp "$filein" "$filenew"

  # 2. Prepare the R script (here-doc instead of a giant quoted echo;
  #    $var, $filenew and $per are expanded by the shell)
  cat > Rtmpfile.R <<EOF
#!/usr/bin/env Rscript
  library(ncdf)

  # François Massonnet, 30 Jan 2015
  # Adds a gaussian perturbation at the first level of a 3D field
  # Tested only for NEMO restarts
  #
  # This script should be called by a bash script so that the variable and file names are specified, as well as the perturbation

  varname='$var'
  filein <- '$filenew'
  ex.nc           <- open.ncdf(filein,write=TRUE)
  spert <- $per

  myvar     <- get.var.ncdf(ex.nc, varname)
  myvarpert <- myvar
  for (i in seq(1,dim(myvar)[1])){
    for (j in seq(1,dim(myvar)[2])){
      if (myvar[i,j,1] != 0){
        myvarpert[i,j,1] = myvarpert[i,j,1] + rnorm(1,sd=spert)
      }
    }
  }

  put.var.ncdf(ex.nc,varname,myvarpert)
  close.ncdf(ex.nc)
EOF

  chmod 744 Rtmpfile.R

  # 3. Run the R script, that produces the new NetCDF
  ./Rtmpfile.R 
done

+ 76 - 0
prep_restarts/library/library.bash

@@ -0,0 +1,76 @@
# Extrapolates one variable of a restart/output file horizontally and fills
# empty seas with a monthly climatology.
# Arguments:
#   $1  restartin  - input netcdf file
#   $2  var        - variable name inside $restartin
#   $3  meshin     - meshmask of the input grid
#   $4  lon        - longitude variable name in $meshin
#   $5  lat        - latitude variable name in $meshin
#   $6  maskextrap - netcdf mask: 1 where to extrapolate horizontally
#   $7  maskclim   - netcdf mask: 1 where to fill with the climatology
#   $8  varmask    - name of the mask variable (e.g. tmask)
#   $9  restartout - output netcdf file
#   $10 flag       - '3d' or '2d': selects the python extrapolation script
#   $11 cfutools   - path to the CFU tools repository
#   $12 confout    - output configuration (selects the climatology files)
#   $13 mon        - month on two digits (selects the monthly climatology)
function extrap {
  typeset var restartin=$1
  typeset var var=$2
  typeset var meshin=$3
  typeset var lon=$4
  typeset var lat=$5
  typeset var maskextrap=$6
  typeset var maskclim=$7
  typeset var varmask=$8
  typeset var restartout=$9
  typeset var flag=${10}
  typeset var cfutools=${11}
  typeset var confout=${12}
  typeset var mon=${13}
  typeset var fac

  # Pick the 3d or 2d horizontal extrapolation script
  case $flag in
   '3d') script=${cfutools}/interpolation/extrap.py ;;
   '2d') script=${cfutools}/interpolation/extrap2d.py ;;
  esac
  # tmp1.nc: input field horizontally extrapolated into the $maskextrap points
  python ${script} ${restartin} ${var} ${meshin} ${lon} ${lat} ${maskextrap} ${varmask} tmp1.nc
  # tmp.nc: 1 where the climatology mask is BELOW 0.5, i.e. where the
  # extrapolated field is kept; 0 where the climatology must be used instead
  cdo ltc,0.5 -selvar,${varmask} ${maskclim} tmp.nc
  # Each "cdo ifthenelse cond a b out" below keeps the extrapolated field
  # (tmp1.nc) where tmp.nc is true and falls back to a climatology otherwise.
  case $var in
   'tn'|'tb'|'votemper')
    # temperature: fill with the monthly temperature climatology
    cdo ifthenelse -chvar,${varmask},$var tmp.nc tmp1.nc -chvar,votemper,$var /cfu/pub/scripts/prep_restarts/auxfiles/filesclim/${confout}/temperature_m$mon.nc ${restartout}
   ;;
   'sst_m')
    # NOTE(review): factor 5 applied to the SST climatology -- presumably the
    # sst_m restart field is accumulated over 5 coupling steps; confirm.
    cdo mulc,5 /cfu/pub/scripts/prep_restarts/auxfiles/filesclim/${confout}/sst_m$mon.nc titi.nc
    cdo ifthenelse -chvar,${varmask},$var tmp.nc tmp1.nc -chvar,votemper,$var titi.nc ${restartout}
    rm -f titi.nc
   ;;
   'sst_io')
    # NOTE(review): Celsius -> Kelvin (+273.15) then doubled -- presumably an
    # accumulation over 2 steps for the ice-ocean SST field; confirm.
    cdo addc,273.15 /cfu/pub/scripts/prep_restarts/auxfiles/filesclim/${confout}/sst_m$mon.nc titi.nc
    cdo mulc,2 titi.nc toto.nc
    cdo ifthenelse -chvar,${varmask},$var tmp.nc tmp1.nc -chvar,votemper,$var toto.nc ${restartout}
    rm -f titi.nc toto.nc
   ;;
   'sn'|'sb'|'vosaline')
    # salinity: fill with the monthly salinity climatology
    cdo ifthenelse -chvar,${varmask},$var tmp.nc tmp1.nc -chvar,vosaline,$var /cfu/pub/scripts/prep_restarts/auxfiles/filesclim/${confout}/salinity_m$mon.nc ${restartout}
   ;;
   'sss_m'|'sss_io')
    # sea surface salinity: same accumulation factors as sst_m / sst_io above
    case $var in
      'sss_m' ) fac=5;;
      'sss_io') fac=2;;
    esac
    cdo mulc,$fac  /cfu/pub/scripts/prep_restarts/auxfiles/filesclim/${confout}/sss_m$mon.nc titi.nc
    cdo ifthenelse -chvar,${varmask},$var tmp.nc tmp1.nc -chvar,vosaline,$var titi.nc ${restartout}
    rm -f titi.nc
   ;;
   *)
    # default: no climatology available -- the else-branch reuses tmp.nc,
    # which is 0 exactly where the climatology mask applies, so those points
    # are filled with zeros
    cdo ifthenelse -chvar,${varmask},$var tmp.nc tmp1.nc -chvar,${varmask},$var tmp.nc ${restartout}
   ;;
  esac
  rm -f tmp.nc tmp1.nc
}
+
# Append a single-variable netcdf file ($2) into a restart file ($1).
# If the restart does not exist yet, the variable file simply becomes it;
# otherwise ncks appends the new variable in place.
function gathervar {
  local target=$1
  local piece=$2
  if [[ ! -e $target ]] ; then
    cp "$piece" "$target"
  else
    ncks -A "$piece" "$target"
  fi
}
+
# Mask one variable of a netcdf file: keep values of $4 (in file $3) only
# where the mask variable $2 of meshmask $1 is true, and write them to $5.
function applymask {
  local mesh_file=$1
  local mesh_var=$2
  local data_file=$3
  local data_var=$4
  local result=$5
  # Extract the mask variable into a scratch file, apply it, then clean up.
  cdo selvar,"$mesh_var" "$mesh_file" tmpmask.nc
  cdo ifthen -chvar,"${mesh_var}","${data_var}" tmpmask.nc "$data_file" "$result"
  rm -f tmpmask.nc
}

+ 39 - 0
prep_restarts/script_clim_restart.sh

@@ -0,0 +1,39 @@
#!/bin/bash
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# This computes a climatology of restarts. 
#
# History : Virginie Guemas - Initial version                   - April 2014 
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#                                Arguments
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Name of the experiment from which to compute the climatology
expin='i00v'  
# Name tag given to the output climatology
expout='chis'
# Start month : MMDD 
MM=1031
# First year used to compute climatology
year0=1981
# Last year used to compute climatology
yearf=2010 
# ocean or ice ?
typ='ice'
# configuration : ORCA1_LIM2, ORCA1_LIM3, ORCA025_LIM2, ORCA025_LIM3 for ice
#                 ORCA1, ORCA1L46, ORCA025L46, ORCA025L75 for ocean
config='ORCA1_LIM2'
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Ice restarts carry an extra "_ice" suffix in their file names.
case $typ in
  'ocean') suf='.nc' ;;
  'ice') suf='_ice.nc' ;;
esac

# For each member: fetch and gunzip every yearly restart, average them all
# with ncea (ensemble/time mean over the glob), then gzip and store the
# climatology and remove the fetched inputs.
# NOTE(review): the member range 0..4 is hard-coded -- confirm it matches the
# ensembles of the source experiment.
for ((jmemb=0;jmemb<=4;jmemb++)) ; do
  for ((year=$year0;year<=$yearf;year++)) ; do
    cp /cfu/releases/ic/$typ/${config}/${expin}/${expin}_fc${jmemb}_${year}${MM}_restart${suf}.gz .
    gunzip ${expin}_fc${jmemb}_${year}${MM}_restart${suf}.gz 
  done
  # The glob matches exactly the years fetched above for this member.
  ncea  ${expin}_fc${jmemb}_*${MM}_restart${suf}  ${expout}_fc${jmemb}_${MM}_restart${suf}
  gzip ${expout}_fc${jmemb}_${MM}_restart${suf}
  mv ${expout}_fc${jmemb}_${MM}_restart${suf}.gz /cfu/releases/ic/$typ/${config}/${expout}/
  rm -f ${expin}_fc${jmemb}_*${MM}_restart${suf}
done

+ 72 - 0
prep_restarts/script_interp_vert_ocean_outputs.bash

@@ -0,0 +1,72 @@
#!/bin/bash
set -evx 
# 
# This script interpolates vertically ocean monthly mean temperature and 
# salinity, extrapolates them horizontally, fills up empty seas with 
# climatologies, and extrapolate vertically. These monthly mean temperature and
# salinity are to be used as reference files for ocean nudging. 
#
# History : Virginie Guemas - Initial version                           2012 
#           Virginie Guemas - ORAS4 + vertical extrapolation + 
#                             commenting and clarifying         -  July 2014
# 
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# BUGFIX: require all four arguments.  The script does not run with nounset,
# so a missing 4th argument used to leave $mon empty and silently select the
# wrong (empty-named) monthly climatology files.
if [ $# -ne 4 ] ; then
  echo "Usage: ${0##*/} monthly_in varin monthly_out mon" >&2
  exit 1
fi
monthly_in=$1 # Monthly mean netcdf file from the reanalysis to be interpolated
              # GLORYS2v1, GLOSEA5 or ORAS4
varin=$2      # Input variable : 'votemper' or 'vosaline' - if this script
              # is extended to be used for other variables, a few lines need 
              # to be changed below in the case varin loop.
monthly_out=$3 # Output netcdf file name
mon=$4 # Month of the restart on 2 digits MM
meshmaskin='/cfu/autosubmit/con_files/mesh_mask_nemo.nemovar_O1L42.nc'
              # Meshmask of the input reanalysis. Example:
              #'/cfu/autosubmit/con_files/mesh_mask_nemo.glorys2v1_O25L75.nc'
confout='ORCA1L46' # output configuration
vertgridout='/cfu/autosubmit/con_files/mesh_mask_nemo.Ec3.0_O1L46.nc'
              # Meshmask of the output grid, i.e. model and grid to be used in
              # the nudged simulation
Ptoextrap='/cfu/pub/scripts/prep_restarts/auxfiles/masks/Tofill_ecearth.v3.0.ORCA1L46.extrap.oras4.outputs.nc'
  # Locations of the points where to extrapolate horizontally the outputs after
  # interpolation, = 1 in the netcdf file, 0 everywhere else. Locations 
  # available for the tmask grid. If you don't know what is tmask,
  # read the NEMO documentation about the grid.
  # To obtain the Ptoextrap netcdf file, it is necessary to run once this script
  # without extrapolation and filling of the empty seas and compare the output
  # with the meshmask of the output grid. To do so, set 0 everywhere in 
  # Ptoextrap and use build_Tofill.bash afterwards.
Ptofillclim='/cfu/pub/scripts/prep_restarts/auxfiles/masks/Tofill_ecearth.v3.0.ORCA1L46.clim.nc'
  # Locations of the empty seas that need to be filled up with a climatology 
  # after interpolation = 1 in the netcdf file, 0 everywhere else. Locations 
  # available for the tmask grid. If you don't know what is tmask,
  # read the NEMO documentation about the grid.
  # To obtain the Ptofillclim netcdf file, it is necessary to run once this 
  # script without extrapolation and filling of the empty seas and compare the
  # output with the meshmask of the output grid. To do so, set 0 everywhere in 
  # Ptofillclim and use build_Tofill.bash afterwards.
cfutools='/home/vguemas/CFU_tools_new'
  # Location of the cfutools repository
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
source ${cfutools}/prep_restarts/library/library.bash

cdo vertsum -selvar,tmask $meshmaskin mask2din.nc
mask='tmask' ; varlon='glamt' ; varlat='gphit' ;  fill='tmask'

# The vertical interpolation is performed below
python ${cfutools}/interpolation/interp_vert.py $monthly_in $varin $meshmaskin e3t_0 mask2din.nc $mask $vertgridout e3t_0 gdept_0 int_${monthly_in}

# The function extrap extrapolates horizontally and fill in the empty seas with a climatology
extrap int_${monthly_in} ${varin} ${meshmaskin} ${varlon} ${varlat} ${Ptoextrap} ${Ptofillclim} ${fill} int2_${monthly_in} 3d $cfutools $confout $mon

# The vertical extrapolation to empty levels is performed below
python ${cfutools}/interpolation/vertextrap.py int2_${monthly_in} ${varin} $vertgridout nav_lev int3_${monthly_in}

# Apply the mask
applymask $vertgridout $mask int3_${monthly_in} $varin $monthly_out

# Add vertical levels
ncrename -v z,nav_lev $monthly_out
ncks -A -v gdepw_0 $vertgridout $monthly_out

# Clean
rm -f  int_${monthly_in} int2_${monthly_in} int3_${monthly_in} mask2din.nc

+ 135 - 0
prep_restarts/script_interp_vert_ocean_restart.bash

@@ -0,0 +1,135 @@
+#!/bin/bash
+set -evx 
+# 
+# This script interpolates vertically ocean restarts, extrapolates horizontally,
+# fills up empty seas with climatologies, extrapolates vertically. 
+#
+# History : Virginie Guemas - Initial version                           2012 
+#           Virginie Guemas - ORAS4 + vertical extrapolation + 
+#                             commenting and clarifying         - March 2014
+# 
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#                                Arguments
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+type='oras4' # 'glorys' / 'metoffice' : if you add another type, you'll have
+             # to adapt the list of variables this restart contains below.
+             # Look for the variable $type in the script.
+fillinrot='TRUE' # Extrapolate or not the rotational rotn / rotb
+                  # TRUE increases substantially the interpolation time 
+                  # FALSE has negligible effects ( u, v, t, s are initialized )
+restart_in=$1 # Input restart file name without the .nc extension
+meshmaskin='/cfu/autosubmit/con_files/mesh_mask_nemo.nemovar_O1L42_new.nc'
+    # Meshmask of the input reanalysis. Example:
+    #'/cfu/autosubmit/con_files/mesh_mask_nemo.glorys2v1_O25L75.nc'
+confout='ORCA1L46' # output configuration
+vertgridout='/cfu/autosubmit/con_files/mesh_mask_nemo.Ec3.0_O1L46.nc'
+    # Meshmask of the output restart, i.e. model and grid to be used in
+    # climate prediction 
+Ptoextrap='/cfu/pub/scripts/prep_restarts/auxfiles/masks/Tofill_ecearth.v3.0.ORCA1L46.extrap.oras4.restarts.nc'
+    # Locations of the points where to extrapolate horizontally the restarts after
+    # interpolation = 1 in the netcdf file, 0 everywhere else. Locations available
+    # for the tmask, umask, vmask and fmask grids. If you don't know about those
+    # four variables, read the NEMO documentation about the grid.
+    # To obtain the Ptoextrap netcdf file, it is necessary to run once this script
+    # without extrapolation and filling of the empty seas and compare the output
+    # restart with the output meshmask. To do so, set 0 everywhere in Ptoextrap
+    # and use build_Tofill.bash afterwards.
+Ptofillclim='/cfu/pub/scripts/prep_restarts/auxfiles/masks/Tofill_ecearth.v3.0.ORCA1L46.clim.nc'
+    # Locations of the empty seas that need to be filled up with a climatology after
+    # interpolation = 1 in the netcdf file, 0 everywhere else. Locations available
+    # for the tmask, umask, vmask and fmask grids. If you don't know about those
+    # four variables, read the NEMO documentation about the grid.
+    # To obtain the Ptofillclim netcdf file, it is necessary to run once this script
+    # without extrapolation and filling of the empty seas and compare the output
+    # restart with the output meshmask. To do so, set 0 everywhere in Ptofillclim
+    # and use build_Tofill.bash afterwards.
+mon=$3 # Month of the restart on 2 digits MM
+restart_out=$2 # Output restart file name without the .nc extension
+cfutools='/home/vguemas/CFU_tools_new'
+    # Location of the cfutools repository
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+# Per reanalysis type:
+#   listvarsinterp : 3d variables (vertical interpolation + extrapolation)
+#   listvarsextrap : 2d variables (horizontal extrapolation only)
+#   varscopy       : 0d variables (copied unchanged to the output restart)
+case $type in
+  'glorys') 
+# List of 3d variables = that need to be vertically interpolated 
+   listvarsinterp=( 'en' 'avt' 'avmt' 'avmu' 'avmv' 'dissl' 'ub' 'vb' 'tb' 'sb' 'rotb' 'hdivb' 'un' 'vn' 'tn' 'sn' 'rotn' 'hdivn' 'rhop' )
+# List of 2d variables = that only need extrapolation  
+   listvarsextrap=( 'gcx' 'gcxb' 'gcxbfs' 'gru' 'grv' 'gsu' 'gsv' 'gtu' 'gtv' 'ssh_m' 'sshb' 'sshn' 'sss_m' 'sst_m' 'ssu_m' 'ssv_m' )
+# List of 0d variables = that need to be copied to the output restart
+   varscopy='nn_fsbc,rdt,rdttra1'
+  ;;
+  'oras4')
+   listvarsinterp=( 'en' 'ub' 'vb' 'tb' 'sb' 'rotb' 'hdivb' 'un' 'vn' 'tn' 'sn' 'rotn' 'hdivn' )
+   listvarsextrap=('gcx' 'gcxb' 'sshb' 'sshn' ) 
+   varscopy='rdt,rdttra1,ndastp,adatrj'
+  ;;
+esac
+
+source ${cfutools}/prep_restarts/library/library.bash
+
+# If the rotationals are not extrapolated, zero out the fmask entries of a
+# working copy of Ptoextrap so that no f-grid point is flagged for extrapolation.
+if [[ $fillinrot == 'FALSE' ]] ; then
+ cp ${Ptoextrap} Ptoextrap.nc 
+ ncks -v fmask Ptoextrap.nc tmp1.nc
+ cdo mulc,0 tmp1.nc tmp2.nc
+ ncks -A tmp2.nc Ptoextrap.nc
+ rm -f tmp1.nc tmp2.nc
+ Ptoextrap='Ptoextrap.nc'
+fi
+
+# Copy the 0d variables from the input restart to the output restart
+ncks -O -v $varscopy ${restart_in}.nc ${restart_out}.nc
+ncks -A -v nav_lon,nav_lat $meshmaskin ${restart_out}.nc
+ncks -A -v nav_lev $vertgridout ${restart_out}.nc
+
+# Vertical interpolation + horizontal extrapolation at each level + filling empty seas + vertical extrapolation
+cdo vertsum -selvar,tmask,vmask,umask,fmask $meshmaskin mask2din.nc
+for var in ${listvarsinterp[@]} ; do
+  # Select the mask / longitude / latitude names matching the variable's grid point
+  case $var in
+   'avmu'|'un'|'ub') mask='umask' ; varlon='glamu' ; varlat='gphiu' ; fill='umask' ;;
+   'avmv'|'vn'|'vb') mask='vmask' ; varlon='glamv' ; varlat='gphiv' ; fill='vmask' ;;
+   'rotb'|'rotn') mask='fmask' ; varlon='glamf' ; varlat='gphif' ; fill='fmask' ;;
+   'avt'|'avmt'|'en'|'dissl'|'hdivb'|'hdivn'|'rhop'|'sn'|'tn'|'sb'|'tb') mask='tmask' ; varlon='glamt' ; varlat='gphit' ;  fill='tmask' ;;
+  esac
+  # The vertical interpolation is performed below
+  python ${cfutools}/interpolation/interp_vert.py ${restart_in}.nc $var $meshmaskin e3t_0 mask2din.nc $mask $vertgridout e3t_0 gdept_0 ${var}_${restart_out}_int.nc
+
+  # The function extrap extrapolates horizontally and fills in the empty seas with a climatology
+  extrap ${var}_${restart_out}_int.nc ${var} ${meshmaskin} ${varlon} ${varlat} ${Ptoextrap} ${Ptofillclim} ${fill} ${var}_${restart_out}_int2.nc 3d $cfutools $confout $mon 
+
+  # The vertical extrapolation to empty levels is performed below
+  python ${cfutools}/interpolation/vertextrap.py  ${var}_${restart_out}_int2.nc ${var} $vertgridout nav_lev ${var}_${restart_out}_int3.nc
+
+  # Apply the mask
+  applymask $vertgridout $mask ${var}_${restart_out}_int3.nc $var ${var}_${restart_out}.nc
+
+  # Pasting the output to ${restart_out}.nc
+  gathervar ${restart_out}.nc ${var}_${restart_out}.nc
+  rm -f ${var}_${restart_out}.nc ${var}_${restart_out}_int.nc ${var}_${restart_out}_int2.nc ${var}_${restart_out}_int3.nc
+done
+
+# Horizontal extrapolation and filling empty seas of 2d variables
+# Only the surface level (lev/z index 0) of the masks is needed for 2d fields.
+ncks -O -d lev,0 ${Ptoextrap} Ptoextrap_lev0.nc
+ncks -O -d lev,0 ${Ptofillclim} Ptofillclim_lev0.nc
+ncks -O -d z,0 $vertgridout mask_lev0.nc
+ncks -O -d z,0 $meshmaskin maskin_lev0.nc
+for var in ${listvarsextrap[@]} ; do
+  case $var in
+   'gru'|'gsu'|'gtu'|'ssu_m'|'u_io') varlon='glamu' ; varlat='gphiu' ; fill='umask' ; mask='umask' ;;
+   'grv'|'gsv'|'gtv'|'ssv_m'|'v_io') varlon='glamv' ; varlat='gphiv' ; fill='vmask' ; mask='vmask' ;;
+   'gcx'|'gcxb'|'gcxbfs'|'ssh_m'|'sshb'|'sshn'|'sss_m'|'sst_m'|'alb_ice'|'sst_io'|'sss_io') varlon='glamt' ; varlat='gphit' ; fill='tmask' ; mask='tmask' ;;
+  esac
+
+  # Apply the input mask
+  applymask maskin_lev0.nc $mask ${restart_in}.nc $var ${var}_${restart_in}.nc
+
+  # Horizontal extrapolation and fillup the empty seas with a climatology
+  extrap ${var}_${restart_in}.nc ${var} ${meshmaskin} ${varlon} ${varlat} Ptoextrap_lev0.nc Ptofillclim_lev0.nc ${fill} ${var}_${restart_out}_int.nc 2d $cfutools $confout $mon
+ 
+  # Apply the output mask
+  applymask mask_lev0.nc $mask ${var}_${restart_out}_int.nc $var ${var}_${restart_out}.nc
+
+  # Pasting the output to ${restart_out}.nc
+  gathervar ${restart_out}.nc ${var}_${restart_out}.nc
+  rm -f ${var}_${restart_out}.nc ${var}_${restart_out}_int.nc ${var}_${restart_in}.nc
+done
+# Clean up the temporary files
+rm -f Ptoextrap_lev0.nc Ptofillclim_lev0.nc mask2din.nc mask_lev0.nc tmpmask.nc maskin_lev0.nc
+if [[ -e Ptoextrap.nc ]] ; then rm -f Ptoextrap.nc ; fi

+ 120 - 0
regression/PoissonReg.R

@@ -0,0 +1,120 @@
+#!/usr/bin/env Rscript 
+#
+# Poisson regression on 1 variable only
+# by Mathieu Boudreault and Louis-Philippe Caron
+# Input: 1 file with a series of predictors
+#        1 file with a series of predictands
+# Required format:  - Both files should be matrices, with each item separated from the next by ","
+#                   - The first column of both files has to be the same and must represent the time of measurements (unit is irrelevant)
+#                   - Missing values should be left blank
+#                   - Each column should be identified with text
+# Output: 1 file with the beta values - betas.csv
+#         1 file with the p-values - pval.csv
+#         Optional : 1 file with p-values robust to autoregression and heteroskedasticity (Sandwich test) - pvalsand.csv - Is returned with the file name given as 5th argument 
+#         Format: All are matrices, num(predictands) x num(predictors)
+#
+# History: Created on 11/11/13
+#
+#########################################################################
+
+rm(list=ls(all=TRUE))  # Clear memory
+##  1. Load the necessary packages
+library(zoo)
+library(sandwich)
+library(lmtest) 
+
+args=commandArgs(TRUE)
+countfile=args[1]      # predictand (counts) csv file
+predictorfile=args[2]  # predictor csv file
+
+#length(args)
+
+# 2, 4 or 5 arguments are accepted; output file names default otherwise.
+if ( length(args)==5  ) {
+betafile=args[3]
+pvaluefile=args[4]
+pvalsandfile=args[5] 
+} else if ( length(args)==4  ) {
+betafile=args[3]
+pvaluefile=args[4]
+} else if ( length(args)==2  ) {
+#countfile="counts.csv"
+#predictorfile="predictors.csv"
+betafile="betas.csv"
+pvaluefile="pval.csv"
+pvalsandfile="pvalsand.csv" 
+} else 
+    stop("Invalid number of input files !") 
+
+
+path = "."
+
+##  2. Load data and convert into R format
+X <- data.frame(read.table(paste(path, predictorfile, sep = "/"), sep=",", header=TRUE))
+N <- data.frame(read.table(paste(path, countfile, sep = "/"), sep=",", header=TRUE))
+ALL.data <- merge(N, X)   # inner join on the shared first (time) column
+
+## 3. Additional variables
+nameX <- names(X)
+nameN <- names(N)
+nameX <- nameX[-1] #Remove first element (the time column)
+nameN <- nameN[-1] #Remove first element (the time column)
+#
+cte <- length(nameX) * length(nameN) # total number of regressions
+# Lists reshaped into num(predictands) x num(predictors) matrices of lists
+TheRegressions <- as.list(numeric(cte))
+TheOutput <- as.list(numeric(cte))
+dim(TheRegressions) <- c(length(nameN), length(nameX))
+dim(TheOutput) <- c(length(nameN), length(nameX))
+
+rownames(TheRegressions) <- nameN
+colnames(TheRegressions) <- nameX
+rownames(TheOutput) <- nameN
+colnames(TheOutput) <- nameX
+
+## 4. Compute the Poisson regressions
+for(n in 1:length(nameN))
+{
+  for(x in 1:length(nameX))
+  {
+    # One univariate Poisson regression: predictand ~ predictor
+    formule <- as.formula(paste(c(nameN[n], nameX[x]), collapse=" ~ "))
+    regression <- glm(formula = formule, data = ALL.data, family = poisson)
+  
+    TheRegressions[[n, x]] <- regression   #store the entire object
+    TheOutput[[n, x]] <- list(modele = regression$formula,
+                              resultats = summary(regression),
+                              sandwich.coef.test = coeftest(regression, vcov = sandwich)
+                              )
+  }
+}
+#
+## 5. Variable to store results 
+ somm.beta <- matrix(0, nrow = length(nameN), ncol = length(nameX))
+ rownames(somm.beta) <- nameN
+ colnames(somm.beta) <- nameX
+# 
+ somm.pval <- matrix(0, nrow = length(nameN), ncol = length(nameX))
+ rownames(somm.pval) <- nameN
+ colnames(somm.pval) <- nameX
+ 
+ somm.pval.sand <- matrix(0, nrow = length(nameN), ncol = length(nameX))
+ rownames(somm.pval.sand) <- nameN
+ colnames(somm.pval.sand) <- nameX
+# 
+# 
+ for(n in 1:length(nameN))
+ {
+   for(x in 1:length(nameX))
+   {
+     # Coefficient-table row 2 is the predictor (row 1 is the intercept)
+     somm.beta[n,x] <- TheOutput[[n, x]]$resultats$coefficients[2,1]  # slope estimate
+     somm.pval[n,x] <- TheOutput[[n, x]]$resultats$coefficients[2,4]  # slope p-value
+     somm.pval.sand[n,x] <- TheOutput[[n, x]]$sandwich.coef.test[2,4] # robust p-value
+   }
+ }
+# 
+# Write the data 
+write.csv(somm.beta, paste(path,betafile,sep = "/")) # The betas
+write.csv(somm.pval, paste(path, pvaluefile,sep = "/")) # The p-values
+if ( length(args)==5  ) {
+write.csv(somm.pval.sand, paste(path, pvalsandfile,sep = "/")) # The Sandwich p-values 
+}
+rm(list=ls())
+quit()

+ 174 - 0
regression/PoissonRegwTrend.R

@@ -0,0 +1,174 @@
+#!/usr/bin/env Rscript 
+#
+## Poisson regression on 1 variable only
+# by Mathieu Boudreault and Louis-Philippe Caron
+# Input: 1 file with a series of predictors
+#        1 file with a series of predictands
+#        1 file with a series of trends
+# Required format:  - All files should be matrices, with each item separated from the next by ","
+#                   - The first column of all files has to be the same and must represent the time of measurements (unit is irrelevant)
+#                   - Missing values should be left blank
+#                   - Each column should be identified with text
+# Output: 1 file with the beta values - betas.csv
+#         1 file with the p-values - pval.csv
+#         Optional : 1 file with p-values robust to autoregression and heteroskedasticity (Sandwich test) - pvalsand.csv  - Is produced if a file name is given (i.e. if 6 arguments are passed when calling the function)  
+#         Format: All are matrices, [num(predictands) x num(predictors)] stacked along num(trends)
+#
+# History: Created on 11/11/13
+#          Generalized to any number of trends (was limited to 6)
+#
+#########################################################################
+
+rm(list=ls(all=TRUE))  # Clear memory
+##  1. Load the necessary packages
+library(zoo)
+library(sandwich)
+library(lmtest) 
+
+args=commandArgs(TRUE)
+countfile=args[1]      # predictand (counts) csv file
+predictorfile=args[2]  # predictor csv file
+trendfile=args[3]      # trends csv file
+#length(args)
+
+# 3, 5 or 6 arguments are accepted; output file names default otherwise.
+if ( length(args)==6  ) {
+betafile=args[4]
+pvaluefile=args[5]
+pvalsandfile=args[6] 
+} else if ( length(args)==5  ) {
+betafile=args[4]
+pvaluefile=args[5]
+} else if ( length(args)==3  ) {
+#countfile="counts.csv"
+#predictorfile="predictors.csv"
+betafile="betas.csv"
+pvaluefile="pval.csv"
+pvalsandfile="pvalsand.csv" 
+} else 
+    stop("Invalid number of input files !") 
+
+
+path = "."
+
+##  2. Load data and convert into R format
+Predictors <- data.frame(read.table(paste(path, predictorfile, sep = "/" ), sep=",", header=TRUE))
+Trends <- data.frame(read.table(paste(path,trendfile, sep = "/"), sep=",", header=TRUE))
+
+X <- merge(Predictors, Trends)   # inner join on the shared first (time) column
+
+N <- data.frame(read.table(paste(path,countfile, sep = "/" ), sep=",", header=TRUE))
+
+ALL.data <- merge(N, X)
+
+
+## 3. Additional variables
+nameP <- names(Predictors)
+nameP <- nameP[-1] #Remove first element (the time column)
+nameT <- names(Trends)
+nameT <- nameT[-1] #Remove first element (the time column)
+nameN <- names(N)
+nameN <- nameN[-1] #Remove first element (the time column)
+#
+cte <- length(nameP) * length(nameN) * length(nameT)  # total number of regressions
+
+# Lists reshaped into num(predictands) x num(predictors) x num(trends) arrays
+TheRegressions <- as.list(numeric(cte))
+TheOutput <- as.list(numeric(cte))
+
+dim(TheRegressions) <- c(length(nameN), length(nameP), length(nameT))
+dim(TheOutput) <- c(length(nameN), length(nameP), length(nameT))
+
+rownames(TheRegressions) <- nameN
+colnames(TheRegressions) <- nameP 
+rownames(TheOutput) <- nameN
+colnames(TheOutput) <- nameP
+
+## 4. Compute the regressions
+
+for(ttt in 1:length(nameT))
+{
+    for(n in 1:length(nameN))
+    {
+        for(x in 1:length(nameP))
+        {
+          # Poisson regression: predictand ~ trend + predictor
+          REGRESSORS <- paste(c(nameT[ttt], nameP[x]), collapse="+")
+          formule <- as.formula(paste(c(nameN[n], REGRESSORS), collapse=" ~ "))
+          regression <- glm(formula = formule, data = ALL.data, family = poisson)
+          
+          TheRegressions[[n, x, ttt]] <- regression
+          TheOutput[[n, x, ttt]] <- list(modele = regression$formula,
+                                    resultats = summary(regression),
+                                    sandwich.coef.test = coeftest(regression, vcov = sandwich)
+                                    )
+      }
+  }
+}
+#
+## 5. Variable to store results 
+ somm.beta <- array(0, dim=c(length(nameN), length(nameP), length(nameT)))
+ rownames(somm.beta) <- nameN
+ colnames(somm.beta) <- nameP
+ 
+ somm.pval <- array(0, dim=c(length(nameN), length(nameP), length(nameT)))
+ rownames(somm.pval) <- nameN
+ colnames(somm.pval) <- nameP
+ 
+ somm.pval.sand <- array(0, dim=c(length(nameN), length(nameP), length(nameT)))
+ rownames(somm.pval.sand) <- nameN
+ colnames(somm.pval.sand) <- nameP
+ 
+ for(ttt in 1:length(nameT))
+ {
+   for(n in 1:length(nameN))
+   {
+     for(x in 1:length(nameP))
+     {
+       # NOTE(review): with formula 'count ~ trend + predictor', coefficient
+       # row 2 is the TREND term (row 3 is the predictor) — confirm row 2 is
+       # the intended coefficient to report.
+       somm.beta[n,x,ttt] <- TheOutput[[n, x, ttt]]$resultats$coefficients[2,1]
+       somm.pval[n,x,ttt] <- TheOutput[[n, x, ttt]]$resultats$coefficients[2,4]
+       somm.pval.sand[n,x,ttt] <- TheOutput[[n, x, ttt]]$sandwich.coef.test[2,4]
+     }
+   }
+ }
+#
+# 
+# Write the data: stack the per-trend slices vertically into one 2-d table.
+# This handles any number of trends (the previous version enumerated the
+# cases 1..6 by hand and aborted with "Too many trends!" beyond that).
+stacktrends <- function(a) do.call(rbind, lapply(1:length(nameT), function(ttt) rbind(a[,,ttt])))
+write.csv(stacktrends(somm.beta), paste(path, betafile,sep = "/"))     # The betas
+write.csv(stacktrends(somm.pval), paste(path, pvaluefile,sep = "/"))   # The p-values
+if ( length(args)==6  ) {
+  write.csv(stacktrends(somm.pval.sand), paste(path, pvalsandfile,sep = "/")) # The Sandwich p-values
+}
+
+rm(list=ls())
+quit()
+

+ 19 - 0
regression/README

@@ -0,0 +1,19 @@
+In this directory, you can find:
+
+1) a script which extracts a signal by linear regression over a range of lags:
+regressedts.R
+
+2) a script which extracts a signal by multilinear regression over a set of indices:
+multipleregress.R 
+
+3) a script which filters out a signal by a linear regression over a range of lags:
+filteroutreg.R 
+
+4) a script which computes Poisson regressions (predictand -> predictor):
+PoissonReg.R
+
+5) a script which computes Poisson regressions after accounting for a trend:
+PoissonRegwTrend.R 
+
+More information can be found in the documentation here:
+http://ic3.cat/wikicfu/index.php/Tools/Regression

+ 128 - 0
regression/filteroutreg.R

@@ -0,0 +1,128 @@
+#!/usr/bin/env Rscript 
+
+#
+# This script filters out from any 1d (time) to 3d (time-lat-lon) field the 
+# effect of greenhouse gases, ENSO, or any mode of variability through a 
+# multilinear regression over the corresponding index (greenhouse gas 
+# concentration, ENSO  index ..) at a set of lags you can choose. 
+#
+# Usage : ./filteroutreg.R <ncfile> <ncvar> <indexfile> <indexvar> <outfile> 
+#                         <lag1> <lag2> ... (as many lags as wanted)
+#
+# History : Virginie Guemas - Initial version                    -  2011
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+library(ncdf)
+
+args=commandArgs(TRUE)
+
+filein=args[1] # Input netcdf file 
+varin=args[2] # Input netcdf variable name
+index=args[3] # Netcdf file containing the index whose signal is to be filtered out
+varindex=args[4] # Index variable name
+fileout=args[5] # Output file
+lags=c()        # List of lags
+jind=6
+while ( jind <= length(args) ) {
+  lags=c(lags,as.numeric(args[jind]))
+  jind=jind+1
+}
+if ( is.null(lags) ) { lags=c(0) }  # default: no lag
+
+# Read the input variable and its metadata; mask fill/missing values with NA.
+fnc=open.ncdf(filein)
+absent=T
+for ( jvar in 1:fnc$nvars ) {
+  if ( fnc$var[[jvar]]$name == varin ) {
+    data=fnc$var[[jvar]]
+    unit=data$units
+    n0dims=data$ndims
+    wdimsout=data$dim
+    var=get.var.ncdf(fnc,varin)
+    ndims=length(dim(var))
+    miss=att.get.ncdf(fnc,varin,'_FillValue')
+    if ( miss$hasatt == TRUE ) { var[var==miss$value]=NA }
+    miss=att.get.ncdf(fnc,varin,'missing_value')
+    if ( miss$hasatt == TRUE ) { var[var==miss$value]=NA }
+    absent=F
+  }
+}
+if (absent) { stop("Variable not in input file !") } 
+close.ncdf(fnc)
+
+# Retrieve the time axis (months/years) of the input file via cdo.
+system(paste('cdo showmon ',filein,'> mons 2>/dev/null'))
+mons=read.table('mons')
+system(paste('cdo showyear ',filein,'> years 2>/dev/null'))
+years=read.table('years')
+system('rm -f mons years')
+
+if (is.null(ndims)) { ndims=1 }
+# Collapse (lon,lat,time) into (points,time) so each grid point is one column.
+dimsvar=switch(ndims,length(var),dim(var),c(dim(var)[1]*dim(var)[2],dim(var)[3]))
+if ( ndims > 1 ) { tmpvar=t(array(var,dim=dimsvar)) } else { tmpvar=var }
+var_ts=ts(tmpvar,start=c(years[[1]],mons[[1]]),end=c(years[[length(years)]],mons[[length(mons)]]),frequency=12)
+
+# Read the index and build its monthly time series on its own time axis.
+fnc=open.ncdf(index)
+vind=get.var.ncdf(fnc,varindex)
+close.ncdf(fnc)
+
+system(paste('cdo showmon ',index,'> mons 2>/dev/null'))
+mons=read.table('mons')
+system(paste('cdo showyear ',index,'> years 2>/dev/null'))
+years=read.table('years')
+system('rm -f mons years')
+
+index_ts=ts(vind,start=c(years[[1]],mons[[1]]),end=c(years[[length(years)]],mons[[length(mons)]]),frequency=12)
+# NOTE(review): lstind is built here but never used below — presumably dead code
+# (and the nested list(lstind,...) construction would not index as expected anyway).
+lstind=list()
+for (jlag in lags) { lstind=list(lstind,lag(index_ts,-jlag)) }
+
+def=TRUE
+if ( ndims > 1 ) {
+  # 2d/3d field: filter each grid point separately.
+  for (jpt in 1:dimsvar[1]) {
+    varjpt=var_ts[,jpt]
+    if (length(sort(varjpt)) > 1 ) {  # skip all-NA (land) points
+      toto = varjpt
+      f='dataf[,1] ~ '
+      jind=2
+      # Build the regression frame: column 1 = field, columns 2.. = lagged index.
+      for (jlag in lags) { 
+        toto = ts.intersect(toto,lag(index_ts,-jlag), dframe=FALSE) 
+        f=paste(f,'dataf[,',as.character(jind),'] +',sep="")
+        jind=jind+1
+      }
+      dataf=toto
+      if (def==TRUE) {
+        # Allocate the output array once, after the common time span is known.
+        tmp=array(,dim=c(dimsvar[1],dim(toto)[1]))
+        def=FALSE
+      }
+      f=substr(f,start=1,stop=(nchar(f)-1))  # drop the trailing '+'
+      dataf=toto  # (redundant: dataf was already assigned above)
+      lm.out=lm(as.formula(f),data=toto,na.action=NULL)
+      #lm.out=lm(varjpt ~ index_ts,na.action=na.omit)
+      #tmp[jpt,]=varjpt-(lm.out$coefficients[1]+index_ts*lm.out$coefficients[2])
+      tmp[jpt,]=toto[,1]-lm.out$fitted.values  # residuals = field with the index signal removed
+    }
+  }
+}else{
+  # 1d (time only) field.
+  toto = var_ts
+  f='dataf[,1] ~ '
+  jind=2
+  for (jlag in lags) {
+    toto = ts.intersect(toto,lag(index_ts,-jlag), dframe=FALSE)
+    f=paste(f,'dataf[,',as.character(jind),'] +',sep="")
+    jind=jind+1
+  }
+  f=substr(f,start=1,stop=(nchar(f)-1))  # drop the trailing '+'
+  dataf=toto
+  lm.out=lm(as.formula(f),data=dataf,na.action=NULL)
+  tmp=toto[,1]-lm.out$fitted.values  # residuals = field with the index signal removed
+}
+
+# Write the filtered field, replacing NA by the 1e20 fill value.
+tmp[is.na(tmp)]=1e20
+wtime=dim.def.ncdf("time",paste("months since ",as.character(start(toto)[1]),"-",as.character(start(toto)[2]),"-15 12:00:00",sep=""),seq(0,dim(toto)[1]-1))
+wdimsout[[n0dims]]=wtime
+dimsout=switch(ndims,length(tmp),dim(tmp),c(dim(var)[1],dim(var)[2],dim(tmp)[2]))
+wvarout=var.def.ncdf(varin,unit,wdimsout,1e20)
+f=create.ncdf(fileout,wvarout)
+put.var.ncdf(f,wvarout,array(tmp,dim=dimsout))
+close.ncdf(f)
+
+rm(list=ls())
+quit()

+ 123 - 0
regression/multipleregress.R

@@ -0,0 +1,123 @@
+#!/usr/bin/env Rscript 
+#
+# This script extracts from any 1d (time) to 3d (time-lat-lon) field the 
+# combined effect of greenhouse gases, ENSO, and any mode of variability 
+# through a multilinear regression over the corresponding index 
+# (greenhouse gas concentration, ENSO index ..)  
+#
+# Usage : ./multipleregress.R <ncfile> <ncvar> <indexfile1> <indexvar1> 
+#             <indexfile2> <indexvar2> (as many indices as wanted) <outfile> 
+#
+# History : Virginie Guemas - Initial version                    -  2011
+#           Fix: 1d branch regressed the field on itself (off-by-one in the
+#                formula column indices); predictors now start at column 2.
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+library(ncdf)
+
+args=commandArgs(TRUE)
+
+filein=args[1] # Input netcdf file 
+varin=args[2] # Input netcdf variable name
+jind=3
+indexes=c() # List of netcdf files containing the indices to regress on
+varindex=c() # List of index variable names
+while ( jind <= (length(args)-1) ) {
+  indexes=c(indexes,args[jind])
+  varindex=c(varindex,args[jind+1])
+  jind=jind+2
+}
+fileout=args[jind] # Output file
+
+# Read the input variable and its metadata; mask fill/missing values with NA.
+fnc=open.ncdf(filein)
+absent=T
+for ( jvar in 1:fnc$nvars ) {
+  if ( fnc$var[[jvar]]$name == varin ) {
+    data=fnc$var[[jvar]]
+    unit=data$units
+    n0dims=data$ndims
+    wdimsout=data$dim
+    var=get.var.ncdf(fnc,varin)
+    ndims=length(dim(var))
+    miss=att.get.ncdf(fnc,varin,'_FillValue')
+    if ( miss$hasatt == TRUE ) { var[var==miss$value]=NA }
+    miss=att.get.ncdf(fnc,varin,'missing_value')
+    if ( miss$hasatt == TRUE ) { var[var==miss$value]=NA }
+    absent=F
+  }
+}
+if (absent) { stop("Variable not in input file !") } 
+close.ncdf(fnc)
+
+# Retrieve the time axis (months/years) of the input file via cdo.
+system(paste('cdo showmon ',filein,'> mons 2>/dev/null'))
+mons=read.table('mons')
+system(paste('cdo showyear ',filein,'> years 2>/dev/null'))
+years=read.table('years')
+system('rm -f mons years')
+
+if (is.null(ndims)) { ndims=1 }
+# Collapse (lon,lat,time) into (points,time) so each grid point is one column.
+dimsvar=switch(ndims,length(var),dim(var),c(dim(var)[1]*dim(var)[2],dim(var)[3]))
+if ( ndims > 1 ) { tmpvar=t(array(var,dim=dimsvar)) } else { tmpvar=var }
+var_ts=ts(tmpvar,start=c(years[[1]],mons[[1]]),end=c(years[[length(years)]],mons[[length(mons)]]),frequency=12)
+
+# Read each index and build its monthly time series on its own time axis.
+lstind=list()
+for (ind in 1:length(indexes) ) {
+  fnc=open.ncdf(indexes[ind])
+  vind=get.var.ncdf(fnc,varindex[ind])
+  close.ncdf(fnc)
+
+  system(paste('cdo showmon ',indexes[ind],'> mons 2>/dev/null'))
+  mons=read.table('mons')
+  system(paste('cdo showyear ',indexes[ind],'> years 2>/dev/null'))
+  years=read.table('years')
+  system('rm -f mons years')
+
+  index_ts=ts(vind,start=c(years[[1]],mons[[1]]),end=c(years[[length(years)]],mons[[length(mons)]]),frequency=12)
+
+  lstind[[ind]]=index_ts
+}
+
+def=TRUE
+if ( ndims > 1 ) {
+  # 2d/3d field: regress each grid point separately.
+  for (jpt in 1:dimsvar[1]) {
+    varjpt=var_ts[,jpt]
+    if (length(sort(varjpt)) > 1 ) {  # skip all-NA (land) points
+      toto = varjpt
+      f='dataf[,1] ~ '
+      # Regression frame: column 1 = field, columns 2.. = the indices.
+      for (ind in 1:length(indexes)) { 
+        toto = ts.intersect(toto,lstind[[ind]], dframe=FALSE) 
+        f=paste(f,'dataf[,',as.character(ind+1),'] +',sep="")
+      }
+      dataf=toto
+      if (def==TRUE) {
+        # Allocate the output array once, after the common time span is known.
+        tmp=array(,dim=c(dimsvar[1],dim(toto)[1]))
+        def=FALSE
+      }
+      f=substr(f,start=1,stop=(nchar(f)-1))  # drop the trailing '+'
+      dataf=toto
+      lm.out=lm(as.formula(f),data=toto,na.action=NULL)
+      tmp[jpt,]=lm.out$fitted.values  # fitted values = the regressed signal
+    }
+  }
+}else{
+  # 1d (time only) field.
+  toto = var_ts
+  f='dataf[,1] ~ '
+  for (ind in 1:length(indexes)) {
+    toto = ts.intersect(toto,lstind[[ind]], dframe=FALSE)       
+    # BUG FIX: predictors start at column 2 of dataf (column 1 is the field
+    # itself); the previous 'ind' made the first predictor the response.
+    f=paste(f,'dataf[,',as.character(ind+1),'] +',sep="")
+  }
+  f=substr(f,start=1,stop=(nchar(f)-1))  # drop the trailing '+'
+  dataf=toto
+  lm.out=lm(as.formula(f),data=dataf,na.action=NULL)
+  tmp=lm.out$fitted.values  # fitted values = the regressed signal
+}
+
+# Write the regressed signal, replacing NA by the 1e20 fill value.
+tmp[is.na(tmp)]=1e20
+wtime=dim.def.ncdf("time",paste("months since ",as.character(start(toto)[1]),"-",as.character(start(toto)[2]),"-15 12:00:00",sep=""),seq(0,dim(toto)[1]-1))
+wdimsout[[n0dims]]=wtime
+dimsout=switch(ndims,length(tmp),dim(tmp),c(dim(var)[1],dim(var)[2],dim(tmp)[2]))
+wvarout=var.def.ncdf(varin,unit,wdimsout,1e20)
+f=create.ncdf(fileout,wvarout)
+put.var.ncdf(f,wvarout,array(tmp,dim=dimsout))
+close.ncdf(f)
+
+rm(list=ls())
+quit()

+ 128 - 0
regression/regressedts.R

@@ -0,0 +1,128 @@
+#!/usr/bin/env Rscript 
+
+# This script extracts from any 1d (time) to 3d (time-lat-lon) field the effect 
+# of greenhouse gases, ENSO, or any mode of variability through a multilinear 
+# regression over the corresponding index (greenhouse gas concentration, ENSO 
+# index ..) at a set of lags you can choose. 
+#
+# Usage : ./regressedts.R <ncfile> <ncvar> <indexfile> <indexvar> <outfile> 
+#                         <lag1> <lag2> ... (as many lags as wanted)
+#
+# History : Virginie Guemas - Initial version                    -  2011
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+library(ncdf)
+
+args=commandArgs(TRUE)
+
+# Command line arguments :
+filein=args[1] # Input netcdf file 
+varin=args[2] # Input netcdf variable name
+index=args[3] # Netcdf file containing the index to regress on
+varindex=args[4] # Index variable name
+fileout=args[5] # Output file
+lags=c()        # List of lags
+jind=6
+while ( jind <= length(args) ) {
+  lags=c(lags,as.numeric(args[jind]))
+  jind=jind+1
+}
+if ( is.null(lags) ) { lags=c(0) }  # default: no lag
+
+# Read the input variable and its metadata; mask fill/missing values with NA.
+fnc=open.ncdf(filein)
+absent=T
+for ( jvar in 1:fnc$nvars ) {
+  if ( fnc$var[[jvar]]$name == varin ) {
+    data=fnc$var[[jvar]]
+    unit=data$units
+    n0dims=data$ndims
+    wdimsout=data$dim
+    var=get.var.ncdf(fnc,varin)
+    ndims=length(dim(var))
+    miss=att.get.ncdf(fnc,varin,'_FillValue')
+    if ( miss$hasatt == TRUE ) { var[var==miss$value]=NA }
+    miss=att.get.ncdf(fnc,varin,'missing_value')
+    if ( miss$hasatt == TRUE ) { var[var==miss$value]=NA }
+    absent=F
+  }
+}
+if (absent) { stop("Variable not in input file !") } 
+close.ncdf(fnc)
+
+# Retrieve the time axis (months/years) of the input file via cdo.
+system(paste('cdo showmon ',filein,'> mons 2>/dev/null'))
+mons=read.table('mons')
+system(paste('cdo showyear ',filein,'> years 2>/dev/null'))
+years=read.table('years')
+system('rm -f mons years')
+
+if (is.null(ndims)) { ndims=1 }
+# Collapse (lon,lat,time) into (points,time) so each grid point is one column.
+dimsvar=switch(ndims,length(var),dim(var),c(dim(var)[1]*dim(var)[2],dim(var)[3]))
+if ( ndims > 1 ) { tmpvar=t(array(var,dim=dimsvar)) } else { tmpvar=var }
+var_ts=ts(tmpvar,start=c(years[[1]],mons[[1]]),end=c(years[[length(years)]],mons[[length(mons)]]),frequency=12)
+
+# Read the index and build its monthly time series on its own time axis.
+fnc=open.ncdf(index)
+vind=get.var.ncdf(fnc,varindex)
+close.ncdf(fnc)
+
+system(paste('cdo showmon ',index,'> mons 2>/dev/null'))
+mons=read.table('mons')
+system(paste('cdo showyear ',index,'> years 2>/dev/null'))
+years=read.table('years')
+system('rm -f mons years')
+
+index_ts=ts(vind,start=c(years[[1]],mons[[1]]),end=c(years[[length(years)]],mons[[length(mons)]]),frequency=12)
+# NOTE(review): lstind is built here but never used below — presumably dead code
+# (and the nested list(lstind,...) construction would not index as expected anyway).
+lstind=list()
+for (jlag in lags) { lstind=list(lstind,lag(index_ts,-jlag)) }
+
+def=TRUE
+if ( ndims > 1 ) {
+  # 2d/3d field: regress each grid point separately.
+  for (jpt in 1:dimsvar[1]) {
+    varjpt=var_ts[,jpt]
+    if (length(sort(varjpt)) > 1 ) {  # skip all-NA (land) points
+      toto = varjpt
+      f='dataf[,1] ~ '
+      jind=2
+      # Regression frame: column 1 = field, columns 2.. = lagged index.
+      for (jlag in lags) { 
+        toto = ts.intersect(toto,lag(index_ts,-jlag), dframe=FALSE) 
+        f=paste(f,'dataf[,',as.character(jind),'] +',sep="")
+        jind=jind+1
+      }
+      dataf=toto
+      if (def==TRUE) {
+        # Allocate the output array once, after the common time span is known.
+        tmp=array(,dim=c(dimsvar[1],dim(toto)[1]))
+        def=FALSE
+      }
+      f=substr(f,start=1,stop=(nchar(f)-1))  # drop the trailing '+'
+      dataf=toto  # (redundant: dataf was already assigned above)
+      lm.out=lm(as.formula(f),data=toto,na.action=NULL)
+      #lm.out=lm(varjpt ~ index_ts,na.action=na.omit)
+      #tmp[jpt,]=varjpt-(lm.out$coefficients[1]+index_ts*lm.out$coefficients[2])
+      tmp[jpt,]=lm.out$fitted.values  # fitted values = the regressed signal
+    }
+  }
+}else{
+  # 1d (time only) field.
+  toto = var_ts
+  f='dataf[,1] ~ '
+  jind=2
+  for (jlag in lags) {
+    toto = ts.intersect(toto,lag(index_ts,-jlag), dframe=FALSE)
+    f=paste(f,'dataf[,',as.character(jind),'] +',sep="")
+    jind=jind+1
+  }
+  f=substr(f,start=1,stop=(nchar(f)-1))  # drop the trailing '+'
+  dataf=toto
+  lm.out=lm(as.formula(f),data=dataf,na.action=NULL)
+  tmp=lm.out$fitted.values  # fitted values = the regressed signal
+}
+
+# Write the regressed signal, replacing NA by the 1e20 fill value.
+tmp[is.na(tmp)]=1e20
+wtime=dim.def.ncdf("time",paste("months since ",as.character(start(toto)[1]),"-",as.character(start(toto)[2]),"-15 12:00:00",sep=""),seq(0,dim(toto)[1]-1))
+wdimsout[[n0dims]]=wtime
+dimsout=switch(ndims,length(tmp),dim(tmp),c(dim(var)[1],dim(var)[2],dim(tmp)[2]))
+wvarout=var.def.ncdf(varin,unit,wdimsout,1e20)
+f=create.ncdf(fileout,wvarout)
+put.var.ncdf(f,wvarout,array(tmp,dim=dimsout))
+close.ncdf(f)
+
+rm(list=ls())
+quit()

+ 82 - 0
transfer/chkexpjlt.sh

@@ -0,0 +1,82 @@
+#!/bin/bash
+# ./chkexpjlt.sh arch expid sesth expvol >& expid.log &
+
+#set -xuve
+
+arch=$1
+expid=$2
+sesth=$3 # sim (job) estimated computing time (in hours)
+expvol=$4
+src=/esnas/autosubmit/$expid/tmp
+case $arch in
+ mn-*)     rsrc=/gpfs/scratch/*/*; rptr="\.err" ;;
+ ithaca)   rsrc=/scratch/cfu/*; rptr="\.e" ;;
+ ecmwf)    rsrc=c2a:/scratch/ms; rptr="\.err" ;;
+ ht-*)     rsrc=/work/pr1u1011/pr1u1011/* ; rptr="\.e" ;;
+ lindgren) rsrc=/cfs/klemming/scratch/*/* ;;
+ jaguar)   rsrc= ; rptr="" ;;
+ ar-*)     rsrc=/work/pr1u1011/pr1u1011/* ; rptr="\.e" ;;
+ *) echo "!!! $arch is not available !!!"; exit 1 ;;
+esac
+rlst=/tmp/$expid.chkexpjlt.$$
+case $arch in
+ ecmwf)
+  hpcproj=$(grep -w HPCPROJ /esnas/autosubmit/$expid/conf/expdef_${expid}.conf | cut -d '=' -f2 |sed 's/ //g')
+  hpcuser=$(grep -w HPCUSER /esnas/autosubmit/$expid/conf/expdef_${expid}.conf | cut -d '=' -f2 | sed 's/ //g')
+  ecaccess-file-dir $rsrc/$hpcproj/$hpcuser/$expid/LOG_$expid > $rlst
+ ;;
+ *)
+  ssh $arch ls -1 $rsrc/$expid/LOG_$expid > $rlst
+ ;;
+esac
+
+#compute job life time (jlt) for sim jobs
+echo "JNo. JLT(Hours)* JET(Hours) JFailed JName Chunk_start_date Chunk_end_date"
+typ=sim
+cnt=0
+sdates=$(ls -1 $src/${expid}_*_${typ}_COMPLETED | cut -d '_' -f 2 | uniq)
+started=$(ls -rt $src/*.cmd | head -n 1 | xargs stat -c %Y)
+for s in $sdates; do 
+ mems=$(ls -1 $src/${expid}_${s}_*_${typ}_COMPLETED | cut -d '_' -f 3 | uniq)
+ for m in $mems; do
+  cnt2=0
+  chunks=$(ls -1rt $src/${expid}_${s}_${m}_*_${typ}_COMPLETED | cut -d '_' -f 4 | uniq)
+  for c in $chunks; do
+   cnt=$((cnt+1)); ncnt=$(printf "%04d" $cnt)
+   cnt2=$((cnt2+1))
+   job=${expid}_${s}_${m}_${c}_${typ}
+   csd=$(grep 'Chunk_start_date=' $src/$job.cmd | cut -d '=' -f 2)
+   ced=$(grep 'Chunk_end_date=' $src/$job.cmd | cut -d '=' -f 2)
+   case $cnt2 in
+    1) job_1=${expid}_${s}_${m}_ini ;;
+    #1) job_1=${expid}_${s}_${m}_1_init ;;
+    *) job_1=$job_tmp ;;
+   esac
+   ts=$(ls -rt $src/${job_1}_COMPLETED | xargs stat -c %Y)
+   te=$(ls -rt $src/${job}_COMPLETED | xargs stat -c %Y)
+   jlt=$((te - ts))
+   jlt=$(echo | awk "{print $jlt/3600}"); njlt=$(printf "%0.2f" $jlt) # job life time
+   jwt=$(echo | awk "{print $jlt-$sesth}"); njwt=$(printf "%0.2f" $jwt) # job wasted time
+   nfj=$(cat $rlst | grep ${job} | grep $rptr | wc -l); nfj=$((nfj-1)); nnfj=$(printf "%02d" $nfj) # no. of failed jobs
+   echo $ncnt $njlt $njwt $nnfj $job $csd $ced
+   job_tmp=$job
+  done
+ done
+done
+till=$(ls -rt $src/*_COMPLETED | tail -n 1 | xargs stat -c %Y)
+elt=$((till - started))
+elt=$(echo | awk "{print $elt/86400}"); nelt=$(printf "%0.2f" $elt)
+ajpd=$(echo | awk "{print $cnt/$nelt}"); najpd=$(printf "%0.2f" $ajpd)
+echo 
+echo "$expid (optimum computing time per job ~${sesth}H)"
+echo "started: $(date -d @$started)" 
+echo "...till: $(date -d @$till)"
+echo "$cnt jobs completed in ~${nelt} days which implies"
+echo "average ~$najpd jobs completed per single day."
+echo "Until: `date`"
+echo "no. of pending jobs is $(($expvol-$cnt)) out of $expvol."
+echo
+echo "* Job life time (JLT) since Autosubmit started the job first time"
+echo "* Job excess time (JET) including queue + machine + autosubmit + human intervention etc"
+#rm -f $rlst
+

+ 63 - 0
transfer/chkexpout.sh

@@ -0,0 +1,63 @@
+#!/bin/bash
+# Summarise the stored output of an experiment on /esnas: number of monthly
+# means, restarts, grib and cmorized files per start date / member, plus
+# total disk usage per tree.
+# ./chkexpout.sh model expid
+
+#set -xuve
+
+model=$1
+expid=$2
+src1=/esnas/exp/$model                 # logfiles + MM* outputs
+src2=/esnas/exp/$model/restartfiles    # REST*
+src30=/esnas/exp/$model/gribfiles
+src3=/esnas/exp/$model/rawfiles        # ICM*
+src4=/esnas/exp/$model/cmorfiles       # CMOR*
+
+# FIX: the [0-9]... pattern was passed to grep unquoted, so the shell could
+# glob-expand it against files in the current directory before grep saw it.
+sdates=$(ls -1 $src1/$expid | grep '[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]')
+for s in $sdates; do
+ mems=$(ls -1 $src1/$expid/$s | grep fc)
+ for m in $mems; do
+  if [[ -d $src1/$expid/$s/$m ]]; then
+   l=$(ls -1 $src1/$expid/$s/$m | grep logfiles | wc -l)
+   mm=""
+   if [[ -d $src1/$expid/$s/$m/outputs ]]; then 
+    a=$(ls -1 $src1/$expid/$s/$m/outputs | grep MMA | wc -l)
+    o=$(ls -1 $src1/$expid/$s/$m/outputs | grep MMO | wc -l)
+    sz=$(du -chs $src1/$expid/$s/$m/outputs | grep total | awk '{print $1}')
+    mm="MMA=$a MMO=$o MM_SZ=$sz"
+   fi
+   rest=""
+   # old option: restarts stored under the main experiment tree
+   if [[ -d $src1/$expid/$s/$m/restarts ]]; then
+    r=$(ls -1 $src1/$expid/$s/$m/restarts | grep REST | wc -l)
+    sz=$(du -chs $src1/$expid/$s/$m/restarts | grep total | awk '{print $1}')
+    rest="REST=$r REST_SZ=$sz"
+   fi
+   # new location: restartfiles tree (overrides the counts above if both exist)
+   if [[ -d $src2/$expid/$s/$m/restarts ]]; then
+    r=$(ls -1 $src2/$expid/$s/$m/restarts | grep REST | wc -l)
+    sz=$(du -chs $src2/$expid/$s/$m/restarts | grep total | awk '{print $1}')
+    rest="REST=$r REST_SZ=$sz"
+   fi
+   grb=""
+   if [[ -d $src3/$expid/$s/$m/outputs ]]; then 
+    gg=$(ls -1 $src3/$expid/$s/$m/outputs | grep ICMGG | wc -l)
+    sh=$(ls -1 $src3/$expid/$s/$m/outputs | grep ICMSH | wc -l)
+    sz=$(du -chs $src3/$expid/$s/$m/outputs | grep total | awk '{print $1}')
+    grb="GG=$gg SH=$sh GRB_SZ=$sz"
+   fi
+   cmor=""
+   if [[ -d $src4/$expid/$s/$m/outputs ]]; then
+    ca=$(ls -1 $src4/$expid/$s/$m/outputs | grep CMORA | wc -l)
+    co=$(ls -1 $src4/$expid/$s/$m/outputs | grep CMORO | wc -l)
+    sz=$(du -chs $src4/$expid/$s/$m/outputs | grep total | awk '{print $1}')
+    cmor="CMORA=$ca CMORO=$co CMOR_SZ=$sz"
+   fi
+   echo "$expid $s $m LOGS=$l ${mm} ${rest} $grb $cmor"
+  fi
+ done
+done
+# Total sizes of each storage tree for this expid.
+du -chs $src1/$expid | grep $expid
+du -chs $src2/$expid | grep $expid
+du -chs $src30/$expid | grep $expid
+du -chs $src3/$expid | grep $expid
+du -chs $src4/$expid | grep $expid
+

+ 161 - 0
transfer/data_transfer.sh

@@ -0,0 +1,161 @@
+#!/bin/bash
+#
+# nohup ./data_transfer.sh >& expid.log &
+#
+
+##################################
+####  User Defined Variables  #### 
+##################################
+
+# User must fill-in
+arch="" # choose platform (ithaca, mn-*, cfs-aux-4, ecmwf, ht-*, ar-*)
+model="" # choose model (ecearth, nemo)
+expid="" # supply expid: e.g. xxxx
+sdates="" # supply list of start dates: e.g. 19601101 19651101 ...
+members="" # supply list of members: e.g. fc0 fc1 fc2 ...
+remove=TRUE # remove output from source by default (if user want to kee the model output at HPC make it FALSE)
+
+# User may change
+types="logfiles MM PP diags REST restart ICM CMOR"
+dst1=/esnas/exp/$model # store logfiles, MM*
+dst2=/esnas/exp/$model/restartfiles # store REST*
+dst3=/esnas/exp/$model/rawfiles # store ICM*, ORCA, EXPID_* (raw data)
+dst4=/esnas/exp/$model/cmorfiles # store CMOR*
+
+#####  End of User Defined Variables  ####
+
+#################################
+####  User Defined Funtions  #### 
+#################################
+
+function transfer1(){
+ set -e
+ dst=$1/$expid/$s/$m/$2
+ if [[ $remove == TRUE ]]; then
+  RSYNC="rsync -acv --remove-sent-files"
+ else
+  RSYNC="rsync -acv"
+ fi
+ for l in $list; do
+  echo "tranfering file ... ${arch}:$l $dst"
+  $RSYNC ${arch}:${l} $dst
+  echo "done."
+  echo
+  echo
+ done
+ set +e
+}
+
+# transfer2 DSTROOT SUBDIR
+# ECMWF variant: fetch every file listed in the global $list from ECFS with
+# ecaccess-file-get; with remove=TRUE the remote copy is deleted only after
+# a successful download.
+# Globals read: src expid s m list remove; globals written: dst.
+function transfer2(){
+ dst=$1/$expid/$s/$m/$2
+ local rfile
+ for l in $list; do
+  rfile=$src/$expid/$s/$m/$2/$l
+  echo "tranfering file ... $rfile $dst/$l"
+  if ecaccess-file-get $rfile $dst/$l; then
+   [[ $remove == TRUE ]] && ecaccess-file-delete -force $rfile
+  fi
+  echo "done."
+  echo
+  echo
+ done
+}
+
+####  End of the User Defined Functions  #### 
+
+###################################
+####  Main Part of the Script  ####
+###################################
+
+date
+echo "platform is: $arch"
+echo "model is: $model"
+echo "expid is: $expid"
+echo "start dates: $sdates"
+echo "members per start date: $members"
+echo "remove output from HPC disks: $remove"
+
+# For every start date / member: pick the remote listing command and the
+# transfer function for the platform, then list and pull each data type.
+for s in $sdates; do
+ for m in $members; do
+
+  rmtlist="ssh $arch ls -1"
+  transfer="transfer1"
+
+  case $arch in
+   ithaca)
+    src=/share/data/cfu/exp
+   ;;
+   mn-*)
+    src=/gpfs/scratch/ecm86/ecm86010/exp
+   ;;
+   cfs-aux-4)
+    src=/cfs/klemming/nobackup/a/asifsami/exp
+   ;;
+   ecmwf*)
+    # ECFS has no ssh/rsync access: use the ecaccess tools instead
+    rmtlist="ecaccess-file-dir"
+    transfer="transfer2"
+    src=ec:/c3m/exp
+   ;;
+   ht-*)
+    src=/work/pr1u1011/pr1u1011/pr1e1001/exp
+   ;;
+   ar-*)
+    src=/work/pr1u1011/pr1u1011/shared/exp
+   ;;
+   *)
+    echo "$arch is not a valid machine"
+   ;;
+  esac
+
+  # Each type is only transferred when the remote listing is non-empty.
+  for typ in $types; do
+   case $typ in
+    logfiles)
+     lswc=`$rmtlist $src/$expid/$s/$m/logfiles* | wc -l`
+     if [[ $lswc -gt 0 ]]; then
+      mkdir -p $dst1/$expid/$s/$m
+      list=`$rmtlist $src/$expid/$s/$m/logfiles*`
+      $transfer $dst1 ""
+     fi
+    ;;
+    MM|PP|diags)
+     lswc=`$rmtlist $src/$expid/$s/$m/outputs/${typ}* | wc -l`
+     if [[ $lswc -gt 0 ]]; then
+      mkdir -p $dst1/$expid/$s/$m/outputs
+      list=`$rmtlist $src/$expid/$s/$m/outputs/${typ}*`
+      $transfer $dst1 outputs
+     fi
+    ;;
+    REST|restart)
+     lswc=`$rmtlist $src/$expid/$s/$m/restarts/${typ}* | wc -l`
+     if [[ $lswc -gt 0 ]]; then
+      mkdir -p $dst2/$expid/$s/$m/restarts
+      list=`$rmtlist $src/$expid/$s/$m/restarts/${typ}*`
+      $transfer $dst2 restarts
+     fi
+    ;;
+    # NOTE(review): the eval builds the "<expid>_" pattern at run time, but
+    # ORCA and ${expid}_ are only reachable if added to $types above --
+    # confirm whether those raw-output types are still wanted.
+    ICM|ORCA|$(eval echo ${expid}_))
+     lswc=`$rmtlist $src/$expid/$s/$m/outputs/${typ}* | wc -l`
+     if [[ $lswc -gt 0 ]]; then
+      mkdir -p $dst3/$expid/$s/$m/outputs
+      list=`$rmtlist $src/$expid/$s/$m/outputs/${typ}*`
+      $transfer $dst3 outputs
+     fi
+    ;;
+    CMOR)
+     lswc=`$rmtlist $src/$expid/$s/$m/outputs/${typ}* | wc -l`
+     if [[ $lswc -gt 0 ]]; then
+      mkdir -p $dst4/$expid/$s/$m/outputs
+      list=`$rmtlist $src/$expid/$s/$m/outputs/${typ}*`
+      $transfer $dst4 outputs
+     fi
+    ;;
+    *)
+     echo "$typ is not a valid data type"
+    ;;
+   esac
+  done
+
+ done
+done
+date
+
+####  End of the Main Part of Script  ####
+ 32 - 0
transfer/download_atm_nudging.sh

@@ -0,0 +1,32 @@
+#!/bin/bash
+# Download atmospheric nudging reference files from ECFS to esnas.
+# Before downloading; the nudging reference files it should be well prepared at ECMWF's c2a
+# in the context of cfu's infrastructure (use "prepare_atm_nudging.cmd" over there first).
+# nohup ./download_atm_nudging.sh T255L91 b0ir >& b0ir.log &
+#
+set -xuve
+date
+
+grd=$1 # grid, e.g. T255L91
+ver=$2 # version/expid, e.g. b0ir
+
+src=ec:/c3m/nudging/atmos/$grd/$ver
+dst=/esnas/releases/nudging/atmos/$grd/$ver
+
+mkdir -p $dst
+# $list doubles as a resume file: each transferred file is removed from it,
+# so a rerun continues where the previous run stopped.
+list=atm-${grd}-${ver}
+
+if [[ ! -e $list ]]; then # -e instead of the deprecated -a
+   ecaccess-file-dir $src > $list
+fi
+
+for l in $(cat $list); do
+ # FIX: with "set -e" active the script aborts on a failed transfer before
+ # any later "$? -ne 0" test can run (dead code); test the command directly.
+ if ! ecaccess-file-get $src/$l $dst/$l; then
+    exit 1
+ fi
+ # drop the file just transferred from the resume list
+ cat $list | sed -e '1d' > ${list}.tmp
+ mv ${list}.tmp $list
+done
+cat $list; rm $list
+
+date

+ 32 - 0
transfer/download_atm_perturb.sh

@@ -0,0 +1,32 @@
+#!/bin/bash
+# Download atmospheric perturbation files from ECFS to /cfu/releases.
+# Before downloading; the perturb it should be well prepared at ECMWF's c2a in the
+# context of cfu's infrastructure (use "prepare_atm_perturb.cmd" over there first).
+# nohup ./download_atm_perturb.sh T255L62 b0ga >& download.log &
+#
+set -xuve
+date
+
+grd=$1 # grid, e.g. T255L62
+ver=$2 # version/expid, e.g. b0ga
+
+src=ec:/c3m/perturb/atmos/$grd/$ver
+dst=/cfu/releases/perturb/atmos/$grd/$ver
+
+mkdir -p $dst
+# $list doubles as a resume file: each transferred file is removed from it,
+# so a rerun continues where the previous run stopped.
+list=atm-${grd}-${ver}
+
+if [[ ! -e $list ]]; then # -e instead of the deprecated -a
+   ecaccess-file-dir $src > $list
+fi
+
+for l in $(cat $list); do
+ # FIX: with "set -e" active the script aborts on a failed transfer before
+ # any later "$? -ne 0" test can run (dead code); test the command directly.
+ if ! ecaccess-file-get $src/$l $dst/$l; then
+    exit 1
+ fi
+ # drop the file just transferred from the resume list
+ cat $list | sed -e '1d' > ${list}.tmp
+ mv ${list}.tmp $list
+done
+cat $list; rm $list
+
+date

+ 265 - 0
transfer/download_hsm

@@ -0,0 +1,265 @@
+#!/usr/bin/env bash
+# Download EC-Earth experiment output archived on the BSC HSM to cfunas,
+# staging each file through the MareNostrum scratch "container" directory.
+set -exf   # NB: -f also disables local glob expansion for the whole script
+###########################################
+# get USER ID on BSC for different CFU users #
+# You have to be able to login in to BSC with the command "ssh mn" without using password
+# NOTE(review): an unrecognised $USER leaves MN_USER unset (the script does
+# not use "set -u", so it silently expands empty in later paths) -- confirm
+# whether a default or an error case is needed.
+###########################################
+case $USER in
+ fdoblasreyes)
+ MN_USER='ecm86998'
+ ;;
+ huidu)
+ MN_USER='ecm86133'
+ ;;
+ jgarcia)
+ MN_USER='ecm86734'
+ ;;
+ masif)
+ MN_USER='ecm86010'
+ ;;
+ omula)
+ MN_USER='ecm86843'
+ ;;
+ vguemas)
+ MN_USER='ecm86859'
+ ;;
+ acarrassi)
+ MN_USER='ecm86014'
+ ;;
+esac
+
+#######################
+## get arguments
+######################
+
+
+# -e expid  -s start date  -m member  -t data type  -f single file name
+# -g file type (nc|grb)
+while getopts e:s:m:t:f:g: option
+do
+  case $option in
+    e) exp_id=$OPTARG;;
+    s) sdate=$OPTARG;;
+    m) mem=$OPTARG;;
+    t) data_type=$OPTARG;;
+    f) file=$OPTARG;;
+    g) filetype=$OPTARG;; 
+    \?) exit 1;;
+  esac
+done
+
+######################
+## functions 
+######################
+
+# List the start dates available on the HSM node for experiment $1;
+# result is stored in the global sdate_lst.
+get_sdate(){
+ sdate_lst=$(ssh mn ssh login8 ls $BASE_DIR/$1)
+}
+
+# List the members available on the HSM node for experiment $1 and start
+# date $2; result is stored in the global member_lst.
+get_member(){
+ member_lst=$(ssh mn ssh login8 ls $BASE_DIR/$1/$2)
+}
+
+grab_data(){  # download all ICM${file_type}*.grb outputs for one member
+ # $1 start date, $2 member, $3 grib file type (gg|sh)
+ # Route: HSM -> BSC scratch ($MN_CONTAINER) -> cfunas.
+
+ sdate=$1
+ mem=$2
+ file_type=$3
+
+ HSM_DIR=/HSM/ecm86/exp/${exp_id}/${sdate}/${mem}/outputs
+ TARGET_DIR=/cfunas/exp/ecearth/${exp_id}/${sdate}/${mem}/outputs
+
+ mkdir -p ${TARGET_DIR}
+
+ file_list=`ssh mn ssh login8 find $HSM_DIR -type f -iname "ICM${file_type}*.grb"`
+
+ # FIX: file_list is a plain string, so ${#file_list[@]} was always 1 and
+ # the original emptiness check could never fire; test the string instead.
+ # (Also report the directory that was searched, not $1 = the start date.)
+ if [[ -z $file_list ]]; then
+  echo "There are no elements in the given PATH: $HSM_DIR"
+  return 1
+ fi
+
+ for f in $file_list; do
+  f1=`basename $f`
+  ssh mn rsync login7:$f $MN_CONTAINER/ # HSM to SCRATCH of BSC
+  rsync mn:$MN_CONTAINER/$f1 ${TARGET_DIR}/ # SCRATCH of BSC to cfunas
+  ssh mn rm $MN_CONTAINER/$f1  # clean BSC
+ done #loop for files
+
+}
+
+# Download one explicitly named file for start date $1 / member $2.
+# Route: HSM -> BSC scratch ($MN_CONTAINER) -> cfunas.  The three remote
+# steps are order dependent: stage, pull, then clean the scratch copy.
+grab_one_file(){
+
+sdate=$1
+mem=$2
+f=$3
+HSM_DIR=/HSM/ecm86/exp/${exp_id}/${sdate}/${mem}/outputs
+TARGET_DIR=/cfunas/exp/ecearth/${exp_id}/${sdate}/${mem}/outputs
+mkdir -p ${TARGET_DIR}
+ssh mn rsync login7:$HSM_DIR/$f $MN_CONTAINER/ # HSM to SCRATCH of BSC
+rsync mn:$MN_CONTAINER/$f ${TARGET_DIR}/ # SCRATCH of BSC to cfunas
+ssh mn rm $MN_CONTAINER/$f  # clean BSC
+
+}
+
+
+# Print usage information on stdout.
+print_help(){
+echo 
+echo "File type (netcdf or grb) have to be specified: -g <filetype>, where filetype is either netcdf or grb"
+echo "USAGE: $(basename $0) -g <filetype> -e <exp_id> -s <startdate> -m <mem> -t <data_type>"
+echo "If <filetype> is grb file, <data_type> has to be either sh or gg"
+echo "OR: $(basename $0) -f <filename>"
+echo "if no filename (which have to including the information of sdates and member) is specify, <exp_id> is required, in this case, all outputs, restart and log files will be downloaded"
+echo "<data_type> can be mm/mmo/mma/rest/resta/resto/rest/log/diags/sh/gg"
+echo
+
+}
+
+# Validate the argument combinations for the requested file type.
+# NOTE(review): the help text says "netcdf" but the value accepted here and
+# in the main dispatch below is "nc" -- confirm which is intended.
+if [[ $filetype == '' ]];then
+  print_help
+  exit 1
+else
+  case $filetype in 
+   nc)
+    if [[ $file == '' ]] && [[ $exp_id == '' ]];then
+    print_help
+    exit 1
+    fi 
+   ;;
+   grb)
+    if [[ $file != '' ]]; then
+      if [[ $sdate == '' ]] || [[ $mem == '' ]];then 
+       echo 
+       echo "You have to specify both the starting date and member for this grb file"
+       echo 
+       # NOTE(review): execution continues after this warning -- confirm
+       # whether an "exit 1" is missing here.
+      fi  
+
+    fi
+   ;;
+  esac
+
+fi
+
+##################
+# Constants      #
+##################
+BASE_DIR='/HSM/ecm86/exp'
+MN_CONTAINER="/gpfs/scratch/ecm86/${MN_USER}/container/"
+HSM_NODE='login8'
+# NOTE(review): BASE_DIR already starts with '/', so this expands with a
+# double slash ('//HSM/...'); harmless for find, but worth tidying.
+HSM_DIR=/${BASE_DIR}/${exp_id}/${sdate}/${mem}/
+
+#####################
+# create directoris #yy
+#####################
+
+# staging area on MareNostrum scratch
+ssh mn "mkdir -p ${MN_CONTAINER}"
+
+##################
+# main script: dispatch on the requested file type
+##################
+
+case $filetype in 
+
+ grb)
+  # Raw grib outputs.  sdate and/or mem may be omitted to loop over all
+  # available start dates / members.
+  if [[ $file == '' ]]; then
+
+   if [[ $sdate == '' ]]; then  #I
+    get_sdate ${exp_id}
+    if [[ $mem == '' ]]; then #II
+     # all start dates, all members
+     for sdate in ${sdate_lst[@]}; do
+      get_member ${exp_id} ${sdate}
+      for mem in ${member_lst}; do
+       grab_data ${sdate} ${mem} ${data_type}
+      done # loop for member
+     done # loop for starting dates
+    else
+     # all start dates, one member
+     for sdate in ${sdate_lst[@]}; do
+      grab_data ${sdate} ${mem} ${data_type}
+     done # loop for starting dates
+    fi #II
+   else #I
+    if [[ $mem == '' ]]; then
+     # one start date, all members
+     get_member ${exp_id} $sdate
+     for mem in ${member_lst}; do
+      grab_data ${sdate} ${mem} ${data_type}
+     done # loop for member
+    else
+     # one start date, one member
+     grab_data ${sdate} ${mem} ${data_type}
+    fi
+   fi #I
+
+  else
+
+   # a single named grb file was requested
+   if [[ $sdate == '' ]] || [[ $mem == '' ]]; then
+    echo "You have to specify both sdate and mem in this case"
+    # FIX: "return" is invalid outside a function; use exit
+    exit 1
+   else
+    grab_one_file $sdate $mem $file
+   fi
+
+  fi
+
+ ;;
+
+ nc)  
+
+  # Tarred netcdf products (MM*, diags, restarts, logfiles).
+  if [ -z "$file" ]; then 
+   file_list=`ssh mn ssh login8 find $HSM_DIR -type f -iname "$data_type*.tar"`
+  else
+   exp_id=`echo $file|cut -f2 -d'_'`
+   # FIX: the original searched $BASE_DIR/${expid} with an undefined
+   # variable (the script's variable is exp_id), i.e. it scanned the
+   # whole archive tree instead of this experiment.
+   file_list=`ssh mn ssh login8 find $BASE_DIR/${exp_id} -type f -iname "$file"`
+  fi
+
+  # FIX: same broken ${#file_list[@]} emptiness test as in grab_data, and
+  # "return" is invalid at top level -- test the string and exit instead.
+  if [[ -z $file_list ]]; then
+   echo "There are no elements in the given PATH"
+   exit 1
+  fi
+ 
+
+  for f in $file_list; do
+
+   f1=`basename $f`
+   # tarball names look like <TYPE>_<expid>_<sdate>_<mem>_...
+   type=`echo $f1|cut -f1 -d'_'`
+   sdate=`echo $f1|cut -f3 -d'_'`
+   mem=`echo $f1|cut -f4 -d'_'`
+   echo $type $sdate $mem
+   # pick the destination subtree from the tarball prefix
+   case $type in 
+    'RESTA'|'RESTO')
+     TARGET_DIR=/cfunas/exp/ecearth/${exp_id}/${sdate}/${mem}/restarts
+     ;;
+    'MMA'|'MMO'|'diags')
+     TARGET_DIR=/cfunas/exp/ecearth/${exp_id}/${sdate}/${mem}/outputs 
+     ;;
+    'logfile')
+     TARGET_DIR=/cfunas/exp/ecearth/${exp_id}/${sdate}/${mem}/log
+     ;;
+   esac
+
+   if [[ ! -d "$TARGET_DIR" ]]; then
+    mkdir -p $TARGET_DIR
+   fi
+ 
+   ssh mn rsync login7:$f $MN_CONTAINER/ # HSM to SCRATCH of BSC
+   rsync mn:$MN_CONTAINER/$f1 ${TARGET_DIR}/ # SCRATCH of BSC to cfunas
+   ssh mn rm $MN_CONTAINER/$f1  # clean BSC
+  done
+
+ ;;
+
+esac
+ 33 - 0
transfer/download_ic.sh

@@ -0,0 +1,33 @@
+#!/bin/bash
+# Before downloading; the ic's should be well prepared at ECMWF's CCA.
+# nohup ./download_ic.sh atmos T255L62 b0ga >& download.log &
+# nohup ./download_ic.sh ocean ORCA1 fa9p >& download.log &
+#
+set -xuve
+date
+
+typ=$1
+grd=$2
+ver=$3
+
+src=ec:/c3m/ic/$typ/$grd/$ver
+dst=/esnas/releases/ic/$typ/$grd/$ver
+
+mkdir -p $dst
+list=${typ}-${grd}-${ver}
+
+if [[ ! -a $list ]]; then
+   ecaccess-file-dir $src > $list
+fi
+
+for l in $(cat $list); do
+ ecaccess-file-get $src/$l $dst/$l
+ if [[ $? -ne 0 ]]; then
+    exit 1
+ fi
+ cat $list | sed -e '1d' > ${list}.tmp
+ mv ${list}.tmp $list
+done
+cat $list; rm $list
+
+date

+ 66 - 0
transfer/exp_job_info.sh

@@ -0,0 +1,66 @@
+#!/bin/bash
+# Report queue/run/wasted time and failure count per completed "sim" job of
+# an Autosubmit experiment, in CSV form, by inspecting the platform LOG dir.
+
+#set -xuve
+
+if [ $# -ne 3 ]; then
+	echo "Wrong number of arguments"
+	echo "E.g.: $0 <ARCH> <EXPID> <ESTIMATED RUNTIME>"
+	exit 1
+fi
+
+ARCH=$1
+EXPID=$2
+estimated_time=$3 # sim (job) estimated computing time (in hours)
+
+# NOTE(review): only the "mn" arch sets out_file; for the other platforms
+# the ${out_file} globs below expand empty and match every log file of the
+# job -- confirm the intended patterns for those machines.
+case $ARCH in
+	mn)
+		rsrc=/gpfs/scratch/*/*
+		error_file="err"
+		out_file="out"
+		;;
+	ithaca)
+		rsrc=/scratch/cfu/*
+		error_file="\.e"
+		;;
+	ecmwf)
+		rsrc=c2a:/scratch/ms
+		error_file="err"
+		;;
+	hector)
+		rsrc=
+		error_file=""
+		;;
+	lindgren)
+		rsrc=/cfs/klemming/scratch/*/*
+		;;
+	jaguar)
+		rsrc=
+		error_file=""
+		;;
+	*)
+		echo "!!! $ARCH is not available !!!"
+		exit 1
+		;;
+esac
+
+TYPE="sim"
+LOG_PATH="$rsrc/$EXPID/LOG_$EXPID"
+# cut fields count '_'-separated tokens of the whole path (LOG_$EXPID adds
+# one '_'), hence the offsets differ from scripts that list bare file names
+sdates=$(ls -1 ${LOG_PATH}/${EXPID}_*_${TYPE}_COMPLETED | cut -d '_' -f 3 | uniq)
+echo "Name,Queue,Run,Wasted,Failed"
+for s in $sdates; do
+	mems_chunks=$(ls -1tr ${LOG_PATH}/${EXPID}_${s}_*_${TYPE}_COMPLETED | cut -d '_' -f 4-5 | uniq)
+	for m_c in $mems_chunks; do
+		job=${EXPID}_${s}_${m_c}_${TYPE}
+		# submission time taken from the .cmd file's "ls -l" date columns
+		# NOTE(review): parsing "ls -l" output is locale/format dependent;
+		# "stat -c %Y" would be more robust -- confirm before trusting values.
+		submit_time=$(ls -ltr ${LOG_PATH}/${job}.cmd | awk '{print $6 " " $7 " " $8}')
+		submit_time=$(date --date="${submit_time}" +%s)
+		# one stderr log per attempt; subtract the successful run
+		failed_times=$(($(ls -1tr ${LOG_PATH}/${job}*${error_file} | wc -l) -1 ))
+		# start/end timestamps are scraped from the newest stdout log
+		start_time=$(grep "Started" $(ls -1tr ${LOG_PATH}/${job}*${out_file} | tail -n 1) | cut -d" " -f 3-)
+		start_time=$(date --date="${start_time}" +%s)
+		end_time=$(grep "Results" $(ls -1tr ${LOG_PATH}/${job}*${out_file} | tail -n 1) | cut -d" " -f 4-)
+		end_time=$(date --date="${end_time}" +%s)
+		queued_time=$(echo "scale=2; (${start_time}-${submit_time}) / 3600" | bc)
+		run_time=$(echo "scale=2; (${end_time}-${start_time}) / 3600" | bc)
+		wasted_time=$(echo "scale=2; ${run_time}-${estimated_time}" |bc)
+		echo "${job},${queued_time},${run_time},${wasted_time},${failed_times}"
+	done
+done

+ 148 - 0
transfer/migrate_exp.sh

@@ -0,0 +1,148 @@
+#!/bin/bash
+#
+# OBJECTIVE: 
+# In Autosubmit 2.x experiments three projects were cloned (model, templates and ocean_diagnostics).
+# New Autosubmit 3.x experiments only allow cloning one project (auto-ecearth).
+# Auto-ecearth is a single project containing model, templates and ocean_diagnostics among others.
+#
+# This script covers the need of having to run an Autosubmit 2.x experiment with Autosubmit 3.x.
+# It assumes that no auto-ecearth available version can reproduce what the old experiment was doing,
+# and it adapts a copy of that old experiment, reshuffling the configuration files and removing 
+# unnecessary variables from the old templates to adapt them to the Autosubmit 3.x framework.
+#
+# DESCRIPTION:
+# This script will modify and adapt the experiment configuration file of the TARGET_EXPID
+# according to Autosubmit 3.x standard.
+#
+# This script will also clone model, templates and ocean_diagnostics
+# from ORIGINAL_EXPID into TARGET_EXPID "proj" folder. 
+#
+# This script will also remove unnecessary template header/tailer variables.
+#
+# USE:
+# 1st step
+# --------
+# Before executing this script you must use the copy option of Autosubmit 3.x
+# e.g. autosubmit expid -y i000 -H ithaca -d "copied from original i000"
+#
+# 2nd step
+# --------
+# ./migrate_exp.sh ORIGINAL_EXPID TARGET_EXPID
+#
+# CONTACT:
+# Written by Domingo Manubens-Gil
+#
+# Institut Català de Ciències del Clima / Climate Forecasting Unit (IC3/CFU)
+# Created:  November 13, 2014
+copy_from="$1"    # ORIGINAL_EXPID (Autosubmit 2.x experiment to copy from)
+target_expid="$2" # TARGET_EXPID (already created with "autosubmit expid -y")
+experiments_path="/home/dmanubens/debug/autosubmit"
+target_file="$experiments_path/$target_expid/conf/expdef_$target_expid.conf"
+
+set -ex
+
+# Clone model, templates and ocean_diagnostics from the old experiment into
+# the new "proj" folder (skipped if already present).  Note: the cd here is
+# relied on by the template sed calls below.
+if [[ -d $experiments_path/$target_expid/proj ]]; then
+	cd $experiments_path/$target_expid/proj
+	echo "Existing clone for model, templates and ocean_diagnostics..."
+else
+	mkdir $experiments_path/$target_expid/proj
+	cd $experiments_path/$target_expid/proj
+	git clone $experiments_path/$copy_from/git/model model
+	git clone $experiments_path/$copy_from/git/templates templates
+	git clone $experiments_path/$copy_from/git/ocean_diagnostics ocean_diagnostics
+
+	echo "Cloned successfully model, templates and ocean_diagnostics..."
+fi
+
+
+# Strip the Autosubmit 2.x header/tailer placeholders from the templates;
+# Autosubmit 3.x injects headers/tailers itself.
+if grep -q TEMPLATE_NAME "$target_file" ; then
+	template=$(grep TEMPLATE_NAME $target_file | awk '{print $3}')
+	sed -i '/%HEADER%/d' templates/$template/*
+	sed -i '/%HEADER%/d' templates/common/*
+	sed -i '/%AS-HEADER-LOC%/d' templates/$template/*
+	sed -i '/%AS-HEADER-LOC%/d' templates/common/*
+	sed -i '/%AS-HEADER-REM%/d' templates/$template/*
+	sed -i '/%AS-HEADER-REM%/d' templates/common/*
+	sed -i '/%AS-TAILER-LOC%/d' templates/$template/*
+	sed -i '/%AS-TAILER-LOC%/d' templates/common/*
+	sed -i '/%AS-TAILER-REM%/d' templates/$template/*
+	sed -i '/%AS-TAILER-REM%/d' templates/common/*
+	
+	echo "Unnecessary template header/tailer variables deleted..."
+else
+	echo "No variables deleted..."
+fi
+
+# Replace the variables that moved to other 3.x config files with an
+# explanatory comment line in the expdef file.
+if grep -q HPCPROJ "$target_file" ; then
+	sed -i '/^HPCPROJ/ a\# HPCPROJ moved to queues.conf (Autosubmit 3.x migration)' $target_file
+	sed -i '/^HPCPROJ/ d' $target_file
+	sed -i '/^HPCUSER/ a\# HPCUSER moved to queues.conf (Autosubmit 3.x migration)' $target_file
+	sed -i '/^HPCUSER/ d' $target_file
+	sed -i '/^RERUN/ a\# RERUN moved to rerun section (Autosubmit 3.x migration)' $target_file
+	sed -i '/^RERUN/ d' $target_file
+	sed -i '/^CHUNKLIST/ a\# CHUNKLIST moved to rerun section (Autosubmit 3.x migration)' $target_file
+	sed -i '/^CHUNKLIST/ d' $target_file
+	sed -i '/^WALLCLOCK/ a\# WALLCLOCK moved to jobs.conf (Autosubmit 3.x migration)' $target_file
+	sed -i '/^WALLCLOCK/ d' $target_file
+	sed -i '/^NUMPROC/ a\# NUMPROC moved to jobs.conf (Autosubmit 3.x migration)' $target_file
+	sed -i '/^NUMPROC/ d' $target_file
+
+	echo "Unnecessary configuration variables deleted..."
+else
+	echo "No configuration variables deleted..."
+fi
+
+# Append the new Autosubmit 3.x sections unless the file was already
+# migrated (detected via the CALENDAR key).
+if grep -q CALENDAR "$target_file" ; then
+	echo "No configuration variables added..."
+else
+
+sed -i '/CHUNKINI =*/a \
+# Calendar used. LIST: standard, noleap \
+CALENDAR = standard \
+' $target_file
+
+sed -i '/TEMPLATE_NAME =*/a \
+\
+[project] \
+# Select project type. STRING = git, svn, local, none \
+# If PROJECT_TYPE is set to none, Autosubmit self-contained dummy templates will be used \
+PROJECT_TYPE = local \
+\
+# If PROJECT_TYPE is not git, no need to change \
+[git] \
+# Repository URL  STRING = "'"https://github.com/torvalds/linux.git"'" \
+PROJECT_ORIGIN = \
+# Select branch or tag, STRING, default = "'"master"'", help = {"'"master"'" (default), "'"develop"'", "'"v3.1b"'", ...} \
+PROJECT_BRANCH = \
+# type = STRING, default = leave empty, help = if model branch is a TAG leave empty \
+PROJECT_COMMIT = \
+\
+# If PROJECT_TYPE is not svn, no need to change \
+[svn] \
+# type = STRING, help = "'"https://svn.ec-earth.org/ecearth3"'" \
+PROJECT_URL = \
+# Select revision number. NUMERIC = 1778 \
+PROJECT_REVISION = \
+\
+# If PROJECT_TYPE is not local, no need to change \
+[local] \
+# type = STRING, help = /foo/bar/ecearth  \
+PROJECT_PATH = /esnas/autosubmit/'$copy_from'/git \
+\
+# If PROJECT_TYPE is none, no need to change \
+[project_files] \
+# Where is PROJECT CONFIGURATION file location relative to project root path \
+FILE_PROJECT_CONF = ../conf/expdef_'$target_expid'.conf \
+\
+[rerun] \
+# Is a rerun or not? [Default: Do set FALSE]. BOOLEAN = TRUE, FALSE \
+RERUN = FALSE \
+# If RERUN = TRUE then supply the list of chunks to rerun \
+# LIST = "[ 19601101 [ fc0 [1 2 3 4] fc1 [1] ] 19651101 [ fc0 [16-30] ] ]" \
+CHUNKLIST = \
+' $target_file
+
+echo "New needed configuration variables added succesfuly..."
+fi
+
+exit 0

+ 59 - 0
transfer/plot_job_info.py

@@ -0,0 +1,59 @@
+# Plot the per-job Queued/Run/Wasted/Failed statistics produced by
+# exp_job_info.sh as grouped bar charts, MAX jobs per subplot.
+# Usage: python plot_job_info.py <csv file>
+# NOTE(review): this is Python 2 code -- under Python 3 the float slice
+# bounds (l1/l2) raise TypeError and assigning the map() iterator into the
+# numpy array fails; confirm the intended interpreter.
+import numpy as np
+import matplotlib.pyplot as plt
+import sys
+
+if len(sys.argv) != 2:
+	print("Wrong number of arguments.")
+	print("Usage: " + sys.argv[0] + " <file>")
+	sys.exit(1)
+
+# CSV with header "Name,Queue,Run,Wasted,Failed"; dtype inferred per column.
+t = np.genfromtxt(sys.argv[1], names=True, delimiter=',', dtype=None)
+
+MAX=25.0  # jobs per subplot
+
+N = len(t['Run'])
+num_plots=int(np.ceil(N/MAX))
+
+ind = np.arange(int(MAX))  # the x locations for the groups
+width = 0.25       # the width of the bars
+
+def short_name(job_name):
+	"""Compress '<expid>_<sdate>_<mem>_<chunk>_<type>' into a short tick label."""
+	n = job_name.split('_')
+	return n[1][:6] + "_" + n[2][2:] + "_" + n[4][:1] + "_" + n[3]
+	
+plt.close('all')
+fig = plt.figure()
+ax=[]
+
+# experiment id taken from the first job name, used as subplot title
+expid = t['Name'][0].split('_')[0]
+
+t['Name'] = map(short_name, t['Name'])
+
+# One subplot per group of MAX jobs; four bars (queued/run/wasted/failed)
+# per job.
+for plot in range(1,num_plots+1):
+	ax.append(fig.add_subplot(num_plots,1, plot))
+	l1=(plot-1)*MAX
+	l2=plot*MAX
+	if plot == num_plots:
+		# last subplot may hold fewer than MAX jobs
+		ind = np.arange(len(t['Queue'][l1:l2]))
+	rects1 = ax[plot-1].bar(ind, t['Queue'][l1:l2], width, color='r')
+	rects2 = ax[plot-1].bar(ind+width, t['Run'][l1:l2], width, color='g')
+	rects3 = ax[plot-1].bar(ind+width*2, t['Wasted'][l1:l2], width, color='b')
+	rects4 = ax[plot-1].bar(ind+width*3, t['Failed'][l1:l2], width, color='y')
+	# ax[plot-1].set_ylabel('hours')
+	ax[plot-1].set_xticks(ind+width)
+	ax[plot-1].set_xticklabels( t['Name'][l1:l2], rotation='vertical')
+	# shrink the axes to leave room for the legend placed outside
+	box = ax[plot-1].get_position()
+	ax[plot-1].set_position([box.x0, box.y0, box.width * 0.8, box.height*0.8])
+	ax[plot-1].set_title(expid, fontsize=20, fontweight='bold')
+
+	
+	lgd = ax[plot-1].legend( (rects1[0], rects2[0], rects3[0], rects4[0]), ('Queued (h)', 'Run (h)', 'Wasted (h)', 'Failed (#)'), loc="upper left", bbox_to_anchor=(1,1) )
+
+fig.set_size_inches(14,num_plots*6)
+#plt.tight_layout()
+plt.savefig(sys.argv[1]+ '.pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
+#plt.savefig('foo.pdf', bbox_inches='tight')
+
+# plt.show()
+
+
+ 98 - 0
transfer/prepare_atm_ic.cmd

@@ -0,0 +1,98 @@
+#!/bin/ksh
+#
+# Reformat archived atmospheric initial conditions into the cfu naming
+# convention and store them back on ECFS.  Runs as a serial LoadLeveler
+# job at ECMWF (hence the #@ directives and the GNU tar from /opt/freeware).
+#
+#@ shell            = /usr/bin/ksh
+#@ class            = ns
+#@ job_type         = serial
+#@ job_name         = prepare_atm_ic
+#@ output           = $(job_name).$(jobid).log
+#@ error            = $(job_name).$(jobid).log
+#@ notification     = error
+#@ resources        = ConsumableCpus(1) ConsumableMemory(1200mb)
+#@ wall_clock_limit = 24:00:00
+#@ queue
+#
+
+set -xuve
+date
+tar="/opt/freeware/bin/tar"  # GNU tar (AIX system tar lacks -z)
+
+# b0ga
+#grid=T255L62
+#expid=b0ga
+#src=ec:/xes/public/longrange/$expid
+
+# b0io
+grid=T255L91
+expid=b0io
+src=ec:/xes/public/longrange/$expid
+
+dst=ec:/c3m/ic/atmos/$grid/$expid
+cup=$(pwd)
+list=$cup/$expid
+
+# Gather the list of original ic's.  $list doubles as a resume file: every
+# processed entry is removed from it, so a requeued job continues where the
+# previous run stopped.
+if [[ ! -a ${list} ]]; then
+ emkdir -p $dst
+ # b0gc
+ #els $src > $list
+ # b0io
+ for y in $(seq 1993 2009); do
+  months="05 11"
+  for m in $months; do
+   files=$(els $src/${y}${m}0100)
+   for f in $files; do
+    echo "${y}${m}0100/$f" >> $list
+   done
+  done
+ done  
+ #exit
+fi
+
+tmp=$TMPDIR/prepare_atm_ic_$expid
+rm -rf $tmp
+mkdir -p $tmp
+cd $tmp
+
+# Prepare and store the ic's after reformatting properly: fetch, untar,
+# rename the members to the ICM*XXXX* convention, repack under the new
+# <exp>_<mem>_<ymds> name and push back to ECFS.
+for l in $(cat $list); do
+ # start
+ ecp $src/$l .
+ ic=$(ls -1 *.tar | cut -d '.' -f 1) 
+ $tar -xvf $ic.tar
+ # normalise the various historical member names to the XXXX placeholders
+ for f in $(ls -1 $ic); do
+  case $f in
+   ICMSH*T|shml*|pua*)  mv $ic/$f ICMSHXXXXINIT ;;
+   ICMGG*T|ggsfc*|psu*) mv $ic/$f ICMGGXXXXINIT ;;
+   ICMGG*UA|ggml*)      mv $ic/$f ICMGGXXXXINIUA ;;
+   ICMCL*T)             mv $ic/$f ICMCLXXXXINIT ;;
+  esac
+ done
+ # the tarball name layout differs per source experiment
+ case $expid in
+  b0ga)
+   exp=$(echo $ic | cut -d '_' -f 1)
+   mem=$(echo $ic | cut -d '_' -f 4)
+   ymds=$(echo $ic | cut -d '_' -f 5)
+  ;;
+  b0io)
+   exp=$(echo $ic | cut -d '_' -f 1)
+   mem=$(echo $ic | cut -d '_' -f 3)
+   ymds=$(echo $ic | cut -d '_' -f 4)
+  ;;
+  *)
+   echo "!!!caution!!!"
+   exit 1
+  ;;
+ esac
+ nic=${exp}_${mem}_${ymds} # new name of initial condition
+ $tar -cvzf $nic.tar.gz *XXXX*
+ ecp $nic.tar.gz $dst
+ # end
+ rm -rf *${ic}* *${nic}* *XXXX*
+ # drop the processed entry from the resume list
+ cat ${list} | sed -e '1d' > ${list}.tmp
+ mv ${list}.tmp ${list}
+done
+rm ${list}
+cd; rm -rf $tmp
+
+date
+
+ 53 - 0
transfer/prepare_atm_ic_i05e.sh

@@ -0,0 +1,53 @@
+#!/bin/bash
+# -----------------------------------------------------------------
+# Description: Short script to merge atmospheric intial conditions
+#              with a re-run of ERA-Land analysis. This script has
+#              been used to create the initial conditions i05e.
+#
+# Date: 7.8.2012
+#
+# Author: Omar Bellprat (omar.bellprat@ic3.cat)
+#
+#-------------------------------------------------------------------
+
+indir='/esnas/releases/ic/atmos/T255L91' # Dir of initical conditions (ic)
+landdir='/esnas/releases/ic/atmos/T255L91/g53c' # Dir of ERA-Land analysis
+refic='b0io' # ERA-Interim ic's
+newic='i05e' # New atmospheric ic's using land parameters from g53c
+
+smonths=("05" "11") # Start dates (May,Nov)
+
+# GRIB codes of the replaced ERA-Land parameters (for reference):
+# 32.128/33.128/39.128/40.128/41.128/42.128/139.128/141.128/170.128/183.128/235.128/236.128/238.128
+# (the original script assigned the codes to param and immediately
+# overwrote them with the short names -- dead assignment removed)
+param="asn/rsn/swvl1/swvl2/swvl3/swvl4/stl1/sd/stl2/stl3/skt/stl4/tsn" # Short Names of parameters
+
+# FIX: tmpdir was single-quoted, so ${USER} was never expanded and a
+# literal directory '/scratch/${USER}/tmp_i05e' was created and used.
+tmpdir="/scratch/${USER}/tmp_i05e"
+
+if [ ! -d "${tmpdir}" ]; then
+  mkdir -p ${tmpdir}
+fi
+
+if [ ! -d "${indir}/${newic}" ]; then
+  mkdir -p ${indir}/${newic}
+fi
+
+cd ${tmpdir}
+
+for i in $(seq 1993 2009) # Time window of ic's
+do
+    echo 'GRIB copying year' ${i}
+    for f in $(seq 0 9)
+    do
+    echo 'GRIB copying member fc'${f}
+
+        for j in "${smonths[@]}"
+        do  
+            # -f: the dir is empty on the first pass; :? guards against an
+            # unset tmpdir wiping the cwd
+            rm -f "${tmpdir:?}"/*
+            tar -xf ${indir}/${refic}/${refic}_fc${f}_${i}${j}0100.tar.gz -C .
+            grib_copy -w shortName!=$param ICMGGXXXXINIT ICMGGXXXXINIT_noland # Copy atmosperhic fields excluding land parameters
+            cat ${landdir}/land_ini-gr3c_${i}${j}01.grb >> ICMGGXXXXINIT_noland # Concatenate new land parameters to original ic's
+            mv ICMGGXXXXINIT_noland ICMGGXXXXINIT
+            tar -zcvf ${indir}/${newic}/${newic}_fc${f}_${i}${j}0100.tar.gz ICM*
+        done
+    done
+done
+rm -r ${tmpdir}

+ 55 - 0
transfer/prepare_atm_ic_i05f.sh

@@ -0,0 +1,55 @@
#!/bin/bash
# -----------------------------------------------------------------
# Description: Short script to merge atmospheric initial conditions
#              with a re-run of ERA-Land analysis. This script has
#              been used to create the initial conditions i05f.
#
# Date: 7.8.2012
#
# Author: Omar Bellprat (omar.bellprat@ic3.cat)
#
#-------------------------------------------------------------------

indir='/esnas/releases/ic/atmos/T511L91'        # Dir of initial conditions (ic)
landdir='/esnas/releases/ic/atmos/T511L91/g53m' # Dir of ERA-Land analysis
refic='b0it'  # ERA-Interim ic's
newic='i05f'  # New atmospheric ic's using land parameters from g53m

smonths=("05" "11") # Start dates (May,Nov)

# GRIB parameter codes of the ERA-Land fields, kept for reference only;
# grib_copy below filters on the short names.
# 32.128/33.128/39.128/40.128/41.128/42.128/139.128/141.128/170.128/183.128/235.128/236.128/238.128
param="asn/rsn/swvl1/swvl2/swvl3/swvl4/stl1/sd/stl2/stl3/skt/stl4/tsn" # Short names of the land parameters

# Double quotes so ${USER} actually expands (single quotes previously created
# a literal '${USER}' directory under /scratch).
tmpdir="/scratch/${USER}/tmp"

mkdir -p "${tmpdir}"            # mkdir -p is a no-op if the dir exists
mkdir -p "${indir}/${newic}"

cd "${tmpdir}" || exit 1        # never run the rm/tar below in the wrong dir

for i in $(seq 1993 2009)       # Time window of ic's
do
    echo "GRIB copying year ${i}"
    for f in $(seq 0 9)         # Ensemble members fc0..fc9
    do
        echo "GRIB copying member fc${f}"

        for j in "${smonths[@]}"
        do
            rm -f "${tmpdir}"/*   # -f: do not fail on the (empty) first pass
            tar -xf "${indir}/${refic}/${refic}_fc${f}_${i}${j}0100.tar.gz" -C .
            # Copy atmospheric fields excluding the land parameters
            grib_copy -w shortName!="${param}" ICMGGXXXXINIT ICMGGXXXXINIT_noland
            # Concatenate new land parameters to the original ic's
            cat "${landdir}/land_ini-gr3m_${i}${j}01.grb" >> ICMGGXXXXINIT_noland
            mv ICMGGXXXXINIT_noland ICMGGXXXXINIT
            tar -zcvf "${indir}/${newic}/${newic}_fc${f}_${i}${j}0100.tar.gz" ICM*
        done
    done
done
rm -r "${tmpdir}"

+ 54 - 0
transfer/prepare_atm_ic_i05i.sh

@@ -0,0 +1,54 @@
#!/bin/bash
# -----------------------------------------------------------------
# Description: Short script to merge atmospheric initial conditions
#              with a re-run of ERA-Land analysis. This script has
#              been used to create the initial conditions i05i.
#
# Date: 7.8.2012
#
# Author: Omar Bellprat (omar.bellprat@ic3.cat)
#
#-------------------------------------------------------------------

indir='/esnas/releases/ic/atmos/T255L91'             # Dir of initial conditions (ic)
landdir='/esnas/releases/ic/atmos/T255L91/g53c_clim' # Dir of ERA-Land analysis (climatology)
refic='b0io' # ERA-Interim ic's
newic='i05i' # New atmospheric ic's using climatological land parameters from g53c

smonths=("05" "11") # Start dates (May,Nov)

# GRIB parameter codes of the ERA-Land fields, kept for reference only;
# grib_copy below filters on the short names.
# 32.128/33.128/39.128/40.128/41.128/42.128/139.128/141.128/170.128/183.128/235.128/236.128/238.128
param="asn/rsn/swvl1/swvl2/swvl3/swvl4/stl1/sd/stl2/stl3/skt/stl4/tsn" # Short names of the land parameters

# Double quotes so ${USER} actually expands (single quotes previously created
# a literal '${USER}' directory under /scratch).
tmpdir="/scratch/${USER}/tmp_i05i"

mkdir -p "${tmpdir}"            # mkdir -p is a no-op if the dir exists
mkdir -p "${indir}/${newic}"

cd "${tmpdir}" || exit 1        # never run the rm/tar below in the wrong dir

for i in $(seq 1993 2009)       # Time window of ic's
do
    echo "GRIB copying year ${i}"
    for f in $(seq 0 9)         # Ensemble members fc0..fc9
    do
        echo "GRIB copying member fc${f}"

        for j in "${smonths[@]}"
        do
            rm -f "${tmpdir}"/*   # -f: do not fail on the (empty) first pass
            tar -xf "${indir}/${refic}/${refic}_fc${f}_${i}${j}0100.tar.gz" -C .
            # Copy atmospheric fields excluding the land parameters
            grib_copy -w shortName!="${param}" ICMGGXXXXINIT ICMGGXXXXINIT_noland
            # Concatenate climatological land parameters (fixed 1993 reference
            # file per start month) to the original ic's
            cat "${landdir}/clim_g53c_1993${j}01.grb" >> ICMGGXXXXINIT_noland
            mv ICMGGXXXXINIT_noland ICMGGXXXXINIT
            tar -zcvf "${indir}/${newic}/${newic}_fc${f}_${i}${j}0100.tar.gz" ICM*
        done
    done
done
rm -r "${tmpdir}"

+ 53 - 0
transfer/prepare_atm_ic_i05j.sh

@@ -0,0 +1,53 @@
#!/bin/bash
# -----------------------------------------------------------------
# Description: Short script to merge atmospheric initial conditions
#              with a re-run of ERA-Land analysis. This script has
#              been used to create the initial conditions i05j.
#
# Date: 7.8.2012
#
# Author: Omar Bellprat (omar.bellprat@ic3.cat)
#
#-------------------------------------------------------------------

indir='/esnas/releases/ic/atmos/T511L91'             # Dir of initial conditions (ic)
landdir='/esnas/releases/ic/atmos/T511L91/g53m_clim' # Dir of ERA-Land analysis (climatology)
refic='b0it' # ERA-Interim ic's
newic='i05j' # New atmospheric ic's using climatological land parameters from g53m

smonths=("05" "11") # Start dates (May,Nov)

# GRIB parameter codes of the ERA-Land fields, kept for reference only;
# grib_copy below filters on the short names.
# 32.128/33.128/39.128/40.128/41.128/42.128/139.128/141.128/170.128/183.128/235.128/236.128/238.128
param="asn/rsn/swvl1/swvl2/swvl3/swvl4/stl1/sd/stl2/stl3/skt/stl4/tsn" # Short names of the land parameters

# Use the current user's scratch space, consistent with the sibling i05*
# scripts (was hard-coded to /scratch/obellprat).
tmpdir="/scratch/${USER}/tmp_i05j"

mkdir -p "${tmpdir}"            # mkdir -p is a no-op if the dir exists
mkdir -p "${indir}/${newic}"

cd "${tmpdir}" || exit 1        # never run the rm/tar below in the wrong dir

for i in $(seq 1993 2009)       # Time window of ic's
do
    echo "GRIB copying year ${i}"
    for f in $(seq 0 9)         # Ensemble members fc0..fc9
    do
        echo "GRIB copying member fc${f}"

        for j in "${smonths[@]}"
        do
            rm -f "${tmpdir}"/*   # -f: do not fail on the (empty) first pass
            tar -xf "${indir}/${refic}/${refic}_fc${f}_${i}${j}0100.tar.gz" -C .
            # Copy atmospheric fields excluding the land parameters
            grib_copy -w shortName!="${param}" ICMGGXXXXINIT ICMGGXXXXINIT_noland
            # Concatenate climatological land parameters (fixed 1993 reference
            # file per start month) to the original ic's
            cat "${landdir}/clim_g53m_1993${j}01.grb" >> ICMGGXXXXINIT_noland
            mv ICMGGXXXXINIT_noland ICMGGXXXXINIT
            tar -zcvf "${indir}/${newic}/${newic}_fc${f}_${i}${j}0100.tar.gz" ICM*
        done
    done
done
rm -r "${tmpdir}"

+ 62 - 0
transfer/prepare_atm_nudging.cmd

@@ -0,0 +1,62 @@
#!/bin/ksh
#
#@ shell            = /usr/bin/ksh
#@ class            = ns
#@ job_type         = serial
#@ job_name         = prepare_atm_nudging
#@ output           = $(job_name).$(jobid).log
#@ error            = $(job_name).$(jobid).log
#@ notification     = error
#@ resources        = ConsumableCpus(1) ConsumableMemory(1200mb)
#@ wall_clock_limit = 48:00:00
#@ queue
#
# Serial LoadLeveler job: repackage atmospheric nudging reference files on
# ECFS, turning per-day tarballs (e.g. b0ir_reference_fc0_201003mmdd_1.tar)
# into one monthly gzipped tarball (b0ir_fc0_201003.tar.gz).
# ECFS client commands used: els (list), ecp (copy), emv (move), emkdir.

set -xuve   # trace + verbose; abort on unset variables and on any error
date
tar="/opt/freeware/bin/tar"   # GNU tar on AIX (system tar has no -z)

grid=T255L91
expid=b0ir
mem=fc0
src=ec:/xes/public/longrange/$expid       # ECFS source (original daily files)
dst=ec:/c3m/nudging/atmos/$grid/$expid    # ECFS destination (monthly files)

cup=$(pwd)
list=$cup/$expid   # work list of YYYYMM tags still to be processed

# Gather the list of original atm nudging ref. files.
# The list is only (re)built when absent: it doubles as a checkpoint, since
# processed months are removed from it below, so a crashed job can be
# resubmitted and resume where it stopped.
if [[ ! -a ${list} ]]; then
 emkdir -p $dst
 els -1 $src/* | cut -d '_' -f 4 | cut -c 1-6 | uniq >> $list
fi

tmp=$TMPDIR/prepare_atm_nudging_$expid
rm -rf $tmp
mkdir -p $tmp
cd $tmp

# Prepare and store the nudging ref. files after reformatting properly: e.g star with b0ir_reference_fc0_201003mmdd_1.tar and end up b0ir_fc0_201003.tar.gz
for l in $(cat $list); do
 rm -rf final; mkdir final
 ff=${expid}_${mem}_${l}.tar.gz
 ecp "$src/${expid}_reference_${mem}_${l}*" .   # glob expanded by ECFS, hence the quotes
 for f in $(ls -1 ${expid}_reference_${mem}_${l}* | cut -d '.' -f 1); do
  nf=$(echo $f | cut -d '_' -f 4)               # YYYYMMDD part of the tarball name
  $tar -xvf $f.tar
  mv $f/sh* final/${nf}sh                       # spectral (SH) fields
  mv $f/gg* final/${nf}gg                       # gridpoint (GG) fields
 done
 cd final
 $tar -cvzf ../$ff *${l}*
 emv ../$ff $dst
 cd ..
 rm -rf *_reference_*
 # Checkpoint: drop the first (just-processed) entry from the work list.
 # NOTE(review): the for-loop iterates over a snapshot of the list taken at
 # loop start, so trimming here only matters when resuming a crashed job.
 cat ${list} | sed -e '1d' > ${list}.tmp
 mv ${list}.tmp ${list}
done
rm ${list}
cd; rm -rf $tmp

date

+ 51 - 0
transfer/prepare_atm_perturb.cmd

@@ -0,0 +1,51 @@
#!/bin/ksh
#
#@ shell            = /usr/bin/ksh
#@ class            = ns
#@ job_type         = serial
#@ job_name         = prepare_atm_perturb
#@ output           = $(job_name).$(jobid).log
#@ error            = $(job_name).$(jobid).log
#@ notification     = error
#@ resources        = ConsumableCpus(1) ConsumableMemory(1200mb)
#@ wall_clock_limit = 24:00:00
#@ queue
#
# Serial LoadLeveler job: bundle the per-member perturbation GRIB files
# (??_inierr_<expid>_<tag>.grb) stored on ECFS into one gzipped tarball
# per tag (<expid>_<tag>.tar.gz) and store it under $dst.
# ECFS client commands used: els (list), ecp (copy), emv (move), emkdir.

set -xuve   # trace + verbose; abort on unset variables and on any error
date
tar="/opt/freeware/bin/tar"   # GNU tar on AIX (system tar has no -z)

grid=T255L91
expid=e00l
src=ec:/c3l/errfiles/$expid               # ECFS source (original perturb files)
dst=ec:/c3m/perturb/atmos/$grid/$expid    # ECFS destination (tarballs)

cup=$(pwd)
list=$cup/$expid   # work list of tags still to be processed

# Gather the list of original perturb files.
# The list is only (re)built when absent: it doubles as a checkpoint, since
# processed tags are removed from it below, so a crashed job can be
# resubmitted and resume where it stopped.
if [[ ! -a ${list} ]]; then
 emkdir -p $dst
 els -1 $src/* | cut -d '_' -f 4 | cut -d '.' -f 1 | sort | uniq >> $list
fi

tmp=$TMPDIR/prepare_atm_perturb_$expid
rm -rf $tmp
mkdir -p $tmp
cd $tmp

# Prepare and store the perturb file after reformatting properly
for l in $(cat $list); do
 ecp $src/??_inierr_${expid}_${l}.grb .   # fetch all members (?? = member id)
 $tar -cvzf ${expid}_${l}.tar.gz ??_inierr_${expid}_${l}.grb
 emv ${expid}_${l}.tar.gz $dst
 rm -f ??_inierr_${expid}_${l}.grb
 # Checkpoint: drop the first (just-processed) entry from the work list.
 # NOTE(review): the for-loop iterates over a snapshot of the list taken at
 # loop start, so trimming here only matters when resuming a crashed job.
 cat ${list} | sed -e '1d' > ${list}.tmp
 mv ${list}.tmp ${list}
done
rm ${list}
cd; rm -rf $tmp

date

+ 39 - 0
transfer/prepare_oce_ic.sh

@@ -0,0 +1,39 @@
#!/bin/bash
#
# Repackage ocean initial-condition restarts: for each gzipped restart in
# $src, remove the (degenerate) 't' dimension with ncwa, append the 'z'
# coordinate variable taken from a GLORYS2V1 reference restart with ncks,
# then recompress and store the result in $dst.

set -xuve   # trace + verbose; abort on unset variables and on any error
date

grid=ORCA1L46
expid=m01x
src=/esnas/releases/ic/ocean/$grid/$expid
dst=/esnas/releases/ic/ocean/$grid/$expid.new
ref=/esnas/exp/MERCATOR/GLORYS2V1/restarts/ocean/$grid/restart_glorys2v1_oce_19930504_$grid.nc
tmpdir=/scratch/masif
mkdir -p $dst

list=$PWD/$expid   # work list of restart basenames still to be processed

# Only (re)build the list when absent: it doubles as a checkpoint, since
# processed entries are removed from it below, so a crashed run can be
# restarted and resume where it stopped.
if [[ ! -a ${list} ]]; then
 ls -1 $src | cut -d '.' -f 1 | sort | uniq >> $list
fi

tmp=$tmpdir/prepare_oce_ic_$expid
rm -rf $tmp
mkdir -p $tmp
cd $tmp

for l in $(cat $list); do
 cp -p $src/$l.nc.gz .
 gunzip $l.nc.gz
 ncwa -O -a t $l.nc $l.nc # removing t (average over the degenerate dim, overwrite in place)
 ncks -A -v z $ref $l.nc # appending z from ref.
 gzip -9 -f $l.nc
 mv $l.nc.gz $dst
 # Checkpoint: drop the first (just-processed) entry from the work list.
 # NOTE(review): the for-loop iterates over a snapshot of the list taken at
 # loop start, so trimming here only matters when resuming a crashed run.
 cat ${list} | sed -e '1d' > ${list}.tmp
 mv ${list}.tmp ${list}
done
rm ${list}
cd; rm -rf $tmp

date

+ 45 - 0
transfer/update_ic.sh

@@ -0,0 +1,45 @@
#!/bin/bash
#
# Sync a released set of initial conditions to a remote platform.
# Usage: update_ic.sh <platform> <type> <grid> <version>
# e.g.:  nohup ./update_ic.sh ithaca atmos T255L62 b0ga >& update.log &
#
set -xuve
date

src=/esnas/releases/ic
plt=$1   # target platform (ithaca, mn-*, ecmwf, ht-*, lindgren, ar-*)
typ=$2   # ic type, e.g. 'atmos' or 'ocean'
grd=$3   # grid, e.g. T255L62
ver=$4   # ic version id, e.g. b0ga

# Map the platform name to its ic destination root.
case $plt in
 ithaca)   dst=/share/data/cfu/ic ;;
 mn-*)     dst=/gpfs/scratch/ecm86/ecm86010/ic ;;
 ecmwf)    dst=ec:/c3m/ic ;;
 ht-*)     dst=/work/pr1u1011/pr1u1011/pr1e1001/ic ;;
 lindgren) dst=/cfs/klemming/nobackup/a/asifsami/ic ;;
 jaguar)   dst= ;;   # no destination configured yet
 ar-*)     dst=/work/pr1u1011/pr1u1011/shared/ic ;;
 *)        echo "update_ic.sh: unknown platform '$plt'" >&2; exit 1 ;;
esac

# Guard against an empty destination (e.g. jaguar): without this the script
# would mkdir/rsync into paths rooted at '/', which is destructive.
if [[ -z $dst ]]; then
 echo "update_ic.sh: no destination configured for platform '$plt'" >&2
 exit 1
fi

if [[ $plt != ecmwf ]]; then
 # Regular platforms: create the target tree and sync over ssh.
 ssh "$plt" mkdir -p "$dst/$typ/$grd"
 rsync -acrv "$src/$typ/$grd/$ver" "${plt}:${dst}/$typ/$grd"
else
 # ECMWF: no rsync to ECFS, so copy file by file with ecaccess.
 set +e   # the mkdirs fail harmlessly when the directories already exist
 ecaccess-file-mkdir "$dst/$typ"
 ecaccess-file-mkdir "$dst/$typ/$grd"
 ecaccess-file-mkdir "$dst/$typ/$grd/$ver"
 set -e
 # Work list doubles as a checkpoint: each transferred file is removed from
 # the head of the list, so an interrupted run can resume where it stopped.
 list=/tmp/${typ}-${grd}-${ver}
 if [[ ! -a $list ]]; then
  ls -1 "$src/$typ/$grd/$ver" > "$list"
 fi
 for l in $(cat "$list"); do
  ecaccess-file-put "$src/$typ/$grd/$ver/$l" "$dst/$typ/$grd/$ver/$l"
  sed -e '1d' "$list" > "${list}.tmp"
  mv "${list}.tmp" "$list"
 done
 cat "$list"; rm "$list"
fi

date

Some files were not shown because too many files changed in this diff