diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
index 0efb18c..981129e 100644
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -33,11 +33,9 @@ jobs:
       - name: Test with pytest
         run: |
           pip install .
-          python setup.py pytest --addopts "--cov ."
-#      - name: Upload coverage to Codecov
-#        uses: codecov/codecov-action@v4
-#        env:
-#          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
-#        with:
-#          fail_ci_if_error: true
-#          verbose: true
+          coverage run --branch -m pytest tests
+      - name: Upload coverage reports to Codecov
+        uses: codecov/codecov-action@v4.0.1
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          slug: jmbhughes/overlappogram
diff --git a/README.md b/README.md
index 7d19200..90d23de 100644
--- a/README.md
+++ b/README.md
@@ -7,6 +7,8 @@
 Overlappogram is a Python package for inverting overlappogram observations of the Sun, for examples MaGIXS observations or ECCCO observations.
 
+This code was originally written by Dyana Beabout.
+
 ## How to Use
 
 `python run_multiion_inversion.py ./path/to/image.fits ./path/to/config.toml`
diff --git a/configs/old/img_norm.toml b/configs/old/img_norm.toml
index 83787a9..9b446ad 100644
--- a/configs/old/img_norm.toml
+++ b/configs/old/img_norm.toml
@@ -30,7 +30,7 @@ rhos = [1.0]
 # Inversion Time = 75.56018900871277  # my no shared memory thread pool executor with 11 max workers
 # Inversion Time = 77.73930597305298  # my process pool executor with copy on write for global with 11 max workers
 # Inversion Time = 88.20389604568481  # same as above with numpy having 2 threads
-# Inversion Time = 42.56954908370972  # same as above wtih numpy having 1 thread and selection='cyclic'
+# Inversion Time = 42.56954908370972  # same as above with numpy having 1 thread and selection='cyclic'
 
 # changing alpha can dramatically slow things
 # set alpha = 0.1 then Inversion Time = 1208.5097889900208
diff --git a/docs/conf.py b/docs/conf.py
index f567f52..20dad9a 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -6,23 +6,22 @@
 # -- Project information -----------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
 
-project = 'overlappogram'
-copyright = '2024, Dyana Beabout, J. Marcus Hughes'
-author = 'Dyana Beabout, J. Marcus Hughes'
-release = '0.0.1'
+project = "overlappogram"
+copyright = "2024, Dyana Beabout, J. Marcus Hughes"
+author = "Dyana Beabout, J. Marcus Hughes"
+release = "0.0.1"
 
 # -- General configuration ---------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
 
 extensions = []
 
-templates_path = ['_templates']
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
-
+templates_path = ["_templates"]
+exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
 
 # -- Options for HTML output -------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
 
-html_theme = 'alabaster'
-html_static_path = ['_static']
+html_theme = "alabaster"
+html_static_path = ["_static"]
diff --git a/experiment_scripts/ECCCO_multiion_inversion.py b/experiment_scripts/ECCCO_multiion_inversion.py
index 09086d5..c9bcebe 100644
--- a/experiment_scripts/ECCCO_multiion_inversion.py
+++ b/experiment_scripts/ECCCO_multiion_inversion.py
@@ -10,14 +10,14 @@
 import numpy as np
 from astropy.io import fits
 
-#from overlappogram.inversion_field_angles_logts_ions import Inversion
+# from overlappogram.inversion_field_angles_logts_ions import Inversion
 from sklearn.linear_model import ElasticNet as enet
 
 from magixs_data_products import MaGIXSDataProducts
 from overlappogram.elasticnet_model import ElasticNetModel as model
 from overlappogram.inversion_field_angles import Inversion
 
-'''def calculate_weights(data, weights, sig_read, exp_time):
+"""def calculate_weights(data, weights, sig_read, exp_time):
     # Read image
     image_hdul = fits.open(data)
     image = image_hdul[0].data
@@ -31,90 +31,107 @@
     hdu = fits.PrimaryHDU(data = sample_weights)
     hdulist = fits.HDUList([hdu])
     hdulist.writeto(sample_weight_file, overwrite=True)
-    return sample_weight_file'''
-
-if __name__ == '__main__':
-
-    channel = 'all'
-    abund='feldman'
-    fwhm='0'
-    #psfs=[2,3,4,5]
-    #psfs=[1,2,3,4,5]
-    #psfs=[3,5]
-    psfs=[4]
+    return sample_weight_file"""
+
+if __name__ == "__main__":
+    channel = "all"
+    abund = "feldman"
+    fwhm = "0"
+    # psfs=[2,3,4,5]
+    # psfs=[1,2,3,4,5]
+    # psfs=[3,5]
+    psfs = [4]
 
     for psf in psfs:
+        mdp = MaGIXSDataProducts()
+        # PSA for magixs1
+        # Response function file path.
+        response_dir = "data/"
 
-        mdp=MaGIXSDataProducts()
-        #PSA for magixs1
-
-        # Response function file path.
-        response_dir ='data/'
-
-        # Response file.
-        #cube_file = response_dir + 'eccco_is_response_feldman_m_el_with_tables_lw_pm1230_'+str(psf)+'pix.fits'
+        # Response file.
+        # cube_file = response_dir + 'eccco_is_response_feldman_m_el_with_tables_lw_pm1230_'+str(psf)+'pix.fits'
         # cube_file = response_dir +'D16Feb2024_eccco_response_feldman_m_el_with_tables_s_i_slw_coopersun.fits'
-        cube_file = response_dir + 'D16Feb2024_eccco_response_feldman_m_el_with_tables_s_i_lw_coopersun.fits'
-        #cube_file = response_dir + "response_img_normalized.fits"
-        #cube_file = response_dir + 'D1Aug2023_eccco_response_feldman_m_el_with_tables_lw.fits'
-        #cube_file = response_dir + 'D14Feb2024_eccco_response_feldman_m_el_with_tables_lw.fits'
-
-        #weight_file = response_dir + 'oawave_eccco_is_lw.txt'
-
-        #Data directory and data file
-        data_dir ='data/'
-        # summed_image = data_dir + 'eccco_lw_forwardmodel_thermal_response_psf'+str(psf)+'pix_el_decon.fits'
-        summed_image = data_dir+'eccco_is_lw_forwardmodel_thermal_response_psf'+str(psf)+'pix_el.fits'
-        #summed_img = data_dir+'forwardmodel_img_normalized.fits'
-        sample_weights_data = data_dir +'eccco_is_lw_forwardmodel_sample_weights_psf'+str(psf)+'pix_el.fits'
-        #sample_weights_data = data_dir + 'weights_img_normalized.fits'
-        #summed_image = data_dir+'eccco_lw_forwardmodel_thermal_response_psf'+str(psf)+'pix_el.fits'
-        #sample_weights_data = data_dir +'eccco_lw_forwardmodel_sample_weights_psf'+str(psf)+'pix_el.fits'
-
-        #The inversion directory is where the output will be written
-        inversion_dir = 'output/'
-
-        #Read in response,
+        cube_file = (
+            response_dir
+            + "D16Feb2024_eccco_response_feldman_m_el_with_tables_s_i_lw_coopersun.fits"
+        )
+        # cube_file = response_dir + "response_img_normalized.fits"
+        # cube_file = response_dir + 'D1Aug2023_eccco_response_feldman_m_el_with_tables_lw.fits'
+        # cube_file = response_dir + 'D14Feb2024_eccco_response_feldman_m_el_with_tables_lw.fits'
+
+        # weight_file = response_dir + 'oawave_eccco_is_lw.txt'
+
+        # Data directory and data file
+        data_dir = "data/"
+        # summed_image = data_dir + 'eccco_lw_forwardmodel_thermal_response_psf'+str(psf)+'pix_el_decon.fits'
+        summed_image = (
+            data_dir
+            + "eccco_is_lw_forwardmodel_thermal_response_psf"
+            + str(psf)
+            + "pix_el.fits"
+        )
+        # summed_img = data_dir+'forwardmodel_img_normalized.fits'
+        sample_weights_data = (
+            data_dir
+            + "eccco_is_lw_forwardmodel_sample_weights_psf"
+            + str(psf)
+            + "pix_el.fits"
+        )
+        # sample_weights_data = data_dir + 'weights_img_normalized.fits'
+        # summed_image = data_dir+'eccco_lw_forwardmodel_thermal_response_psf'+str(psf)+'pix_el.fits'
+        # sample_weights_data = data_dir +'eccco_lw_forwardmodel_sample_weights_psf'+str(psf)+'pix_el.fits'
+
+        # The inversion directory is where the output will be written
+        inversion_dir = "output/"
+
+        # Read in response,
         rsp_func_hdul = fits.open(cube_file)
 
         solution_fov_width = 2
         detector_row_range = [450, 455]
-        #detector_row_range = None
+        # detector_row_range = None
         field_angle_range = [-2160, 2160]
-        #field_angle_range = [-1260,1260]
+        # field_angle_range = [-1260,1260]
         # field_angle_range = None
-        rsp_dep_name = 'logt'
+        rsp_dep_name = "logt"
         rsp_dep_list = np.round((np.arange(57, 78, 1) / 10.0), decimals=1)
-        #smooth_over = 'spatial'
-        smooth_over = 'dependence'
-
-        inversion = Inversion(rsp_func_cube_file=cube_file,
-                              rsp_dep_name=rsp_dep_name, rsp_dep_list=rsp_dep_list,
-                              solution_fov_width=solution_fov_width,smooth_over=smooth_over,field_angle_range=field_angle_range)
-
-        #inversion.initialize_input_data(summed_image)#,image_mask_file)
-        #sample_weights_data = calculate_weights(summed_image, weight_file, 8., 1.)
-        #print(sample_weights_data)
-        #syntax (summed image, mask image, sample weights image)
+        # smooth_over = 'spatial'
+        smooth_over = "dependence"
+
+        inversion = Inversion(
+            rsp_func_cube_file=cube_file,
+            rsp_dep_name=rsp_dep_name,
+            rsp_dep_list=rsp_dep_list,
+            solution_fov_width=solution_fov_width,
+            smooth_over=smooth_over,
+            field_angle_range=field_angle_range,
+        )
+
+        # inversion.initialize_input_data(summed_image)#,image_mask_file)
+        # sample_weights_data = calculate_weights(summed_image, weight_file, 8., 1.)
+        # print(sample_weights_data)
+        # syntax (summed image, mask image, sample weights image)
        inversion.initialize_input_data(summed_image, None, sample_weights_data)
 
-        #new forweights
+        # new forweights
         # (photon convert file name:str, sigma read:float,exptime:float)
-        #error_parameters=(response_dir + 'oawave_eccco_is_lw.txt',8.,1.)
+        # error_parameters=(response_dir + 'oawave_eccco_is_lw.txt',8.,1.)
 
         alphas = [5]
-        rhos = [.1]
+        rhos = [0.1]
 
         for rho in rhos:
             for alpha in alphas:
-                enet_model = enet(alpha=alpha,
-                                  l1_ratio=rho,
-                                  max_iter=100000,
-                                  precompute=True,
-                                  positive=True,
-                                  fit_intercept=False,
-                                  selection='cyclic')
+                enet_model = enet(
+                    alpha=alpha,
+                    l1_ratio=rho,
+                    max_iter=100000,
+                    precompute=True,
+                    positive=True,
+                    fit_intercept=False,
+                    selection="cyclic",
+                )
                 inv_model = model(enet_model)
 
                 # regressor = SGDRegressor(penalty='elasticnet', alpha=alpha, l1_ratio=rho, fit_intercept=False)
@@ -126,10 +143,20 @@
                 basename = os.path.splitext(os.path.basename(summed_image))[0]
 
                 start = time.time()
-                inversion.multiprocessing_invert(inv_model, inversion_dir, output_file_prefix=basename,
-                #inversion.invert(inv_model, inversion_dir, output_file_prefix=basename,
-                                                 output_file_postfix='x'+str(solution_fov_width)+'_'+str(rho*10)+'_'+str(alpha)+'_wpsf' ,
-                                                 detector_row_range=detector_row_range,
-                                                 score=False)
+                inversion.multiprocessing_invert(
+                    inv_model,
+                    inversion_dir,
+                    output_file_prefix=basename,
+                    # inversion.invert(inv_model, inversion_dir, output_file_prefix=basename,
+                    output_file_postfix="x"
+                    + str(solution_fov_width)
+                    + "_"
+                    + str(rho * 10)
+                    + "_"
+                    + str(alpha)
+                    + "_wpsf",
+                    detector_row_range=detector_row_range,
+                    score=False,
+                )
 
                 end = time.time()
                 print("Inversion Time =", end - start)
diff --git a/experiment_scripts/compare_spectrally_pure.py b/experiment_scripts/compare_spectrally_pure.py
index fa382ac..b6dd63f 100644
--- a/experiment_scripts/compare_spectrally_pure.py
+++ b/experiment_scripts/compare_spectrally_pure.py
@@ -3,16 +3,19 @@
 import matplotlib.pyplot as plt
 import numpy as np
 from astropy.io import fits
-from scipy.signal import convolve2d
 from scipy.ndimage import gaussian_filter
 
 output_dir = "output/photons/plots/"
-ground_truth_path = "output/photons/ground_truth_spectrally_pure_data_cube_reshaped.fits"
+ground_truth_path = (
+    "output/photons/ground_truth_spectrally_pure_data_cube_reshaped.fits"
+)
 dep_ref = "data/ECCCO_speedtest_runs/eccco_sp_puremaps_psf4pix_inelectrons_cm3perspersr_with_tables.fits"
-output_paths = ["output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_spectrally_pure_data_cube_x2_1.0_0.1_wpsf.fits",
-                "output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_spectrally_pure_data_cube_x2_1.0_0.01_wpsf.fits",
-                "output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_spectrally_pure_data_cube_x2_1.0_0.2_wpsf.fits",
-                "output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_spectrally_pure_data_cube_x2_1.0_0.005_wpsf.fits"]
+output_paths = [
+    "output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_spectrally_pure_data_cube_x2_1.0_0.1_wpsf.fits",
+    "output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_spectrally_pure_data_cube_x2_1.0_0.01_wpsf.fits",
+    "output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_spectrally_pure_data_cube_x2_1.0_0.2_wpsf.fits",
+    "output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_spectrally_pure_data_cube_x2_1.0_0.005_wpsf.fits",
+]
 
 with fits.open(dep_ref) as hdul:
     dep_table = hdul[2].data
@@ -31,12 +34,17 @@
 
     # initial image comparison
     fig, axs = plt.subplots(ncols=3, figsize=(30, 10))
-    im1 = axs[0].imshow(gt, vmin=vmin, vmax=vmax, origin='lower')
+    im1 = axs[0].imshow(gt, vmin=vmin, vmax=vmax, origin="lower")
     axs[0].set_title("Ground truth")
-    axs[1].imshow(output_data[i], vmin=vmin, vmax=vmax, origin='lower')
+    axs[1].imshow(output_data[i], vmin=vmin, vmax=vmax, origin="lower")
     axs[1].set_title("Output")
-    im2 = axs[2].imshow(gt - output_data[i],
-                        origin='lower', cmap='seismic', vmin=-interval, vmax=interval)
+    im2 = axs[2].imshow(
+        gt - output_data[i],
+        origin="lower",
+        cmap="seismic",
+        vmin=-interval,
+        vmax=interval,
+    )
     axs[2].set_title("Ground truth - output")
     for ax in axs:
         ax.set_aspect(0.5)
@@ -48,8 +56,8 @@
 
     # scatter plots
     fig, ax = plt.subplots()
-    ax.plot(gt.flatten(), output_data[i].flatten(), '.', ms=1)
-    ax.plot([0, np.max(ground_truth[i])], [0, np.max(ground_truth[i])], 'r-')
+    ax.plot(gt.flatten(), output_data[i].flatten(), ".", ms=1)
+    ax.plot([0, np.max(ground_truth[i])], [0, np.max(ground_truth[i])], "r-")
     ax.set_title(dep_table[i][1])
     ax.set_xlabel("Ground truth")
     ax.set_ylabel("Unfolded recreation")
@@ -59,8 +67,8 @@
     # histogram
     Z, xedges, yedges = np.histogram2d(gt.flatten(), output_data[i].flatten(), 50)
     fig, ax = plt.subplots()
-    im = ax.pcolormesh(xedges, yedges, np.log10(Z.T + 1E-3))
-    ax.plot([0, np.max(ground_truth[i])], [0, np.max(ground_truth[i])], 'r-')
+    im = ax.pcolormesh(xedges, yedges, np.log10(Z.T + 1e-3))
+    ax.plot([0, np.max(ground_truth[i])], [0, np.max(ground_truth[i])], "r-")
     ax.set_title(dep_table[i][1])
     fig.colorbar(im, ax=ax)
     fig.savefig(output_dir + f"histogram_{dep_table[i][1]}_{short_path}.png")
diff --git a/experiment_scripts/create_spectrally_pure_images.py b/experiment_scripts/create_spectrally_pure_images.py
deleted file mode 100644
index 7eb4822..0000000
--- a/experiment_scripts/create_spectrally_pure_images.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import numpy as np
-
-from magixs_data_products import MaGIXSDataProducts
-
-dir_path = "output/test/"
-
-mdp = MaGIXSDataProducts()
-# image_list = ["output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_em_data_cube_x2_1.0_0.2_wpsf.fits",
-#               "output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_em_data_cube_x2_1.0_0.1_wpsf.fits",
-#               "output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_em_data_cube_x2_1.0_0.01_wpsf.fits",
-#               "output/photons/combined_ECCCO_trim_sw_lw_s_i_scaled_em_data_cube_x2_1.0_0.005_wpsf.fits"]
-#image_list = ["/Users/jhughes/Desktop/ECCCO_unfolding_share/output/combined_ECCCO_trim_sw_lw_s_i_scaled_em_data_cube_x2_1.0_0.2_wpsf.fits"]
-image_list = ["data/ECCCO_speedtest_runs/combined_ECCCO_sw_lw_s_i_scaled_em_data_cube_x2_1.0_0.005_wpsf.fits"]
-# gnt_file = "data/ECCCO_speedtest_runs/master_gnt_eccco_inelectrons_cm3perspersr_with_tables.fits"
-gnt_file = "data/ECCCO_speedtest_runs/master_gnt_eccco_inphotons_cm3persperpix_with_tables.fits"
-
-rsp_dep_list = [5.7, 5.8, 5.9, 6.0 , 6.1, 6.2, 6.3, 6.4, 6.5, 6.6, 6.7, 6.8] #np.round((np.arange(57, 68+1, 1) / 10.0), decimals=1)
-mdp.create_level2_0_spectrally_pure_images(image_list, gnt_file, rsp_dep_list, dir_path)
diff --git a/experiment_scripts/restart_after_convergence_warm.py b/experiment_scripts/restart_after_convergence_warm.py
index b59b863..bd59b8d 100644
--- a/experiment_scripts/restart_after_convergence_warm.py
+++ b/experiment_scripts/restart_after_convergence_warm.py
@@ -10,49 +10,53 @@
 
 ALPHA = 0.1
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     with open("img_norm.toml") as f:
         config = toml.load(f)
 
-    response_cube = fits.getdata(config['paths']['response'])
-    inversion = Inversion(rsp_func_cube_file=config['paths']['response'],
-                          rsp_dep_name=config['settings']['response_dependency_name'],
-                          rsp_dep_list=config['settings']['response_dependency_list'],
-                          solution_fov_width=config['settings']['solution_fov_width'],
-                          smooth_over=config['settings']['smooth_over'],
-                          field_angle_range=config['settings']['field_angle_range'])
-    inversion.initialize_input_data(config['paths']['image'],
-                                    None,
-                                    config['paths']['weights'])
+    response_cube = fits.getdata(config["paths"]["response"])
+    inversion = Inversion(
+        rsp_func_cube_file=config["paths"]["response"],
+        rsp_dep_name=config["settings"]["response_dependency_name"],
+        rsp_dep_list=config["settings"]["response_dependency_list"],
+        solution_fov_width=config["settings"]["solution_fov_width"],
+        smooth_over=config["settings"]["smooth_over"],
+        field_angle_range=config["settings"]["field_angle_range"],
+    )
+    inversion.initialize_input_data(
+        config["paths"]["image"], None, config["paths"]["weights"]
+    )
 
     # fits.writeto("response_matrix.fits", inversion.get_response_function())
 
     X_d = inversion.get_response_function()
     X_shape = inversion.get_response_function().shape
-    X = RawArray('d', X_shape[0] * X_shape[1])
+    X = RawArray("d", X_shape[0] * X_shape[1])
     X_np = np.frombuffer(X).reshape(X_shape)
     np.copyto(X_np, inversion.get_response_function())
 
-    overlappogram_d = fits.getdata(config['paths']['image'])
+    overlappogram_d = fits.getdata(config["paths"]["image"])
     overlappogram_shape = overlappogram_d.shape
-    overlappogram = RawArray('d', overlappogram_shape[0] * overlappogram_shape[1])
+    overlappogram = RawArray("d", overlappogram_shape[0] * overlappogram_shape[1])
     overlappogram_np = np.frombuffer(overlappogram).reshape(overlappogram_shape)
     np.copyto(overlappogram_np, overlappogram_d)
 
-    weights_d = fits.getdata(config['paths']['weights'])
+    weights_d = fits.getdata(config["paths"]["weights"])
     weights_shape = weights_d.shape
-    weights = RawArray('d', weights_shape[0] * weights_shape[1])
+    weights = RawArray("d", weights_shape[0] * weights_shape[1])
     weights_np = np.frombuffer(weights).reshape(weights_shape)
     np.copyto(weights_np, weights_d)
 
-    enet_model = ElasticNet(alpha=ALPHA,
-                            l1_ratio=0.1,
-                            max_iter=10_000,
-                            precompute=False,
-                            positive=True,
-                            copy_X=False,
-                            fit_intercept=False,
-                            warm_start=True,
-                            selection='cyclic')
+    enet_model = ElasticNet(
+        alpha=ALPHA,
+        l1_ratio=0.1,
+        max_iter=10_000,
+        precompute=False,
+        positive=True,
+        copy_X=False,
+        fit_intercept=False,
+        warm_start=True,
+        selection="cyclic",
+    )
 
     i = 700
     start = time.time()
diff --git a/experiment_scripts/run_multiion_inversion_parallel.py b/experiment_scripts/run_multiion_inversion_parallel.py
index 5e54ddc..1037893 100644
--- a/experiment_scripts/run_multiion_inversion_parallel.py
+++ b/experiment_scripts/run_multiion_inversion_parallel.py
@@ -11,7 +11,7 @@
 from overlappogram.inversion_field_angles import Inversion
 
 ALPHA = 5
-SELECTION = 'cyclic'
+SELECTION = "cyclic"
 
 # A global dictionary storing the variables passed from the initializer.
 var_dict = {}
@@ -19,73 +19,80 @@
 with open("img_norm.toml") as f:
     config = toml.load(f)
 
-response_cube = fits.getdata(config['paths']['response'])
-inversion = Inversion(rsp_func_cube_file=config['paths']['response'],
-                      rsp_dep_name=config['settings']['response_dependency_name'],
-                      rsp_dep_list=config['settings']['response_dependency_list'],
-                      solution_fov_width=config['settings']['solution_fov_width'],
-                      smooth_over=config['settings']['smooth_over'],
-                      field_angle_range=config['settings']['field_angle_range'])
-inversion.initialize_input_data(config['paths']['image'],
-                                None,
-                                config['paths']['weights'])
+response_cube = fits.getdata(config["paths"]["response"])
+inversion = Inversion(
+    rsp_func_cube_file=config["paths"]["response"],
+    rsp_dep_name=config["settings"]["response_dependency_name"],
+    rsp_dep_list=config["settings"]["response_dependency_list"],
+    solution_fov_width=config["settings"]["solution_fov_width"],
+    smooth_over=config["settings"]["smooth_over"],
+    field_angle_range=config["settings"]["field_angle_range"],
+)
+inversion.initialize_input_data(
+    config["paths"]["image"], None, config["paths"]["weights"]
+)
 
 # fits.writeto("response_matrix.fits", inversion.get_response_function())
 
 X_d = inversion.get_response_function()
 X_shape = inversion.get_response_function().shape
-X = RawArray('d', X_shape[0] * X_shape[1])
+X = RawArray("d", X_shape[0] * X_shape[1])
 X_np = np.frombuffer(X).reshape(X_shape)
 np.copyto(X_np, inversion.get_response_function())
 
-overlappogram_d = fits.getdata(config['paths']['image'])
+overlappogram_d = fits.getdata(config["paths"]["image"])
 overlappogram_shape = overlappogram_d.shape
-overlappogram = RawArray('d', overlappogram_shape[0] * overlappogram_shape[1])
+overlappogram = RawArray("d", overlappogram_shape[0] * overlappogram_shape[1])
 overlappogram_np = np.frombuffer(overlappogram).reshape(overlappogram_shape)
 np.copyto(overlappogram_np, overlappogram_d)
 
-weights_d = fits.getdata(config['paths']['weights'])
+weights_d = fits.getdata(config["paths"]["weights"])
 weights_shape = weights_d.shape
-weights = RawArray('d', weights_shape[0] * weights_shape[1])
+weights = RawArray("d", weights_shape[0] * weights_shape[1])
 weights_np = np.frombuffer(weights).reshape(weights_shape)
 np.copyto(weights_np, weights_d)
 
+
 def init_worker(X, X_shape, overlappogram, overlappogram_shape, weights, weights_shape):
     # Using a dictionary is not strictly necessary. You can also
     # use global variables.
-    var_dict['X'] = X
-    var_dict['X_shape'] = X_shape
-    var_dict['overlappogram'] = overlappogram
-    var_dict['overlappogram_shape'] = overlappogram_shape
-    var_dict['weights'] = weights
-    var_dict['weights_shape'] = weights_shape
+    var_dict["X"] = X
+    var_dict["X_shape"] = X_shape
+    var_dict["overlappogram"] = overlappogram
+    var_dict["overlappogram_shape"] = overlappogram_shape
+    var_dict["weights"] = weights
+    var_dict["weights_shape"] = weights_shape
 
 
 def init_worker2():
     # Using a dictionary is not strictly necessary. You can also
     # use global variables.
-    var_dict['X'] = X
-    var_dict['X_shape'] = X_shape
-    var_dict['overlappogram'] = overlappogram
-    var_dict['overlappogram_shape'] = overlappogram_shape
-    var_dict['weights'] = weights
-    var_dict['weights_shape'] = weights_shape
+    var_dict["X"] = X
+    var_dict["X_shape"] = X_shape
+    var_dict["overlappogram"] = overlappogram
+    var_dict["overlappogram_shape"] = overlappogram_shape
+    var_dict["weights"] = weights
+    var_dict["weights_shape"] = weights_shape
 
 
 def worker_func(i):
     print(i)
     # Simply computes the sum of the i-th row of the input matrix X
-    response_matrix = np.frombuffer(var_dict['X']).reshape(var_dict['X_shape'])
-    overlappogram = np.frombuffer(var_dict['overlappogram']).reshape(var_dict['overlappogram_shape'])
-    weights = np.frombuffer(var_dict['weights']).reshape(var_dict['weights_shape'])
-
-    enet_model = ElasticNet(alpha=ALPHA,
-                            l1_ratio=0.1,
-                            max_iter=10_000,
-                            precompute=False,
-                            positive=True,
-                            copy_X=False,
-                            fit_intercept=False,
-                            selection=SELECTION)
+    response_matrix = np.frombuffer(var_dict["X"]).reshape(var_dict["X_shape"])
+    overlappogram = np.frombuffer(var_dict["overlappogram"]).reshape(
+        var_dict["overlappogram_shape"]
+    )
+    weights = np.frombuffer(var_dict["weights"]).reshape(var_dict["weights_shape"])
+
+    enet_model = ElasticNet(
+        alpha=ALPHA,
+        l1_ratio=0.1,
+        max_iter=10_000,
+        precompute=False,
+        positive=True,
+        copy_X=False,
+        fit_intercept=False,
+        selection=SELECTION,
+    )
 
     enet_model.fit(response_matrix, overlappogram[i, :], sample_weight=weights[i, :])
     data_out = enet_model.predict(response_matrix)
     em = enet_model.coef_
@@ -100,18 +107,18 @@ def worker_func_no_init(i):
     # overlappogram = np.frombuffer(var_dict['overlappogram']).reshape(var_dict['overlappogram_shape'])
     # weights = np.frombuffer(var_dict['weights']).reshape(var_dict['weights_shape'])
 
-    enet_model = ElasticNet(alpha=ALPHA,
-                            l1_ratio=0.1,
-                            max_iter=10_000,
-                            precompute=False,
-                            positive=True,
-                            copy_X=False,
-                            fit_intercept=False,
-                            selection=SELECTION)
-
-    enet_model.fit(X_d,
-                   overlappogram_d[i, :],
-                   sample_weight=weights_d[i, :])
+    enet_model = ElasticNet(
+        alpha=ALPHA,
+        l1_ratio=0.1,
+        max_iter=10_000,
+        precompute=False,
+        positive=True,
+        copy_X=False,
+        fit_intercept=False,
+        selection=SELECTION,
+    )
+
+    enet_model.fit(X_d, overlappogram_d[i, :], sample_weight=weights_d[i, :])
     data_out = enet_model.predict(X_d)
     em = enet_model.coef_
     return em, data_out
@@ -123,6 +130,7 @@ def worker_func_no_init(i):
 #
 #     return glm.beta_, yhat
 
+
 def worker_func_no_init_pass(i, X_d, overlappogram_d, weights_d):
     print(i)
     # Simply computes the sum of the i-th row of the input matrix X
@@ -131,28 +139,29 @@ def worker_func_no_init_pass(i, X_d, overlappogram_d, weights_d):
     # overlappogram = np.frombuffer(var_dict['overlappogram']).reshape(var_dict['overlappogram_shape'])
     # weights = np.frombuffer(var_dict['weights']).reshape(var_dict['weights_shape'])
 
-    enet_model = ElasticNet(alpha=ALPHA,
-                            l1_ratio=0.1,
-                            max_iter=10_000,
-                            precompute=False,
-                            positive=True,
-                            copy_X=False,
-                            fit_intercept=False,
-                            selection=SELECTION)
-
-    enet_model.fit(X_d,
-                   overlappogram_d[i, :],
-                   sample_weight=weights_d[i, :])
+    enet_model = ElasticNet(
+        alpha=ALPHA,
+        l1_ratio=0.1,
+        max_iter=10_000,
+        precompute=False,
+        positive=True,
+        copy_X=False,
+        fit_intercept=False,
+        selection=SELECTION,
+    )
+
+    enet_model.fit(X_d, overlappogram_d[i, :], sample_weight=weights_d[i, :])
     data_out = enet_model.predict(X_d)
     em = enet_model.coef_
     return em, data_out
 
+
 # We need this check for Windows to prevent infinitely spawning new child
 # processes.
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='Inverts overlappograms')
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Inverts overlappograms")
 
-    parser.add_argument('config')
+    parser.add_argument("config")
     args = parser.parse_args()
 
     # read config
@@ -195,17 +204,17 @@ def worker_func_no_init_pass(i, X_d, overlappogram_d, weights_d):
     initargs = (X, X_shape, overlappogram, overlappogram_shape, weights, weights_shape)
 
     start = time.time()
-    #with Pool(processes=11, initializer=init_worker, initargs=initargs) as pool:
+    # with Pool(processes=11, initializer=init_worker, initargs=initargs) as pool:
     # with ThreadPool(11, initializer=init_worker, initargs=initargs) as pool:
     #     result = pool.map(worker_func, range(config['settings']['detector_row_range'][0],
     #                                          config['settings']['detector_row_range'][1]))
 
     with concurrent.futures.ProcessPoolExecutor(max_workers=11) as executor:
-        # with concurrent.futures.ProcessPoolExecutor(max_workers=11,
-        #                                             initializer=init_worker,
-        #                                             initargs=(X, X_shape, overlappogram,
-        #                                                       overlappogram_shape, weights, weights_shape)) as executor:
-        #     # Start the load operations and mark each future with its URL
+        # with concurrent.futures.ProcessPoolExecutor(max_workers=11,
+        #                                            initializer=init_worker,
+        #                                            initargs=(X, X_shape, overlappogram,
+        #                                                      overlappogram_shape, weights, weights_shape)) as executor:
+        #    # Start the load operations and mark each future with its URL
         #     future_to_url = [executor.submit(worker_func, row)
         #                      for row in range(config['settings']['detector_row_range'][0],
        #                                       config['settings']['detector_row_range'][1])]
@@ -213,9 +222,13 @@ def worker_func_no_init_pass(i, X_d, overlappogram_d, weights_d):
 #         future.result()
         # out = executor.map(worker_func_no_init, range(config['settings']['detector_row_range'][0],
        #                                               config['settings']['detector_row_range'][1]))
-        future_to_url = [executor.submit(worker_func_no_init, row)
-                         for row in range(config['settings']['detector_row_range'][0],
-                                          config['settings']['detector_row_range'][1])]
+        future_to_url = [
+            executor.submit(worker_func_no_init, row)
+            for row in range(
+                config["settings"]["detector_row_range"][0],
+                config["settings"]["detector_row_range"][1],
+            )
+        ]
         for future in concurrent.futures.as_completed(future_to_url):
             future.result()
diff --git a/experiment_scripts/search.py b/experiment_scripts/search.py
index 34aeb3c..0ee8f84 100644
--- a/experiment_scripts/search.py
+++ b/experiment_scripts/search.py
@@ -9,36 +9,39 @@
 with open("img_norm.toml") as f:
     config = toml.load(f)
 
-response_cube = fits.getdata(config['paths']['response'])
-inversion = Inversion(rsp_func_cube_file=config['paths']['response'],
-                      rsp_dep_name=config['settings']['response_dependency_name'],
-                      rsp_dep_list=config['settings']['response_dependency_list'],
-                      solution_fov_width=config['settings']['solution_fov_width'],
-                      smooth_over=config['settings']['smooth_over'],
-                      field_angle_range=config['settings']['field_angle_range'])
-inversion.initialize_input_data(config['paths']['image'],
-                                None,
-                                config['paths']['weights'])
+response_cube = fits.getdata(config["paths"]["response"])
+inversion = Inversion(
+    rsp_func_cube_file=config["paths"]["response"],
+    rsp_dep_name=config["settings"]["response_dependency_name"],
+    rsp_dep_list=config["settings"]["response_dependency_list"],
+    solution_fov_width=config["settings"]["solution_fov_width"],
+    smooth_over=config["settings"]["smooth_over"],
+    field_angle_range=config["settings"]["field_angle_range"],
+)
+inversion.initialize_input_data(
+    config["paths"]["image"], None, config["paths"]["weights"]
+)
 
 X_d = inversion.get_response_function()
 X_shape = inversion.get_response_function().shape
 
-overlappogram_d = fits.getdata(config['paths']['image'])
+overlappogram_d = fits.getdata(config["paths"]["image"])
 overlappogram_shape = overlappogram_d.shape
 
-weights_d = fits.getdata(config['paths']['weights'])
+weights_d = fits.getdata(config["paths"]["weights"])
 weights_shape = weights_d.shape
 
 elastic = ElasticNet(fit_intercept=False)
 # elastic.fit(X_d, overlappogram_d[700, :])
 
-search = GridSearchCV(estimator=elastic,
-                      param_grid={"alpha": np.logspace(-2, 10, 8),
-                                  "l1_ratio": np.linspace(0.1, 0.9, 8)},
-                      scoring="r2",
-                      n_jobs=10,
-                      refit=False,
-                      cv=2)
+search = GridSearchCV(
+    estimator=elastic,
+    param_grid={"alpha": np.logspace(-2, 10, 8), "l1_ratio": np.linspace(0.1, 0.9, 8)},
+    scoring="r2",
+    n_jobs=10,
+    refit=False,
+    cv=2,
+)
 
 search.fit(X_d, overlappogram_d[700, :])
 
 print(search.best_params_)
 print(abs(search.best_score_))
diff --git a/experiment_scripts/trim_imager.py b/experiment_scripts/trim_imager.py
index 84e7d4b..35e29c8 100644
--- a/experiment_scripts/trim_imager.py
+++ b/experiment_scripts/trim_imager.py
@@ -1,7 +1,9 @@
 # remove the imager from the response, weights, and overlappogram
 from astropy.io import fits
 
-response = "data/D16Feb2024_eccco_response_feldman_m_el_with_tables_s_i_lw_coopersun.fits"
+response = (
+    "data/D16Feb2024_eccco_response_feldman_m_el_with_tables_s_i_lw_coopersun.fits"
+)
 weights = "data/eccco_is_lw_forwardmodel_sample_weights_psf4pix_el.fits"
 image = "data/eccco_is_lw_forwardmodel_thermal_response_psf4pix_el.fits"
 
@@ -13,8 +15,15 @@
 response_hdul_img_norm = response_hdul.copy()
 response_hdul_img_norm[0].data = response_hdul[0].data[:, :, :4096]
 response_hdul_img_norm.writeto("data/response_only_spectra.fits", overwrite=True)
-fits.writeto("data/forward_model_only_spectra.fits",
-             image_hdul[0].data[:, :4096],
-             header=image_hdul[0].header, overwrite=True)
-fits.writeto("data/weights_only_spectra.fits", weights_hdul[0].data[:, :4096],
-             header=weights_hdul[0].header, overwrite=True)
+fits.writeto(
+    "data/forward_model_only_spectra.fits",
+    image_hdul[0].data[:, :4096],
+    header=image_hdul[0].header,
+    overwrite=True,
+)
+fits.writeto(
+    "data/weights_only_spectra.fits",
+    weights_hdul[0].data[:, :4096],
+    header=weights_hdul[0].header,
+    overwrite=True,
+)
diff --git a/magixs_data_products.py b/magixs_data_products.py
index 5330cb8..377e678 100644
--- a/magixs_data_products.py
+++ b/magixs_data_products.py
@@ -1,58 +1,145 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-"""
-Created on Tue Jun 8 13:47:48 2021
-
-@author: dbeabout
-"""
-
+import glob
+import math
+import os
+import os.path
+import time
+import typing as tp
 from dataclasses import dataclass
+from datetime import datetime, timedelta, timezone
+
+import astropy.wcs
+import dateutil
 import numpy as np
 import numpy.ma as ma
-from astropy.io import fits
-import astropy.wcs
-import typing as tp
 import pandas as pd
-import os.path
-from datetime import datetime, date, timedelta, timezone
-import dateutil
-import glob
-from scipy.io import loadmat
-import os
+from astropy.io import fits
 from astropy.table import Table
-from overlappogram.inversion_field_angles import Inversion
+from PIL import Image
+from scipy.io import loadmat
 from sklearn.linear_model import ElasticNet as enet
+
 from overlappogram.elasticnet_model import ElasticNetModel as enet_model
-from sklearn.linear_model import LassoLars as llars
-#from passion.lassolars_model import LassoLarsModel as llars_model
-import time
-from PIL import Image
-import math
+from overlappogram.inversion_field_angles import Inversion
 
 # Launch T0 (seconds since beginning of year) and timestamp.
 launch_t0 = 18296410.2713640
-launch_timestamp = '2021-07-30T18:22:21Z'
+launch_timestamp = "2021-07-30T18:22:21Z"
 
 # Pixel deltas (y, x).
 pixel_8_deltas = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]
-pixel_16_deltas = [(-2, -2), (-2, -1), (-2, 0), (-2, 1), (-2, 2), (-1, -2), (-1, 2), (0, -2), (0, 2), (1, -2), (1, 2), (2, -2), (2, -1), (2, 0), (2, 1), (2, 2)]
-pixel_16_box_deltas = [(-2, -2), (-2, -1), (-2, 0), (-2, 1), (-2, 2), (-1, -2), (-1, -1), (-1, 0), (-1, 1), (-1, 2), (0, -2), (0, -1), (0, 1), (0, 2), (1, -2), (1, -1), (1, 0), (1, 1), (1, 2), (2, -2), (2, -1), (2, 0), (2, 1), (2, 2)]
-pixel_16_adjacent_deltas = [(-2, -2), (-2, -1), (-2, 0), (-2, 1), (-2, 2), (-1, 2), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0), (2, -1), (2, -2), (1, -2), (0, -2), (-1, -2)]
-pixel_8_adjacent_deltas = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (-1, 0)]
-pixel_16_adjacent_wrap_deltas = [(-1, -2), (-2, -2), (-2, -1), (-2, 0), (-2, 1), (-2, 2), (-1, 2), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0), (2, -1), (2, -2), (1, -2), (0, -2), (-1, -2)]
+pixel_16_deltas = [
+    (-2, -2),
+    (-2, -1),
+    (-2, 0),
+    (-2, 1),
+    (-2, 2),
+    (-1, -2),
+    (-1, 2),
+    (0, -2),
+    (0, 2),
+    (1, -2),
+    (1, 2),
+    (2, -2),
+    (2, -1),
+    (2, 0),
+    (2, 1),
+    (2, 2),
+]
+pixel_16_box_deltas = [
+    (-2, -2),
+    (-2, -1),
+    (-2, 0),
+    (-2, 1),
+    (-2, 2),
+    (-1, -2),
+    (-1, -1),
+    (-1, 0),
+    (-1, 1),
+    (-1, 2),
+    (0, -2),
+    (0, -1),
+    (0, 1),
+    (0, 2),
+    (1, -2),
+    (1, -1),
+    (1, 0),
+    (1, 1),
+    (1, 2),
+    (2, -2),
+    (2, -1),
+    (2, 0),
+    (2, 1),
+    (2, 2),
+]
+pixel_16_adjacent_deltas = [
+    (-2, -2),
+    (-2, -1),
+    (-2, 0),
+    (-2, 1),
+    (-2, 2),
+    (-1, 2),
+    (0, 2),
+    (1, 2),
+    (2, 2),
+    (2, 1),
+    (2, 0),
+    (2, -1),
+    (2, -2),
+    (1, -2),
+    (0, -2),
+    (-1, -2),
+]
+pixel_8_adjacent_deltas = [
+    (-1, -1),
+    (-1, 0),
+    (-1, 1),
+    (0, 1),
+    (1, 1),
+    (1, 0),
+    (1, -1),
+    (0, -1),
+]
+pixel_16_adjacent_wrap_deltas = [
+    (-1, -2),
+    (-2, -2),
+    (-2, -1),
+    (-2, 0),
+    (-2, 1),
+    (-2, 2),
+    (-1, 2),
+    (0, 2),
+    (1, 2),
+    (2, 2),
+    (2, 1),
+    (2, 0),
+    (2, -1),
+    (2, -2),
+    (1, -2),
+    (0, -2),
+    (-1, -2),
+]
+
 
 def calc_analog_cal(raw_values):
-    return ((raw_values / 1024.0) * 5.0)
+    return (raw_values / 1024.0) * 5.0
+
 
 def calc_cold_block_cal(values):
-    return ((52.37 * calc_analog_cal(values)) - 157.55)
+    return (52.37 * calc_analog_cal(values)) - 157.55
+
 
 def calc_ccd_holder_cal(values):
-    return ((0.5513 * calc_analog_cal(values)**3) + (1.3523 * calc_analog_cal(values)**2) + (29.942 * calc_analog_cal(values)) - 116.85)
+    return (
+        (0.5513 * calc_analog_cal(values) ** 3)
+        + (1.3523 * calc_analog_cal(values) ** 2)
+        + (29.942 * calc_analog_cal(values))
+        - 116.85
+    )
+
 
 @dataclass(order=True)
-class MaGIXSDataProducts():
-    '''
+class MaGIXSDataProducts:
+    """
     Inversion for overlap-a-gram data.
 
     Attributes
     ----------
@@ -62,11 +149,11 @@ class MaGIXSDataProducts():
     -------
     None.
 
-    '''
+    """
 
     def __post_init__(self):
-        self.active_pixels_height:np.uit32 = 1024
-        self.active_pixels_width:np.uint32 = 2048
+        self.active_pixels_height: np.uint32 = 1024
+        self.active_pixels_width: np.uint32 = 2048
         self.pixel_fov_width = 2.8
         self.ll_gain = 2.6
         self.lr_gain = 2.6
@@ -78,7 +165,7 @@ def __post_init__(self):
         self.ur_bias = 0.0
 
     def remove_bias(self, image: np.ndarray) -> np.ndarray:
-        '''
+        """
         Blank pixels are used to calculate the bias and remove bias from image.
         This algorithm using the 35 inner most columns of the blank pixels and
         the rows of the blank pixels that contains the rows of the active image
@@ -94,7 +181,7 @@ def remove_bias(self, image: np.ndarray) -> np.ndarray:
         image : np.ndarray
             Image with bias removed.
 
-        '''
+        """
         height, width = np.shape(image)
 
         quad_height = height // 2
@@ -102,20 +189,20 @@ def remove_bias(self, image: np.ndarray) -> np.ndarray:
 
         # Calculate bias for each quadrant.
         self.ll_bias = np.mean(image[8:quad_height, 15:50])
-        self.lr_bias = np.mean(image[8:quad_height, width - 50:width - 15])
-        self.ul_bias = np.mean(image[quad_height:height - 8, 15:50])
-        self.ur_bias = np.mean(image[quad_height:height - 8, width - 50:width - 15])
+        self.lr_bias = np.mean(image[8:quad_height, width - 50 : width - 15])
+        self.ul_bias = np.mean(image[quad_height : height - 8, 15:50])
+        self.ur_bias = np.mean(image[quad_height : height - 8, width - 50 : width - 15])
 
         # Subtract bias for each quadrant.
         image[8:quad_height, 50:quad_width] -= self.ll_bias
-        image[8:quad_height, quad_width:width - 50] -= self.lr_bias
-        image[quad_height:height - 8, 50:quad_width] -= self.ul_bias
-        image[quad_height:height - 8, quad_width:width - 50] -= self.ur_bias
+        image[8:quad_height, quad_width : width - 50] -= self.lr_bias
+        image[quad_height : height - 8, 50:quad_width] -= self.ul_bias
+        image[quad_height : height - 8, quad_width : width - 50] -= self.ur_bias
 
         return image
 
     def remove_bias_by_row(self, image: np.ndarray) -> np.ndarray:
-        '''
+        """
         Blank pixels are used to calculate the bias and remove bias from image.
         This algorithm using the 35 inner most columns of the blank pixels and
         the rows of the blank pixels that contains the rows of the active image
@@ -131,7 +218,7 @@ def remove_bias_by_row(self, image: np.ndarray) -> np.ndarray:
         image : np.ndarray
             Image with bias removed.
 
-        '''
+        """
         height, width = np.shape(image)
 
         quad_height = height // 2
@@ -139,20 +226,28 @@ def remove_bias_by_row(self, image: np.ndarray) -> np.ndarray:
 
         # Calculate bias for each quadrant.
         self.ll_bias = np.mean(image[8:quad_height, 15:50])
-        self.lr_bias = np.mean(image[8:quad_height, width - 50:width - 15])
-        self.ul_bias = np.mean(image[quad_height:height - 8, 15:50])
-        self.ur_bias = np.mean(image[quad_height:height - 8, width - 50:width - 15])
+        self.lr_bias = np.mean(image[8:quad_height, width - 50 : width - 15])
+        self.ul_bias = np.mean(image[quad_height : height - 8, 15:50])
+        self.ur_bias = np.mean(image[quad_height : height - 8, width - 50 : width - 15])
 
         # Subtract bias for each quadrant.
- image[8:quad_height, 50:quad_width] -= np.mean(image[8:quad_height, 15:50], axis=1)[:, None] - image[8:quad_height, quad_width:width - 50] -= np.mean(image[8:quad_height, width - 50:width - 15], axis=1)[:, None] - image[quad_height:height - 8, 50:quad_width] -= np.mean(image[quad_height:height - 8, 15:50], axis=1)[:, None] - image[quad_height:height - 8, quad_width:width - 50] -= np.mean(image[quad_height:height - 8, width - 50:width - 15], axis=1)[:, None] + image[8:quad_height, 50:quad_width] -= np.mean( + image[8:quad_height, 15:50], axis=1 + )[:, None] + image[8:quad_height, quad_width : width - 50] -= np.mean( + image[8:quad_height, width - 50 : width - 15], axis=1 + )[:, None] + image[quad_height : height - 8, 50:quad_width] -= np.mean( + image[quad_height : height - 8, 15:50], axis=1 + )[:, None] + image[quad_height : height - 8, quad_width : width - 50] -= np.mean( + image[quad_height : height - 8, width - 50 : width - 15], axis=1 + )[:, None] return image def remove_inactive_pixels(self, image: np.ndarray) -> np.ndarray: - ''' + """ Inactive pixels (i.e. blank and overscan pixels) are removed from the image. @@ -166,28 +261,32 @@ def remove_inactive_pixels(self, image: np.ndarray) -> np.ndarray: active_image : np.ndarray Image with inactive pixels removed. - ''' + """ height, width = np.shape(image) - #print("rm inact h, w = ", height, width) + # print("rm inact h, w = ", height, width) - #quad_height = height // 2 + # quad_height = height // 2 active_height = height - (8 * 2) quad_width = width // 2 active_width = width - ((50 + 2) * 2) active_half_width = active_width // 2 - active_image = np.zeros((active_height, active_width), dtype = image.dtype) + active_image = np.zeros((active_height, active_width), dtype=image.dtype) # Left side. - active_image[0:active_height, 0:active_half_width] = image[8:active_height + 8, 50:50 + active_half_width] + active_image[0:active_height, 0:active_half_width] = image[ + 8 : active_height + 8, 50 : 50 + active_half_width + ] # Right side. - active_image[0:active_height, active_half_width:active_width] = image[8:active_height + 8, quad_width + 2:width - 50] + active_image[0:active_height, active_half_width:active_width] = image[ + 8 : active_height + 8, quad_width + 2 : width - 50 + ] return active_image def adjust_gain(self, image: np.ndarray) -> np.ndarray: - ''' + """ Adjust the gain of the image. Parameters @@ -200,21 +299,23 @@ def adjust_gain(self, image: np.ndarray) -> np.ndarray: image : np.ndarray Image with gain adjusted. - ''' + """ height, width = np.shape(image) quad_height = height // 2 quad_width = width // 2 image[0:quad_height, 0:quad_width] *= self.ll_gain - image[0:quad_height, quad_width:width] *= self.lr_gain - image[quad_height:height, 0:quad_width] *= self.ul_gain - image[quad_height:height, quad_width:width] *= self.ur_gain + image[0:quad_height, quad_width:width] *= self.lr_gain + image[quad_height:height, 0:quad_width] *= self.ul_gain + image[quad_height:height, quad_width:width] *= self.ur_gain return image - def replace_bad_pixels(self, image: np.ndarray, bad_pixel_mask: np.ndarray) -> [np.ndarray, pd.DataFrame]: - ''' + def replace_bad_pixels( + self, image: np.ndarray, bad_pixel_mask: np.ndarray + ) -> [np.ndarray, pd.DataFrame]: + """ Replace bad pixels in image. The algorithm uses the median of the surrounding pixels to replace the bad pixel value. 
@@ -232,31 +333,53 @@ def replace_bad_pixels(self, image: np.ndarray, bad_pixel_mask: np.ndarray) -> [ original_values: pd.DataFrame Bad pixel replaced values (y, x, value). - ''' + """ height, width = np.shape(image) bad_pixel_list = np.where(bad_pixel_mask != 0) if len(bad_pixel_list[0]) > 0: - #data = np.zeros((len(bad_pixel_list[0]),), dtype=[("y", "i4"), ("x", "i4"), ("value", "f4")]) - data = np.zeros((len(bad_pixel_list[0]),), dtype=[("y", np.int32), ("x", np.int32), ("value", np.float32)]) + # data = np.zeros((len(bad_pixel_list[0]),), dtype=[("y", "i4"), ("x", "i4"), ("value", "f4")]) + data = np.zeros( + (len(bad_pixel_list[0]),), + dtype=[("y", np.int32), ("x", np.int32), ("value", np.float32)], + ) original_values = pd.DataFrame(data) for c in range(len(bad_pixel_list[0])): adjacent_pixel_list = [] for i in [-1, 0, 1]: for j in [-1, 0, 1]: # check if location is within boundary - if 0 <= bad_pixel_list[0][c] + i < height and 0 <= bad_pixel_list[1][c] + j < width: - adjacent_pixel_list.append(image[bad_pixel_list[0][c] + i, bad_pixel_list[1][c] + j]) + if ( + 0 <= bad_pixel_list[0][c] + i < height + and 0 <= bad_pixel_list[1][c] + j < width + ): + adjacent_pixel_list.append( + image[ + bad_pixel_list[0][c] + i, bad_pixel_list[1][c] + j + ] + ) median_value = np.median(adjacent_pixel_list) original_value = image[bad_pixel_list[0][c], bad_pixel_list[1][c]] image[bad_pixel_list[0][c], bad_pixel_list[1][c]] = median_value - original_values.loc[c, ['y', 'x', 'value']] = [bad_pixel_list[0][c], bad_pixel_list[1][c], original_value] + original_values.loc[c, ["y", "x", "value"]] = [ + bad_pixel_list[0][c], + bad_pixel_list[1][c], + original_value, + ] return image, original_values - else: - return image, pd.DataFrame(columns=['y', 'x', 'value']) - - def create_bad_pixel_mask(self, image_list: list, pre_master_dark_file: str, post_master_dark_file: str, sigma: np.float32, percent_threshold: np.float32, bad_pixel_mask_file: str): - ''' + else: # noqa: RET505 + return image, pd.DataFrame(columns=["y", "x", "value"]) + + def create_bad_pixel_mask( + self, + image_list: list, + pre_master_dark_file: str, + post_master_dark_file: str, + sigma: np.float32, + percent_threshold: np.float32, + bad_pixel_mask_file: str, + ): + """ Creates a bad pixel mask. This algorithm uses values from the entire image. The algorithm for determining bad pixels is any values above the mean - (sigma * standard deviation) For any values below the mean + (sigma * standard deviation where the percent threshold is met. @@ -280,67 +403,124 @@ def create_bad_pixel_mask(self, image_list: list, pre_master_dark_file: str, pos ------- None. 
- ''' - assert(percent_threshold > 0.0 and percent_threshold <=1.0) - + """ number_images = len(image_list) - image_cube = np.zeros((number_images, self.active_pixels_height, self.active_pixels_width), dtype=np.float32) + image_cube = np.zeros( + (number_images, self.active_pixels_height, self.active_pixels_width), + dtype=np.float32, + ) with fits.open(pre_master_dark_file) as dark_hdul: pre_master_dark = dark_hdul[0].data.astype(np.float32).copy() - pre_dark_temp = dark_hdul[0].header['ADCTEMP6'] + pre_dark_temp = dark_hdul[0].header["ADCTEMP6"] with fits.open(post_master_dark_file) as dark_hdul: post_master_dark = dark_hdul[0].data.astype(np.float32).copy() - post_dark_temp = dark_hdul[0].header['ADCTEMP6'] + post_dark_temp = dark_hdul[0].header["ADCTEMP6"] img_seq_num_list = [] first_image = True for index in range(number_images): with fits.open(image_list[index]) as image_hdul: if first_image is True: - camera_id = image_hdul[0].header['CAM_ID'] - camera_sn = image_hdul[0].header['CAM_SN'] + camera_id = image_hdul[0].header["CAM_ID"] + camera_sn = image_hdul[0].header["CAM_SN"] first_image = False - img_seq_num_list.append(image_hdul[0].header['IMG_ISN']) - image, image_header, bad_pixels_replaced_values = self.create_adjusted_light(image_hdul[0].data, image_hdul[0].header, pre_master_dark, pre_dark_temp, post_master_dark, post_dark_temp, None) + img_seq_num_list.append(image_hdul[0].header["IMG_ISN"]) + ( + image, + image_header, + bad_pixels_replaced_values, + ) = self.create_adjusted_light( + image_hdul[0].data, + image_hdul[0].header, + pre_master_dark, + pre_dark_temp, + post_master_dark, + post_dark_temp, + None, + ) image_cube[index, :, :] = image # Despike. - image_cube[np.where(image_cube > (np.nanmedian(image_cube, axis=0, keepdims=True) + (sigma * np.nanstd(image_cube, axis=0, keepdims=True))))] = np.nan - #print("despike_len = ", np.count_nonzero(np.isnan(image_cube))) - - bad_pixel_mask = np.zeros((self.active_pixels_height, self.active_pixels_width), dtype=np.float32) - bad_pixels = np.zeros((number_images, self.active_pixels_height, self.active_pixels_width), dtype=np.float32) + image_cube[ + np.where( + image_cube + > ( + np.nanmedian(image_cube, axis=0, keepdims=True) + + (sigma * np.nanstd(image_cube, axis=0, keepdims=True)) + ) + ) + ] = np.nan + # print("despike_len = ", np.count_nonzero(np.isnan(image_cube))) + + bad_pixel_mask = np.zeros( + (self.active_pixels_height, self.active_pixels_width), dtype=np.float32 + ) + bad_pixels = np.zeros( + (number_images, self.active_pixels_height, self.active_pixels_width), + dtype=np.float32, + ) bad_pixels[np.where(np.isnan(image_cube))] = np.nan for index in range(number_images): - bad_pixels[index, :, :][np.where(image_cube[index, :, :] < np.nanmean(image_cube[index, :, :], axis=(0,1)) - (sigma * np.nanstd(image_cube[index, :, :], axis=(0,1))))] = 1 - bad_pixels[index, :, :][np.where(image_cube[index, :, :] > np.nanmean(image_cube[index, :, :], axis=(0,1)) + (sigma * np.nanstd(image_cube[index, :, :], axis=(0,1))))] = 1 + bad_pixels[index, :, :][ + np.where( + image_cube[index, :, :] + < np.nanmean(image_cube[index, :, :], axis=(0, 1)) + - (sigma * np.nanstd(image_cube[index, :, :], axis=(0, 1))) + ) + ] = 1 + bad_pixels[index, :, :][ + np.where( + image_cube[index, :, :] + > np.nanmean(image_cube[index, :, :], axis=(0, 1)) + + (sigma * np.nanstd(image_cube[index, :, :], axis=(0, 1))) + ) + ] = 1 # Set dark and hot pixels where number is equal to or greater than percent threshold - 
bad_pixel_mask[np.where(np.nanmean(bad_pixels, axis=0) >= percent_threshold)] = 1 + bad_pixel_mask[ + np.where(np.nanmean(bad_pixels, axis=0) >= percent_threshold) + ] = 1 bad_pixel_mask_header = fits.Header() - bad_pixel_mask_header['CAM_ID'] = (camera_id, 'Camera ID') - bad_pixel_mask_header['CAM_SN'] = (camera_sn, 'Camera Serial Number') - bad_pixel_mask_header['LEVEL'] = ('0.1', 'Data Product Level') - image_sequence_numbers = self.create_image_sequence_number_list(img_seq_num_list) - bad_pixel_mask_header['IMGSEQNM'] = (image_sequence_numbers, 'Image Sequence Numbers') - bad_pixel_mask_header ['SIGMA'] = (sigma, 'Sigma') - bad_pixel_mask_header['PCNTTHLD'] = (percent_threshold, 'Percent Threshold') - bad_pixel_mask_header['PREMDARK'] = (os.path.basename(pre_master_dark_file), 'Pre Master Dark') - bad_pixel_mask_header['PSTMDARK'] = (os.path.basename(post_master_dark_file), 'Post Master Dark') - bad_pixel_mask_header['INTPTEMP'] = ('ADCTEMP6', 'Interpolation Temperature') + bad_pixel_mask_header["CAM_ID"] = (camera_id, "Camera ID") + bad_pixel_mask_header["CAM_SN"] = (camera_sn, "Camera Serial Number") + bad_pixel_mask_header["LEVEL"] = ("0.1", "Data Product Level") + image_sequence_numbers = self.create_image_sequence_number_list( + img_seq_num_list + ) + bad_pixel_mask_header["IMGSEQNM"] = ( + image_sequence_numbers, + "Image Sequence Numbers", + ) + bad_pixel_mask_header["SIGMA"] = (sigma, "Sigma") + bad_pixel_mask_header["PCNTTHLD"] = (percent_threshold, "Percent Threshold") + bad_pixel_mask_header["PREMDARK"] = ( + os.path.basename(pre_master_dark_file), + "Pre Master Dark", + ) + bad_pixel_mask_header["PSTMDARK"] = ( + os.path.basename(post_master_dark_file), + "Post Master Dark", + ) + bad_pixel_mask_header["INTPTEMP"] = ("ADCTEMP6", "Interpolation Temperature") # Create output directory. os.makedirs(os.path.dirname(bad_pixel_mask_file), exist_ok=True) - hdu = fits.PrimaryHDU(data=bad_pixel_mask, header=bad_pixel_mask_header ) + hdu = fits.PrimaryHDU(data=bad_pixel_mask, header=bad_pixel_mask_header) hdulist = fits.HDUList([hdu]) hdulist.writeto(bad_pixel_mask_file, overwrite=True) - def create_bad_pixel_mask_by_tap(self, image_list: list, sigma: np.float32, percent_threshold: np.float32, bad_pixel_mask_file: str): - ''' + def create_bad_pixel_mask_by_tap( + self, + image_list: list, + sigma: np.float32, + percent_threshold: np.float32, + bad_pixel_mask_file: str, + ): + """ Creates a bad pixel mask. This algorithm uses values from the entire image. The algorithm for determining bad pixels is any values above the mean - (sigma * standard deviation) For any values below the median + (sigma * standard deviation where the percent threshold is met. @@ -364,67 +544,250 @@ def create_bad_pixel_mask_by_tap(self, image_list: list, sigma: np.float32, perc ------- None. 
- ''' - assert(percent_threshold > 0.0 and percent_threshold <=1.0) - + """ number_images = len(image_list) - image_cube = np.zeros((number_images, self.active_pixels_height, self.active_pixels_width), dtype=np.float32) + image_cube = np.zeros( + (number_images, self.active_pixels_height, self.active_pixels_width), + dtype=np.float32, + ) img_seq_num_list = [] first_image = True for index in range(number_images): with fits.open(image_list[index]) as image_hdul: if first_image is True: - camera_id = image_hdul[0].header['CAM_ID'] - camera_sn = image_hdul[0].header['CAM_SN'] + camera_id = image_hdul[0].header["CAM_ID"] + camera_sn = image_hdul[0].header["CAM_SN"] first_image = False - img_seq_num_list.append(image_hdul[0].header['IMG_ISN']) + img_seq_num_list.append(image_hdul[0].header["IMG_ISN"]) image = self.remove_bias(image_hdul[0].data.astype(np.float32)) image = self.remove_inactive_pixels(image) image_cube[index, :, :] = image # Despike. - image_cube[np.where(image_cube > (np.nanmedian(image_cube, axis=0, keepdims=True) + (sigma * np.nanstd(image_cube, axis=0, keepdims=True))))] = np.nan - #print("despike_len = ", np.count_nonzero(np.isnan(image_cube))) - - bad_pixel_mask = np.zeros((self.active_pixels_height, self.active_pixels_width), dtype=np.float32) - bad_pixels = np.zeros((number_images, self.active_pixels_height, self.active_pixels_width), dtype=np.float32) + image_cube[ + np.where( + image_cube + > ( + np.nanmedian(image_cube, axis=0, keepdims=True) + + (sigma * np.nanstd(image_cube, axis=0, keepdims=True)) + ) + ) + ] = np.nan + # print("despike_len = ", np.count_nonzero(np.isnan(image_cube))) + + bad_pixel_mask = np.zeros( + (self.active_pixels_height, self.active_pixels_width), dtype=np.float32 + ) + bad_pixels = np.zeros( + (number_images, self.active_pixels_height, self.active_pixels_width), + dtype=np.float32, + ) quad_height = self.active_pixels_height // 2 quad_width = self.active_pixels_width // 2 bad_pixels[np.where(np.isnan(image_cube))] = np.nan for index in range(number_images): - bad_pixels[index, 0:quad_height, 0:quad_width][np.where(image_cube[index, 0:quad_height, 0:quad_width] < np.nanmedian(image_cube[index, 0:quad_height, 0:quad_width]) - (sigma * np.nanstd(image_cube[index, 0:quad_height, 0:quad_width])))] = 1 - bad_pixels[index, 0:quad_height, quad_width:self.active_pixels_width][np.where(image_cube[index, 0:quad_height, quad_width:self.active_pixels_width] < np.nanmedian(image_cube[index, 0:quad_height, quad_width:self.active_pixels_width]) - (sigma * np.nanstd(image_cube[index, 0:quad_height, quad_width:self.active_pixels_width])))] = 1 - bad_pixels[index, quad_height:self.active_pixels_height, 0:quad_width][np.where(image_cube[index, quad_height:self.active_pixels_height, 0:quad_width] < np.nanmedian(image_cube[index, quad_height:self.active_pixels_height, 0:quad_width]) - (sigma * np.nanstd(image_cube[index, quad_height:self.active_pixels_height, 0:quad_width])))] = 1 - bad_pixels[index, quad_height:self.active_pixels_height, quad_width:self.active_pixels_width][np.where(image_cube[index, quad_height:self.active_pixels_height, quad_width:self.active_pixels_width] < np.nanmedian(image_cube[index, quad_height:self.active_pixels_height, quad_width:self.active_pixels_width]) - (sigma * np.nanstd(image_cube[index, quad_height:self.active_pixels_height, quad_width:self.active_pixels_width])))] = 1 - bad_pixels[index, 0:quad_height, 0:quad_width][np.where(image_cube[index, 0:quad_height, 0:quad_width] > np.nanmedian(image_cube[index, 0:quad_height, 
0:quad_width]) + (sigma * np.nanstd(image_cube[index, 0:quad_height, 0:quad_width])))] = 1 - bad_pixels[index, 0:quad_height, quad_width:self.active_pixels_width][np.where(image_cube[index, 0:quad_height, quad_width:self.active_pixels_width] > np.nanmedian(image_cube[index, 0:quad_height, quad_width:self.active_pixels_width]) + (sigma * np.nanstd(image_cube[index, 0:quad_height, quad_width:self.active_pixels_width])))] = 1 - bad_pixels[index, quad_height:self.active_pixels_height, 0:quad_width][np.where(image_cube[index, quad_height:self.active_pixels_height, 0:quad_width] > np.nanmedian(image_cube[index, quad_height:self.active_pixels_height, 0:quad_width]) + (sigma * np.nanstd(image_cube[index, quad_height:self.active_pixels_height, 0:quad_width])))] = 1 - bad_pixels[index, quad_height:self.active_pixels_height, quad_width:self.active_pixels_width][np.where(image_cube[index, quad_height:self.active_pixels_height, quad_width:self.active_pixels_width] > np.nanmedian(image_cube[index, quad_height:self.active_pixels_height, quad_width:self.active_pixels_width]) + (sigma * np.nanstd(image_cube[index, quad_height:self.active_pixels_height, quad_width:self.active_pixels_width])))] = 1 + bad_pixels[index, 0:quad_height, 0:quad_width][ + np.where( + image_cube[index, 0:quad_height, 0:quad_width] + < np.nanmedian(image_cube[index, 0:quad_height, 0:quad_width]) + - ( + sigma + * np.nanstd(image_cube[index, 0:quad_height, 0:quad_width]) + ) + ) + ] = 1 + bad_pixels[index, 0:quad_height, quad_width : self.active_pixels_width][ + np.where( + image_cube[ + index, 0:quad_height, quad_width : self.active_pixels_width + ] + < np.nanmedian( + image_cube[ + index, 0:quad_height, quad_width : self.active_pixels_width + ] + ) + - ( + sigma + * np.nanstd( + image_cube[ + index, + 0:quad_height, + quad_width : self.active_pixels_width, + ] + ) + ) + ) + ] = 1 + bad_pixels[index, quad_height : self.active_pixels_height, 0:quad_width][ + np.where( + image_cube[ + index, quad_height : self.active_pixels_height, 0:quad_width + ] + < np.nanmedian( + image_cube[ + index, quad_height : self.active_pixels_height, 0:quad_width + ] + ) + - ( + sigma + * np.nanstd( + image_cube[ + index, + quad_height : self.active_pixels_height, + 0:quad_width, + ] + ) + ) + ) + ] = 1 + bad_pixels[ + index, + quad_height : self.active_pixels_height, + quad_width : self.active_pixels_width, + ][ + np.where( + image_cube[ + index, + quad_height : self.active_pixels_height, + quad_width : self.active_pixels_width, + ] + < np.nanmedian( + image_cube[ + index, + quad_height : self.active_pixels_height, + quad_width : self.active_pixels_width, + ] + ) + - ( + sigma + * np.nanstd( + image_cube[ + index, + quad_height : self.active_pixels_height, + quad_width : self.active_pixels_width, + ] + ) + ) + ) + ] = 1 + bad_pixels[index, 0:quad_height, 0:quad_width][ + np.where( + image_cube[index, 0:quad_height, 0:quad_width] + > np.nanmedian(image_cube[index, 0:quad_height, 0:quad_width]) + + ( + sigma + * np.nanstd(image_cube[index, 0:quad_height, 0:quad_width]) + ) + ) + ] = 1 + bad_pixels[index, 0:quad_height, quad_width : self.active_pixels_width][ + np.where( + image_cube[ + index, 0:quad_height, quad_width : self.active_pixels_width + ] + > np.nanmedian( + image_cube[ + index, 0:quad_height, quad_width : self.active_pixels_width + ] + ) + + ( + sigma + * np.nanstd( + image_cube[ + index, + 0:quad_height, + quad_width : self.active_pixels_width, + ] + ) + ) + ) + ] = 1 + bad_pixels[index, quad_height : self.active_pixels_height, 0:quad_width][ + 
np.where( + image_cube[ + index, quad_height : self.active_pixels_height, 0:quad_width + ] + > np.nanmedian( + image_cube[ + index, quad_height : self.active_pixels_height, 0:quad_width + ] + ) + + ( + sigma + * np.nanstd( + image_cube[ + index, + quad_height : self.active_pixels_height, + 0:quad_width, + ] + ) + ) + ) + ] = 1 + bad_pixels[ + index, + quad_height : self.active_pixels_height, + quad_width : self.active_pixels_width, + ][ + np.where( + image_cube[ + index, + quad_height : self.active_pixels_height, + quad_width : self.active_pixels_width, + ] + > np.nanmedian( + image_cube[ + index, + quad_height : self.active_pixels_height, + quad_width : self.active_pixels_width, + ] + ) + + ( + sigma + * np.nanstd( + image_cube[ + index, + quad_height : self.active_pixels_height, + quad_width : self.active_pixels_width, + ] + ) + ) + ) + ] = 1 # Set dark and hot pixels where number is equal to or greater than percent threshold - bad_pixel_mask[np.where(np.nanmean(bad_pixels, axis=0) >= percent_threshold)] = 1 + bad_pixel_mask[ + np.where(np.nanmean(bad_pixels, axis=0) >= percent_threshold) + ] = 1 bad_pixel_mask_header = fits.Header() - bad_pixel_mask_header['CAM_ID'] = (camera_id, 'Camera ID') - bad_pixel_mask_header['CAM_SN'] = (camera_sn, 'Camera Serial Number') - bad_pixel_mask_header['LEVEL'] = ('0.1', 'Data Product Level') - image_sequence_numbers = self.create_image_sequence_number_list(img_seq_num_list) - bad_pixel_mask_header['IMGSEQNM'] = (image_sequence_numbers, 'Image Sequence Numbers') - bad_pixel_mask_header ['SIGMA'] = (sigma, 'Sigma') - bad_pixel_mask_header['PCNTTHLD'] = (percent_threshold, 'Percent Threshold') + bad_pixel_mask_header["CAM_ID"] = (camera_id, "Camera ID") + bad_pixel_mask_header["CAM_SN"] = (camera_sn, "Camera Serial Number") + bad_pixel_mask_header["LEVEL"] = ("0.1", "Data Product Level") + image_sequence_numbers = self.create_image_sequence_number_list( + img_seq_num_list + ) + bad_pixel_mask_header["IMGSEQNM"] = ( + image_sequence_numbers, + "Image Sequence Numbers", + ) + bad_pixel_mask_header["SIGMA"] = (sigma, "Sigma") + bad_pixel_mask_header["PCNTTHLD"] = (percent_threshold, "Percent Threshold") # Create output directory. os.makedirs(os.path.dirname(bad_pixel_mask_file), exist_ok=True) - hdu = fits.PrimaryHDU(data=bad_pixel_mask, header=bad_pixel_mask_header ) + hdu = fits.PrimaryHDU(data=bad_pixel_mask, header=bad_pixel_mask_header) hdulist = fits.HDUList([hdu]) hdulist.writeto(bad_pixel_mask_file, overwrite=True) - def create_master_dark(self, image_list: list, sigma: np.float32, master_dark_file: str): - ''' + def create_master_dark( + self, image_list: list, sigma: np.float32, master_dark_file: str + ): + """ Creates a master dark image. Spikes are ignored in the creation of the master dark. The algorithm for spike detection is median + (sigma * standard deviation). The image housekeeping parameters are averaged and stored with the master dark. @@ -442,10 +805,15 @@ def create_master_dark(self, image_list: list, sigma: np.float32, master_dark_fi ------- None. 
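For orientation, the quadrant-wise bad-pixel logic reformatted above reduces to a short routine: within each quadrant of each frame, pixels falling outside median ± sigma·std are flagged, and a pixel enters the final mask when it is flagged in at least percent_threshold of the frames. A minimal NumPy sketch of that idea (function and variable names here are illustrative, not part of the codebase):

import numpy as np

def quadrant_bad_pixel_mask(image_cube, sigma, percent_threshold):
    # image_cube: (n_frames, height, width) stack of dark frames.
    n_frames, height, width = image_cube.shape
    qh, qw = height // 2, width // 2
    quadrants = [(slice(0, qh), slice(0, qw)), (slice(0, qh), slice(qw, width)),
                 (slice(qh, height), slice(0, qw)), (slice(qh, height), slice(qw, width))]
    flagged = np.zeros(image_cube.shape, dtype=np.float32)
    for i in range(n_frames):
        for ys, xs in quadrants:
            quad = image_cube[i, ys, xs]
            med, std = np.nanmedian(quad), np.nanstd(quad)
            view = flagged[i, ys, xs]  # a view, so writes land in `flagged`
            view[(quad < med - sigma * std) | (quad > med + sigma * std)] = 1
    # A pixel is bad when flagged in >= percent_threshold of the frames.
    mask = np.zeros((height, width), dtype=np.float32)
    mask[np.nanmean(flagged, axis=0) >= percent_threshold] = 1
    return mask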
- ''' + """ number_images = len(image_list) - image_cube = np.zeros((number_images, self.active_pixels_height, self.active_pixels_width), dtype=np.float32) - master_dark = np.zeros((self.active_pixels_height, self.active_pixels_width), dtype=np.float32) + image_cube = np.zeros( + (number_images, self.active_pixels_height, self.active_pixels_width), + dtype=np.float32, + ) + master_dark = np.zeros( + (self.active_pixels_height, self.active_pixels_width), dtype=np.float32 + ) summed_image_exposure_time = 0.0 summed_measured_exposure_time = 0.0 @@ -460,53 +828,91 @@ def create_master_dark(self, image_list: list, sigma: np.float32, master_dark_fi for index in range(number_images): with fits.open(image_list[index]) as image_hdul: if first_image is True: - camera_id = image_hdul[0].header['CAM_ID'] - camera_sn = image_hdul[0].header['CAM_SN'] + camera_id = image_hdul[0].header["CAM_ID"] + camera_sn = image_hdul[0].header["CAM_SN"] first_image = False - summed_image_exposure_time += image_hdul[0].header['IMG_EXP'] - summed_measured_exposure_time += image_hdul[0].header['MEAS_EXP'] - summed_fpga_temp += image_hdul[0].header['FPGATEMP'] - summed_fpga_vint += image_hdul[0].header['FPGAVINT'] - summed_fpga_vaux += image_hdul[0].header['FPGAVAUX'] - summed_fpga_vbrm += image_hdul[0].header['FPGAVBRM'] + summed_image_exposure_time += image_hdul[0].header["IMG_EXP"] + summed_measured_exposure_time += image_hdul[0].header["MEAS_EXP"] + summed_fpga_temp += image_hdul[0].header["FPGATEMP"] + summed_fpga_vint += image_hdul[0].header["FPGAVINT"] + summed_fpga_vaux += image_hdul[0].header["FPGAVAUX"] + summed_fpga_vbrm += image_hdul[0].header["FPGAVBRM"] for t in range(8): - summed_adc_temp[t] += image_hdul[0].header['ADCTEMP{}'.format(t + 1)] - img_seq_num_list.append(image_hdul[0].header['IMG_ISN']) + summed_adc_temp[t] += image_hdul[0].header[f"ADCTEMP{t+1}"] + img_seq_num_list.append(image_hdul[0].header["IMG_ISN"]) image = self.remove_bias(image_hdul[0].data.astype(np.float32)) image = self.remove_inactive_pixels(image) image_cube[index, :, :] = image # Despike. 
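The despike step that follows in the next hunk is a temporal sigma clip: along the frame axis, any sample above the per-pixel median plus sigma·std is set to NaN, and the master dark is the NaN-ignoring mean of what survives. A self-contained sketch of that step:

import numpy as np

def sigma_clipped_master_dark(image_cube, sigma):
    # image_cube: (n_frames, height, width) of bias-corrected dark frames.
    clipped = image_cube.astype(np.float32).copy()
    ceiling = np.nanmedian(clipped, axis=0) + sigma * np.nanstd(clipped, axis=0)
    clipped[clipped > ceiling] = np.nan  # drop per-pixel spikes from the stack
    return np.nanmean(clipped, axis=0)   # average the surviving samples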
- image_cube[np.where(image_cube > (np.nanmedian(image_cube, axis=0) + (sigma * np.nanstd(image_cube, axis=0))))] = np.nan + image_cube[ + np.where( + image_cube + > ( + np.nanmedian(image_cube, axis=0) + + (sigma * np.nanstd(image_cube, axis=0)) + ) + ) + ] = np.nan master_dark = np.nanmean(image_cube, axis=0) master_dark_header = fits.Header() - master_dark_header['CAM_ID'] = (camera_id, 'Camera ID') - master_dark_header['CAM_SN'] = (camera_sn, 'Camera Serial Number') - master_dark_header['IMG_EXP'] = (summed_image_exposure_time / number_images, 'Exposure (seconds)') - master_dark_header['MEAS_EXP'] = (summed_measured_exposure_time / number_images, 'Measured Exposure (seconds)') - master_dark_header['FPGATEMP'] = (summed_fpga_temp / number_images, 'FPGA Temperature (degC)') - master_dark_header['FPGAVINT'] = (summed_fpga_vint / number_images, 'FPGA VccInt Voltage (volts)') - master_dark_header['FPGAVAUX'] = (summed_fpga_vaux / number_images, 'FPGA VccAux (volts)') - master_dark_header['FPGAVBRM'] = (summed_fpga_vbrm / number_images, 'FPGA VccBram (volts)') + master_dark_header["CAM_ID"] = (camera_id, "Camera ID") + master_dark_header["CAM_SN"] = (camera_sn, "Camera Serial Number") + master_dark_header["IMG_EXP"] = ( + summed_image_exposure_time / number_images, + "Exposure (seconds)", + ) + master_dark_header["MEAS_EXP"] = ( + summed_measured_exposure_time / number_images, + "Measured Exposure (seconds)", + ) + master_dark_header["FPGATEMP"] = ( + summed_fpga_temp / number_images, + "FPGA Temperature (degC)", + ) + master_dark_header["FPGAVINT"] = ( + summed_fpga_vint / number_images, + "FPGA VccInt Voltage (volts)", + ) + master_dark_header["FPGAVAUX"] = ( + summed_fpga_vaux / number_images, + "FPGA VccAux (volts)", + ) + master_dark_header["FPGAVBRM"] = ( + summed_fpga_vbrm / number_images, + "FPGA VccBram (volts)", + ) for t in range(8): - master_dark_header.append(('ADCTEMP{}'.format(t + 1), summed_adc_temp[t] / number_images, 'ADC Tempature{} (degC'.format(t + 1)), end=True) - - master_dark_header['LEVEL'] = ('0.2', 'Data Product Level') - image_sequence_numbers = self.create_image_sequence_number_list(img_seq_num_list) - master_dark_header['IMGSEQNM'] = (image_sequence_numbers, 'Image Sequence Numbers') - master_dark_header['SIGMA'] = (sigma, 'Sigma') + master_dark_header.append( + ( + f"ADCTEMP{t+1}", + summed_adc_temp[t] / number_images, + f"ADC Temperature{t+1} (degC)", + ), + end=True, + ) + + master_dark_header["LEVEL"] = ("0.2", "Data Product Level") + image_sequence_numbers = self.create_image_sequence_number_list( + img_seq_num_list + ) + master_dark_header["IMGSEQNM"] = ( + image_sequence_numbers, + "Image Sequence Numbers", + ) + master_dark_header["SIGMA"] = (sigma, "Sigma") # Create output directory. os.makedirs(os.path.dirname(master_dark_file), exist_ok=True) - hdu = fits.PrimaryHDU(data = master_dark, header = master_dark_header) + hdu = fits.PrimaryHDU(data=master_dark, header=master_dark_header) hdulist = fits.HDUList([hdu]) hdulist.writeto(master_dark_file, overwrite=True) def create_level0_5_images(self, image_list: list, output_dir: str): - ''' + """ Creates Level 0.5 images with corrected timestamps. Level 0.5 header information is added. Parameters @@ -520,43 +926,78 @@ def create_level0_5_images(self, image_list: list, output_dir: str): ------- None.
- ''' + """ number_images = len(image_list) launch_year = datetime(2021, 1, 1, tzinfo=timezone.utc) - launch_date = dateutil.parser.isoparse('2021-07-30T18:22:21Z') + launch_date = dateutil.parser.isoparse("2021-07-30T18:22:21Z") exp_launch_time = launch_date - launch_year - t0_delta = (launch_t0 - timedelta(days=1).total_seconds()) - exp_launch_time.total_seconds() + t0_delta = ( + launch_t0 - timedelta(days=1).total_seconds() + ) - exp_launch_time.total_seconds() # Correct for launch signal trigger plus verification time. t0_delta = t0_delta - 1.1 for index in range(number_images): with fits.open(image_list[index]) as image_hdul: - image_date = dateutil.parser.isoparse(image_hdul[0].header['IMG_TS']) - measured_exposure = image_hdul[0].header['MEAS_EXP'] - image_hdul[0].header['LEVEL'] = ('0.5', 'Data Product Level') - image_hdul[0].header['EXPTIME'] = (measured_exposure, 'seconds') - date_obs_image_date = image_date + timedelta(seconds=t0_delta) - timedelta(seconds=measured_exposure) - image_hdul[0].header['DATE_OBS'] = (date_obs_image_date.isoformat(timespec='milliseconds').replace("+00:00", "Z"), 'Date Observation') - t_obs_image_date = date_obs_image_date + timedelta(seconds=(measured_exposure / 2.0)) - image_hdul[0].header['T_OBS'] = (t_obs_image_date.isoformat(timespec='milliseconds').replace("+00:00", "Z"), 'Telescope Observation') - image_hdul[0].header['TELESCOP'] = ('MaGIXS', 'Telescope') - image_hdul[0].header['INSTRUME'] = ('MaGIXS', 'Instrument') - image_hdul[0].header['IMGACQST'] = ('', 'Image Acquisition State') - image_hdul[0].header['CCDHLDR'] = (0.0, 'CCD Holder Temperature (degC)') - image_hdul[0].header['COLDBLOC'] =( 0.0, 'Cold Block Temperature (degC)') - image_hdul[0].header['UNITS'] = ('DN', 'Units') - - image_timestamp = date_obs_image_date.isoformat(timespec='milliseconds').replace("+00:00", "Z") + image_date = dateutil.parser.isoparse(image_hdul[0].header["IMG_TS"]) + measured_exposure = image_hdul[0].header["MEAS_EXP"] + image_hdul[0].header["LEVEL"] = ("0.5", "Data Product Level") + image_hdul[0].header["EXPTIME"] = (measured_exposure, "seconds") + date_obs_image_date = ( + image_date + + timedelta(seconds=t0_delta) + - timedelta(seconds=measured_exposure) + ) + image_hdul[0].header["DATE_OBS"] = ( + date_obs_image_date.isoformat(timespec="milliseconds").replace( + "+00:00", "Z" + ), + "Date Observation", + ) + t_obs_image_date = date_obs_image_date + timedelta( + seconds=(measured_exposure / 2.0) + ) + image_hdul[0].header["T_OBS"] = ( + t_obs_image_date.isoformat(timespec="milliseconds").replace( + "+00:00", "Z" + ), + "Telescope Observation", + ) + image_hdul[0].header["TELESCOP"] = ("MaGIXS", "Telescope") + image_hdul[0].header["INSTRUME"] = ("MaGIXS", "Instrument") + image_hdul[0].header["IMGACQST"] = ("", "Image Acquisition State") + image_hdul[0].header["CCDHLDR"] = (0.0, "CCD Holder Temperature (degC)") + image_hdul[0].header["COLDBLOC"] = ( + 0.0, + "Cold Block Temperature (degC)", + ) + image_hdul[0].header["UNITS"] = ("DN", "Units") + + image_timestamp = date_obs_image_date.isoformat( + timespec="milliseconds" + ).replace("+00:00", "Z") # Create output directory. 
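The timestamp arithmetic above is easier to follow in isolation: the raw camera timestamp is shifted by the launch-T0 offset, the exposure start becomes DATE_OBS, and T_OBS is the exposure midpoint. A sketch, taking t0_delta (including the 1.1 s trigger correction) as computed in the hunk above:

from datetime import timedelta

import dateutil.parser

def observation_times(img_ts, meas_exp, t0_delta):
    # img_ts: raw 'IMG_TS' ISO string; meas_exp: 'MEAS_EXP' in seconds;
    # t0_delta: launch-T0 correction in seconds.
    raw = dateutil.parser.isoparse(img_ts)
    date_obs = raw + timedelta(seconds=t0_delta) - timedelta(seconds=meas_exp)
    t_obs = date_obs + timedelta(seconds=meas_exp / 2.0)  # exposure midpoint
    return date_obs, t_obs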
os.makedirs(output_dir, exist_ok=True) - output_file = output_dir + 'magixs_L0.5_' + image_timestamp.replace(":", ".") + '.fits' - hdu = fits.PrimaryHDU(data = image_hdul[0].data, header = image_hdul[0].header) + output_file = ( + output_dir + + "magixs_L0.5_" + + image_timestamp.replace(":", ".") + + ".fits" + ) + hdu = fits.PrimaryHDU( + data=image_hdul[0].data, header=image_hdul[0].header + ) hdulist = fits.HDUList([hdu]) hdulist.writeto(output_file, overwrite=True) - def update_level0_5_image_acquisition_state(self, input_dir: str, image_sequence_number_list: list, image_acquisition_state: str): - ''' + def update_level0_5_image_acquisition_state( + self, + input_dir: str, + image_sequence_number_list: list, + image_acquisition_state: str, + ): + """ Updates Level 0.5 images with the Image Acquisition State. Parameters @@ -573,19 +1014,21 @@ def update_level0_5_image_acquisition_state(self, input_dir: str, image_sequence ------- None. - ''' + """ os.chdir(input_dir) for file in glob.glob("*.fits"): with fits.open(file) as image_hdul: - img_seq_num = image_hdul[0].header['IMG_ISN'] + img_seq_num = image_hdul[0].header["IMG_ISN"] if img_seq_num in image_sequence_number_list: - image_hdul[0].header['IMGACQST'] = image_acquisition_state - hdu = fits.PrimaryHDU(data = image_hdul[0].data, header = image_hdul[0].header) + image_hdul[0].header["IMGACQST"] = image_acquisition_state + hdu = fits.PrimaryHDU( + data=image_hdul[0].data, header=image_hdul[0].header + ) hdulist = fits.HDUList([hdu]) hdulist.writeto(file, overwrite=True) def update_level0_5_temperatures(self, input_dir: str, matlab_file: str): - ''' + """ Updates Level 0.5 CCD holder and cold block temperatures. Parameters @@ -593,22 +1036,22 @@ def update_level0_5_temperatures(self, input_dir: str, matlab_file: str): input_dir : str Directory of Level 0.5 images to update.. matlab_file : str - Matlab filename containing the tempertures. This is a NSROC file. + Matlab filename containing the temperatures. This is a NSROC file. Returns ------- None. 
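update_level0_5_image_acquisition_state above is a selective header rewrite keyed on IMG_ISN. A minimal equivalent, using an explicit directory join instead of the original's os.chdir (helper name is illustrative):

import glob
import os

from astropy.io import fits

def tag_acquisition_state(input_dir, sequence_numbers, state):
    # Rewrite IMGACQST only for files whose IMG_ISN is in sequence_numbers.
    for path in glob.glob(os.path.join(input_dir, "*.fits")):
        with fits.open(path) as hdul:
            if hdul[0].header["IMG_ISN"] in sequence_numbers:
                hdul[0].header["IMGACQST"] = state
                hdul.writeto(path, overwrite=True)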
- ''' + """ matlab_data = loadmat(matlab_file) - ccd_param = matlab_data['ccdt'] - ccd_param_time = matlab_data['ccdt_Time'] + ccd_param = matlab_data["ccdt"] + ccd_param_time = matlab_data["ccdt_Time"] ccd_param_time -= timedelta(days=1).total_seconds() ccd_param_cal = calc_ccd_holder_cal(ccd_param) ccd_param_cal = np.array(ccd_param_cal).flatten() ccd_param_time = np.array(ccd_param_time).flatten() - cold_block_param = matlab_data['cldblkt'] - cold_block_param_time = matlab_data['cldblkt_Time'] + cold_block_param = matlab_data["cldblkt"] + cold_block_param_time = matlab_data["cldblkt_Time"] cold_block_param_time -= timedelta(days=1).total_seconds() cold_block_param_cal = calc_cold_block_cal(cold_block_param) cold_block_param_cal = np.array(cold_block_param_cal).flatten() @@ -619,19 +1062,27 @@ def update_level0_5_temperatures(self, input_dir: str, matlab_file: str): os.chdir(input_dir) for file in glob.glob("*.fits"): with fits.open(file) as image_hdul: - image_date = dateutil.parser.isoparse(image_hdul[0].header['DATE_OBS']) + image_date = dateutil.parser.isoparse(image_hdul[0].header["DATE_OBS"]) image_delta = (image_date - launch_year).total_seconds() index = np.argmax(ccd_param_time > image_delta) - image_hdul[0].header['CCDHLDR'] = (ccd_param_cal[index], 'degC') + image_hdul[0].header["CCDHLDR"] = (ccd_param_cal[index], "degC") index = np.argmax(cold_block_param_time > image_delta) - image_hdul[0].header['COLDBLOC'] = (cold_block_param_cal[index], 'degC') - hdu = fits.PrimaryHDU(data = image_hdul[0].data, header = image_hdul[0].header) + image_hdul[0].header["COLDBLOC"] = (cold_block_param_cal[index], "degC") + hdu = fits.PrimaryHDU( + data=image_hdul[0].data, header=image_hdul[0].header + ) hdulist = fits.HDUList([hdu]) hdulist.writeto(file, overwrite=True) - #print(image_delta, ccd_param_cal[index], ccd_param_time[index], cold_block_param_cal[index], cold_block_param_time[index]) - def create_level1_0_images(self, image_list: list, pre_master_dark_file: str, post_master_dark_file: str, bad_pixel_mask_file: tp.Union[str, None], output_dir: str): - ''' + def create_level1_0_images( + self, + image_list: list, + pre_master_dark_file: str, + post_master_dark_file: str, + bad_pixel_mask_file: tp.Union[str, None], + output_dir: str, + ): + """ Creates Level 1.0 images. If bad pixels replaced, replaced values and their locations are stored as a table in the image FITS file. @@ -652,53 +1103,108 @@ def create_level1_0_images(self, image_list: list, pre_master_dark_file: str, po ------- None. 
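The temperature lookup above leans on a NumPy idiom: np.argmax over a boolean array returns the index of the first True, so argmax(sample_times > image_time) selects the first housekeeping sample recorded after the image. Note that it silently returns index 0 when no later sample exists. As a sketch:

import numpy as np

def first_sample_after(sample_times, sample_values, image_time):
    # sample_times: 1-D array of seconds since the launch year epoch.
    idx = np.argmax(sample_times > image_time)  # first True, or 0 if none
    return sample_values[idx]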
- ''' + """ number_images = len(image_list) with fits.open(pre_master_dark_file) as dark_hdul: pre_master_dark = dark_hdul[0].data.astype(np.float32).copy() - pre_dark_temp = dark_hdul[0].header['ADCTEMP6'] + pre_dark_temp = dark_hdul[0].header["ADCTEMP6"] with fits.open(post_master_dark_file) as dark_hdul: post_master_dark = dark_hdul[0].data.astype(np.float32).copy() - post_dark_temp = dark_hdul[0].header['ADCTEMP6'] + post_dark_temp = dark_hdul[0].header["ADCTEMP6"] if bad_pixel_mask_file is not None: with fits.open(bad_pixel_mask_file) as bad_pixel_mask_hdul: - bad_pixel_mask =bad_pixel_mask_hdul[0].data.astype(np.float32).copy() + bad_pixel_mask = bad_pixel_mask_hdul[0].data.astype(np.float32).copy() else: bad_pixel_mask = None - despike_table_hdu = fits.BinTableHDU.from_columns([fits.Column(name='y', format='J'), - fits.Column(name='x', format='J'), - fits.Column(name='value', format='E')]) - wcs_table_hdu = fits.BinTableHDU.from_columns([fits.Column(name='wavelength' , format='E'), - fits.Column(name='plate_scale_x', format='E'), - fits.Column(name='pixel_x', format='E'), - fits.Column(name='solar_coord_x', format='E'), - fits.Column(name='plate_scale_y' , format='E'), - fits.Column(name='pixel_y', format='E'), - fits.Column(name='solar_coord_y', format='E'), - fits.Column(name='roll' , format='E')]) + despike_table_hdu = fits.BinTableHDU.from_columns( + [ + fits.Column(name="y", format="J"), + fits.Column(name="x", format="J"), + fits.Column(name="value", format="E"), + ] + ) + wcs_table_hdu = fits.BinTableHDU.from_columns( + [ + fits.Column(name="wavelength", format="E"), + fits.Column(name="plate_scale_x", format="E"), + fits.Column(name="pixel_x", format="E"), + fits.Column(name="solar_coord_x", format="E"), + fits.Column(name="plate_scale_y", format="E"), + fits.Column(name="pixel_y", format="E"), + fits.Column(name="solar_coord_y", format="E"), + fits.Column(name="roll", format="E"), + ] + ) for index in range(number_images): with fits.open(image_list[index]) as image_hdul: - image, image_header, bad_pixels_replaced_values = self.create_adjusted_light(image_hdul[0].data, image_hdul[0].header, pre_master_dark, pre_dark_temp, post_master_dark, post_dark_temp, bad_pixel_mask) - image_header['PREMDARK'] = (os.path.basename(pre_master_dark_file), 'Pre Master Dark') - image_header['PSTMDARK'] = (os.path.basename(post_master_dark_file), 'Post Master Dark') - image_header['INTPTEMP'] = ('ADCTEMP6', 'Interpolation Temperature') + ( + image, + image_header, + bad_pixels_replaced_values, + ) = self.create_adjusted_light( + image_hdul[0].data, + image_hdul[0].header, + pre_master_dark, + pre_dark_temp, + post_master_dark, + post_dark_temp, + bad_pixel_mask, + ) + image_header["PREMDARK"] = ( + os.path.basename(pre_master_dark_file), + "Pre Master Dark", + ) + image_header["PSTMDARK"] = ( + os.path.basename(post_master_dark_file), + "Post Master Dark", + ) + image_header["INTPTEMP"] = ("ADCTEMP6", "Interpolation Temperature") # Create output directory. 
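As assembled in this function, a Level 1.0 file carries four HDUs: the adjusted image, a table of replaced bad pixels, an initially empty despike table, and a WCS table. A sketch of that layout (the bad-pixel table HDU is assumed to be built elsewhere, as in the code above):

from astropy.io import fits

def build_level1_0_hdulist(image, header, bad_pixel_table_hdu):
    despike_hdu = fits.BinTableHDU.from_columns(
        [fits.Column(name="y", format="J"),
         fits.Column(name="x", format="J"),
         fits.Column(name="value", format="E")])
    wcs_hdu = fits.BinTableHDU.from_columns(
        [fits.Column(name=name, format="E")
         for name in ("wavelength", "plate_scale_x", "pixel_x", "solar_coord_x",
                      "plate_scale_y", "pixel_y", "solar_coord_y", "roll")])
    primary = fits.PrimaryHDU(data=image, header=header)
    return fits.HDUList([primary, bad_pixel_table_hdu, despike_hdu, wcs_hdu])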
os.makedirs(output_dir, exist_ok=True) - image_date = dateutil.parser.isoparse(image_hdul[0].header['DATE_OBS']) - image_timestamp = image_date.isoformat(timespec='milliseconds').replace("+00:00", "Z") - output_file = output_dir + 'magixs_L1.0_' + image_timestamp.replace(":", ".") + '.fits' - fits_hdu = fits.PrimaryHDU(data = image, header = image_header) - bad_pixels_replaced_dict = {'y':np.int32, 'x':np.int32, 'value':np.float32} - bad_pixels_replaced_values = bad_pixels_replaced_values.astype(bad_pixels_replaced_dict) + image_date = dateutil.parser.isoparse(image_hdul[0].header["DATE_OBS"]) + image_timestamp = image_date.isoformat(timespec="milliseconds").replace( + "+00:00", "Z" + ) + output_file = ( + output_dir + + "magixs_L1.0_" + + image_timestamp.replace(":", ".") + + ".fits" + ) + fits_hdu = fits.PrimaryHDU(data=image, header=image_header) + bad_pixels_replaced_dict = { + "y": np.int32, + "x": np.int32, + "value": np.float32, + } + bad_pixels_replaced_values = bad_pixels_replaced_values.astype( + bad_pixels_replaced_dict + ) bad_pixels_table = Table.from_pandas(bad_pixels_replaced_values) - hdu_list = fits.HDUList([fits_hdu, fits.table_to_hdu(bad_pixels_table), despike_table_hdu, wcs_table_hdu]) + hdu_list = fits.HDUList( + [ + fits_hdu, + fits.table_to_hdu(bad_pixels_table), + despike_table_hdu, + wcs_table_hdu, + ] + ) hdu_list.writeto(output_file, overwrite=True) - def create_adjusted_light(self, image: np.ndarray, level0_header: astropy.io.fits.header.Header, pre_master_dark: np.ndarray, pre_master_dark_temp: np.float32, post_master_dark: np.ndarray, post_master_dark_temp: np.float32, bad_pixel_mask: tp.Union[np.ndarray, None]=None) -> [np.ndarray, astropy.io.fits.header.Header, pd.DataFrame]: - ''' + def create_adjusted_light( + self, + image: np.ndarray, + level0_header: astropy.io.fits.header.Header, + pre_master_dark: np.ndarray, + pre_master_dark_temp: np.float32, + post_master_dark: np.ndarray, + post_master_dark_temp: np.float32, + bad_pixel_mask: tp.Union[np.ndarray, None] = None, + ) -> [np.ndarray, astropy.io.fits.header.Header, pd.DataFrame]: + """ Creates an adjusted light image. The bias is removed, the inactive pixels are removed, the master dark is removed, the gain is adjusted, and optionally the bad pixels are replaced. The master dark is removed using a @@ -727,63 +1233,70 @@ def create_adjusted_light(self, image: np.ndarray, level0_header: astropy.io.fit bad_pixels_replaced_values: pd.DataFrame Bad pixel replaced values (y, x, value). 
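The dark subtraction in create_adjusted_light, shown in the next hunk, interpolates linearly in CCD temperature between the pre- and post-flight master darks: scale = (T_image - T_pre) / (T_post - T_pre), then dark = pre + scale * (post - pre). Isolated as a sketch (the scale is not clamped, so it extrapolates outside the two calibration temperatures):

import numpy as np

def scaled_master_dark(pre_dark, post_dark, pre_temp, post_temp, image_temp):
    # Linear interpolation in temperature between two master darks.
    scale = (image_temp - pre_temp) / (post_temp - pre_temp)
    return pre_dark.astype(np.float32) + scale * (
        post_dark.astype(np.float32) - pre_dark.astype(np.float32))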
- ''' + """ image_height, image_width = np.shape(image) - adjusted_image = np.zeros((self.active_pixels_height, self.active_pixels_width), dtype=np.float32) + adjusted_image = np.zeros( + (self.active_pixels_height, self.active_pixels_width), dtype=np.float32 + ) - image_temp = level0_header['ADCTEMP6'] + image_temp = level0_header["ADCTEMP6"] level1_header = level0_header - level1_header['LEVEL'] = '1.0' + level1_header["LEVEL"] = "1.0" adjusted_image = self.remove_bias(image.astype(np.float32)) - level1_header['DEBIASED'] = (1, 'Bias Removed') + level1_header["DEBIASED"] = (1, "Bias Removed") adjusted_image = self.remove_inactive_pixels(adjusted_image) - #print("pre", adjusted_image[544, 1517]) - # if image_temp < pre_master_dark_temp: - # master_dark_scale = 0.0 - # elif image_temp > post_master_dark_temp: - # master_dark_scale = 1.0 - # else: - # master_dark_scale = 1.0 - ((post_master_dark_temp - image_temp)/(post_master_dark_temp - pre_master_dark_temp)) - #master_dark_scale = 1.0 - ((post_master_dark_temp - image_temp)/(post_master_dark_temp - pre_master_dark_temp)) - master_dark_scale = (image_temp - pre_master_dark_temp )/(post_master_dark_temp - pre_master_dark_temp) - #print(master_dark_scale) - scaled_master_dark = pre_master_dark.astype(np.float32) + (master_dark_scale * (post_master_dark.astype(np.float32) - pre_master_dark.astype(np.float32))) + master_dark_scale = (image_temp - pre_master_dark_temp) / ( + post_master_dark_temp - pre_master_dark_temp + ) + # print(master_dark_scale) + scaled_master_dark = pre_master_dark.astype(np.float32) + ( + master_dark_scale + * (post_master_dark.astype(np.float32) - pre_master_dark.astype(np.float32)) + ) adjusted_image -= scaled_master_dark.astype(np.float32) - #print("post", adjusted_image[544, 1517]) - level1_header['DEDARKED'] = (1, 'Dark Removed') + # print("post", adjusted_image[544, 1517]) + level1_header["DEDARKED"] = (1, "Dark Removed") adjusted_image = self.adjust_gain(adjusted_image) - level1_header['GAINADJ'] = (1, 'Gain Adjusted') + level1_header["GAINADJ"] = (1, "Gain Adjusted") if bad_pixel_mask is not None: - adjust_image, bad_pixels_replaced_values = self.replace_bad_pixels(adjusted_image, bad_pixel_mask) - #print("bad pixels replaced values =", bad_pixels_replaced_values) - level1_header['BDPIXRPL'] = (1, 'Bad Pixels Replaced') + adjusted_image, bad_pixels_replaced_values = self.replace_bad_pixels( + adjusted_image, bad_pixel_mask + ) + # print("bad pixels replaced values =", bad_pixels_replaced_values) + level1_header["BDPIXRPL"] = (1, "Bad Pixels Replaced") else: - bad_pixels_replaced_values = pd.DataFrame(columns=['y', 'x', 'value']) - level1_header['BDPIXRPL'] = (0, 'Bad Pixels Replaced') - #print("final", adjusted_image[544, 1517]) - #print("") + bad_pixels_replaced_values = pd.DataFrame(columns=["y", "x", "value"]) + level1_header["BDPIXRPL"] = (0, "Bad Pixels Replaced") + # print("final", adjusted_image[544, 1517]) + # print("") - level1_header['DESPIKED'] = (0, 'Despiked') - level1_header['ABSORADJ'] = (0, 'Absorption Adjusted') + level1_header["DESPIKED"] = (0, "Despiked") + level1_header["ABSORADJ"] = (0, "Absorption Adjusted") - level1_header['UNITS'] = ('Electrons', 'Units') + level1_header["UNITS"] = ("Electrons", "Units") - level1_header['BIAS1'] = (self.ll_bias, 'Bias Tap E (Lower Left)') - level1_header['BIAS2'] = (self.lr_bias, 'Bias Tap F (Lower Right)') - level1_header['BIAS3'] = (self.ur_bias, 'Bias Tap G (Upper Right)') - level1_header['BIAS4'] = (self.ul_bias, 'Bias Tap H (Upper Left)') -
level1_header['GAIN1'] = (self.ll_gain, 'Gain Tap E (Lower Left)') - level1_header['GAIN2'] = (self.lr_gain, 'Gain Tap F (Lower Right)') - level1_header['GAIN3'] = (self.ur_gain, 'Gain Tap G (Upper Right)') - level1_header['GAIN4'] = (self.ul_gain, 'Gain Tap H (Upper Left)') + level1_header["BIAS1"] = (self.ll_bias, "Bias Tap E (Lower Left)") + level1_header["BIAS2"] = (self.lr_bias, "Bias Tap F (Lower Right)") + level1_header["BIAS3"] = (self.ur_bias, "Bias Tap G (Upper Right)") + level1_header["BIAS4"] = (self.ul_bias, "Bias Tap H (Upper Left)") + level1_header["GAIN1"] = (self.ll_gain, "Gain Tap E (Lower Left)") + level1_header["GAIN2"] = (self.lr_gain, "Gain Tap F (Lower Right)") + level1_header["GAIN3"] = (self.ur_gain, "Gain Tap G (Upper Right)") + level1_header["GAIN4"] = (self.ul_gain, "Gain Tap H (Upper Left)") return adjusted_image, level1_header, bad_pixels_replaced_values - def create_level1_5_images(self, image_list: list, despike_areas: list, despike_sigma: np.float32, output_dir: str): - ''' + def create_level1_5_images( + self, + image_list: list, + despike_areas: list, + despike_sigma: np.float32, + output_dir: str, + ): + """ Creates Level 1.5 images that are ready for inversion. Parameters @@ -801,30 +1314,54 @@ def create_level1_5_images(self, image_list: list, despike_areas: list, despike_ ------- None. - ''' + """ number_images = len(image_list) for index in range(number_images): with fits.open(image_list[index]) as image_hdul: - image, image_header, spike_replaced_values = self.create_inversion_ready_light_with_mask(image_hdul[0].data, image_hdul[0].header, despike_areas, despike_sigma) + ( + image, + image_header, + spike_replaced_values, + ) = self.create_inversion_ready_light_with_mask( + image_hdul[0].data, + image_hdul[0].header, + despike_areas, + despike_sigma, + ) # Create output directory. 
os.makedirs(output_dir, exist_ok=True) - image_date = dateutil.parser.isoparse(image_hdul[0].header['DATE_OBS']) - image_timestamp = image_date.isoformat(timespec='milliseconds').replace("+00:00", "Z") - output_file = output_dir + 'magixs_L1.5_' + image_timestamp.replace(":", ".") + '.fits' + image_date = dateutil.parser.isoparse(image_hdul[0].header["DATE_OBS"]) + image_timestamp = image_date.isoformat(timespec="milliseconds").replace( + "+00:00", "Z" + ) + output_file = ( + output_dir + + "magixs_L1.5_" + + image_timestamp.replace(":", ".") + + ".fits" + ) image_hdul[0].data = image image_hdul[0].header = image_header - spike_dict = {'y':np.int32, 'x':np.int32, 'value':np.float32} + spike_dict = {"y": np.int32, "x": np.int32, "value": np.float32} spike_replaced_values = spike_replaced_values.astype(spike_dict) - #print(spike_replaced_values) + # print(spike_replaced_values) spike_table = Table.from_pandas(spike_replaced_values) spike_table_hdu = fits.table_to_hdu(spike_table) image_hdul[2].data = spike_table_hdu.data image_hdul[2].header = spike_table_hdu.header image_hdul.writeto(output_file, overwrite=True) - def despike_pixel_with_mask(self, despiked_image: np.ndarray, y_pixel: np.float32, x_pixel: np.float32, despike_threshold: np.float32, despike_sigma: np.float32, spikes: list) -> [np.ndarray, pd.DataFrame]: - ''' + def despike_pixel_with_mask( + self, + despiked_image: np.ndarray, + y_pixel: np.float32, + x_pixel: np.float32, + despike_threshold: np.float32, + despike_sigma: np.float32, + spikes: list, + ) -> [np.ndarray, pd.DataFrame]: + """ Parameters @@ -849,10 +1386,10 @@ def despike_pixel_with_mask(self, despiked_image: np.ndarray, y_pixel: np.float3 spike_replaced_values : TYPE DESCRIPTION. - ''' - #print("despiking pixel (", y_pixel, ", ", x_pixel, ")") + """ + # print("despiking pixel (", y_pixel, ", ", x_pixel, ")") - spike_replaced_values = pd.DataFrame(columns=['y', 'x', 'value']) + spike_replaced_values = pd.DataFrame(columns=["y", "x", "value"]) surrounding_pixels_values = [] surrounding_pixels_list = [] @@ -862,19 +1399,21 @@ def despike_pixel_with_mask(self, despiked_image: np.ndarray, y_pixel: np.float3 for index, delta in enumerate(pixel_16_adjacent_deltas): y = y_pixel + delta[0] x = x_pixel + delta[1] - #print("y, x", y, x) + # print("y, x", y, x) # If the first pixel is greater than threshold, skip last. if (index == (len(pixel_16_adjacent_deltas) - 1)) and skip_last: continue if skip: # If adjacent pixels are greater than threshold, skip again. 
- if (y,x) not in spikes: - #if (despiked_image[y, x] < despike_threshold): + if (y, x) not in spikes: + # if (despiked_image[y, x] < despike_threshold): skip = False continue - if (y >= 0 and y < self.active_pixels_height) and (x >= 0 and x < self.active_pixels_width): - if (y,x) not in spikes and despiked_image[(y, x)] is not ma.masked: - #if (despiked_image[y, x] < despike_threshold): + if (y >= 0 and y < self.active_pixels_height) and ( + x >= 0 and x < self.active_pixels_width + ): + if (y, x) not in spikes and despiked_image[(y, x)] is not ma.masked: + # if (despiked_image[y, x] < despike_threshold): surrounding_pixels_values += [despiked_image[y, x]] surrounding_pixels_list += [(y, x)] else: @@ -888,57 +1427,103 @@ def despike_pixel_with_mask(self, despiked_image: np.ndarray, y_pixel: np.float3 for index, delta in enumerate(pixel_8_adjacent_deltas): y = y_pixel + delta[0] x = x_pixel + delta[1] - if (y,x) in spikes: + if (y, x) in spikes: for i in range(index * 2, (index * 2) + 3): try: list_y = y_pixel + pixel_16_adjacent_wrap_deltas[i][0] list_x = x_pixel + pixel_16_adjacent_wrap_deltas[i][1] list_index = surrounding_pixels_list.index((list_y, list_x)) del surrounding_pixels_list[list_index] - except: + except: # noqa: E722 # TODO figure out what exception was expected pass - #if len(surrounding_pixels_values) > 1: + # if len(surrounding_pixels_values) > 1: if len(surrounding_pixels_list) > 1: surrounding_pixels_values = [] for coords in surrounding_pixels_list: surrounding_pixels_values += [despiked_image[coords]] - if despiked_image[y_pixel, x_pixel] > np.median(surrounding_pixels_values) + (despike_sigma * np.std(surrounding_pixels_values)): - #despiked_image[y_pixel, x_pixel] = np.mean(surrounding_pixels_values) - spike_replaced_values.loc[len(spike_replaced_values.index)] = [y_pixel, x_pixel, despiked_image[y_pixel, x_pixel]] - #print(y_pixel, x_pixel, despiked_image[y_pixel, x_pixel]) + if despiked_image[y_pixel, x_pixel] > np.median( + surrounding_pixels_values + ) + (despike_sigma * np.std(surrounding_pixels_values)): + # despiked_image[y_pixel, x_pixel] = np.mean(surrounding_pixels_values) + spike_replaced_values.loc[len(spike_replaced_values.index)] = [ + y_pixel, + x_pixel, + despiked_image[y_pixel, x_pixel], + ] + # print(y_pixel, x_pixel, despiked_image[y_pixel, x_pixel]) despiked_image[y_pixel, x_pixel] = np.median(surrounding_pixels_values) - #print("despike_pixel =", np.mean(surrounding_pixels_values)) - #print("despike_pixel =", np.median(surrounding_pixels_values)) + # print("despike_pixel =", np.mean(surrounding_pixels_values)) + # print("despike_pixel =", np.median(surrounding_pixels_values)) for delta in pixel_8_deltas: - #for delta in pixel_16_deltas: + # for delta in pixel_16_deltas: y = y_pixel + delta[0] x = x_pixel + delta[1] - if (y >= 0 and y < self.active_pixels_height) and (x >= 0 and x < self.active_pixels_width) and (despiked_image[y, x] < despike_threshold) and despiked_image[(y, x)] is not ma.masked: - despiked_image, area_spike_values_replaced = self.despike_pixel_with_mask(despiked_image, y, x, despike_threshold, despike_sigma, spikes) - spike_replaced_values = spike_replaced_values.append(area_spike_values_replaced, ignore_index=True) + if ( + (y >= 0 and y < self.active_pixels_height) + and (x >= 0 and x < self.active_pixels_width) + and (despiked_image[y, x] < despike_threshold) + and despiked_image[(y, x)] is not ma.masked + ): + ( + despiked_image, + area_spike_values_replaced, + ) = self.despike_pixel_with_mask( + despiked_image, + y, + x, 
+ despike_threshold, + despike_sigma, + spikes, + ) + spike_replaced_values = spike_replaced_values.append( + area_spike_values_replaced, ignore_index=True + ) elif len(surrounding_pixels_list) == 1: - if despiked_image[y_pixel, x_pixel] > despiked_image[surrounding_pixels_list[0]]: - spike_replaced_values.loc[len(spike_replaced_values.index)] = [y_pixel, x_pixel, despiked_image[y_pixel, x_pixel]] - #print(y_pixel, x_pixel, despiked_image[y_pixel, x_pixel]) - despiked_image[y_pixel, x_pixel] = despiked_image[surrounding_pixels_list[0]] + if ( + despiked_image[y_pixel, x_pixel] + > despiked_image[surrounding_pixels_list[0]] + ): + spike_replaced_values.loc[len(spike_replaced_values.index)] = [ + y_pixel, + x_pixel, + despiked_image[y_pixel, x_pixel], + ] + # print(y_pixel, x_pixel, despiked_image[y_pixel, x_pixel]) + despiked_image[y_pixel, x_pixel] = despiked_image[ + surrounding_pixels_list[0] + ] else: surrounding_pixels_values = [] for index, delta in enumerate(pixel_16_adjacent_deltas): y = y_pixel + delta[0] x = x_pixel + delta[1] - if (y >= 0 and y < self.active_pixels_height) and (x >= 0 and x < self.active_pixels_width) and (despiked_image[y, x] < despike_threshold) and despiked_image[(y, x)] is not ma.masked: + if ( + (y >= 0 and y < self.active_pixels_height) + and (x >= 0 and x < self.active_pixels_width) + and (despiked_image[y, x] < despike_threshold) + and despiked_image[(y, x)] is not ma.masked + ): surrounding_pixels_values += [despiked_image[y, x]] if len(surrounding_pixels_values) > 0: - spike_replaced_values.loc[len(spike_replaced_values.index)] = [y_pixel, x_pixel, despiked_image[y_pixel, x_pixel]] - #print(y_pixel, x_pixel, despiked_image[y_pixel, x_pixel]) + spike_replaced_values.loc[len(spike_replaced_values.index)] = [ + y_pixel, + x_pixel, + despiked_image[y_pixel, x_pixel], + ] + # print(y_pixel, x_pixel, despiked_image[y_pixel, x_pixel]) despiked_image[y_pixel, x_pixel] = np.median(surrounding_pixels_values) return despiked_image, spike_replaced_values - def despike_with_mask(self, image: ma.masked_array, despike_threshold: np.float32, despike_sigma: np.float32) -> [np.ndarray, pd.DataFrame]: - ''' + def despike_with_mask( + self, + image: ma.masked_array, + despike_threshold: np.float32, + despike_sigma: np.float32, + ) -> [np.ndarray, pd.DataFrame]: + """ Parameters @@ -957,29 +1542,44 @@ def despike_with_mask(self, image: ma.masked_array, despike_threshold: np.float3 spike_replaced_values : TYPE DESCRIPTION. 
- ''' + """ despiked_image = image height, width = np.shape(image) - #print("image h,w", height, width) - spike_replaced_values = pd.DataFrame(columns=['y', 'x', 'value']) + # print("image h,w", height, width) + spike_replaced_values = pd.DataFrame(columns=["y", "x", "value"]) spikes = list(zip(*np.ma.where(despiked_image >= despike_threshold))) original_spikes = spikes.copy() spike_values = [despiked_image[i] for i in spikes] - #print("Number of spikes =", len(spike_values)) + # print("Number of spikes =", len(spike_values)) while spike_values: min_spike_index = np.argmin(spike_values) y_pixel = spikes[min_spike_index][0] x_pixel = spikes[min_spike_index][1] - #print("despiking pixel (", y_pixel, ", ", x_pixel, "), value", spike_values[min_spike_index]) - despiked_image, area_spike_replaced_values = self.despike_pixel_with_mask(despiked_image, y_pixel, x_pixel, despike_threshold, despike_sigma, original_spikes) - spike_replaced_values = spike_replaced_values.append(area_spike_replaced_values, ignore_index=True) + # print("despiking pixel (", y_pixel, ", ", x_pixel, "), value", spike_values[min_spike_index]) + despiked_image, area_spike_replaced_values = self.despike_pixel_with_mask( + despiked_image, + y_pixel, + x_pixel, + despike_threshold, + despike_sigma, + original_spikes, + ) + spike_replaced_values = spike_replaced_values.append( + area_spike_replaced_values, ignore_index=True + ) del spikes[min_spike_index] del spike_values[min_spike_index] return despiked_image.data, spike_replaced_values - def create_inversion_ready_light_with_mask(self, image: np.ndarray, level1_header: astropy.io.fits.header.Header, despike_areas: list, despike_sigma: np.float32) -> [np.ndarray, astropy.io.fits.header.Header, pd.DataFrame]: - ''' + def create_inversion_ready_light_with_mask( + self, + image: np.ndarray, + level1_header: astropy.io.fits.header.Header, + despike_areas: list, + despike_sigma: np.float32, + ) -> [np.ndarray, astropy.io.fits.header.Header, pd.DataFrame]: + """ Parameters @@ -1002,17 +1602,21 @@ def create_inversion_ready_light_with_mask(self, image: np.ndarray, level1_heade spike_replaced_values : TYPE DESCRIPTION. 
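Summarizing the despike driver above: spikes are all pixels at or above despike_threshold, they are processed from the smallest spike value upward, and each is replaced with the median of its valid (unmasked, non-spike) neighbours when it exceeds median + sigma·std of that neighbourhood. A deliberately simplified sketch using a 3x3 neighbourhood in place of the 16-pixel bookkeeping in the real code:

import numpy as np
import numpy.ma as ma

def despike_simple(image, threshold, sigma):
    # image: ma.masked_array; masked pixels are excluded from despiking.
    out = image.copy()
    for y, x in zip(*ma.where(out >= threshold)):
        ys = slice(max(y - 1, 0), y + 2)
        xs = slice(max(x - 1, 0), x + 2)
        neighbours = out[ys, xs].compressed()            # unmasked neighbours
        neighbours = neighbours[neighbours < threshold]  # exclude other spikes
        if neighbours.size and out[y, x] > np.median(neighbours) + sigma * np.std(neighbours):
            out[y, x] = np.median(neighbours)
    return out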
- ''' + """ image_height, image_width = np.shape(image) - inversion_ready_image = np.zeros((self.active_pixels_height, self.active_pixels_width), dtype=np.float32) - spike_replaced_values = pd.DataFrame(columns=['y', 'x', 'value']) - mask = np.zeros((self.active_pixels_height, self.active_pixels_width), dtype=np.bool) + inversion_ready_image = np.zeros( + (self.active_pixels_height, self.active_pixels_width), dtype=np.float32 + ) + spike_replaced_values = pd.DataFrame(columns=["y", "x", "value"]) + mask = np.zeros( + (self.active_pixels_height, self.active_pixels_width), dtype=bool + ) inversion_ready_image = image.astype(np.float32) level1_5_header = level1_header - level1_5_header['LEVEL'] = ('1.5', 'Data Product Level') - level1_header['DESPIKED'] = (1, 'Despiked') + level1_5_header["LEVEL"] = ("1.5", "Data Product Level") + level1_header["DESPIKED"] = (1, "Despiked") for despike_area in despike_areas: mask[:, :] = True @@ -1020,49 +1624,76 @@ def create_inversion_ready_light_with_mask(self, image: np.ndarray, level1_heade for area in despike_area[0]: y0 = area[0][0] y1 = area[0][1] - assert(y1 > y0 and y0 >= 0 and y1 <= self.active_pixels_height) x0 = area[1][0] x1 = area[1][1] - assert(x1 > x0 and x0 >= 0 and x1 <= self.active_pixels_width) - #print(y0, y1, x0, x1, despike_threshold) mask[y0:y1, x0:x1] = False - masked_image = ma.masked_array(inversion_ready_image, mask=mask, fill_value = np.nan) - inversion_ready_image, area_spike_replaced_values = self.despike_with_mask(masked_image, despike_threshold, despike_sigma) + masked_image = ma.masked_array( + inversion_ready_image, mask=mask, fill_value=np.nan + ) + inversion_ready_image, area_spike_replaced_values = self.despike_with_mask( + masked_image, despike_threshold, despike_sigma + ) - spike_replaced_values = spike_replaced_values.append(area_spike_replaced_values, ignore_index=True) + spike_replaced_values = spike_replaced_values.append( + area_spike_replaced_values, ignore_index=True + ) - return inversion_ready_image, level1_5_header, spike_replaced_values + return inversion_ready_image, level1_5_header, spike_replaced_values - def update_level1_pointing(self, image_list: list, level1_wcs_table: str, solar_fov_coords: str): + def update_level1_pointing( + self, image_list: list, level1_wcs_table: str, solar_fov_coords: str ): number_images = len(image_list) # Read Level 1 WCS table information. wcs_table = pd.read_excel(level1_wcs_table, index_col=None) rows, columns = wcs_table.shape - assert rows >= 1 and columns == 8 and wcs_table.columns[0] == 'wavelength' # Read solar FOV coordinates.
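In create_inversion_ready_light_with_mask above, each despike area is handled by inverting a mask: the whole frame starts masked (True), the area rectangles are opened (False), and despiking then runs only inside the open window. A sketch of that masking pattern, with the area structure simplified to plain rectangles:

import numpy as np
import numpy.ma as ma

def mask_for_areas(shape, areas):
    # areas: list of ((y0, y1), (x0, x1)) rectangles to despike.
    mask = np.ones(shape, dtype=bool)  # True = excluded from despiking
    for (y0, y1), (x0, x1) in areas:
        mask[y0:y1, x0:x1] = False     # open the despike window
    return mask

# usage: masked = ma.masked_array(image, mask=mask_for_areas(image.shape, areas))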
fov_table = pd.read_excel(solar_fov_coords, index_col=None) rows, columns = fov_table.shape - assert rows == 1 and columns == 8 and fov_table.columns[0] == 'lower_left_x' for index in range(number_images): with fits.open(image_list[index]) as image_hdul: try: - data_product_level = image_hdul[0].header['LEVEL'] + data_product_level = image_hdul[0].header["LEVEL"] data_product_level.rstrip() except KeyError: - data_product_level = '' - assert data_product_level == '1.0' or data_product_level == '1.5' - image_hdul[0].header['SFOVLLX'] = (fov_table.values[0, 0], 'Slot FOV Lower Left X') - image_hdul[0].header['SFOVLLY'] = (fov_table.values[0, 1], 'Slot FOV Lower Left Y') - image_hdul[0].header['SFOVULX'] = (fov_table.values[0, 2], 'Slot FOV Upper Left X') - image_hdul[0].header['SFOVULY'] = (fov_table.values[0, 3], 'Slot FOV Upper Left Y') - image_hdul[0].header['SFOVLRX'] = (fov_table.values[0, 4], 'Slot FOV Lower Right X') - image_hdul[0].header['SFOVLRY'] = (fov_table.values[0, 5], 'Slot FOV Lower Right Y') - image_hdul[0].header['SFOVURX'] = (fov_table.values[0, 6], 'Slot FOV Upper Right X') - image_hdul[0].header['SFOVURY'] = (fov_table.values[0, 7], 'Slot FOV Upper Right Y') - image_hdul[0].header['ROLL'] = (wcs_table.values[0, 7], 'degrees') + data_product_level = "" + assert data_product_level == "1.0" or data_product_level == "1.5" + image_hdul[0].header["SFOVLLX"] = ( + fov_table.values[0, 0], + "Slot FOV Lower Left X", + ) + image_hdul[0].header["SFOVLLY"] = ( + fov_table.values[0, 1], + "Slot FOV Lower Left Y", + ) + image_hdul[0].header["SFOVULX"] = ( + fov_table.values[0, 2], + "Slot FOV Upper Left X", + ) + image_hdul[0].header["SFOVULY"] = ( + fov_table.values[0, 3], + "Slot FOV Upper Left Y", + ) + image_hdul[0].header["SFOVLRX"] = ( + fov_table.values[0, 4], + "Slot FOV Lower Right X", + ) + image_hdul[0].header["SFOVLRY"] = ( + fov_table.values[0, 5], + "Slot FOV Lower Right Y", + ) + image_hdul[0].header["SFOVURX"] = ( + fov_table.values[0, 6], + "Slot FOV Upper Right X", + ) + image_hdul[0].header["SFOVURY"] = ( + fov_table.values[0, 7], + "Slot FOV Upper Right Y", + ) + image_hdul[0].header["ROLL"] = (wcs_table.values[0, 7], "degrees") pointing_table = Table.from_pandas(wcs_table) pointing_table_hdu = fits.table_to_hdu(pointing_table) image_hdul[3].data = pointing_table_hdu.data @@ -1070,8 +1701,10 @@ def update_level1_pointing(self, image_list: list, level1_wcs_table: str, solar_ image_hdul.writeto(image_list[index], overwrite=True) - def update_sun_radius(self, image_list: list, sun_radius_observed: np.float32, sun_radius: np.float32): - ''' + def update_sun_radius( + self, image_list: list, sun_radius_observed: np.float32, sun_radius: np.float32 + ): + """ Updates the sun radius information in the primary header. Parameters @@ -1087,17 +1720,17 @@ def update_sun_radius(self, image_list: list, sun_radius_observed: np.float32, s ------- None. - ''' + """ number_images = len(image_list) for index in range(number_images): with fits.open(image_list[index]) as image_hdul: - image_hdul[0].header['RSUN_OBS'] = (sun_radius_observed, 'arcsecs') - image_hdul[0].header['R_SUN'] = (sun_radius, 'arcsecs') + image_hdul[0].header["RSUN_OBS"] = (sun_radius_observed, "arcsecs") + image_hdul[0].header["R_SUN"] = (sun_radius, "arcsecs") image_hdul.writeto(image_list[index], overwrite=True) def create_level2_1_summed_image(self, image_list: list, output_dir: str): - ''' + """ Creates a Level 1.5 summed image for a Level 2.1 inversion. All light images are summed and normalized. 
@@ -1112,82 +1745,148 @@ def create_level2_1_summed_image(self, image_list: list, output_dir: str): ------- None. - ''' + """ # Create output directory. os.makedirs(output_dir, exist_ok=True) - #image_list.sort() - #print(image_list) + # image_list.sort() + # print(image_list) light_summed_image = np.zeros((1024, 2048), dtype=np.float64) summed_image_exposure_time = 0.0 - spike_replaced_values = pd.DataFrame(columns=['y', 'x', 'value']) + spike_replaced_values = pd.DataFrame(columns=["y", "x", "value"]) level2_0_header = fits.Header() - level2_0_header['LEVEL'] = '1.5' + level2_0_header["LEVEL"] = "1.5" running_summed_length = len(image_list) for i in range(running_summed_length): with fits.open(image_list[i]) as image_hdul: light_summed_image += image_hdul[0].data.astype(np.float64) if i == 0: - level2_0_header['DATE_OBS'] = (image_hdul[0].header['DATE_OBS'], 'Date Observation') - level2_0_header['T_OBS'] = (image_hdul[0].header['T_OBS'], 'Telescope Observation') - level2_0_header['TELESCOP'] = (image_hdul[0].header['TELESCOP'], 'Telescope') - level2_0_header['INSTRUME'] = (image_hdul[0].header['INSTRUME'], 'Instrument') - level2_0_header['DEBIASED'] = (image_hdul[0].header['DEBIASED'], 'Bias Removed') - level2_0_header['DEDARKED'] = (image_hdul[0].header['DEDARKED'], 'Dark Removed') - level2_0_header['GAINADJ'] = (image_hdul[0].header['GAINADJ'], 'Gain Adjusted') - level2_0_header['BDPIXRPL'] = (image_hdul[0].header['BDPIXRPL'], 'Bad Pixels Replaced') - level2_0_header['DESPIKED'] = (image_hdul[0].header['DESPIKED'], 'Despiked') - level2_0_header['ABSORADJ'] = (image_hdul[0].header['ABSORADJ'], 'Absorption Adjusted') - level2_0_header['UNITS'] = ('Electrons s-1', 'Units') - image_timestamp_begin = image_hdul[0].header['DATE_OBS'] + level2_0_header["DATE_OBS"] = ( + image_hdul[0].header["DATE_OBS"], + "Date Observation", + ) + level2_0_header["T_OBS"] = ( + image_hdul[0].header["T_OBS"], + "Telescope Observation", + ) + level2_0_header["TELESCOP"] = ( + image_hdul[0].header["TELESCOP"], + "Telescope", + ) + level2_0_header["INSTRUME"] = ( + image_hdul[0].header["INSTRUME"], + "Instrument", + ) + level2_0_header["DEBIASED"] = ( + image_hdul[0].header["DEBIASED"], + "Bias Removed", + ) + level2_0_header["DEDARKED"] = ( + image_hdul[0].header["DEDARKED"], + "Dark Removed", + ) + level2_0_header["GAINADJ"] = ( + image_hdul[0].header["GAINADJ"], + "Gain Adjusted", + ) + level2_0_header["BDPIXRPL"] = ( + image_hdul[0].header["BDPIXRPL"], + "Bad Pixels Replaced", + ) + level2_0_header["DESPIKED"] = ( + image_hdul[0].header["DESPIKED"], + "Despiked", + ) + level2_0_header["ABSORADJ"] = ( + image_hdul[0].header["ABSORADJ"], + "Absorption Adjusted", + ) + level2_0_header["UNITS"] = ("Electrons s-1", "Units") + image_timestamp_begin = image_hdul[0].header["DATE_OBS"] if i == (running_summed_length - 1): - image_timestamp_end = image_hdul[0].header['DATE_OBS'] - image_exposure_time = image_hdul[0].header['IMG_EXP'] + image_timestamp_end = image_hdul[0].header["DATE_OBS"] + image_exposure_time = image_hdul[0].header["IMG_EXP"] summed_image_exposure_time += image_exposure_time image_spike_replaced_values = pd.DataFrame(image_hdul[2].data) - spike_replaced_values = spike_replaced_values.append(image_spike_replaced_values, ignore_index=True) + spike_replaced_values = spike_replaced_values.append( + image_spike_replaced_values, ignore_index=True + ) if i == (running_summed_length - 1): # Normalize data. 
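The normalization in the next hunk converts the electron sum to a rate: divide by the accumulated exposure time and clamp negatives to zero, matching the 'Electrons s-1' UNITS keyword. As a sketch:

import numpy as np

def normalize_summed_image(summed, total_exposure_s):
    rate = summed / total_exposure_s  # electrons per second
    rate[rate < 0.0] = 0.0            # clamp unphysical negatives
    return rate.astype(np.float32)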
light_summed_image /= summed_image_exposure_time light_summed_image[np.where(light_summed_image < 0.0)] = 0.0 - level2_0_header['EXPTIME'] = summed_image_exposure_time + level2_0_header["EXPTIME"] = summed_image_exposure_time keywords_exist = True try: - lower_left_x = image_hdul[0].header['SFOVLLX'] - lower_left_y = image_hdul[0].header['SFOVLLY'] - upper_left_x = image_hdul[0].header['SFOVULX'] - upper_left_y = image_hdul[0].header['SFOVULY'] - lower_right_x = image_hdul[0].header['SFOVLRX'] - lower_right_y = image_hdul[0].header['SFOVLRY'] - upper_right_x = image_hdul[0].header['SFOVURX'] - upper_right_y = image_hdul[0].header['SFOVURY'] - roll = image_hdul[0].header['ROLL'] + lower_left_x = image_hdul[0].header["SFOVLLX"] + lower_left_y = image_hdul[0].header["SFOVLLY"] + upper_left_x = image_hdul[0].header["SFOVULX"] + upper_left_y = image_hdul[0].header["SFOVULY"] + lower_right_x = image_hdul[0].header["SFOVLRX"] + lower_right_y = image_hdul[0].header["SFOVLRY"] + upper_right_x = image_hdul[0].header["SFOVURX"] + upper_right_y = image_hdul[0].header["SFOVURY"] + roll = image_hdul[0].header["ROLL"] except KeyError: keywords_exist = False - if keywords_exist == True: - level2_0_header['SFOVLLX'] = (lower_left_x, 'SLOT FOV Lower Left X') - level2_0_header['SFOVLLY'] = (lower_left_y, 'SLOT FOV Lower Left Y') - level2_0_header['SFOVULX'] = (upper_left_x, 'SLOT FOV Upper Left X') - level2_0_header['SFOVULY'] = (upper_left_y, 'SLOT FOV Upper Left Y') - level2_0_header['SFOVLRX'] = (lower_right_x, 'SLOT FOV Lower Right X') - level2_0_header['SFOVLRY'] = (lower_right_y, 'SLOT FOV Lower Right Y') - level2_0_header['SFOVURX'] = (upper_right_x, 'SLOT FOV Upper Right X') - level2_0_header['SFOVURY'] = (upper_right_y, 'SLOT FOV Upper Right Y') - level2_0_header['ROLL'] = (roll, 'degrees') + if keywords_exist: + level2_0_header["SFOVLLX"] = ( + lower_left_x, + "SLOT FOV Lower Left X", + ) + level2_0_header["SFOVLLY"] = ( + lower_left_y, + "SLOT FOV Lower Left Y", + ) + level2_0_header["SFOVULX"] = ( + upper_left_x, + "SLOT FOV Upper Left X", + ) + level2_0_header["SFOVULY"] = ( + upper_left_y, + "SLOT FOV Upper Left Y", + ) + level2_0_header["SFOVLRX"] = ( + lower_right_x, + "SLOT FOV Lower Right X", + ) + level2_0_header["SFOVLRY"] = ( + lower_right_y, + "SLOT FOV Lower Right Y", + ) + level2_0_header["SFOVURX"] = ( + upper_right_x, + "SLOT FOV Upper Right X", + ) + level2_0_header["SFOVURY"] = ( + upper_right_y, + "SLOT FOV Upper Right Y", + ) + level2_0_header["ROLL"] = (roll, "degrees") keywords_exist = True try: - sun_radius_observed = image_hdul[0].header['RSUN_OBS'] - sun_radius = image_hdul[0].header['R_SUN'] + sun_radius_observed = image_hdul[0].header["RSUN_OBS"] + sun_radius = image_hdul[0].header["R_SUN"] except KeyError: keywords_exist = False - if keywords_exist == True: - level2_0_header['RSUN_OBS'] = (sun_radius_observed, 'arcsecs') - level2_0_header['R_SUN'] = (sun_radius, 'arcsecs') - hdu = fits.PrimaryHDU(data = light_summed_image.astype(np.float32), header=level2_0_header) - spike_dict = {'y':np.int32, 'x':np.int32, 'value':np.float32} + if keywords_exist: + level2_0_header["RSUN_OBS"] = (sun_radius_observed, "arcsecs") + level2_0_header["R_SUN"] = (sun_radius, "arcsecs") + hdu = fits.PrimaryHDU( + data=light_summed_image.astype(np.float32), + header=level2_0_header, + ) + spike_dict = {"y": np.int32, "x": np.int32, "value": np.float32} spike_replaced_values = spike_replaced_values.astype(spike_dict) despike_table = Table.from_pandas(spike_replaced_values) depike_table_hdu = 
fits.table_to_hdu(despike_table) - hdu_list = fits.HDUList([hdu, image_hdul[1].copy(), depike_table_hdu, image_hdul[3].copy()]) + hdu_list = fits.HDUList( + [ + hdu, + image_hdul[1].copy(), + depike_table_hdu, + image_hdul[3].copy(), + ] + ) index_slice = image_timestamp_begin.find(".") image_timestamp_begin = image_timestamp_begin[:index_slice] image_timestamp_begin = image_timestamp_begin.replace(":", ".") @@ -1197,11 +1896,20 @@ def create_level2_1_summed_image(self, image_list: list, output_dir: str): index_slice = image_timestamp_end.find(".") image_timestamp_end = image_timestamp_end[:index_slice] image_timestamp_end = image_timestamp_end.replace(":", ".") - image_output_file = output_dir + "magixs_L1.5_" + image_timestamp_begin + "_" + image_timestamp_end + "_summed_image.fits" + image_output_file = ( + output_dir + + "magixs_L1.5_" + + image_timestamp_begin + + "_" + + image_timestamp_end + + "_summed_image.fits" + ) hdu_list.writeto(image_output_file, overwrite=True) - def create_level2_3_running_summed_images(self, image_list: list, running_summed_length: np.int32, output_dir: str): - ''' + def create_level2_3_running_summed_images( + self, image_list: list, running_summed_length: np.int32, output_dir: str + ): + """ Creates Level 1.5 running summed images (i.e. 0 to n, 1 to n+1, etc.) for Level 2.3 inversions. Parameters @@ -1217,7 +1925,7 @@ def create_level2_3_running_summed_images(self, image_list: list, running_summed ------- None. - ''' + """ # Create output directory. os.makedirs(output_dir, exist_ok=True) image_list.sort() @@ -1227,74 +1935,143 @@ def create_level2_3_running_summed_images(self, image_list: list, running_summed for j in range(num_summed_images): light_summed_image[:, :] = 0.0 summed_image_exposure_time = 0.0 - spike_replaced_values = pd.DataFrame(columns=['y', 'x', 'value']) + spike_replaced_values = pd.DataFrame(columns=["y", "x", "value"]) level2_0_header = fits.Header() - level2_0_header['LEVEL'] = '1.5' + level2_0_header["LEVEL"] = "1.5" for i in range(running_summed_length): with fits.open(image_list[j + i]) as image_hdul: light_summed_image += image_hdul[0].data.astype(np.float64) if i == 0: - level2_0_header['DATE_OBS'] = (image_hdul[0].header['DATE_OBS'], 'Date Observation') - level2_0_header['T_OBS'] = (image_hdul[0].header['T_OBS'], 'Telescope Observation') - level2_0_header['TELESCOP'] = (image_hdul[0].header['TELESCOP'], 'Telescope') - level2_0_header['INSTRUME'] = (image_hdul[0].header['INSTRUME'], 'Instrument') - level2_0_header['DEBIASED'] = (image_hdul[0].header['DEBIASED'], 'Bias Removed') - level2_0_header['DEDARKED'] = (image_hdul[0].header['DEDARKED'], 'Dark Removed') - level2_0_header['GAINADJ'] = (image_hdul[0].header['GAINADJ'], 'Gain Adjusted') - level2_0_header['BDPIXRPL'] = (image_hdul[0].header['BDPIXRPL'], 'Bad Pixels Replaced') - level2_0_header['DESPIKED'] = (image_hdul[0].header['DESPIKED'], 'Despiked') - level2_0_header['ABSORADJ'] = (image_hdul[0].header['ABSORADJ'], 'Absorption Adjusted') - level2_0_header['UNITS'] = ('Electrons s-1', 'Units') - image_timestamp_begin = image_hdul[0].header['DATE_OBS'] + level2_0_header["DATE_OBS"] = ( + image_hdul[0].header["DATE_OBS"], + "Date Observation", + ) + level2_0_header["T_OBS"] = ( + image_hdul[0].header["T_OBS"], + "Telescope Observation", + ) + level2_0_header["TELESCOP"] = ( + image_hdul[0].header["TELESCOP"], + "Telescope", + ) + level2_0_header["INSTRUME"] = ( + image_hdul[0].header["INSTRUME"], + "Instrument", + ) + level2_0_header["DEBIASED"] = ( + 
image_hdul[0].header["DEBIASED"], + "Bias Removed", + ) + level2_0_header["DEDARKED"] = ( + image_hdul[0].header["DEDARKED"], + "Dark Removed", + ) + level2_0_header["GAINADJ"] = ( + image_hdul[0].header["GAINADJ"], + "Gain Adjusted", + ) + level2_0_header["BDPIXRPL"] = ( + image_hdul[0].header["BDPIXRPL"], + "Bad Pixels Replaced", + ) + level2_0_header["DESPIKED"] = ( + image_hdul[0].header["DESPIKED"], + "Despiked", + ) + level2_0_header["ABSORADJ"] = ( + image_hdul[0].header["ABSORADJ"], + "Absorption Adjusted", + ) + level2_0_header["UNITS"] = ("Electrons s-1", "Units") + image_timestamp_begin = image_hdul[0].header["DATE_OBS"] if i == (running_summed_length - 1): - image_timestamp_end = image_hdul[0].header['DATE_OBS'] - image_exposure_time = image_hdul[0].header['IMG_EXP'] + image_timestamp_end = image_hdul[0].header["DATE_OBS"] + image_exposure_time = image_hdul[0].header["IMG_EXP"] summed_image_exposure_time += image_exposure_time image_spike_replaced_values = pd.DataFrame(image_hdul[2].data) - spike_replaced_values = spike_replaced_values.append(image_spike_replaced_values, ignore_index=True) + spike_replaced_values = spike_replaced_values.append( + image_spike_replaced_values, ignore_index=True + ) if i == (running_summed_length - 1): # Normalize data. light_summed_image /= summed_image_exposure_time light_summed_image[np.where(light_summed_image < 0.0)] = 0.0 - level2_0_header['EXPTIME'] = summed_image_exposure_time + level2_0_header["EXPTIME"] = summed_image_exposure_time keywords_exist = True try: - lower_left_x = image_hdul[0].header['SFOVLLX'] - lower_left_y = image_hdul[0].header['SFOVLLY'] - upper_left_x = image_hdul[0].header['SFOVULX'] - upper_left_y = image_hdul[0].header['SFOVULY'] - lower_right_x = image_hdul[0].header['SFOVLRX'] - lower_right_y = image_hdul[0].header['SFOVLRY'] - upper_right_x = image_hdul[0].header['SFOVURX'] - upper_right_y = image_hdul[0].header['SFOVURY'] - roll = image_hdul[0].header['ROLL'] + lower_left_x = image_hdul[0].header["SFOVLLX"] + lower_left_y = image_hdul[0].header["SFOVLLY"] + upper_left_x = image_hdul[0].header["SFOVULX"] + upper_left_y = image_hdul[0].header["SFOVULY"] + lower_right_x = image_hdul[0].header["SFOVLRX"] + lower_right_y = image_hdul[0].header["SFOVLRY"] + upper_right_x = image_hdul[0].header["SFOVURX"] + upper_right_y = image_hdul[0].header["SFOVURY"] + roll = image_hdul[0].header["ROLL"] except KeyError: keywords_exist = False - if keywords_exist == True: - level2_0_header['SFOVLLX'] = (lower_left_x, 'SLOT FOV Lower Left X') - level2_0_header['SFOVLLY'] = (lower_left_y, 'SLOT FOV Lower Left Y') - level2_0_header['SFOVULX'] = (upper_left_x, 'SLOT FOV Upper Left X') - level2_0_header['SFOVULY'] = (upper_left_y, 'SLOT FOV Upper Left Y') - level2_0_header['SFOVLRX'] = (lower_right_x, 'SLOT FOV Lower Right X') - level2_0_header['SFOVLRY'] = (lower_right_y, 'SLOT FOV Lower Right Y') - level2_0_header['SFOVURX'] = (upper_right_x, 'SLOT FOV Upper Right X') - level2_0_header['SFOVURY'] = (upper_right_y, 'SLOT FOV Upper Right Y') - level2_0_header['ROLL'] = (roll, 'degrees') + if keywords_exist: + level2_0_header["SFOVLLX"] = ( + lower_left_x, + "SLOT FOV Lower Left X", + ) + level2_0_header["SFOVLLY"] = ( + lower_left_y, + "SLOT FOV Lower Left Y", + ) + level2_0_header["SFOVULX"] = ( + upper_left_x, + "SLOT FOV Upper Left X", + ) + level2_0_header["SFOVULY"] = ( + upper_left_y, + "SLOT FOV Upper Left Y", + ) + level2_0_header["SFOVLRX"] = ( + lower_right_x, + "SLOT FOV Lower Right X", + ) + level2_0_header["SFOVLRY"] = ( + 
lower_right_y, + "SLOT FOV Lower Right Y", + ) + level2_0_header["SFOVURX"] = ( + upper_right_x, + "SLOT FOV Upper Right X", + ) + level2_0_header["SFOVURY"] = ( + upper_right_y, + "SLOT FOV Upper Right Y", + ) + level2_0_header["ROLL"] = (roll, "degrees") keywords_exist = True try: - sun_radius_observed = image_hdul[0].header['RSUN_OBS'] - sun_radius = image_hdul[0].header['R_SUN'] + sun_radius_observed = image_hdul[0].header["RSUN_OBS"] + sun_radius = image_hdul[0].header["R_SUN"] except KeyError: keywords_exist = False - if keywords_exist == True: - level2_0_header['RSUN_OBS'] = (sun_radius_observed, 'arcsecs') - level2_0_header['R_SUN'] = (sun_radius, 'arcsecs') - hdu = fits.PrimaryHDU(data = light_summed_image.astype(np.float32), header=level2_0_header) - spike_dict = {'y':np.int32, 'x':np.int32, 'value':np.float32} + if keywords_exist: + level2_0_header["RSUN_OBS"] = ( + sun_radius_observed, + "arcsecs", + ) + level2_0_header["R_SUN"] = (sun_radius, "arcsecs") + hdu = fits.PrimaryHDU( + data=light_summed_image.astype(np.float32), + header=level2_0_header, + ) + spike_dict = {"y": np.int32, "x": np.int32, "value": np.float32} spike_replaced_values = spike_replaced_values.astype(spike_dict) despike_table = Table.from_pandas(spike_replaced_values) depike_table_hdu = fits.table_to_hdu(despike_table) - hdu_list = fits.HDUList([hdu, image_hdul[1].copy(), depike_table_hdu, image_hdul[3].copy()]) + hdu_list = fits.HDUList( + [ + hdu, + image_hdul[1].copy(), + depike_table_hdu, + image_hdul[3].copy(), + ] + ) index_slice = image_timestamp_begin.find(".") image_timestamp_begin = image_timestamp_begin[:index_slice] image_timestamp_begin = image_timestamp_begin.replace(":", ".") @@ -1304,11 +2081,31 @@ def create_level2_3_running_summed_images(self, image_list: list, running_summed index_slice = image_timestamp_end.find(".") image_timestamp_end = image_timestamp_end[:index_slice] image_timestamp_end = image_timestamp_end.replace(":", ".") - image_output_file = output_dir + "magixs_L1.5_" + image_timestamp_begin + "_" + image_timestamp_end + "_summed_image.fits" + image_output_file = ( + output_dir + + "magixs_L1.5_" + + image_timestamp_begin + + "_" + + image_timestamp_end + + "_summed_image.fits" + ) hdu_list.writeto(image_output_file, overwrite=True) - def perform_level2_0_elasticnet_inversion(self, image_list: list, rsp_func_cube_file: str, rsp_dep_name: str, rsp_dep_list: tp.Union[list, None], solution_fov_width: np.int32, smooth_over: str, field_angle_range: tp.Union[list, None], image_mask_file: tp.Union[str, None], level: str, detector_row_range: tp.Union[list, None], output_dir: str): - ''' + def perform_level2_0_elasticnet_inversion( + self, + image_list: list, + rsp_func_cube_file: str, + rsp_dep_name: str, + rsp_dep_list: tp.Union[list, None], + solution_fov_width: np.int32, + smooth_over: str, + field_angle_range: tp.Union[list, None], + image_mask_file: tp.Union[str, None], + level: str, + detector_row_range: tp.Union[list, None], + output_dir: str, + ): + """ Performs inversion of Level 2.x images using ElasticNet. Parameters @@ -1340,7 +2137,7 @@ def perform_level2_0_elasticnet_inversion(self, image_list: list, rsp_func_cube_ ------- None. - ''' + """ num_images = len(image_list) if num_images > 0: for index in range(len(image_list)): @@ -1348,10 +2145,14 @@ def perform_level2_0_elasticnet_inversion(self, image_list: list, rsp_func_cube_ # Read noisy image. 
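                # The ElasticNet fit configured below minimizes
                #     (1 / (2 * n_samples)) * ||y - Xw||_2^2
                #         + alpha * l1_ratio * ||w||_1
                #         + 0.5 * alpha * (1 - l1_ratio) * ||w||_2^2,
                # so alpha sets the overall regularization strength and l1_ratio
                # (rho) trades sparsity against smoothness. positive=True
                # constrains every emission-measure coefficient to be
                # non-negative, and precompute=True caches the Gram matrix of
                # the response functions for speed.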
print("Inverting:", image_list[index]) - inversion = Inversion(rsp_func_cube_file=rsp_func_cube_file, - rsp_dep_name=rsp_dep_name, rsp_dep_list=rsp_dep_list, - solution_fov_width=solution_fov_width, smooth_over=smooth_over, - field_angle_range=field_angle_range) + inversion = Inversion( + rsp_func_cube_file=rsp_func_cube_file, + rsp_dep_name=rsp_dep_name, + rsp_dep_list=rsp_dep_list, + solution_fov_width=solution_fov_width, + smooth_over=smooth_over, + field_angle_range=field_angle_range, + ) inversion.initialize_input_data(image_list[index], image_mask_file) @@ -1359,85 +2160,132 @@ def perform_level2_0_elasticnet_inversion(self, image_list: list, rsp_func_cube_ rho = 0.1 # alpha = 1e-3 # rho = 0.8 - model = enet(alpha=alpha, l1_ratio=rho, max_iter=50000, precompute=True, positive=True, fit_intercept=True, selection='cyclic') - #model = enet(alpha=alpha, l1_ratio=rho, max_iter=100000, precompute=True, positive=True, fit_intercept=True, selection='random') + model = enet( + alpha=alpha, + l1_ratio=rho, + max_iter=50000, + precompute=True, + positive=True, + fit_intercept=True, + selection="cyclic", + ) inv_model = enet_model(model) basename = os.path.splitext(os.path.basename(image_list[index]))[0] - basename_components = basename.split('_') - basename = basename.replace(basename_components[1], 'L' + level) - print(basename) - - start = time.time() - inversion.invert(inv_model, output_dir, output_file_prefix=basename, output_file_postfix="", level=level, detector_row_range=detector_row_range) - end = time.time() - print("Inversion Time =", end - start) - - def perform_level2_0_lassolars_inversion(self, image_list: list, rsp_func_cube_file: str, rsp_dep_name: str, rsp_dep_list: tp.Union[list, None], solution_fov_width: np.int32, smooth_over: str, field_angle_range: tp.Union[list, None], image_mask_file: tp.Union[str, None], level: str, detector_row_range: tp.Union[list, None], output_dir: str): - ''' - Performs inversion of Level 2.x images using Lasso Lars. - - Parameters - ---------- - image_list : list - List of Level 2.0 filenames. - rsp_func_cube_file: str - Filename of response function cube. - rsp_dep_name: str - Response dependence name (e.g. 'ion' or 'logt'). - rsp_dep_list: list, optional - List of dependence items. If None, use all dependence values. - solution_fov_width: np.int32 - Solution field-of-view width. 1 (all field angles), 2 (every other one), etc. The default is 1. - smooth_over: str, optional - Inversion smoothing (i.e. 'spatial' or 'dependence'). The default is 'spatial'. - field_angle_range: list, optional - Beginning and ending field angles to invert over. If None, use all field angles. - image_mask_file : str, optional - Mask to apply to image and response functions for inversion. - level: str - Level value for FITS keyword LEVEL. - detector_row_range: list, optional - Beginning and ending row numbers to invert. If None, invert all rows. The default is None. - output_dir : str - Directory to write Level 2.x EM data cubes and model predicted data. - - Returns - ------- - None. - - ''' - num_images = len(image_list) - if num_images > 0: - for index in range(len(image_list)): - print("Image ", index) - # Read noisy image. 
- print("Inverting:", image_list[index]) - - inversion = Inversion(rsp_func_cube_file=rsp_func_cube_file, - rsp_dep_name=rsp_dep_name, rsp_dep_list=rsp_dep_list, - solution_fov_width=solution_fov_width, smooth_over=smooth_over, - field_angle_range=field_angle_range) - - inversion.initialize_input_data(image_list[index], image_mask_file) - - #alpha = 1e-5 * 3 - alpha = 6e-3 - model = llars(alpha=alpha, max_iter=50000, normalize=False, precompute=True, positive=True, fit_intercept=True) - inv_model = llars_model(model) - - basename = os.path.splitext(os.path.basename(image_list[index]))[0] - basename_components = basename.split('_') - basename = basename.replace(basename_components[1], 'L' + level) + basename_components = basename.split("_") + basename = basename.replace(basename_components[1], "L" + level) print(basename) start = time.time() - inversion.invert(inv_model, output_dir, output_file_prefix=basename, output_file_postfix="", level=level, detector_row_range=detector_row_range) + inversion.invert( + inv_model, + output_dir, + output_file_prefix=basename, + output_file_postfix="", + level=level, + detector_row_range=detector_row_range, + ) end = time.time() print("Inversion Time =", end - start) - def create_level2_0_spectrally_pure_images(self, image_list: list, gnt_file: str, rsp_dep_list: list, output_dir: str): - ''' + # def perform_level2_0_lassolars_inversion( + # self, + # image_list: list, + # rsp_func_cube_file: str, + # rsp_dep_name: str, + # rsp_dep_list: tp.Union[list, None], + # solution_fov_width: np.int32, + # smooth_over: str, + # field_angle_range: tp.Union[list, None], + # image_mask_file: tp.Union[str, None], + # level: str, + # detector_row_range: tp.Union[list, None], + # output_dir: str, + # ): + # """ + # Performs inversion of Level 2.x images using Lasso Lars. + # + # Parameters + # ---------- + # image_list : list + # List of Level 2.0 filenames. + # rsp_func_cube_file: str + # Filename of response function cube. + # rsp_dep_name: str + # Response dependence name (e.g. 'ion' or 'logt'). + # rsp_dep_list: list, optional + # List of dependence items. If None, use all dependence values. + # solution_fov_width: np.int32 + # Solution field-of-view width. 1 (all field angles), 2 (every other one), etc. The default is 1. + # smooth_over: str, optional + # Inversion smoothing (i.e. 'spatial' or 'dependence'). The default is 'spatial'. + # field_angle_range: list, optional + # Beginning and ending field angles to invert over. If None, use all field angles. + # image_mask_file : str, optional + # Mask to apply to image and response functions for inversion. + # level: str + # Level value for FITS keyword LEVEL. + # detector_row_range: list, optional + # Beginning and ending row numbers to invert. If None, invert all rows. The default is None. + # output_dir : str + # Directory to write Level 2.x EM data cubes and model predicted data. + # + # Returns + # ------- + # None. + # + # """ + # num_images = len(image_list) + # if num_images > 0: + # for index in range(len(image_list)): + # print("Image ", index) + # # Read noisy image. 
+ # print("Inverting:", image_list[index]) + # + # inversion = Inversion( + # rsp_func_cube_file=rsp_func_cube_file, + # rsp_dep_name=rsp_dep_name, + # rsp_dep_list=rsp_dep_list, + # solution_fov_width=solution_fov_width, + # smooth_over=smooth_over, + # field_angle_range=field_angle_range, + # ) + # + # inversion.initialize_input_data(image_list[index], image_mask_file) + # + # # alpha = 1e-5 * 3 + # alpha = 6e-3 + # model = llars( + # alpha=alpha, + # max_iter=50000, + # normalize=False, + # precompute=True, + # positive=True, + # fit_intercept=True, + # ) + # inv_model = llars_model(model) + # + # basename = os.path.splitext(os.path.basename(image_list[index]))[0] + # basename_components = basename.split("_") + # basename = basename.replace(basename_components[1], "L" + level) + # print(basename) + # + # start = time.time() + # inversion.invert( + # inv_model, + # output_dir, + # output_file_prefix=basename, + # output_file_postfix="", + # level=level, + # detector_row_range=detector_row_range, + # ) + # end = time.time() + # print("Inversion Time =", end - start) + + def create_level2_0_spectrally_pure_images( + self, image_list: list, gnt_file: str, rsp_dep_list: list, output_dir: str + ): + """ Creates Level 2.x spectrally pure image from EM data cubes. Parameters @@ -1455,7 +2303,7 @@ def create_level2_0_spectrally_pure_images(self, image_list: list, gnt_file: str ------- None. - ''' + """ # Create output directory. os.makedirs(output_dir, exist_ok=True) num_images = len(image_list) @@ -1463,12 +2311,12 @@ def create_level2_0_spectrally_pure_images(self, image_list: list, gnt_file: str with fits.open(gnt_file) as gnt_hdul: gnt_filename = os.path.basename(gnt_file) gnt_data_values = gnt_hdul[0].data.astype(np.float64) - #print(np.shape(gnt_data_values)) + # print(np.shape(gnt_data_values)) num_gnts, num_gnt_deps = np.shape(gnt_data_values) - gnt_dep_list = gnt_hdul[1].data['logt'] - #print(gnt_dep_list) + gnt_dep_list = gnt_hdul[1].data["logt"] + # print(gnt_dep_list) try: - ion_wavelength_table_format = gnt_hdul[0].header['IWTBLFMT'] + ion_wavelength_table_format = gnt_hdul[0].header["IWTBLFMT"] if ion_wavelength_table_format == "ion@wavelength": ion_wavelength_name = "ion_wavelength" else: @@ -1493,7 +2341,7 @@ def create_level2_0_spectrally_pure_images(self, image_list: list, gnt_file: str try: index = np.where(gnt_dep_list == np.around(dep, decimals=2)) print(index[0][0]) - except: + except: # noqa: E722 # TODO figure out what exception was expected pass if len(index[0] == 1): gnt_values[:, dep_cnt] = gnt_data_values[:, index].ravel() @@ -1505,107 +2353,159 @@ def create_level2_0_spectrally_pure_images(self, image_list: list, gnt_file: str with fits.open(image_list[index]) as em_hdul: em_data_cube = em_hdul[0].data.astype(np.float64) em_data_cube = np.transpose(em_data_cube, axes=(1, 2, 0)) - #em_dep_list = em_hdul[1].data['logt'] - #print(em_dep_list) + # em_dep_list = em_hdul[1].data['logt'] + # print(em_dep_list) if index == 0: image_height, num_slits, num_logts = np.shape(em_data_cube) - gnt_data_cube = np.zeros((image_height, num_slits, num_gnts), dtype=np.float64) + gnt_data_cube = np.zeros( + (image_height, num_slits, num_gnts), dtype=np.float64 + ) else: - gnt_data_cube = np.transpose(gnt_data_cube.astype(np.float32), axes=(1, 2, 0)) + gnt_data_cube = np.transpose( + gnt_data_cube.astype(np.float32), axes=(1, 2, 0) + ) gnt_data_cube[:, :, :] = 0.0 for gnt_num in range(num_gnts): - gnt_image = (em_data_cube[:,:,0:num_rsp_deps] * 10**26 * 
gnt_values[gnt_num,0:num_rsp_deps]).sum(axis=2) - #gnt_image = (em_data_cube[:,:,0:num_rsp_deps] * gnt_values[gnt_num,0:num_rsp_deps]).sum(axis=2) - gnt_data_cube[:,:,gnt_num] = gnt_image - basename = os.path.splitext(os.path.basename(image_list[index]))[0] + gnt_image = ( + em_data_cube[:, :, 0:num_rsp_deps] + * 10**26 + * gnt_values[gnt_num, 0:num_rsp_deps] + ).sum(axis=2) + gnt_data_cube[:, :, gnt_num] = gnt_image + basename = os.path.splitext( + os.path.basename(image_list[index]) + )[0] print(type(basename)) slice_index = basename.find("_em_data_cube") print(type(basename)) - postfix_val=basename.split("_x") - postfix_val=postfix_val[1] + postfix_val = basename.split("_x") + postfix_val = postfix_val[1] print(postfix_val) basename = basename[:slice_index] - basename+='_spectrally_pure_data_cube_x'+postfix_val+'.fits' + basename += ( + "_spectrally_pure_data_cube_x" + postfix_val + ".fits" + ) gnt_data_cube_file = output_dir + basename # Transpose data (wavelength, y, x). Readable by ImageJ. - gnt_data_cube = np.transpose(gnt_data_cube.astype(np.float32), axes=(2, 0, 1)) + gnt_data_cube = np.transpose( + gnt_data_cube.astype(np.float32), axes=(2, 0, 1) + ) em_hdul[0].data = gnt_data_cube - em_hdul[0].header['UNITS'] = 'Ph s-1 sr-1 cm-2' - em_hdul[0].header['GNT'] = (gnt_filename, 'GNT Filename') - em_hdul[0].header['DEPNAME'] = ('wavelength', 'Dependence Name') - em_hdul[0].header['IWTBLFMT'] = (ion_wavelength_table_format, 'Ion/Wavelength Table Format') + em_hdul[0].header["UNITS"] = "Ph s-1 sr-1 cm-2" + em_hdul[0].header["GNT"] = (gnt_filename, "GNT Filename") + em_hdul[0].header["DEPNAME"] = ("wavelength", "Dependence Name") + em_hdul[0].header["IWTBLFMT"] = ( + ion_wavelength_table_format, + "Ion/Wavelength Table Format", + ) # Add binary table. gnt_index_list = list(range(len(ion_wavelength_values))) - col1 = fits.Column(name='index', format='1I', array=gnt_index_list) - col2 = fits.Column(name=ion_wavelength_name, format='15A', array=ion_wavelength_values) + col1 = fits.Column( + name="index", format="1I", array=gnt_index_list + ) + col2 = fits.Column( + name=ion_wavelength_name, + format="15A", + array=ion_wavelength_values, + ) table_hdu = fits.BinTableHDU.from_columns([col1, col2]) em_hdul[1].data = table_hdu.data em_hdul[1].header = table_hdu.header em_hdul.writeto(gnt_data_cube_file, overwrite=True) - def update_level2_pointing(self, image_list: list, level1_wcs_table: str, solar_fov_coords: str): + def update_level2_pointing( + self, image_list: list, level1_wcs_table: str, solar_fov_coords: str + ): number_images = len(image_list) # Read Level 1 WCS table information. wcs_table = pd.read_excel(level1_wcs_table, index_col=None) rows, columns = wcs_table.shape - assert rows >= 1 and columns == 8 and wcs_table.columns[0] == 'wavelength' # Read solar FOV coordinates. 
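        # NOTE: the shape assertions removed above and below documented the
        # expected spreadsheet layouts: a WCS table with at least one row and
        # eight columns headed by 'wavelength', and a single-row, eight-column
        # FOV table headed by 'lower_left_x'. With the asserts gone, malformed
        # input surfaces later as an indexing error instead of failing here.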
fov_table = pd.read_excel(solar_fov_coords, index_col=None) rows, columns = fov_table.shape - assert rows == 1 and columns == 8 and fov_table.columns[0] == 'lower_left_x' for index in range(number_images): with fits.open(image_list[index]) as image_hdul: num_deps, image_height, image_width = np.shape(image_hdul[0].data) - #print(image_height, image_width, num_deps) + # print(image_height, image_width, num_deps) try: - fa_min = float(image_hdul[0].header['FA_MIN']) - fa_dlt = float(image_hdul[0].header['FA_DLT']) - fa_num = float(image_hdul[0].header['FA_NUM']) - fa_cdelt = float(image_hdul[0].header['FA_CDELT']) - drow_min = float(image_hdul[0].header['DROW_MIN']) - drow_max = float(image_hdul[0].header['DROW_MAX']) + fa_cdelt = float(image_hdul[0].header["FA_CDELT"]) + drow_min = float(image_hdul[0].header["DROW_MIN"]) + drow_max = float(image_hdul[0].header["DROW_MAX"]) roll = float(wcs_table.values[0, 7]) # Create Level 2 WCS table. level_2_x_wcs_table = wcs_table.iloc[0:1].copy() level_2_x_wcs_table.iloc[[0], [0]] = 0.0 level_2_x_wcs_table.iloc[[0], [1]] = fa_cdelt - #calc_x_pixel = divmod(image_width, 2) - #level_2_x_wcs_table.iloc[[0], [2]] = calc_x_pixel[0] + # calc_x_pixel = divmod(image_width, 2) + # level_2_x_wcs_table.iloc[[0], [2]] = calc_x_pixel[0] calc_x_pixel = (image_width - 1) / 2.0 level_2_x_wcs_table.iloc[[0], [2]] = calc_x_pixel - #level_2_x_wcs_table.iloc[[0], [3]] = (fa_min + (((fa_dlt) * (fa_num - 1)) / 2)) + level_2_x_wcs_table.values[0, 3] - #calc_y_pixel = divmod(((drow_max + 1) - drow_min), 2) - #level_1_y_pixel = level_2_x_wcs_table.values[0, 5] + # calc_y_pixel = divmod(((drow_max + 1) - drow_min), 2) + # level_1_y_pixel = level_2_x_wcs_table.values[0, 5] calc_y_pixel = (drow_max - drow_min) / 2.0 level_1_y_pixel = level_2_x_wcs_table.values[0, 5] level_2_x_wcs_table.iloc[[0], [5]] = drow_min + calc_y_pixel - level_2_x_wcs_table.iloc[[0], [3]] = level_2_x_wcs_table.values[0, 3] - math.cos(90 - roll) * (2.8 * (level_1_y_pixel - level_2_x_wcs_table.values[0, 5])) - #level_2_x_wcs_table.iloc[[0], [6]] = level_2_x_wcs_table.values[0, 6] + (2.8 * (level_2_x_wcs_table.values[0, 5] - level_1_y_pixel)) - level_2_x_wcs_table.iloc[[0], [6]] = level_2_x_wcs_table.values[0, 6] - math.sin(90 - roll) * (2.8 * (level_1_y_pixel - level_2_x_wcs_table.values[0, 5])) + level_2_x_wcs_table.iloc[[0], [3]] = level_2_x_wcs_table.values[ + 0, 3 + ] - math.cos(90 - roll) * ( + 2.8 * (level_1_y_pixel - level_2_x_wcs_table.values[0, 5]) + ) + level_2_x_wcs_table.iloc[[0], [6]] = level_2_x_wcs_table.values[ + 0, 6 + ] - math.sin(90 - roll) * ( + 2.8 * (level_1_y_pixel - level_2_x_wcs_table.values[0, 5]) + ) except KeyError: # Create empty Level 2 WCS table. 
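                    # NOTE: math.cos and math.sin expect radians, so the
                    # (90 - roll) arguments above are evaluated as radians even
                    # though ROLL is recorded in degrees; if the degree angle
                    # (90 - roll) is intended, the likely form is
                    # math.cos(math.radians(90 - roll)) and
                    # math.sin(math.radians(90 - roll)).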
level_2_x_wcs_table = wcs_table.iloc[0:0].copy() - #print(level_2_x_wcs_table) + # print(level_2_x_wcs_table) try: - data_product_level = image_hdul[0].header['LEVEL'] + data_product_level = image_hdul[0].header["LEVEL"] data_product_level.rstrip() except KeyError: - data_product_level = '' - assert data_product_level == '2.1' or data_product_level == '2.3' or data_product_level == '2.4' - image_hdul[0].header['SFOVLLX'] = (fov_table.values[0, 0], 'Slot FOV Lower Left X') - image_hdul[0].header['SFOVLLY'] = (fov_table.values[0, 1], 'Slot FOV Lower Left Y') - image_hdul[0].header['SFOVULX'] = (fov_table.values[0, 2], 'Slot FOV Upper Left X') - image_hdul[0].header['SFOVULY'] = (fov_table.values[0, 3], 'Slot FOV Upper Left Y') - image_hdul[0].header['SFOVLRX'] = (fov_table.values[0, 4], 'Slot FOV Lower Right X') - image_hdul[0].header['SFOVLRY'] = (fov_table.values[0, 5], 'Slot FOV Lower Right Y') - image_hdul[0].header['SFOVURX'] = (fov_table.values[0, 6], 'Slot FOV Upper Right X') - image_hdul[0].header['SFOVURY'] = (fov_table.values[0, 7], 'Slot FOV Upper Right Y') - image_hdul[0].header['ROLL'] = (wcs_table.values[0, 7], 'degrees') + data_product_level = "" + assert ( + data_product_level == "2.1" + or data_product_level == "2.3" + or data_product_level == "2.4" + ) + image_hdul[0].header["SFOVLLX"] = ( + fov_table.values[0, 0], + "Slot FOV Lower Left X", + ) + image_hdul[0].header["SFOVLLY"] = ( + fov_table.values[0, 1], + "Slot FOV Lower Left Y", + ) + image_hdul[0].header["SFOVULX"] = ( + fov_table.values[0, 2], + "Slot FOV Upper Left X", + ) + image_hdul[0].header["SFOVULY"] = ( + fov_table.values[0, 3], + "Slot FOV Upper Left Y", + ) + image_hdul[0].header["SFOVLRX"] = ( + fov_table.values[0, 4], + "Slot FOV Lower Right X", + ) + image_hdul[0].header["SFOVLRY"] = ( + fov_table.values[0, 5], + "Slot FOV Lower Right Y", + ) + image_hdul[0].header["SFOVURX"] = ( + fov_table.values[0, 6], + "Slot FOV Upper Right X", + ) + image_hdul[0].header["SFOVURY"] = ( + fov_table.values[0, 7], + "Slot FOV Upper Right Y", + ) + image_hdul[0].header["ROLL"] = (wcs_table.values[0, 7], "degrees") # Add/Update binary table. try: - level_2_x_wcs_table_hdu = image_hdul[2] pointing_table = Table.from_pandas(level_2_x_wcs_table) pointing_table_hdu = fits.table_to_hdu(pointing_table) image_hdul[2].data = pointing_table_hdu.data @@ -1617,8 +2517,15 @@ def update_level2_pointing(self, image_list: list, level1_wcs_table: str, solar_ image_hdul.append(pointing_table_hdu) image_hdul.writeto(image_list[index], overwrite=True) - def create_level3_0_color_color_plot(self, spectrally_pure_data_cube: str, wavelength_list: list, saturation: float, lambda_scale: float, output_plot_filename: str): - ''' + def create_level3_0_color_color_plot( + self, + spectrally_pure_data_cube: str, + wavelength_list: list, + saturation: float, + lambda_scale: float, + output_plot_filename: str, + ): + """ Creates Level 3.0 color-color plot. Parameters @@ -1638,7 +2545,7 @@ def create_level3_0_color_color_plot(self, spectrally_pure_data_cube: str, wavel ------- None. 
- ''' + """ top = 255.0 assert len(wavelength_list) == 3 or len(wavelength_list) == 4 @@ -1646,7 +2553,7 @@ def create_level3_0_color_color_plot(self, spectrally_pure_data_cube: str, wavel with fits.open(spectrally_pure_data_cube) as spdc_hdul: try: - dep_name = spdc_hdul[0].header['DEPNAME'] + dep_name = spdc_hdul[0].header["DEPNAME"] wavelengths = spdc_hdul[1].data[dep_name] print(wavelengths) except KeyError: @@ -1654,87 +2561,132 @@ def create_level3_0_color_color_plot(self, spectrally_pure_data_cube: str, wavel num_gnts, image_height, num_slits = np.shape(spdc_hdul[0].data) try: - solution_scale = spdc_hdul[0].header['SLTNFOV'] - #except Exception as e: - except: + solution_scale = spdc_hdul[0].header["SLTNFOV"] + # except Exception as e: + except: # noqa: E722 # TODO figure out what exception was expected solution_scale = 1.0 - dep_data = np.zeros((len(wavelength_list), image_height, num_slits), dtype=np.float32) + dep_data = np.zeros( + (len(wavelength_list), image_height, num_slits), dtype=np.float32 + ) for i in range(len(wavelength_list)): for j in range(len(wavelengths)): if wavelength_list[i] == wavelengths[j]: dep_data[i, :, :] = spdc_hdul[0].data[j, :, :] - #dep_data[i, :, :] = spdc_hdul[0].data[j, :, :] / 10**26 - print(wavelength_list[i], np.max(dep_data[i]), np.min(dep_data[i])) + # dep_data[i, :, :] = spdc_hdul[0].data[j, :, :] / 10**26 + print( + wavelength_list[i], np.max(dep_data[i]), np.min(dep_data[i]) + ) break - #dep_data[:,:, :] = dep_data[:,:, :] / 10**26 - #average_slits = np.average(dep_data, axis = 0) + # dep_data[:,:, :] = dep_data[:,:, :] / 10**26 + # average_slits = np.average(dep_data, axis = 0) for dep_index in range(len(wavelength_list)): - #dep_data[dep_index] = dep_data[dep_index] - average_slits + # dep_data[dep_index] = dep_data[dep_index] - average_slits print(np.max(dep_data[dep_index]), np.min(dep_data[dep_index])) - dep_data[dep_index] = np.maximum(np.minimum(dep_data[dep_index], saturation), 0.0) + dep_data[dep_index] = np.maximum( + np.minimum(dep_data[dep_index], saturation), 0.0 + ) max_value = np.amax(dep_data) - if (len(wavelength_list) == 4): + if len(wavelength_list) == 4: slit_data = dep_data[3] - slit_max_value = np.max(slit_data) - #slit_data = slit_data * (max_value / slit_max_value) - #max_value = np.max(slit_data) + # slit_data = slit_data * (max_value / slit_max_value) + # max_value = np.max(slit_data) min_value = np.min(slit_data) print("yellow", max_value, min_value) - y_channel = np.maximum(np.minimum( - ((top+0.9999)*(slit_data-min_value)/(max_value-min_value)).astype(np.int16) - , top),0) + y_channel = np.maximum( + np.minimum( + ( + (top + 0.9999) + * (slit_data - min_value) + / (max_value - min_value) + ).astype(np.int16), + top, + ), + 0, + ) slit_data = dep_data[0] - slit_max_value = np.max(slit_data) - #slit_data = slit_data * (max_value / slit_max_value) - #max_value = np.max(slit_data) + # slit_data = slit_data * (max_value / slit_max_value) + # max_value = np.max(slit_data) min_value = np.min(slit_data) print("red", max_value, min_value) - r_channel = np.maximum(np.minimum( - ((top+0.9999)*(slit_data-min_value)/(max_value-min_value)).astype(np.int16) - , top),0) - if (len(wavelength_list) == 4): + r_channel = np.maximum( + np.minimum( + ( + (top + 0.9999) + * (slit_data - min_value) + / (max_value - min_value) + ).astype(np.int16), + top, + ), + 0, + ) + if len(wavelength_list) == 4: r_channel += y_channel r = Image.fromarray(r_channel.astype(np.uint8), mode=None) slit_data = dep_data[1] - slit_max_value = 
np.max(slit_data) - #slit_data = slit_data * (max_value / slit_max_value) - #max_value = np.max(slit_data) + # slit_data = slit_data * (max_value / slit_max_value) + # max_value = np.max(slit_data) min_value = np.min(slit_data) print("green", max_value, min_value) - g_channel = np.maximum(np.minimum( - ((top+0.9999)*(slit_data-min_value)/(max_value-min_value)).astype(np.int16) - , top),0) - if (len(wavelength_list) == 4): + g_channel = np.maximum( + np.minimum( + ( + (top + 0.9999) + * (slit_data - min_value) + / (max_value - min_value) + ).astype(np.int16), + top, + ), + 0, + ) + if len(wavelength_list) == 4: g_channel += y_channel g = Image.fromarray(g_channel.astype(np.uint8), mode=None) slit_data = dep_data[2] - slit_max_value = np.max(slit_data) - #slit_data = slit_data * (max_value / slit_max_value) - #max_value = np.max(slit_data) + # slit_data = slit_data * (max_value / slit_max_value) + # max_value = np.max(slit_data) min_value = np.min(slit_data) print("blue", max_value, min_value) - b_channel = np.maximum(np.minimum( - ((top+0.9999)*(slit_data-min_value)/(max_value-min_value)).astype(np.int16) - , top),0) + b_channel = np.maximum( + np.minimum( + ( + (top + 0.9999) + * (slit_data - min_value) + / (max_value - min_value) + ).astype(np.int16), + top, + ), + 0, + ) b = Image.fromarray(b_channel.astype(np.uint8), mode=None) rgb_image = Image.merge("RGB", (r, g, b)) - scaled_image = rgb_image.resize((int(rgb_image.width * solution_scale * lambda_scale), int(rgb_image.height))) + scaled_image = rgb_image.resize( + ( + int(rgb_image.width * solution_scale * lambda_scale), + int(rgb_image.height), + ) + ) scaled_image.show() scaled_image.save(output_plot_filename) - def create_summed_noisy_images(self, image_list: list, num_noisy_images: np.int32, output_dir_path: str, output_file_post_fix: str = "_Summed_Image"): + def create_summed_noisy_images( + self, + image_list: list, + num_noisy_images: np.int32, + output_dir_path: str, + output_file_post_fix: str = "_Summed_Image", + ): summed_image = np.zeros((1024, 2048), dtype=np.float32) summed_image_exposure_time = 0.0 for i in range(len(image_list)): image_hdul = fits.open(image_list[i]) - image_exposure_time = image_hdul[0].header['IMG_EXP'] + image_exposure_time = image_hdul[0].header["IMG_EXP"] summed_image_exposure_time += image_exposure_time summed_image += image_hdul[0].data @@ -1743,21 +2695,37 @@ def create_summed_noisy_images(self, image_list: list, num_noisy_images: np.int3 summed_image[np.where(summed_image < 0.0)] = 0.0 - summed_image_filename = output_dir_path + "Original" + output_file_post_fix + ".fits" + summed_image_filename = ( + output_dir_path + "Original" + output_file_post_fix + ".fits" + ) level1_5_header = fits.Header() - level1_5_header['IMG_EXP'] = (summed_image_exposure_time, 'Image Exposure') - level1_5_header['MEAS_EXP'] = (summed_image_exposure_time, 'Measurement Exposure') - hdu = fits.PrimaryHDU(data = summed_image, header = level1_5_header) + level1_5_header["IMG_EXP"] = (summed_image_exposure_time, "Image Exposure") + level1_5_header["MEAS_EXP"] = ( + summed_image_exposure_time, + "Measurement Exposure", + ) + hdu = fits.PrimaryHDU(data=summed_image, header=level1_5_header) hdu.writeto(summed_image_filename, overwrite=True) - self.create_noisy_images(summed_image_filename, num_noisy_images, output_dir_path, output_file_post_fix) - - def create_noisy_images(self, image_file: str, num_noisy_images: np.int32, output_dir_path: str, output_file_post_fix: str): + self.create_noisy_images( + 
summed_image_filename, + num_noisy_images, + output_dir_path, + output_file_post_fix, + ) + + def create_noisy_images( + self, + image_file: str, + num_noisy_images: np.int32, + output_dir_path: str, + output_file_post_fix: str, + ): image_hdul = fits.open(image_file) image = image_hdul[0].data try: - image_exposure_time = image_hdul[0].header['IMG_EXP'] - except: + image_exposure_time = image_hdul[0].header["IMG_EXP"] + except: # noqa: E722 # TODO figure out what exception was expected image_exposure_time = 1.0 # Create output directory. @@ -1774,7 +2742,9 @@ def create_noisy_images(self, image_file: str, num_noisy_images: np.int32, outpu noisy_image = rng.poisson(image) noisy_image = noisy_image / image_exposure_time hdu = fits.PrimaryHDU(noisy_image.astype(np.float32)) - noisy_image_filename = output_dir_path + "Run{}".format(i + 1) + output_file_post_fix + ".fits" + noisy_image_filename = ( + output_dir_path + f"Run{i+1}" + output_file_post_fix + ".fits" + ) hdu.writeto(noisy_image_filename, overwrite=True) def create_image_sequence_number_list(self, numbers: list) -> str: @@ -1784,37 +2754,37 @@ def create_image_sequence_number_list(self, numbers: list) -> str: if not number_list: first_in_sequence = num last_number = num - number_list += "{}".format(last_number) + number_list += str(last_number) else: if num == (last_number + 1): last_number = num elif num > (last_number + 1): if last_number == (first_in_sequence + 1): - number_list += ", {}".format(last_number) + number_list += f", {last_number}" elif last_number > (first_in_sequence + 1): - number_list += "-{}".format(last_number) + number_list += f"-{last_number}" first_in_sequence = num last_number = num - number_list += ", {}".format(last_number) + number_list += f", {last_number}" else: first_in_sequence = num last_number = num - number_list += ", {}".format(last_number) + number_list += f", {last_number}" if number_list: if last_number == (first_in_sequence + 1): - number_list += ", {}".format(last_number) + number_list += f", {last_number}" elif last_number > (first_in_sequence + 1): - number_list += "-{}".format(last_number) + number_list += f"-{last_number}" return number_list - - def create_image_list(self, input_dir: str, image_sequence_number_list: list) -> list: + def create_image_list( + self, input_dir: str, image_sequence_number_list: list + ) -> list: image_list = [] os.chdir(input_dir) for file in glob.glob("*.fits"): with fits.open(file) as image_hdul: - img_seq_num = image_hdul[0].header['IMG_ISN'] + img_seq_num = image_hdul[0].header["IMG_ISN"] if img_seq_num in image_sequence_number_list: image_list.append(file) return image_list - diff --git a/overlappogram/create_color_color_plot.py b/overlappogram/create_color_color_plot.py index c66c104..745434b 100644 --- a/overlappogram/create_color_color_plot.py +++ b/overlappogram/create_color_color_plot.py @@ -3,9 +3,13 @@ from PIL import Image -def create_color_color_plot(dep_list: list, dep_file_fmt: str, - saturatdep: float, lambda_scale: float, - output_plot_filename: str): +def create_color_color_plot( + dep_list: list, + dep_file_fmt: str, + saturatdep: float, + lambda_scale: float, + output_plot_filename: str, +): top = 255.0 assert len(dep_list) == 3 or len(dep_list) == 4 @@ -14,65 +18,92 @@ def create_color_color_plot(dep_list: list, dep_file_fmt: str, data_file = dep_file_fmt.format(dep_list[dep_index]) image_hdul = fits.open(data_file) dep_data_cube = image_hdul[0].data - height, num_slits= np.shape(dep_data_cube) + height, num_slits = 
np.shape(dep_data_cube) if first_dep: dep_data = np.zeros((len(dep_list), height, num_slits), dtype=np.float32) try: - pixel_fov_width = image_hdul[0].header['PIXELFOV'] - solution_fov_width = image_hdul[0].header['SLTNFOV'] - slit_fov_width = image_hdul[0].header['SLITFOV'] + pixel_fov_width = image_hdul[0].header["PIXELFOV"] + solution_fov_width = image_hdul[0].header["SLTNFOV"] solution_scale = float(int(round(solution_fov_width / pixel_fov_width))) - except: - solution_solution = 1.0 + except: # noqa: E722 # TODO figure out what exception was expected + pass first_dep = False dep_data[dep_index, :, :] = dep_data_cube print(np.max(dep_data[dep_index]), np.min(dep_data[dep_index])) - average_slits = np.average(dep_data, axis = 0) + average_slits = np.average(dep_data, axis=0) for dep_index in range(len(dep_list)): dep_data[dep_index] = dep_data[dep_index] - average_slits print(np.max(dep_data[dep_index]), np.min(dep_data[dep_index])) - dep_data[dep_index] = np.maximum(np.minimum(dep_data[dep_index], saturatdep), 0.0) + dep_data[dep_index] = np.maximum( + np.minimum(dep_data[dep_index], saturatdep), 0.0 + ) - if (len(dep_list) == 4): + if len(dep_list) == 4: slit_data = dep_data[3] max_value = np.max(slit_data) min_value = np.min(slit_data) print("yellow", max_value, min_value) - y_channel = np.maximum(np.minimum( - ((top+0.9999)*(slit_data-min_value)/(max_value-min_value)).astype(np.int16) - , top),0) + y_channel = np.maximum( + np.minimum( + ( + (top + 0.9999) * (slit_data - min_value) / (max_value - min_value) + ).astype(np.int16), + top, + ), + 0, + ) slit_data = dep_data[0] max_value = np.max(slit_data) min_value = np.min(slit_data) print("red", max_value, min_value) - r_channel = np.maximum(np.minimum( - ((top+0.9999)*(slit_data-min_value)/(max_value-min_value)).astype(np.int16) - , top),0) - if (len(dep_list) == 4): + r_channel = np.maximum( + np.minimum( + ((top + 0.9999) * (slit_data - min_value) / (max_value - min_value)).astype( + np.int16 + ), + top, + ), + 0, + ) + if len(dep_list) == 4: r_channel += y_channel r = Image.fromarray(r_channel.astype(np.uint8), mode=None) slit_data = dep_data[1] max_value = np.max(slit_data) min_value = np.min(slit_data) print("green", max_value, min_value) - g_channel = np.maximum(np.minimum( - ((top+0.9999)*(slit_data-min_value)/(max_value-min_value)).astype(np.int16) - , top),0) - if (len(dep_list) == 4): + g_channel = np.maximum( + np.minimum( + ((top + 0.9999) * (slit_data - min_value) / (max_value - min_value)).astype( + np.int16 + ), + top, + ), + 0, + ) + if len(dep_list) == 4: g_channel += y_channel g = Image.fromarray(g_channel.astype(np.uint8), mode=None) slit_data = dep_data[2] max_value = np.max(slit_data) min_value = np.min(slit_data) print("blue", max_value, min_value) - b_channel = np.maximum(np.minimum( - ((top+0.9999)*(slit_data-min_value)/(max_value-min_value)).astype(np.int16) - , top),0) + b_channel = np.maximum( + np.minimum( + ((top + 0.9999) * (slit_data - min_value) / (max_value - min_value)).astype( + np.int16 + ), + top, + ), + 0, + ) b = Image.fromarray(b_channel.astype(np.uint8), mode=None) rgb_image = Image.merge("RGB", (r, g, b)) - scaled_image = rgb_image.resize((int(rgb_image.width * solution_scale * lambda_scale), int(rgb_image.height))) + scaled_image = rgb_image.resize( + (int(rgb_image.width * solution_scale * lambda_scale), int(rgb_image.height)) + ) scaled_image.show() scaled_image.save(output_plot_filename) diff --git a/overlappogram/create_dependence_image.py b/overlappogram/create_dependence_image.py 
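A note on create_color_color_plot.py above: the bare except that replaces the
old fallback line (solution_solution = 1.0, evidently a typo for
solution_scale) now only passes, so when the PIXELFOV or SLTNFOV keyword is
missing, solution_scale is never bound and the later rgb_image.resize(...)
raises a NameError. A minimal sketch of the presumably intended fallback:

    try:
        pixel_fov_width = image_hdul[0].header["PIXELFOV"]
        solution_fov_width = image_hdul[0].header["SLTNFOV"]
        solution_scale = float(int(round(solution_fov_width / pixel_fov_width)))
    except KeyError:
        solution_scale = 1.0  # assumed default when the FOV keywords are absent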
index 664b4d2..17378e5 100644 --- a/overlappogram/create_dependence_image.py +++ b/overlappogram/create_dependence_image.py @@ -5,8 +5,12 @@ from astropy.io import fits -def create_dependence_image(em_data_cube_data: tp.Union[str, list], rsp_dep_file_fmt: str, - image_file_fmt: str, rsp_dep_list: np.ndarray = None): +def create_dependence_image( + em_data_cube_data: tp.Union[str, list], + rsp_dep_file_fmt: str, + image_file_fmt: str, + rsp_dep_list: np.ndarray = None, +): """ Creates a dependence image for each dependence. If the response dependence list is None, a data cube is created for all dependences in binary table. @@ -28,7 +32,7 @@ def create_dependence_image(em_data_cube_data: tp.Union[str, list], rsp_dep_file None. """ - if type(em_data_cube_data) == str: + if isinstance(em_data_cube_data, str): image_hdul = fits.open(em_data_cube_data) em_data_cube = image_hdul[0].data em_data_cube_header = image_hdul[0].header @@ -41,31 +45,29 @@ def create_dependence_image(em_data_cube_data: tp.Union[str, list], rsp_dep_file # EM Data Cube image_hdul = fits.open(em_data_cube_data[index]) em_data_cube = image_hdul[0].data - #print(np.shape(image_hdul[0].data)) + # print(np.shape(image_hdul[0].data)) height, num_slits, width = np.shape(image_hdul[0].data) if first_run: em_data_cube_header = image_hdul[0].header - ref_height = height - ref_num_slits = num_slits - ref_width = width - run_em_data_cube = np.zeros((num_runs, height, num_slits, width), dtype=np.float32) + run_em_data_cube = np.zeros( + (num_runs, height, num_slits, width), dtype=np.float32 + ) first_run = False else: - assert height == ref_height and num_slits == ref_num_slits and width == ref_width + pass run_em_data_cube[index, :, :, :] = em_data_cube em_data_cube = np.mean(run_em_data_cube, axis=0) keywords_and_table_exists = True try: - pixel_fov_width = image_hdul[0].header['PIXELFOV'] - solution_fov_width = image_hdul[0].header['SLTNFOV'] - slit_fov_width = image_hdul[0].header['SLITFOV'] + pixel_fov_width = image_hdul[0].header["PIXELFOV"] + solution_fov_width = image_hdul[0].header["SLTNFOV"] slit_shift_width = int(round(solution_fov_width / pixel_fov_width)) - #print("solution fov = ", solution_fov_width, pixel_fov_width) + # print("solution fov = ", solution_fov_width, pixel_fov_width) - dep_name = image_hdul[0].header['DEPNAME'] + dep_name = image_hdul[0].header["DEPNAME"] print("dep name =", dep_name) - dep_indices = image_hdul[1].data['index'] + dep_indices = image_hdul[1].data["index"] dep_list = image_hdul[1].data[dep_name] if rsp_dep_list is not None: dep_mask = np.isin(dep_list, rsp_dep_list) @@ -78,10 +80,10 @@ def create_dependence_image(em_data_cube_data: tp.Union[str, list], rsp_dep_file if len(new_dep_list) > 0: dep_indices = new_dep_indices dep_list = new_dep_list - #except Exception as e: - except: + # except Exception as e: + except: # noqa: E722 # TODO figure out what exception was expected keywords_and_table_exists = False - #print(repr(e)) + # print(repr(e)) if keywords_and_table_exists: calc_half_slits = divmod(num_slits, 2) @@ -91,19 +93,27 @@ def create_dependence_image(em_data_cube_data: tp.Union[str, list], rsp_dep_file dep_rsp_data = pd.read_csv(dep_rsp_file, delim_whitespace=True) dep_rsp = dep_rsp_data.iloc[:, 2].values if index == 0: - dep_data_cube = np.zeros((num_rows, num_slits,len(dep_rsp)), dtype=np.float32) + dep_data_cube = np.zeros( + (num_rows, num_slits, len(dep_rsp)), dtype=np.float32 + ) for slit_index in range(num_slits): slit_shift = (slit_index - num_half_slits) * slit_shift_width if 
slit_shift < 0: - slit_rsp = np.pad(dep_rsp, (0, -slit_shift), mode='constant')[-slit_shift:] + slit_rsp = np.pad(dep_rsp, (0, -slit_shift), mode="constant")[ + -slit_shift: + ] elif slit_shift > 0: - slit_rsp = np.pad(dep_rsp, (slit_shift, 0), mode='constant')[:-slit_shift] + slit_rsp = np.pad(dep_rsp, (slit_shift, 0), mode="constant")[ + :-slit_shift + ] else: slit_rsp = dep_rsp for row in range(num_rows): - dep_data_cube[row, slit_index, :] = slit_rsp * em_data_cube[row, slit_index, dep_indices[index]] + dep_data_cube[row, slit_index, :] = ( + slit_rsp * em_data_cube[row, slit_index, dep_indices[index]] + ) dep_data_cube_file = image_file_fmt.format(dep_list[index]) + ".fits" - #print(dep_data_cube_file) - fits_hdu = fits.PrimaryHDU(data = dep_data_cube, header = em_data_cube_header) + # print(dep_data_cube_file) + fits_hdu = fits.PrimaryHDU(data=dep_data_cube, header=em_data_cube_header) fits_hdu.writeto(dep_data_cube_file, overwrite=True) diff --git a/overlappogram/create_gnt_image.py b/overlappogram/create_gnt_image.py index 1babdb7..9fb2e3f 100644 --- a/overlappogram/create_gnt_image.py +++ b/overlappogram/create_gnt_image.py @@ -5,8 +5,12 @@ from astropy.io import fits -def create_gnt_image(em_data_cube_data: tp.Union[str, list], gnt_ions: np.ndarray, - gnt_file_fmt: str, image_file_fmt: str): +def create_gnt_image( + em_data_cube_data: tp.Union[str, list], + gnt_ions: np.ndarray, + gnt_file_fmt: str, + image_file_fmt: str, +): """ Creates a gnt image for each dependence. @@ -27,13 +31,13 @@ def create_gnt_image(em_data_cube_data: tp.Union[str, list], gnt_ions: np.ndarra None. """ - if type(em_data_cube_data) == str: + if isinstance(em_data_cube_data, str): image_hdul = fits.open(em_data_cube_data) em_data_cube = image_hdul[0].data em_data_cube = np.transpose(em_data_cube, axes=(1, 2, 0)) em_data_cube_header = image_hdul[0].header print(np.shape(em_data_cube)) - #num_rows, num_slits, num_deps = np.shape(em_data_cube) + # num_rows, num_slits, num_deps = np.shape(em_data_cube) height, num_slits, width = np.shape(em_data_cube) else: num_runs = len(em_data_cube_data) @@ -44,30 +48,28 @@ def create_gnt_image(em_data_cube_data: tp.Union[str, list], gnt_ions: np.ndarra em_data_cube = image_hdul[0].data em_data_cube_header = image_hdul[0].header em_data_cube = np.transpose(em_data_cube, axes=(1, 2, 0)) - #print(np.shape(image_hdul[0].data)) - #height, num_slits, width = np.shape(image_hdul[0].data) + # print(np.shape(image_hdul[0].data)) + # height, num_slits, width = np.shape(image_hdul[0].data) height, num_slits, width = np.shape(em_data_cube) if first_run: - ref_height = height - ref_num_slits = num_slits - ref_width = width - run_em_data_cube = np.zeros((num_runs, height, num_slits, width), dtype=np.float32) + run_em_data_cube = np.zeros( + (num_runs, height, num_slits, width), dtype=np.float32 + ) first_run = False else: - assert height == ref_height and num_slits == ref_num_slits and width == ref_width + pass run_em_data_cube[index, :, :, :] = em_data_cube em_data_cube = np.mean(run_em_data_cube, axis=0) binary_table_exists = True try: - dep_name = image_hdul[0].header['DEPNAME'] - #print("dep name =", dep_name) - dep_indices = image_hdul[1].data['index'] + dep_name = image_hdul[0].header["DEPNAME"] + # print("dep name =", dep_name) dep_list = image_hdul[1].data[dep_name] - #except Exception as e: - except: + # except Exception as e: + except: # noqa: E722 # TODO figure out what exception was expected binary_table_exists = False - #print(repr(e)) + # print(repr(e)) if 
binary_table_exists:
            for index in range(len(gnt_ions)):
@@ -75,7 +77,6 @@ def create_gnt_image(em_data_cube_data: tp.Union[str, list], gnt_ions: np.ndarra
             gnt_data = pd.read_csv(gnt_file, delim_whitespace=True, header=None)
             gnt_logts = gnt_data.iloc[:, 0].values.astype(str)
             gnt_values = gnt_data.iloc[:, 1].values
-            gnt_values_list = list(gnt_values)
             # Check list of logt used in inversion versus logt in gnt file.
             gnt_dep_values = np.zeros(len(dep_list), dtype=np.float32)
             # Check for matching values.
@@ -84,11 +85,11 @@
             else:
                 gnt_logts_list = list(gnt_logts)
                 for dep_index, dep in enumerate(dep_list):
-                    gnt_index = gnt_logts_list.index(f'{dep:.2}')
+                    gnt_index = gnt_logts_list.index(f"{dep:.2}")
                     gnt_dep_values[dep_index] = gnt_values[gnt_index]
-            gnt_image = (em_data_cube[:,:,0:width] * gnt_dep_values).sum(axis=2)
+            gnt_image = (em_data_cube[:, :, 0:width] * gnt_dep_values).sum(axis=2)
             gnt_image_file = image_file_fmt.format(gnt_ions[index]) + ".fits"
-            fits_hdu = fits.PrimaryHDU(data = gnt_image, header = em_data_cube_header)
+            fits_hdu = fits.PrimaryHDU(data=gnt_image, header=em_data_cube_header)
             fits_hdu.writeto(gnt_image_file, overwrite=True)
diff --git a/overlappogram/create_poisson_noisy_image.py b/overlappogram/create_poisson_noisy_image.py
index b7df8c4..9808098 100644
--- a/overlappogram/create_poisson_noisy_image.py
+++ b/overlappogram/create_poisson_noisy_image.py
@@ -2,8 +2,9 @@ from astropy.io import fits
 
 
-def create_poisson_noisy_image(image: np.ndarray, output_image_file: str,
-                               exposure_time: float):
+def create_poisson_noisy_image(
+    image: np.ndarray, output_image_file: str, exposure_time: float
+):
     """
 
 
@@ -25,5 +26,5 @@ def create_poisson_noisy_image(image: np.ndarray, output_image_file: str,
     rng = np.random.default_rng()
     noisy_image = rng.poisson(image * exposure_time) / exposure_time
     hdu = fits.PrimaryHDU(noisy_image)
-    hdu.header.append(('EXPTIME', noisy_image, 'Exposure Time (seconds)'), end=True)
+    hdu.header.append(("EXPTIME", exposure_time, "Exposure Time (seconds)"), end=True)
     hdu.writeto(output_image_file, overwrite=True)
diff --git a/overlappogram/create_unique_directory.py b/overlappogram/create_unique_directory.py
index 06ec740..5606879 100644
--- a/overlappogram/create_unique_directory.py
+++ b/overlappogram/create_unique_directory.py
@@ -2,9 +2,9 @@ import os
 
 
-def create_unique_directory(top_level_directory: str = './',
-                            unique_prefix: str = '',
-                            unique_postfix: str = '') -> str:
+def create_unique_directory(
+    top_level_directory: str = "./", unique_prefix: str = "", unique_postfix: str = ""
+) -> str:
     """
 
 
@@ -24,19 +24,19 @@ def create_unique_directory(top_level_directory: str = './',
 
     """
     unique_dir = unique_prefix
-    if len(unique_prefix) > 0 and unique_prefix[-1] != '_':
-        unique_dir += '_'
+    if len(unique_prefix) > 0 and unique_prefix[-1] != "_":
+        unique_dir += "_"
     unique_dir += str(datetime.datetime.utcnow().strftime("%Y%m%d_%H-%M-%S"))
-    if len(unique_postfix) > 0 and unique_postfix[0] != '_':
-        unique_dir += '_'
+    if len(unique_postfix) > 0 and unique_postfix[0] != "_":
+        unique_dir += "_"
     unique_dir += unique_postfix
     output_dir_path = top_level_directory
-    if len(top_level_directory) > 0 and top_level_directory[-1] != '/':
-        output_dir_path += '/'
-    output_dir_path = top_level_directory + unique_dir + '/'
+    if len(top_level_directory) > 0 and top_level_directory[-1] != "/":
+        output_dir_path += "/"
+    output_dir_path = top_level_directory + unique_dir + "/"
     try:
        # Create 
output directory. os.makedirs(output_dir_path, exist_ok=True) - except: - output_dir_path = '' + except: # noqa: E722 # TODO figure out what exception was expected + output_dir_path = "" return output_dir_path diff --git a/overlappogram/elasticnet_model.py b/overlappogram/elasticnet_model.py index 2f8c969..8195422 100644 --- a/overlappogram/elasticnet_model.py +++ b/overlappogram/elasticnet_model.py @@ -9,15 +9,17 @@ class ElasticNetModel(AbstractModel): model: enet = enet() - def invert(self, response_function, data, sample_weights = None): - #print(sample_weights) - self.model.fit(response_function, data, sample_weight=sample_weights, check_input=True) - #self.model.fit(response_function, data) - #score=(self.model.score(response_function, data, sample_weight=sample_weights)) + def invert(self, response_function, data, sample_weights=None): + # print(sample_weights) + self.model.fit( + response_function, data, sample_weight=sample_weights, check_input=True + ) + # self.model.fit(response_function, data) + # score=(self.model.score(response_function, data, sample_weight=sample_weights)) data_out = self.model.predict(response_function) em = self.model.coef_ return em, data_out - #return em, data_out, score + # return em, data_out, score def add_fits_keywords(self, header): # params = self.model.get_params() diff --git a/overlappogram/element.py b/overlappogram/element.py index 66b7bfa..339508f 100644 --- a/overlappogram/element.py +++ b/overlappogram/element.py @@ -4,37 +4,33 @@ class Element(ABC): @classmethod def __init_subclass__(cls): - required_class_variables = [ - "temperature", - "mass", - "rest_wavelength" - ] + required_class_variables = ["temperature", "mass", "rest_wavelength"] for var in required_class_variables: if not hasattr(cls, var): raise NotImplementedError( - f'Class {cls} lacks required `{var}` class attribute' + f"Class {cls} lacks required `{var}` class attribute" ) class OVElement(Element): # Oxygen V Element - temperature = 10**(5.35) # K – Temp of O V - mass = 2.66e-26 # kg – Mass of O V - rest_wavelength = 629.7 # Angstroms - Rest Wavelength of O V + temperature = 10 ** (5.35) # K – Temp of O V + mass = 2.66e-26 # kg – Mass of O V + rest_wavelength = 629.7 # Angstroms - Rest Wavelength of O V class HeIElement(Element): # Helium I Element - temperature = 10**(4.0) # K – Temp of He I - mass = 6.6464731e-27 # kg – Mass of He I - rest_wavelength = 584.3 # Angstroms - Rest Wavelength of He I + temperature = 10 ** (4.0) # K – Temp of He I + mass = 6.6464731e-27 # kg – Mass of He I + rest_wavelength = 584.3 # Angstroms - Rest Wavelength of He I class MgX609Element(Element): # Magnesium X Element - temperature = 10**(6.05) # K – Temp of Mg X - mass = 4.0359398e-26 # kg – Mass of Mg X - rest_wavelength = 609.8 # Angstroms - Rest Wavelength of Mg X + temperature = 10 ** (6.05) # K – Temp of Mg X + mass = 4.0359398e-26 # kg – Mass of Mg X + rest_wavelength = 609.8 # Angstroms - Rest Wavelength of Mg X class MgX624Element(Element): # Magnesium X Element - temperature = 10**(6.05) # K – Temp of Mg X - mass = 4.0359398e-26 # kg – Mass of Mg X - rest_wavelength = 624.9 # Angstroms - Rest Wavelength of Mg X + temperature = 10 ** (6.05) # K – Temp of Mg X + mass = 4.0359398e-26 # kg – Mass of Mg X + rest_wavelength = 624.9 # Angstroms - Rest Wavelength of Mg X diff --git a/overlappogram/em_data_cube.py b/overlappogram/em_data_cube.py index f8547e3..f609b63 100644 --- a/overlappogram/em_data_cube.py +++ b/overlappogram/em_data_cube.py @@ -1,7 +1,6 @@ from __future__ import 
annotations import math -import typing as tp from dataclasses import dataclass from time import time @@ -15,8 +14,10 @@ from sklearn.linear_model import ElasticNet as enet -def create_background(num_x: int, num_y: int, background_value: float, add_noise: bool = False) -> np.ndarray: - ''' +def create_background( + num_x: int, num_y: int, background_value: float, add_noise: bool = False +) -> np.ndarray: + """ Create a background image for simulated data. Parameters @@ -35,24 +36,25 @@ def create_background(num_x: int, num_y: int, background_value: float, add_noise background : ndarray Created background. - ''' + """ background = np.zeros((num_y, num_x)) background[:, :] = background_value - if add_noise == True: + if add_noise: background = apply_poisson_noise(background, random_state=1) return background + @dataclass(order=True) class EMDataCube: cube: NDCube - background: tp.Optional[tp.Union[np.ndarray]] = None + background: np.ndarray | None = None def __post_init__(self): # Verify wcs and data shape match assert self.cube.wcs.pixel_shape == np.shape(self.cube.data)[::-1] assert self.cube.wcs.naxis == 3 self.inversion_image_list = [] - zero_velocities = np.where(self.cube.axis_world_coords(2) == 0.0 * u.km/u.s) + zero_velocities = np.where(self.cube.axis_world_coords(2) == 0.0 * u.km / u.s) # Verify only one zero velocity assert len(zero_velocities) == 1 self.zero_velocity = zero_velocities[0][0] @@ -61,11 +63,11 @@ def __post_init__(self): y, x, vel = np.shape(self.cube.data) assert np.ndim(self.background) == 2 # Verify background shape matches spatial shape - assert np.shape(self.background) == (y ,x) + assert np.shape(self.background) == (y, x) self.cube.data[:, :, self.zero_velocity] = self.background def add_explosive_event(self, filename: str, locations: list): - ''' + """ Add explossive event to emission cube. Parameters @@ -79,7 +81,7 @@ def add_explosive_event(self, filename: str, locations: list): ------- None. - ''' + """ # Read explosive event file ee_data = np.loadtxt(filename) # Velocity units are km/s @@ -89,23 +91,25 @@ def add_explosive_event(self, filename: str, locations: list): # Create explosive event velocity vector vel_vector = np.zeros(num_vel) velocities = self.cube.axis_world_coords(2) - #print(velocities) + # print(velocities) for vel_num in range(len(vel)): - em_vel_index = np.where(velocities == vel[vel_num] * u.km/u.s) + em_vel_index = np.where(velocities == vel[vel_num] * u.km / u.s) if len(em_vel_index) == 1: - #print("vel index =", em_vel_index, vel[vel_num] * u.km/u.s) + # print("vel index =", em_vel_index, vel[vel_num] * u.km/u.s) vel_vector[em_vel_index[0][0]] = em[vel_num] for x, y in locations: - pixel_coords = self.cube.world_to_pixel(y, x, 0 * u.km/u.s) + pixel_coords = self.cube.world_to_pixel(y, x, 0 * u.km / u.s) pixel_x = int(np.rint(pixel_coords[1].value)) pixel_y = int(np.rint(pixel_coords[0].value)) # Verify coordinates within cube - if (pixel_x >= 0 and pixel_x < num_x) and (pixel_y >= 0 and pixel_y < num_y): + if (pixel_x >= 0 and pixel_x < num_x) and ( + pixel_y >= 0 and pixel_y < num_y + ): self.cube.data[pixel_y, pixel_x, :] = 0.0 self.cube.data[pixel_y, pixel_x, :] = vel_vector def create_simulated_data(self, image_list: list): - ''' + """ Creates simulated camera/image data. Parameters @@ -117,14 +121,18 @@ def create_simulated_data(self, image_list: list): ------- None. 
- ''' + """ num_y, num_x, num_vel = self.cube.wcs.array_shape y_pixels, x_pixels, vel_pixels = np.where(self.cube.data[:, :, :] != 0.0) for i in range(len(x_pixels)): x, y, z = x_pixels[i], y_pixels[i], vel_pixels[i] - world_y, world_x, world_vel = self.cube.pixel_to_world(y * u.pix, x * u.pix, z * u.pix) + world_y, world_x, world_vel = self.cube.pixel_to_world( + y * u.pix, x * u.pix, z * u.pix + ) for image in image_list: - image.add_simulated_data(world_x, world_y, world_vel, self.cube.data[y, x, z]) + image.add_simulated_data( + world_x, world_y, world_vel, self.cube.data[y, x, z] + ) def prep_inversion(self, image_list): # Initialize cube data @@ -134,12 +142,12 @@ def prep_inversion(self, image_list): num_y, num_x, num_vel = self.cube.wcs.array_shape print("prep inversion", num_x, num_y, num_vel) self.num_em_values = num_x * num_y * num_vel - self.x1 = np.zeros(self.num_em_values) + self.x1 = np.zeros(self.num_em_values) self.y1 = np.zeros(self.num_em_values) - self.vel1 = np.zeros(self.num_em_values) + self.vel1 = np.zeros(self.num_em_values) world_y, world_x, world_vel = self.cube.axis_world_coords(edges=True) - #print("world x", world_x) - #print("world y", world_y) + # print("world x", world_x) + # print("world y", world_y) min_world_x = world_x[0][0] max_world_x = world_x[num_y][num_x] min_world_y = world_y[0][0] @@ -149,7 +157,9 @@ def prep_inversion(self, image_list): # Create images to invert for image in image_list: - self.inversion_image_list.append(image.crop_roi([min_world_y, min_world_x], [max_world_y, max_world_x])) + self.inversion_image_list.append( + image.crop_roi([min_world_y, min_world_x], [max_world_y, max_world_x]) + ) # diff_image2 = self.inversion_image_list[1].cube.data[:, :] - self.inversion_image_list[2].cube.data[:, :] # plt.figure() @@ -161,24 +171,30 @@ def prep_inversion(self, image_list): for inversion_image in self.inversion_image_list: image_data = inversion_image.data() y_pixels, x_pixels = np.shape(image_data) - #print("x pixels =", x_pixels, "y pixels =", y_pixels) - self.inversion_data_len += (y_pixels * x_pixels) - self.inversion_data = np.append(self.inversion_data, np.reshape(image_data, (y_pixels * x_pixels))) + # print("x pixels =", x_pixels, "y pixels =", y_pixels) + self.inversion_data_len += y_pixels * x_pixels + self.inversion_data = np.append( + self.inversion_data, np.reshape(image_data, (y_pixels * x_pixels)) + ) # Create response function - self.resp1 = np.zeros((self.num_em_values, self.inversion_data_len)) + self.resp1 = np.zeros((self.num_em_values, self.inversion_data_len)) c = 0 for j in range(0, num_y): for i in range(0, num_x): for k in range(0, num_vel): - y_out, x_out, vel_out = self.cube.pixel_to_world(j * u.pix, i * u.pix, k * u.pix) + y_out, x_out, vel_out = self.cube.pixel_to_world( + j * u.pix, i * u.pix, k * u.pix + ) kernel = np.array([]) - #print("i =", i, "j =", j, "k =", k) + # print("i =", i, "j =", j, "k =", k) for inversion_image in self.inversion_image_list: - image_kernel = inversion_image.create_kernel(x_out, y_out, vel_out) + image_kernel = inversion_image.create_kernel( + x_out, y_out, vel_out + ) kernel = np.append(kernel, image_kernel) - self.resp1[c,:] = kernel + self.resp1[c, :] = kernel self.x1[c] = x_out.to(u.arcsec).value # arcsec self.y1[c] = y_out.to(u.arcsec).value # arcsec @@ -190,12 +206,20 @@ def prep_inversion(self, image_list): def invert_data(self, alpha=0.0025, rho=0.975, slope=0.0, bias=1.0): # Adjust the resp1 to reflect the weight - weight = abs(self.vel1)*slope + bias + weight = 
abs(self.vel1) * slope + bias for i in range(0, self.num_em_values): self.resp1[:, i] = self.resp0[:, i] * weight[i] -# self.resp1[:, i] = self.resp0[:, i] / weight[i] - - enet_model = enet(alpha=alpha, l1_ratio = rho, precompute=True, normalize=True, positive=True, fit_intercept=True, selection='random') + # self.resp1[:, i] = self.resp0[:, i] / weight[i] + + enet_model = enet( + alpha=alpha, + l1_ratio=rho, + precompute=True, + normalize=True, + positive=True, + fit_intercept=True, + selection="random", + ) enet_model.fit(self.resp1, self.inversion_data) data_out = enet_model.predict(self.resp1) em = enet_model.coef_ @@ -213,12 +237,19 @@ def invert_data(self, alpha=0.0025, rho=0.975, slope=0.0, bias=1.0): self.cube.data[j, i, k] = em[c] c = c + 1 - #wcs_anim = ArrayAnimatorWCS(self.cube.data,self.cube.wcs,slices = (0, 'x','y')) - #plt.show() + # wcs_anim = ArrayAnimatorWCS(self.cube.data,self.cube.wcs,slices = (0, 'x','y')) + # plt.show() plt.figure() - plt.scatter(self.vel1, em, c='r', marker='.') - plot_title='alpha='+str('%f' % alpha)+' l1ratio='+str('%f' % rho)+' slope='+str('%f' % slope) + plt.scatter(self.vel1, em, c="r", marker=".") + plot_title = ( + "alpha=" + + str("%f" % alpha) + + " l1ratio=" + + str("%f" % rho) + + " slope=" + + str("%f" % slope) + ) plt.grid(b=True) plt.title(plot_title) @@ -227,14 +258,26 @@ def invert_data(self, alpha=0.0025, rho=0.975, slope=0.0, bias=1.0): for inversion_image in self.inversion_image_list: image_data = inversion_image.data() y_pixels, x_pixels = np.shape(image_data) - inverted_data = data_out[image_offset : image_offset + (y_pixels * x_pixels)] - print("pearson correlation =", scipy.stats.pearsonr(np.reshape(image_data, (y_pixels * x_pixels)), inverted_data)) - print("linregress =", scipy.stats.linregress(np.reshape(image_data, (y_pixels * x_pixels)), inverted_data)) + inverted_data = data_out[ + image_offset : image_offset + (y_pixels * x_pixels) + ] + print( + "pearson correlation =", + scipy.stats.pearsonr( + np.reshape(image_data, (y_pixels * x_pixels)), inverted_data + ), + ) + print( + "linregress =", + scipy.stats.linregress( + np.reshape(image_data, (y_pixels * x_pixels)), inverted_data + ), + ) inverted_data = np.reshape(inverted_data, (y_pixels, x_pixels)) plt.figure() plt.imshow(inverted_data) plt.gca().invert_yaxis() - image_offset += (y_pixels * x_pixels) + image_offset += y_pixels * x_pixels def prep_inversion1(self, image_list): # Initialize cube data @@ -243,8 +286,8 @@ def prep_inversion1(self, image_list): # Calculate crop ROI in world coordinates num_y, num_x, num_vel = self.cube.wcs.array_shape world_y, world_x, world_vel = self.cube.axis_world_coords(edges=True) - #print("world x", world_x) - #print("world y", world_y) + # print("world x", world_x) + # print("world y", world_y) min_world_x = world_x[0][0] max_world_x = world_x[num_y][num_x] min_world_y = world_y[0][0] @@ -254,7 +297,9 @@ def prep_inversion1(self, image_list): # Create images to invert for image in image_list: - self.inversion_image_list.append(image.crop_roi([min_world_y, min_world_x], [max_world_y, max_world_x])) + self.inversion_image_list.append( + image.crop_roi([min_world_y, min_world_x], [max_world_y, max_world_x]) + ) # Calculate inversion data length self.inversion_data_len = 0 @@ -262,11 +307,20 @@ def prep_inversion1(self, image_list): for inversion_image in self.inversion_image_list: image_data = inversion_image.data() y_pixels, x_pixels = np.shape(image_data) - #print("x pixels =", x_pixels, "y pixels =", y_pixels) - 
self.inversion_data_len += (y_pixels * x_pixels)
-        self.inversion_data = np.append(self.inversion_data, np.reshape(image_data, (y_pixels * x_pixels)))
-
-    def invert_data1(self, alpha=0.0025, rho=0.975, slope=0.0, bias=1.0, inversion_image_data_list: tp.Optional[tp.Union[list[np.ndarray]]] = None):
+            # print("x pixels =", x_pixels, "y pixels =", y_pixels)
+            self.inversion_data_len += y_pixels * x_pixels
+            self.inversion_data = np.append(
+                self.inversion_data, np.reshape(image_data, (y_pixels * x_pixels))
+            )
+
+    def invert_data1(
+        self,
+        alpha=0.0025,
+        rho=0.975,
+        slope=0.0,
+        bias=1.0,
+        inversion_image_data_list: list[np.ndarray] | None = None,
+    ):
         num_y, num_x, num_vel = self.cube.wcs.array_shape

         ### First pass - zero velocity only
@@ -279,7 +333,7 @@ def invert_data1(self, alpha=0.0025, rho=0.975, slope=0.0, bias=1.0, inversion_i
         start_time = time()

         # Create response function
-        resp1 = np.zeros((num_em_values, self.inversion_data_len))
+        resp1 = np.zeros((num_em_values, self.inversion_data_len))

         csc_row_vec = np.array([])
         csc_col_vec = np.array([])
@@ -288,19 +342,20 @@ def invert_data1(self, alpha=0.0025, rho=0.975, slope=0.0, bias=1.0, inversion_i
         c = 0
         for j in range(0, num_y):
             for i in range(0, num_x):
-                #start_time1 = time()
-                y_out, x_out, vel_out = self.cube.pixel_to_world(j * u.pix, i * u.pix, self.zero_velocity * u.pix)
-#                y_out, x_out, vel_out = self.cube.pixel_to_world(j * u.pix, i * u.pix, (self.zero_velocity + 1) * u.pix)
-                #end_time1 = time()
-                #print("create kernel time =", end_time1 - start_time1)
+                # start_time1 = time()
+                y_out, x_out, vel_out = self.cube.pixel_to_world(
+                    j * u.pix, i * u.pix, self.zero_velocity * u.pix
+                )
+                # end_time1 = time()
+                # print("create kernel time =", end_time1 - start_time1)
                 kernel = np.array([])
                 for inversion_image in self.inversion_image_list:
-                    #start_time1 = time()
+                    # start_time1 = time()
                     image_kernel = inversion_image.create_kernel(x_out, y_out, vel_out)
-                    #end_time1 = time()
-                    #print("create kernel time =", end_time1 - start_time1)
+                    # end_time1 = time()
+                    # print("create kernel time =", end_time1 - start_time1)
                     kernel = np.append(kernel, image_kernel)
-                resp1[c,:] = kernel
+                resp1[c, :] = kernel
                 row_vec = np.where(kernel != 0.0)
                 col_vec = np.full(np.size(row_vec), c)
                 rf_vec = kernel[row_vec]
@@ -311,20 +366,26 @@ def invert_data1(self, alpha=0.0025, rho=0.975, slope=0.0, bias=1.0, inversion_i
                 x1_i[c] = i
                 y1_j[c] = j
                 vel1_k[c] = self.zero_velocity
-#                vel1_k[c] = (self.zero_velocity + 1)
+                # vel1_k[c] = (self.zero_velocity + 1)
                 vel1[c] = vel_out.to(u.km / u.s).value  # km / s
                 c = c + 1

         resp1 = resp1.transpose()
-        resp0 = np.copy(resp1)
-
-        #resp1 = csc_matrix((csc_rf_vec, (csc_row_vec, csc_col_vec)), shape=(self.inversion_data_len, c)).toarray()
+        # resp1 = csc_matrix((csc_rf_vec, (csc_row_vec, csc_col_vec)), shape=(self.inversion_data_len, c)).toarray()
         end_time = time()
         print("first pass, response function create time =", end_time - start_time)
         start_time = end_time

-        enet_model = enet(alpha=alpha, l1_ratio = rho, precompute=True, normalize=True, positive=True, fit_intercept=True, selection='random')
+        enet_model = enet(
+            alpha=alpha,
+            l1_ratio=rho,
+            precompute=True,
+            normalize=True,
+            positive=True,
+            fit_intercept=True,
+            selection="random",
+        )
         enet_model.fit(resp1, self.inversion_data)
         data_out = enet_model.predict(resp1)
         em = enet_model.coef_
@@ -339,12 +400,19 @@ def invert_data1(self, alpha=0.0025, rho=0.975, slope=0.0, bias=1.0, inversion_i
         for j in range(0, num_y):
             for i in range(0, num_x):
                 self.cube.data[j, i, 
self.zero_velocity] = em[c]
-#                self.cube.data[j, i, (self.zero_velocity + 1)] = em[c]
+                # self.cube.data[j, i, (self.zero_velocity + 1)] = em[c]
                 c = c + 1

         plt.figure()
-        plt.scatter(vel1, em, c='r', marker='.')
-        plot_title='alpha='+str('%f' % alpha)+' l1ratio='+str('%f' % rho)+' slope='+str('%f' % slope)
+        plt.scatter(vel1, em, c="r", marker=".")
+        plot_title = (
+            "alpha="
+            + str("%f" % alpha)
+            + " l1ratio="
+            + str("%f" % rho)
+            + " slope="
+            + str("%f" % slope)
+        )
         plt.grid(b=True)
         plt.title(plot_title)

@@ -353,23 +421,35 @@ def invert_data1(self, alpha=0.0025, rho=0.975, slope=0.0, bias=1.0, inversion_i
         for inversion_image in self.inversion_image_list:
             image_data = inversion_image.data()
             y_pixels, x_pixels = np.shape(image_data)
-            inverted_data = data_out[image_offset : image_offset + (y_pixels * x_pixels)]
-            print("pearson correlation =", scipy.stats.pearsonr(np.reshape(image_data, (y_pixels * x_pixels)), inverted_data))
-            print("linregress =", scipy.stats.linregress(np.reshape(image_data, (y_pixels * x_pixels)), inverted_data))
+            inverted_data = data_out[
+                image_offset : image_offset + (y_pixels * x_pixels)
+            ]
+            print(
+                "pearson correlation =",
+                scipy.stats.pearsonr(
+                    np.reshape(image_data, (y_pixels * x_pixels)), inverted_data
+                ),
+            )
+            print(
+                "linregress =",
+                scipy.stats.linregress(
+                    np.reshape(image_data, (y_pixels * x_pixels)), inverted_data
+                ),
+            )
             inverted_data = np.reshape(inverted_data, (y_pixels, x_pixels))
             plt.figure()
-            plt.imshow(inverted_data, origin='lower')
-            image_offset += (y_pixels * x_pixels)
+            plt.imshow(inverted_data, origin="lower")
+            image_offset += y_pixels * x_pixels

             diff_image = inverted_data - image_data
#             plt.figure()
#             plt.imshow(diff_image, origin='lower')
             plt_fig = plt.figure()
-            plt_im = plt.imshow(diff_image, origin='lower')
+            plt_im = plt.imshow(diff_image, origin="lower")
             plt_fig.colorbar(plt_im)
             plt.show()
             diff_count = np.count_nonzero(diff_image < 0.0)
             print("diff count = ", diff_count)
-            #print(diff_image)
+            # print(diff_image)

         # Calculate intensity
         intensity_values = np.zeros((num_y, num_x), dtype=np.float64)
@@ -379,20 +459,23 @@ def invert_data1(self, alpha=0.0025, rho=0.975, slope=0.0, bias=1.0, inversion_i
                 intensity_values[y, x] = intensity

         y, x = np.shape(intensity_values)
-        X, Y = np.meshgrid(np.linspace(0, x, len(intensity_values[0,:])), np.linspace(0, y, len(intensity_values[:,0])))
+        X, Y = np.meshgrid(
+            np.linspace(0, x, len(intensity_values[0, :])),
+            np.linspace(0, y, len(intensity_values[:, 0])),
+        )
         fig = plt.figure()
-        ax=fig.add_subplot(111, projection='3d')
-        cp = ax.scatter3D(X, Y, intensity_values)
-        #fig.colorbar(cp) # Add a colorbar to a plot
-        ax.set_title('Intensity')
+        ax = fig.add_subplot(111, projection="3d")
+        ax.scatter3D(X, Y, intensity_values)
+        # fig.colorbar(cp) # Add a colorbar to a plot
+        ax.set_title("Intensity")
         plt.show()

         ### Second pass
         zero_count = np.count_nonzero(intensity_values == 0.0)
-#        zero_count = np.count_nonzero(intensity_values <= 50.0)
+        # zero_count = np.count_nonzero(intensity_values <= 50.0)
         nonzero_count = intensity_values.size - zero_count
         print("second pass ", zero_count, nonzero_count)
-#        num_em_values = nonzero_count + (zero_count * num_vel)
+        # num_em_values = nonzero_count + (zero_count * num_vel)
         num_em_values = (nonzero_count * num_vel) + zero_count
         x1_i = np.zeros(num_em_values)
         y1_j = np.zeros(num_em_values)
@@ -402,7 +485,7 @@ def invert_data1(self, alpha=0.0025, rho=0.975, slope=0.0, bias=1.0, inversion_i
         start_time = time()

         # Create response function
-        resp1 = 
np.zeros((num_em_values, self.inversion_data_len)) + resp1 = np.zeros((num_em_values, self.inversion_data_len)) csc_row_vec = np.array([]) csc_col_vec = np.array([]) @@ -411,16 +494,20 @@ def invert_data1(self, alpha=0.0025, rho=0.975, slope=0.0, bias=1.0, inversion_i c = 0 for j in range(0, num_y): for i in range(0, num_x): -# if intensity_values[j, i] == 0.0: + # if intensity_values[j, i] == 0.0: if intensity_values[j, i] != 0.0: -# if intensity_values[j, i] > 50.0: + # if intensity_values[j, i] > 50.0: for k in range(0, num_vel): - y_out, x_out, vel_out = self.cube.pixel_to_world(j * u.pix, i * u.pix, k * u.pix) + y_out, x_out, vel_out = self.cube.pixel_to_world( + j * u.pix, i * u.pix, k * u.pix + ) kernel = np.array([]) for inversion_image in self.inversion_image_list: - image_kernel = inversion_image.create_kernel(x_out, y_out, vel_out) + image_kernel = inversion_image.create_kernel( + x_out, y_out, vel_out + ) kernel = np.append(kernel, image_kernel) - resp1[c,:] = kernel + resp1[c, :] = kernel row_vec = np.where(kernel != 0.0) col_vec = np.full(np.size(row_vec), c) rf_vec = kernel[row_vec] @@ -434,12 +521,16 @@ def invert_data1(self, alpha=0.0025, rho=0.975, slope=0.0, bias=1.0, inversion_i vel1[c] = vel_out.to(u.km / u.s).value # km / s c = c + 1 else: - y_out, x_out, vel_out = self.cube.pixel_to_world(j * u.pix, i * u.pix, self.zero_velocity * u.pix) + y_out, x_out, vel_out = self.cube.pixel_to_world( + j * u.pix, i * u.pix, self.zero_velocity * u.pix + ) kernel = np.array([]) for inversion_image in self.inversion_image_list: - image_kernel = inversion_image.create_kernel(x_out, y_out, vel_out) + image_kernel = inversion_image.create_kernel( + x_out, y_out, vel_out + ) kernel = np.append(kernel, image_kernel) - resp1[c,:] = kernel + resp1[c, :] = kernel row_vec = np.where(kernel != 0.0) col_vec = np.full(np.size(row_vec), c) rf_vec = kernel[row_vec] @@ -458,20 +549,27 @@ def invert_data1(self, alpha=0.0025, rho=0.975, slope=0.0, bias=1.0, inversion_i start_time = end_time resp1 = resp1.transpose() - resp0 = np.copy(resp1) - #resp1 = csc_matrix((csc_rf_vec, (csc_row_vec, csc_col_vec)), shape=(self.inversion_data_len, c)).toarray() - -# # Adjust the resp1 to reflect the weight -# print("vel out =", vel1) -# print("slope =", slope) -# weight = abs(vel1)*slope + bias -# print("weight =", weight[np.where(weight != 1)]) -# for i in range(0, num_em_values): -# resp1[:, i] = resp0[:, i] * weight[i] -# # resp1[:, i] = resp0[:, i] / weight[i] - - enet_model = enet(alpha=alpha, l1_ratio = rho, precompute=True, normalize=True, positive=True, fit_intercept=True, selection='random') + # resp1 = csc_matrix((csc_rf_vec, (csc_row_vec, csc_col_vec)), shape=(self.inversion_data_len, c)).toarray() + + # # Adjust the resp1 to reflect the weight + # print("vel out =", vel1) + # print("slope =", slope) + # weight = abs(vel1)*slope + bias + # print("weight =", weight[np.where(weight != 1)]) + # for i in range(0, num_em_values): + # resp1[:, i] = resp0[:, i] * weight[i] + # # resp1[:, i] = resp0[:, i] / weight[i] + + enet_model = enet( + alpha=alpha, + l1_ratio=rho, + precompute=True, + normalize=True, + positive=True, + fit_intercept=True, + selection="random", + ) enet_model.fit(resp1, self.inversion_data) data_out = enet_model.predict(resp1) em = enet_model.coef_ @@ -489,8 +587,15 @@ def invert_data1(self, alpha=0.0025, rho=0.975, slope=0.0, bias=1.0, inversion_i self.cube.data[int(y1_j[c]), int(x1_i[c]), int(vel1_k[c])] = em[c] plt.figure() - plt.scatter(vel1, em, c='r', marker='.') - 
plot_title='alpha='+str('%f' % alpha)+' l1ratio='+str('%f' % rho)+' slope='+str('%f' % slope)
+        plt.scatter(vel1, em, c="r", marker=".")
+        plot_title = (
+            "alpha="
+            + str("%f" % alpha)
+            + " l1ratio="
+            + str("%f" % rho)
+            + " slope="
+            + str("%f" % slope)
+        )
         plt.grid(b=True)
         plt.title(plot_title)

@@ -500,9 +605,21 @@ def invert_data1(self, alpha=0.0025, rho=0.975, slope=0.0, bias=1.0, inversion_i
         for inversion_image in self.inversion_image_list:
             image_data = inversion_image.data()
             y_pixels, x_pixels = np.shape(image_data)
-            inverted_data = data_out[image_offset : image_offset + (y_pixels * x_pixels)]
-            print("pearson correlation =", scipy.stats.pearsonr(np.reshape(image_data, (y_pixels * x_pixels)), inverted_data))
-            print("linregress =", scipy.stats.linregress(np.reshape(image_data, (y_pixels * x_pixels)), inverted_data))
+            inverted_data = data_out[
+                image_offset : image_offset + (y_pixels * x_pixels)
+            ]
+            print(
+                "pearson correlation =",
+                scipy.stats.pearsonr(
+                    np.reshape(image_data, (y_pixels * x_pixels)), inverted_data
+                ),
+            )
+            print(
+                "linregress =",
+                scipy.stats.linregress(
+                    np.reshape(image_data, (y_pixels * x_pixels)), inverted_data
+                ),
+            )
             inverted_data = np.reshape(inverted_data, (y_pixels, x_pixels))
             if inversion_image_data_list is not None:
                 crop_roi_coords = inversion_image.get_crop_roi_coords()
@@ -515,17 +632,17 @@ def invert_data1(self, alpha=0.0025, rho=0.975, slope=0.0, bias=1.0, inversion_i
#             plt.figure()
#             plt.imshow(inverted_data)
#             plt.gca().invert_yaxis()
-            image_offset += (y_pixels * x_pixels)
+            image_offset += y_pixels * x_pixels
             image_count += 1

-#         # diff_image = image_data - inverted_data
-#         # plt_fig = plt.figure()
-#         # plt_im = plt.imshow(diff_image, origin='lower')
-#         # plt_fig.colorbar(plt_im)
-#         # plt.show()
+        # # diff_image = image_data - inverted_data
+        # # plt_fig = plt.figure()
+        # # plt_im = plt.imshow(diff_image, origin='lower')
+        # # plt_fig.colorbar(plt_im)
+        # # plt.show()

     def crop_tile(self, x1: int, y1: int, x2: int, y2: int) -> EMDataCube:
-        '''
+        """
         Create a slice of the emission data cube as a tile for inversions.

         Parameters
@@ -544,16 +661,16 @@ def crop_tile(self, x1: int, y1: int, x2: int, y2: int) -> EMDataCube:
         EMDataCube
             A slice/tile reference of the emission data cube.

-        '''
+        """
         em_tile = self.cube[y1:y2, x1:x2, :]
         print(em_tile)
-        new_em = EMDataCube(em_tile)
-
-        return new_em
+        return EMDataCube(em_tile)

-    def calculate_moments(self) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
-        '''
+    def calculate_moments(
+        self,
+    ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
+        """
         Calculate moments 0 - 3.

         Returns
         -------
         intensity_values : np.ndarray
             Moment 0: emission summed over velocity for each spatial pixel.
         vbar_values : np.ndarray
             Moment 1: intensity-weighted mean velocity (km/s).
         line_width_values : np.ndarray
             Moment 2: square root of the intensity-weighted velocity variance (km/s).
@@ -567,7 +684,7 @@ def calculate_moments(self) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndar
         skew_values : np.ndarray
             Moment 3: sign-preserving cube root of the intensity-weighted third velocity moment (km/s).
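        Notes
        -----
        As a reference for the loop below, with E_k = cube.data[y, x, k] and
        velocities v_k in km/s, the implementation computes

            I          = sum_k E_k                                  (moment 0)
            vbar       = sum_k E_k * v_k / I                        (moment 1)
            line_width = sqrt(sum_k E_k * (v_k - vbar)**2 / I)      (moment 2)
            skew       = cbrt(sum_k E_k * (v_k - vbar)**3 / I)      (moment 3)

        where the cube root for the skew preserves the sign of its argument.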
- ''' + """ num_y, num_x, num_vel = self.cube.wcs.array_shape intensity_values = np.zeros((num_y, num_x), dtype=np.float64) vbar_values = np.zeros((num_y, num_x), dtype=np.float64) @@ -576,7 +693,7 @@ def calculate_moments(self) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndar # Velocities in km/s velocities = self.cube.axis_world_coords(2) / 1000.0 velocities = velocities[:].value - #print("velocities =", velocities, num_x, num_y) + # print("velocities =", velocities, num_x, num_y) for x in range(num_x): for y in range(num_y): # Calculate mathematical moments 0 - 3 @@ -585,19 +702,27 @@ def calculate_moments(self) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndar if intensity != 0.0: vbar = np.sum(self.cube.data[y, x, :] * velocities / intensity) # Calculate line width and skew - line_width = math.sqrt(np.sum(self.cube.data[y, x, :] * (velocities - vbar)**2. / intensity)) - skew = np.sum(self.cube.data[y, x, :] * (velocities - vbar)**3. / intensity) + line_width = math.sqrt( + np.sum( + self.cube.data[y, x, :] + * (velocities - vbar) ** 2.0 + / intensity + ) + ) + skew = np.sum( + self.cube.data[y, x, :] * (velocities - vbar) ** 3.0 / intensity + ) if skew < 0.0: - skew = (abs(skew)**(1./3.)) * -1. + skew = (abs(skew) ** (1.0 / 3.0)) * -1.0 else: - skew = skew**(1./3.) + skew = skew ** (1.0 / 3.0) intensity_values[y, x] = intensity vbar_values[y, x] = vbar line_width_values[y, x] = line_width skew_values[y, x] = skew return intensity_values, vbar_values, line_width_values, skew_values - def write(self, filename : str): + def write(self, filename: str): fits_header = self.cube.wcs.to_header() - fits_hdu = fits.PrimaryHDU(data = self.cube.data, header = fits_header) + fits_hdu = fits.PrimaryHDU(data=self.cube.data, header=fits_header) fits_hdu.writeto(filename, overwrite=True) diff --git a/overlappogram/homemade_model.py b/overlappogram/homemade_model.py index 7f4fc5e..af269f6 100644 --- a/overlappogram/homemade_model.py +++ b/overlappogram/homemade_model.py @@ -3,36 +3,40 @@ class Model: - def __init__(self,alpha) -> None: - self.alpha=alpha + def __init__(self, alpha) -> None: + self.alpha = alpha + def get_params(self): - return {'alpha':self.alpha} + return {"alpha": self.alpha} + + class HomemadeModel: - def __init__(self,model): - self.model=model + def __init__(self, model): + self.model = model + def invert(self, response_function, data): # weights=np.full((2048),1) - l=response_function.shape[1] - R=np.eye(l,l,1)-np.eye(l,l,-1) def f(x): - s=0 - #x=congrid(x,response_function[:,0,:].shape) + s = 0 + # x=congrid(x,response_function[:,0,:].shape) for t in range(response_function.shape[0]): - s+=self.model.alpha*np.linalg.norm(x[t])#np.linalg.norm(response_function[t]@x[t] -data)+self.alpha*np.linalg.norm(R@x) + s += self.model.alpha * np.linalg.norm( + x[t] + ) # np.linalg.norm(response_function[t]@x[t] -data)+self.alpha*np.linalg.norm(R@x) return s # data_out = self.model.predict(np.full_like(response_function,0.)) - em = minimize(f,np.ones((15,256),dtype=float)) + em = minimize(f, np.ones((15, 256), dtype=float)) print(em) - data_out = sum([response_function[t]@em[t] for t in range(15)]) - #print(self.model.intercept_) + data_out = sum([response_function[t] @ em[t] for t in range(15)]) + # print(self.model.intercept_) return em, data_out def add_fits_keywords(self, header): params = self.model.get_params() - #print(params) - header['INVMDL'] = ('Elastic Net', 'Inversion Model') - header['ALPHA'] = (params['alpha'], 'Inversion Model Alpha') - #header['RHO'] = 
(params['l1_ratio'], 'Inversion Model Rho') + # print(params) + header["INVMDL"] = ("Elastic Net", "Inversion Model") + header["ALPHA"] = (params["alpha"], "Inversion Model Alpha") + # header['RHO'] = (params['l1_ratio'], 'Inversion Model Rho') diff --git a/overlappogram/image.py b/overlappogram/image.py index 7ded666..4cef699 100644 --- a/overlappogram/image.py +++ b/overlappogram/image.py @@ -20,64 +20,74 @@ class Image: sigma_psf: np.float64 pixel_delta_wavelength: np.float64 camera_angle: np.float64 = 0.0 + def __post_init__(self): # Verify wcs and data shape match assert self.cube.wcs.pixel_shape == np.shape(self.cube.data)[::-1] assert self.cube.wcs.naxis == 2 self.num_y_pixels, self.num_x_pixels = np.shape(self.cube.data) self.x0, self.y0 = self.cube.wcs.wcs.crpix - self.angle = self.camera_angle * np.pi/180. - self.calculate_detector_dispersion(self.element.temperature, - self.element.mass, - self.element.rest_wavelength, - self.sigma_psf, - self.pixel_delta_wavelength) + self.angle = self.camera_angle * np.pi / 180.0 + self.calculate_detector_dispersion( + self.element.temperature, + self.element.mass, + self.element.rest_wavelength, + self.sigma_psf, + self.pixel_delta_wavelength, + ) self.sources = Table() - self.sources['amplitude'] = [self.amplitude] - self.sources['x_mean'] = [0] - self.sources['y_mean'] = [0] - self.sources['x_stddev'] = [self.sigma_along_disp] - self.sources['y_stddev'] = [self.sigma_psf] - self.sources['theta'] = [self.angle] + self.sources["amplitude"] = [self.amplitude] + self.sources["x_mean"] = [0] + self.sources["y_mean"] = [0] + self.sources["x_stddev"] = [self.sigma_along_disp] + self.sources["y_stddev"] = [self.sigma_psf] + self.sources["theta"] = [self.angle] self.crop_roi_coords = [] - def calculate_detector_dispersion(self,element_temperature, element_mass, - element_rest_wavelength, sigma_psf, - pixel_delta_wavelength): - thermal_velocity = \ - np.sqrt(k_B.value * element_temperature / element_mass)/1.e3 + def calculate_detector_dispersion( + self, + element_temperature, + element_mass, + element_rest_wavelength, + sigma_psf, + pixel_delta_wavelength, + ): + thermal_velocity = ( + np.sqrt(k_B.value * element_temperature / element_mass) / 1.0e3 + ) self.amplitude = 1.0 / (thermal_velocity * sqrt(2 * np.pi)) - self.width_of_pix_in_km_s = \ - pixel_delta_wavelength / element_rest_wavelength * (c.value / 1.e3) - sigma_thermal = thermal_velocity/self.width_of_pix_in_km_s + self.width_of_pix_in_km_s = ( + pixel_delta_wavelength / element_rest_wavelength * (c.value / 1.0e3) + ) + sigma_thermal = thermal_velocity / self.width_of_pix_in_km_s self.sigma_along_disp = np.sqrt(sigma_psf**2 + sigma_thermal**2) def data(self): return self.cube.data[:][:] def create_kernel(self, x, y, vel): - #start_time = time() -# pixel_y, pixel_x = self.cube.world_to_pixel(y, x) + # start_time = time() + # pixel_y, pixel_x = self.cube.world_to_pixel(y, x) pixel_y, pixel_x = y, x - #end_time = time() - #print("gaussian create time =", end_time - start_time) + # end_time = time() + # print("gaussian create time =", end_time - start_time) if pixel_x != np.nan and pixel_y != np.nan: -# newx0 = pixel_x.value + vel.to(u.km / u.s).value/self.width_of_pix_in_km_s * np.cos(self.angle) -# newy0 = pixel_y.value + vel.to(u.km / u.s).value/self.width_of_pix_in_km_s * np.sin(self.angle) - newx0 = pixel_x + vel/self.width_of_pix_in_km_s * np.cos(self.angle) - newy0 = pixel_y + vel/self.width_of_pix_in_km_s * np.sin(self.angle) - self.sources['x_mean'] = [newx0] - self.sources['y_mean'] = 
[newy0] + # newx0 = pixel_x.value + vel.to(u.km / u.s).value/self.width_of_pix_in_km_s * np.cos(self.angle) + # newy0 = pixel_y.value + vel.to(u.km / u.s).value/self.width_of_pix_in_km_s * np.sin(self.angle) + newx0 = pixel_x + vel / self.width_of_pix_in_km_s * np.cos(self.angle) + newy0 = pixel_y + vel / self.width_of_pix_in_km_s * np.sin(self.angle) + self.sources["x_mean"] = [newx0] + self.sources["y_mean"] = [newy0] tshape = (self.num_y_pixels, self.num_x_pixels) - #start_time = time() - kernel = (make_gaussian_sources_image(tshape, self.sources)) - #end_time = time() - #print("gaussian create time =", end_time - start_time) - kernel[kernel < 1.e-3] = 0.0 + # start_time = time() + kernel = make_gaussian_sources_image(tshape, self.sources) + # end_time = time() + # print("gaussian create time =", end_time - start_time) + kernel[kernel < 1.0e-3] = 0.0 kernel = np.reshape(kernel, self.num_y_pixels * self.num_x_pixels) # Normalize kernel. kernel_sum = np.sum(kernel) - if (kernel_sum != 0.0): + if kernel_sum != 0.0: kernel = kernel / kernel_sum else: kernel = np.zeros(self.num_y_pixels * self.num_x_pixels) @@ -95,21 +105,31 @@ def crop_roi(self, lower, upper): image_roi = self.cube.crop_by_coords(lower_corner=lower, upper_corner=upper) print(image_roi) plt.figure() - plt.imshow(image_roi.data, origin='lower') + plt.imshow(image_roi.data, origin="lower") # image_roi.plot() - new_image = Image(image_roi, self.element, self.sigma_psf, self.pixel_delta_wavelength, self.camera_angle) + new_image = Image( + image_roi, + self.element, + self.sigma_psf, + self.pixel_delta_wavelength, + self.camera_angle, + ) crop_roi_coords = [] world_y, world_x = new_image.cube.pixel_to_world(0 * u.pix, 0 * u.pix) pixel_y, pixel_x = self.cube.world_to_pixel(world_y, world_x) - crop_roi_coords.append([int(np.rint(pixel_y.value)), int(np.rint(pixel_x.value))]) + crop_roi_coords.append( + [int(np.rint(pixel_y.value)), int(np.rint(pixel_x.value))] + ) print("crop_roi new image 0, 0 =", pixel_y, pixel_x) num_y, num_x = np.shape(new_image.data()) world_y, world_x = new_image.cube.pixel_to_world(num_y * u.pix, num_x * u.pix) pixel_y, pixel_x = self.cube.world_to_pixel(world_y, world_x) print("crop_roi new image num_y, num_x =", pixel_y, pixel_x) - crop_roi_coords.append([int(np.rint(pixel_y.value)), int(np.rint(pixel_x.value))]) + crop_roi_coords.append( + [int(np.rint(pixel_y.value)), int(np.rint(pixel_x.value))] + ) print("inside image crop_roi_coords =", crop_roi_coords) new_image.set_crop_roi_coords(crop_roi_coords) @@ -128,7 +148,7 @@ def add_simulated_data(self, x, y, vel, em): kernel = np.reshape(kernel, (self.num_y_pixels, self.num_x_pixels)) self.cube.data[:, :] += kernel - def write(self, filename : str): + def write(self, filename: str): fits_header = self.cube.wcs.to_header() - fits_hdu = fits.PrimaryHDU(data = self.cube.data, header = fits_header) + fits_hdu = fits.PrimaryHDU(data=self.cube.data, header=fits_header) fits_hdu.writeto(filename, overwrite=True) diff --git a/overlappogram/inversion.py b/overlappogram/inversion.py index 6063a28..8e96c44 100644 --- a/overlappogram/inversion.py +++ b/overlappogram/inversion.py @@ -8,7 +8,7 @@ @dataclass(order=True) class Inversion: - ''' + """ Inversion for overlap-a-gram data. Attributes @@ -34,15 +34,17 @@ class Inversion: ------- None. 
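    Example
    -------
    A minimal construction sketch; the widths, dependence values, and file
    pattern below are hypothetical placeholders rather than defaults of this
    class, and ``model`` stands for any object exposing the ``invert`` and
    ``add_fits_keywords`` interface used by ``invert``:

        inversion = Inversion(
            pixel_fov_width=0.6,
            solution_fov_width=1.2,
            slit_fov_width=120.0,
            rsp_dep_name="logt",
            rsp_dep_list=["5.8", "6.0", "6.2"],
            rsp_dep_file_fmt="rsp_dep_{}.txt",
        )
        inversion.initialize_input_data("overlappogram.fits")
        inversion.invert(model, "./output/")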
- ''' + """ + pixel_fov_width: np.float32 solution_fov_width: np.float32 slit_fov_width: np.float32 rsp_dep_name: str rsp_dep_list: list rsp_dep_file_fmt: str - rsp_dep_desc_fmt: str = '' - smooth_over: str = 'spatial' + rsp_dep_desc_fmt: str = "" + smooth_over: str = "spatial" + def __post_init__(self): # Calculate number of slits calc_num_slits = divmod(self.slit_fov_width, self.solution_fov_width) @@ -51,22 +53,24 @@ def __post_init__(self): self.num_slits += 1 if self.num_slits % 2 == 0.0: self.num_slits += 1 - #print("number slits =", self.num_slits) + # print("number slits =", self.num_slits) self.half_slits = divmod(self.num_slits, 2) - #print("half slits =", self.half_slits) + # print("half slits =", self.half_slits) # calc_shift_width = divmod(self.solution_fov_width, self.pixel_fov_width) # self.slit_shift_width = int(round(calc_shift_width[0])) - self.slit_shift_width = int(round(self.solution_fov_width / self.pixel_fov_width)) - #print("slit shift width =", self.slit_shift_width) + self.slit_shift_width = int( + round(self.solution_fov_width / self.pixel_fov_width) + ) + # print("slit shift width =", self.slit_shift_width) self.image_height = 0 self.image_width = 0 # Read response files and create response matrix response_files = [self.rsp_dep_file_fmt.format(i) for i in self.rsp_dep_list] - #print("Response files =", response_files) + # print("Response files =", response_files) self.num_response_files = len(response_files) - assert(self.num_response_files > 0) - #print("num rsp files =", self.num_response_files) + assert self.num_response_files > 0 + # print("num rsp files =", self.num_response_files) response_count = 0 for index in range(len(response_files)): # Read file @@ -75,47 +79,60 @@ def __post_init__(self): self.pixels = dep_em_data.iloc[:, 0].values self.wavelengths = dep_em_data.iloc[:, 1].values self.wavelength_width = len(self.wavelengths) - self.response_function = np.zeros((self.num_response_files * self.num_slits, self.wavelength_width), dtype=np.float32) + self.response_function = np.zeros( + (self.num_response_files * self.num_slits, self.wavelength_width), + dtype=np.float32, + ) em = dep_em_data.iloc[:, 2].values # TEMPORARY PATCH!!! em[-1] = 0.0 - if self.smooth_over == 'dependence': + if self.smooth_over == "dependence": # Smooth over dependence. slit_count = 0 for slit_num in range(-self.half_slits[0], self.half_slits[0] + 1): slit_shift = slit_num * self.slit_shift_width if slit_shift < 0: - slit_em = np.pad(em, (0, -slit_shift), mode='constant')[-slit_shift:] + slit_em = np.pad(em, (0, -slit_shift), mode="constant")[ + -slit_shift: + ] elif slit_shift > 0: - slit_em = np.pad(em, (slit_shift, 0), mode='constant')[:-slit_shift] + slit_em = np.pad(em, (slit_shift, 0), mode="constant")[ + :-slit_shift + ] else: slit_em = em - self.response_function[(self.num_response_files * slit_count) + response_count, :] = slit_em + self.response_function[ + (self.num_response_files * slit_count) + response_count, : + ] = slit_em slit_count += 1 response_count += 1 else: - self.smooth_over = 'spatial' + self.smooth_over = "spatial" # Smooth over spatial. 
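                # A worked view of the shift below, assuming a hypothetical
                # slit_shift_width of 2: slit_num = -1 gives slit_shift = -2,
                # and np.pad(em, (0, 2), mode="constant")[2:] drops the first
                # two samples and zero-fills the tail, translating the profile
                # two pixels toward lower indices; a positive shift instead
                # pads on the left and crops the tail.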
for slit_num in range(-self.half_slits[0], self.half_slits[0] + 1): slit_shift = slit_num * self.slit_shift_width if slit_shift < 0: - slit_em = np.pad(em, (0, -slit_shift), mode='constant')[-slit_shift:] + slit_em = np.pad(em, (0, -slit_shift), mode="constant")[ + -slit_shift: + ] elif slit_shift > 0: - slit_em = np.pad(em, (slit_shift, 0), mode='constant')[:-slit_shift] + slit_em = np.pad(em, (slit_shift, 0), mode="constant")[ + :-slit_shift + ] else: slit_em = em self.response_function[response_count, :] = slit_em response_count += 1 - #print("response count =", response_count) + # print("response count =", response_count) self.response_function = self.response_function.transpose() - if self.rsp_dep_desc_fmt == '': + if self.rsp_dep_desc_fmt == "": max_dep_len = len(max(self.rsp_dep_list, key=len)) - self.rsp_dep_desc_fmt = str(max_dep_len) + 'A' + self.rsp_dep_desc_fmt = str(max_dep_len) + "A" def initialize_input_data(self, input_image: str, image_mask: str = None): - ''' + """ Initialize input image and optional mask. Parameters @@ -129,7 +146,7 @@ def initialize_input_data(self, input_image: str, image_mask: str = None): ------- None. - ''' + """ # Read image image_hdul = fits.open(input_image) image_height, image_width = np.shape(image_hdul[0].data) @@ -138,7 +155,7 @@ def initialize_input_data(self, input_image: str, image_mask: str = None): assert image_width == self.wavelength_width self.image = image_hdul[0].data self.image_header = image_hdul[0].header - #print("image (h, w) =", image_height, image_width) + # print("image (h, w) =", image_height, image_width) self.image_width = image_width self.image_height = image_height self.input_image = os.path.basename(input_image) @@ -150,17 +167,18 @@ def initialize_input_data(self, input_image: str, image_mask: str = None): self.image_mask = mask_hdul[0].data if len(np.where(image_mask == 0)) == 0: self.image_mask = None - else: - #print("mask (h, w) =", mask_height, mask_width) - assert image_height == mask_height and image_width == mask_width and self.wavelength_width == self.image_width else: - #self.image_mask = np.ones((image_height, image_width), dtype=np.float32) + # self.image_mask = np.ones((image_height, image_width), dtype=np.float32) self.image_mask = None - def invert(self, model, output_dir: str, - output_file_prefix: str = '', - output_file_postfix: str = ''): - ''' + def invert( + self, + model, + output_dir: str, + output_file_prefix: str = "", + output_file_postfix: str = "", + ): + """ Invert image. Parameters @@ -178,18 +196,22 @@ def invert(self, model, output_dir: str, ------- None. - ''' + """ # Verify input data has been initialized. 
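        # Each detector row is inverted as an independent regression below:
        # the row of counts is fit against the columns of response_function,
        # and masked wavelengths are zeroed in both the data and a copy of
        # the response matrix so they cannot influence the fit.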
- assert self.image_width != 0 and self.image_height != 0 - em_data_cube = np.zeros((self.image_height, self.num_slits, self.num_response_files), dtype=np.float32) - inverted_data = np.zeros((self.image_height, self.image_width), dtype=np.float32) + em_data_cube = np.zeros( + (self.image_height, self.num_slits, self.num_response_files), + dtype=np.float32, + ) + inverted_data = np.zeros( + (self.image_height, self.image_width), dtype=np.float32 + ) for image_row_number in range(self.image_height): - if (image_row_number % 10 == 0): + if image_row_number % 10 == 0: print("image row number =", image_row_number) - image_row = self.image[image_row_number,:] + image_row = self.image[image_row_number, :] masked_rsp_func = self.response_function if self.image_mask is not None: - mask_row = self.image_mask[image_row_number,:] + mask_row = self.image_mask[image_row_number, :] mask_pixels = np.where(mask_row == 0) if len(mask_pixels) > 0: image_row[mask_pixels] = 0 @@ -206,10 +228,14 @@ def invert(self, model, output_dir: str, em, data_out = model.invert(masked_rsp_func, image_row) for slit_num in range(self.num_slits): - if self.smooth_over == 'dependence': - slit_em = em[slit_num * self.num_response_files:(slit_num + 1) * self.num_response_files] + if self.smooth_over == "dependence": + slit_em = em[ + slit_num + * self.num_response_files : (slit_num + 1) + * self.num_response_files + ] else: - slit_em = em[slit_num::self.num_slits] + slit_em = em[slit_num :: self.num_slits] em_data_cube[image_row_number, slit_num, :] = slit_em inverted_data[image_row_number, :] = data_out @@ -219,49 +245,53 @@ def invert(self, model, output_dir: str, # Save EM data cube. base_filename = output_file_prefix - if len(output_file_prefix) > 0 and output_file_prefix[-1] != '_': - base_filename += '_' - base_filename += 'em_data_cube' - if len(output_file_postfix) > 0 and output_file_postfix[0] != '_': - base_filename += '_' + if len(output_file_prefix) > 0 and output_file_prefix[-1] != "_": + base_filename += "_" + base_filename += "em_data_cube" + if len(output_file_postfix) > 0 and output_file_postfix[0] != "_": + base_filename += "_" base_filename += output_file_postfix - em_data_cube_file = output_dir + base_filename + '.fits' + em_data_cube_file = output_dir + base_filename + ".fits" # Transpose data (wavelength, y, x). Readable by ImageJ. em_data_cube = np.transpose(em_data_cube, axes=(2, 0, 1)) em_data_cube_header = self.image_header.copy() self.__add_fits_keywords(em_data_cube_header) model.add_fits_keywords(em_data_cube_header) - hdu = fits.PrimaryHDU(data = em_data_cube, header = em_data_cube_header) + hdu = fits.PrimaryHDU(data=em_data_cube, header=em_data_cube_header) index = np.arange(len(self.rsp_dep_list)) # Add binary table. - col1 = fits.Column(name='index', format='1I', array=index) - col2 = fits.Column(name=self.rsp_dep_name, format=self.rsp_dep_desc_fmt, array=self.rsp_dep_list) + col1 = fits.Column(name="index", format="1I", array=index) + col2 = fits.Column( + name=self.rsp_dep_name, + format=self.rsp_dep_desc_fmt, + array=self.rsp_dep_list, + ) table_hdu = fits.BinTableHDU.from_columns([col1, col2]) hdulist = fits.HDUList([hdu, table_hdu]) hdulist.writeto(em_data_cube_file, overwrite=True) # Save model predicted data. 
base_filename = output_file_prefix - if len(output_file_prefix) > 0 and output_file_prefix[-1] != '_': - base_filename += '_' - base_filename += 'model_predicted_data' - if len(output_file_postfix) > 0 and output_file_postfix[0] != '_': - base_filename += '_' + if len(output_file_prefix) > 0 and output_file_prefix[-1] != "_": + base_filename += "_" + base_filename += "model_predicted_data" + if len(output_file_postfix) > 0 and output_file_postfix[0] != "_": + base_filename += "_" base_filename += output_file_postfix data_file = output_dir + base_filename + ".fits" model_predicted_data_header = self.image_header.copy() self.__add_fits_keywords(model_predicted_data_header) model.add_fits_keywords(model_predicted_data_header) - hdu = fits.PrimaryHDU(data = inverted_data, header = model_predicted_data_header) + hdu = fits.PrimaryHDU(data=inverted_data, header=model_predicted_data_header) # Add binary table. - col1 = fits.Column(name='pixel', format='1I', array=self.pixels) - col2 = fits.Column(name='wavelength', format='1E', array=self.wavelengths) + col1 = fits.Column(name="pixel", format="1I", array=self.pixels) + col2 = fits.Column(name="wavelength", format="1E", array=self.wavelengths) table_hdu = fits.BinTableHDU.from_columns([col1, col2]) hdulist = fits.HDUList([hdu, table_hdu]) hdulist.writeto(data_file, overwrite=True) def __add_fits_keywords(self, header): - ''' + """ Add FITS keywords to FITS header. Parameters @@ -273,10 +303,12 @@ def __add_fits_keywords(self, header): ------- None. - ''' - header.append(('INPUTIMG', self.input_image, 'Input Image'), end=True) - header.append(('PIXELFOV', self.pixel_fov_width, 'Pixel FOV Width'), end=True) - header.append(('SLTNFOV', self.solution_fov_width, 'Solution FOV Width'), end=True) - header.append(('SLITFOV', self.slit_fov_width, 'Slit FOV Width'), end=True) - header.append(('DEPNAME', self.rsp_dep_name, 'Dependence Name'), end=True) - header.append(('SMTHOVER', self.smooth_over, 'Smooth Over'), end=True) + """ + header.append(("INPUTIMG", self.input_image, "Input Image"), end=True) + header.append(("PIXELFOV", self.pixel_fov_width, "Pixel FOV Width"), end=True) + header.append( + ("SLTNFOV", self.solution_fov_width, "Solution FOV Width"), end=True + ) + header.append(("SLITFOV", self.slit_fov_width, "Slit FOV Width"), end=True) + header.append(("DEPNAME", self.rsp_dep_name, "Dependence Name"), end=True) + header.append(("SMTHOVER", self.smooth_over, "Smooth Over"), end=True) diff --git a/overlappogram/inversion_field_angles.py b/overlappogram/inversion_field_angles.py index 6ad7e5f..af5a6af 100644 --- a/overlappogram/inversion_field_angles.py +++ b/overlappogram/inversion_field_angles.py @@ -3,7 +3,6 @@ import os import typing as tp import warnings -from copy import deepcopy from dataclasses import dataclass import numpy as np @@ -13,9 +12,10 @@ from overlappogram.elasticnet_model import ElasticNetModel as model + @dataclass(order=True) class Inversion: - ''' + """ Inversion for overlap-a-gram data. Attributes @@ -37,13 +37,15 @@ class Inversion: ------- None. 
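    Example
    -------
    A minimal sketch; the FITS file names here are hypothetical, and omitting
    rsp_dep_list and field_angle_range selects everything in the response
    cube:

        inversion = Inversion(
            rsp_func_cube_file="rsp_func_cube.fits",
            rsp_dep_name="logt",
            solution_fov_width=2,
            smooth_over="spatial",
        )
        inversion.initialize_input_data("overlappogram.fits")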
- ''' + """ + rsp_func_cube_file: str rsp_dep_name: str rsp_dep_list: list = None solution_fov_width: np.int32 = 1 - smooth_over: str = 'spatial' + smooth_over: str = "spatial" field_angle_range: list = None + def __post_init__(self): self.image_height = 0 self.image_width = 0 @@ -54,79 +56,87 @@ def __post_init__(self): num_dep, num_field_angles, rsp_func_width = np.shape(rsp_func_cube) self.rsp_func_cube_filename = os.path.basename(self.rsp_func_cube_file) - self.inv_date = datetime.datetime.now().isoformat(timespec='milliseconds').replace("+00:00", "Z") + self.inv_date = ( + datetime.datetime.now() + .isoformat(timespec="milliseconds") + .replace("+00:00", "Z") + ) try: - self.rsp_func_date = rsp_func_hdul[0].header['DATE'] + self.rsp_func_date = rsp_func_hdul[0].header["DATE"] except KeyError: - self.rsp_func_date = '' + self.rsp_func_date = "" try: - self.abundance = rsp_func_hdul[0].header['ABUNDANC'] + self.abundance = rsp_func_hdul[0].header["ABUNDANC"] except KeyError: - self.abundance = '' + self.abundance = "" try: - self.electron_distribution = rsp_func_hdul[0].header['ELECDIST'] + self.electron_distribution = rsp_func_hdul[0].header["ELECDIST"] except KeyError: - self.electron_distribution = '' + self.electron_distribution = "" try: - self.chianti_version = rsp_func_hdul[0].header['CHIANT_V'] + self.chianti_version = rsp_func_hdul[0].header["CHIANT_V"] except KeyError: - self.chianti_version = '' + self.chianti_version = "" - dep_name = rsp_func_hdul[0].header['DEPNAME'] + dep_name = rsp_func_hdul[0].header["DEPNAME"] dep_list = rsp_func_hdul[1].data[dep_name] dep_list = np.round(dep_list, decimals=2) print("dep_list", dep_list) - self.pixels = rsp_func_hdul[2].data['index'] - self.field_angle_list = rsp_func_hdul[2].data['field_angle'] + self.pixels = rsp_func_hdul[2].data["index"] + self.field_angle_list = rsp_func_hdul[2].data["field_angle"] self.field_angle_list = np.round(self.field_angle_list, decimals=2) - self.field_angle_index_list = rsp_func_hdul[2].data['index'] + self.field_angle_index_list = rsp_func_hdul[2].data["index"] if self.rsp_dep_list is None: - self.dep_index_list = rsp_func_hdul[1].data['index'] + self.dep_index_list = rsp_func_hdul[1].data["index"] self.dep_list = dep_list dep_list_deltas = abs(np.diff(dep_list)) self.max_dep_list_delta = max(dep_list_deltas) else: dep_list_deltas = abs(np.diff(dep_list)) self.max_dep_list_delta = max(dep_list_deltas) - #print(self.max_dep_list_delta) + # print(self.max_dep_list_delta) dep_index_list = [] for dep in self.rsp_dep_list: delta_dep_list = abs(dep_list - dep) dep_index = np.argmin(delta_dep_list) if abs(dep_list[dep_index] - dep) < self.max_dep_list_delta: - #print(dep, dep_index, dep_list[dep_index]) + # print(dep, dep_index, dep_list[dep_index]) dep_index_list = np.append(dep_index_list, dep_index) new_index_list = [*set(dep_index_list)] new_index_list = np.array(new_index_list, dtype=np.int32) new_index_list.sort() self.dep_index_list = new_index_list self.dep_list = dep_list[new_index_list] - #print(dep_list[new_index_list]) + # print(dep_list[new_index_list]) self.num_deps = len(self.dep_list) print("num deps =", self.num_deps) print("dep index list =", self.dep_index_list) self.rsp_func_width = rsp_func_width - max_num_field_angles = num_field_angles field_angle_list_deltas = abs(np.diff(self.field_angle_list)) self.max_field_angle_list_delta = max(field_angle_list_deltas) - #print(self.max_field_angle_list_delta) + # print(self.max_field_angle_list_delta) if self.field_angle_range is None: 
begin_slit_index = np.int64(0) end_slit_index = np.int64(len(self.field_angle_list) - 1) print("begin index", begin_slit_index, ", end index", end_slit_index) self.field_angle_range_index_list = [begin_slit_index, end_slit_index] - self.field_angle_range_list = self.field_angle_list[self.field_angle_range_index_list] + self.field_angle_range_list = self.field_angle_list[ + self.field_angle_range_index_list + ] else: assert len(self.field_angle_range) == 2 angle_index_list = [] for angle in self.field_angle_range: delta_angle_list = abs(self.field_angle_list - angle) angle_index = np.argmin(delta_angle_list) - if abs(self.field_angle_list[angle_index] - angle) < self.max_field_angle_list_delta: - #print(angle, angle_index, self.field_angle_list[angle_index]) + if ( + abs(self.field_angle_list[angle_index] - angle) + < self.max_field_angle_list_delta + ): + # print(angle, angle_index, self.field_angle_list[angle_index]) angle_index_list = np.append(angle_index_list, angle_index) print(angle_index_list) new_index_list = [*set(angle_index_list)] @@ -144,7 +154,7 @@ def __post_init__(self): if calc_half_fields_angles[1] == 0.0: end_slit_index = end_slit_index - 1 self.field_angle_range_index_list[1] = end_slit_index - self.field_angle_range_list[1] = self.field_angle_list[end_slit_index] + self.field_angle_range_list[1] = self.field_angle_list[end_slit_index] num_field_angles = (end_slit_index - begin_slit_index) + 1 # calc_num_slits = divmod(num_field_angles, self.solution_fov_width) @@ -155,27 +165,28 @@ def __post_init__(self): # self.num_slits += 1 calc_num_slits = divmod(num_field_angles, self.solution_fov_width) - self.num_slits = int(calc_num_slits[0]) + self.num_slits = int(calc_num_slits[0]) # Check if number of slits is even. calc_half_num_slits = divmod(self.num_slits, 2) if calc_half_num_slits[1] == 0.0: self.num_slits -= 1 - #self.num_slits = num_field_angles * self.solution_fov_width + # self.num_slits = num_field_angles * self.solution_fov_width assert self.num_slits >= 3 - #print("number slits =", self.num_slits) - #self.center_slit = divmod(num_field_angles, 2) + # print("number slits =", self.num_slits) + # self.center_slit = divmod(num_field_angles, 2) self.half_slits = divmod(self.num_slits, 2) # if self.half_slits[0] * self.solution_fov_width > self.center_slit[0]: # self.num_slits = self.num_slits - 2 # self.half_slits = divmod(self.num_slits, 2) self.half_fov = divmod(self.solution_fov_width, 2) - #assert self.half_fov[1] == 1 + # assert self.half_fov[1] == 1 - - #print("old center slit", self.center_slit) - #self.center_slit = self.center_slit + begin_slit_index - self.center_slit = divmod(end_slit_index - begin_slit_index, 2) + begin_slit_index + # print("old center slit", self.center_slit) + # self.center_slit = self.center_slit + begin_slit_index + self.center_slit = ( + divmod(end_slit_index - begin_slit_index, 2) + begin_slit_index + ) print("center slit", self.center_slit, self.num_slits, self.half_slits) # Check if even FOV. 
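# To make the slit bookkeeping above concrete, one worked case with
# hypothetical counts, num_field_angles = 11 and solution_fov_width = 3:
#   divmod(11, 3) = (3, 2)  -> num_slits = 3
#   divmod(3, 2)  = (1, 1)  -> remainder nonzero, count already odd, kept
#   half_slits = (1, 1); half_fov = divmod(3, 2) = (1, 1)
# and with center_slit[0] = 5 the next hunk gives
#   begin_slit_index = 5 - 1 - 1*3 = 1, end_slit_index = 5 + 1 + 1*3 = 9,
# covering (9 - 1) + 1 = 9 = num_slits * solution_fov_width field angles.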
@@ -184,73 +195,159 @@ def __post_init__(self): # - (self.half_slits[0] * self.solution_fov_width) # else: # begin_slit_index = self.center_slit[0] - self.half_fov[0] - (self.half_slits[0] * self.solution_fov_width) - begin_slit_index = self.center_slit[0] - self.half_fov[0] - (self.half_slits[0] * self.solution_fov_width) - end_slit_index = self.center_slit[0] + self.half_fov[0] + (self.half_slits[0] * self.solution_fov_width) + begin_slit_index = ( + self.center_slit[0] + - self.half_fov[0] + - (self.half_slits[0] * self.solution_fov_width) + ) + end_slit_index = ( + self.center_slit[0] + + self.half_fov[0] + + (self.half_slits[0] * self.solution_fov_width) + ) # assert begin_slit_index >= 0 and end_slit_index <= (max_num_field_angles - 1) - print("begin_slit_index =", begin_slit_index, "end_slit_index =", end_slit_index) - #print(self.center_slit, (self.half_slits[0], self.solution_fov_width)) - #begin_slit_index = self.center_slit - (self.half_slits[0] * self.solution_fov_width) - #end_slit_ + print( + "begin_slit_index =", begin_slit_index, "end_slit_index =", end_slit_index + ) + # print(self.center_slit, (self.half_slits[0], self.solution_fov_width)) + # begin_slit_index = self.center_slit - (self.half_slits[0] * self.solution_fov_width) + # end_slit_ index = self.center_slit + (self.half_slits[0] * self.solution_fov_width) - #print(begin_slit_index, end_slit_index) + # print(begin_slit_index, end_slit_index) num_field_angles = (end_slit_index - begin_slit_index) + 1 self.field_angle_range_index_list = [begin_slit_index, end_slit_index] - self.field_angle_range_list = self.field_angle_list[self.field_angle_range_index_list] + self.field_angle_range_list = self.field_angle_list[ + self.field_angle_range_index_list + ] self.num_field_angles = num_field_angles response_count = 0 - self.response_function = np.zeros((self.num_deps * self.num_slits, self.rsp_func_width), dtype=np.float32) + self.response_function = np.zeros( + (self.num_deps * self.num_slits, self.rsp_func_width), dtype=np.float32 + ) for index in self.dep_index_list: - if self.smooth_over == 'dependence': + if self.smooth_over == "dependence": # Smooth over dependence. slit_count = 0 - for slit_num in range(self.center_slit[0] - (self.half_slits[0] * self.solution_fov_width), self.center_slit[0] + ((self.half_slits[0] * self.solution_fov_width) + 1), self.solution_fov_width): - #for slit_num in range(begin_slit_index, (end_slit_index + 1), self.solution_fov_width): + for slit_num in range( + self.center_slit[0] + - (self.half_slits[0] * self.solution_fov_width), + self.center_slit[0] + + ((self.half_slits[0] * self.solution_fov_width) + 1), + self.solution_fov_width, + ): + # for slit_num in range(begin_slit_index, (end_slit_index + 1), self.solution_fov_width): if self.solution_fov_width == 1: - self.response_function[(self.num_deps * slit_count) + response_count, :] = rsp_func_cube[index, slit_num, :] + self.response_function[ + (self.num_deps * slit_count) + response_count, : + ] = rsp_func_cube[index, slit_num, :] else: # Check if even FOV. 
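                        # Worked weighting for the even-FOV branch below,
                        # assuming solution_fov_width = 4 so half_fov = (2, 0):
                        # angles slit_num - 1, slit_num, slit_num + 1 enter the
                        # sum with weight 1, and the edge angles slit_num - 2
                        # and slit_num + 2 enter with weight 0.5, an effective
                        # window of 3 + 2 * 0.5 = 4 angles, matching the FOV.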
if self.half_fov[1] == 0: - self.response_function[(self.num_deps * slit_count) + response_count, :] = rsp_func_cube[index, slit_num - (self.half_fov[0] - 1):slit_num + (self.half_fov[0] - 1) + 1, :].sum(axis=0) + (rsp_func_cube[index, slit_num - self.half_fov[0], :] * 0.5) + (rsp_func_cube[index, slit_num + self.half_fov[0], :] * 0.5) + self.response_function[ + (self.num_deps * slit_count) + response_count, : + ] = ( + rsp_func_cube[ + index, + slit_num + - (self.half_fov[0] - 1) : slit_num + + (self.half_fov[0] - 1) + + 1, + :, + ].sum(axis=0) + + ( + rsp_func_cube[index, slit_num - self.half_fov[0], :] + * 0.5 + ) + + ( + rsp_func_cube[index, slit_num + self.half_fov[0], :] + * 0.5 + ) + ) else: - self.response_function[(self.num_deps * slit_count) + response_count, :] = rsp_func_cube[index, slit_num - self.half_fov[0]:slit_num + self.half_fov[0] + 1, :].sum(axis=0) - #self.response_function[(self.num_deps * slit_count) + response_count, :] = rsp_func_cube[index, slit_num:slit_num + self.solution_fov_width, :].sum(axis=0) - #print(slit_num - self.half_fov[0], slit_num + self.half_fov[0] + 1) + self.response_function[ + (self.num_deps * slit_count) + response_count, : + ] = rsp_func_cube[ + index, + slit_num + - self.half_fov[0] : slit_num + + self.half_fov[0] + + 1, + :, + ].sum( + axis=0 + ) slit_count += 1 response_count += 1 else: - self.smooth_over = 'spatial' + self.smooth_over = "spatial" # Smooth over spatial. - for slit_num in range(int(self.center_slit[0] - (self.half_slits[0] * self.solution_fov_width)), int(self.center_slit[0] + ((self.half_slits[0] * self.solution_fov_width) + 1)), int(self.solution_fov_width)): - #for slit_num in range(begin_slit_index, (end_slit_index + 1), self.solution_fov_width): - #print(slit_num) + for slit_num in range( + int( + self.center_slit[0] + - (self.half_slits[0] * self.solution_fov_width) + ), + int( + self.center_slit[0] + + ((self.half_slits[0] * self.solution_fov_width) + 1) + ), + int(self.solution_fov_width), + ): + # for slit_num in range(begin_slit_index, (end_slit_index + 1), self.solution_fov_width): + # print(slit_num) if self.solution_fov_width == 1: - self.response_function[response_count, :] = rsp_func_cube[index, slit_num, :] + self.response_function[response_count, :] = rsp_func_cube[ + index, slit_num, : + ] else: # Check if even FOV. 
if self.half_fov[1] == 0: - self.response_function[response_count, :] = rsp_func_cube[index, slit_num - (self.half_fov[0] - 1):slit_num + (self.half_fov[1] - 1) + 1, :].sum(axis=0) + (rsp_func_cube[index, slit_num - self.half_fov[0], :] * 0.5) + (rsp_func_cube[index, slit_num + self.half_fov[0], :] * 0.5) + self.response_function[response_count, :] = ( + rsp_func_cube[ + index, + slit_num + - (self.half_fov[0] - 1) : slit_num + + (self.half_fov[1] - 1) + + 1, + :, + ].sum(axis=0) + + ( + rsp_func_cube[index, slit_num - self.half_fov[0], :] + * 0.5 + ) + + ( + rsp_func_cube[index, slit_num + self.half_fov[0], :] + * 0.5 + ) + ) else: - self.response_function[response_count, :] = rsp_func_cube[index, slit_num - self.half_fov[0]:slit_num + self.half_fov[0] + 1, :].sum(axis=0) - #self.response_function[response_count, :] = rsp_func_cube[index, slit_num:slit_num + self.solution_fov_width, :].sum(axis=0) - #print("slit_num - self.half_fov[0], slit_num + self.half_fov[0] + 1) + self.response_function[response_count, :] = rsp_func_cube[ + index, + slit_num + - self.half_fov[0] : slit_num + + self.half_fov[0] + + 1, + :, + ].sum(axis=0) response_count += 1 - #print("response count =", response_count) + # print("response count =", response_count) self.response_function = self.response_function.transpose() print("response shape", np.shape(self.response_function)) - if self.rsp_dep_name == 'logt': - self.rsp_dep_desc_fmt = '1E' + if self.rsp_dep_name == "logt": + self.rsp_dep_desc_fmt = "1E" else: max_dep_len = len(max(self.rsp_dep_list, key=len)) - self.rsp_dep_desc_fmt = str(max_dep_len) + 'A' + self.rsp_dep_desc_fmt = str(max_dep_len) + "A" def get_response_function(self): return self.response_function - def initialize_input_data(self, input_image: str, image_mask: str = None, sample_weights_data: str = None): - ''' + def initialize_input_data( + self, input_image: str, image_mask: str = None, sample_weights_data: str = None + ): + """ Initialize input image and optional mask. Parameters @@ -264,25 +361,25 @@ def initialize_input_data(self, input_image: str, image_mask: str = None, sample ------- None. - ''' + """ # Read image image_hdul = fits.open(input_image) image = image_hdul[0].data image_height, image_width = np.shape(image) print(image_height, image_width) # Verify image width equals the response function width in cube. 
- #assert image_width == self.rsp_func_width + # assert image_width == self.rsp_func_width self.image = image try: - image_exposure_time = image_hdul[0].header['IMG_EXP'] + image_exposure_time = image_hdul[0].header["IMG_EXP"] except KeyError: image_exposure_time = 1.0 self.image /= image_exposure_time self.image[np.where(self.image < 0.0)] = 0.0 self.image_hdul = image_hdul - #print("image (h, w) =", image_height, image_width) + # print("image (h, w) =", image_height, image_width) self.image_width = image_width self.image_height = image_height self.input_image = os.path.basename(input_image) @@ -295,29 +392,34 @@ def initialize_input_data(self, input_image: str, image_mask: str = None, sample if len(np.where(self.image_mask == 0)) == 0: self.image_mask = None else: - #self.image_mask = np.ones((image_height, image_width), dtype=np.float32) + # self.image_mask = np.ones((image_height, image_width), dtype=np.float32) self.image_mask = None if self.image_mask is not None: self.image_mask_filename = os.path.basename(image_mask) else: - self.image_mask_filename = '' + self.image_mask_filename = "" if sample_weights_data is not None: print("sample", sample_weights_data) sample_weights_hdul = fits.open(sample_weights_data) - sample_weights_height, sample_weights_width = np.shape(sample_weights_hdul[0].data) - assert image_height == sample_weights_height and image_width == sample_weights_width + sample_weights_height, sample_weights_width = np.shape( + sample_weights_hdul[0].data + ) self.sample_weights = sample_weights_hdul[0].data else: self.sample_weights = None - def invert(self, model, output_dir: str, - output_file_prefix: str = '', - output_file_postfix: str = '', - level: str = '2.0', - detector_row_range: tp.Union[list, None] = None, - score = False): - ''' + def invert( + self, + model, + output_dir: str, + output_file_prefix: str = "", + output_file_postfix: str = "", + level: str = "2.0", + detector_row_range: tp.Union[list, None] = None, + score=False, + ): + """ Invert image. Parameters @@ -341,9 +443,8 @@ def invert(self, model, output_dir: str, ------- None. - ''' + """ # Verify input data has been initialized. 
- assert self.image_width != 0 and self.image_height != 0 if detector_row_range is not None: # assert len(detector_row_range) == 2 # assert detector_row_range[1] >= detector_row_range[0] @@ -353,51 +454,67 @@ def invert(self, model, output_dir: str, else: self.detector_row_min = 0 self.detector_row_max = self.image_height - 1 - em_data_cube = np.zeros((self.image_height, self.num_slits, self.num_deps), dtype=np.float32) - inverted_data = np.zeros((self.image_height, self.image_width), dtype=np.float32) + em_data_cube = np.zeros( + (self.image_height, self.num_slits, self.num_deps), dtype=np.float32 + ) + inverted_data = np.zeros( + (self.image_height, self.image_width), dtype=np.float32 + ) if score: score_data = np.zeros((self.image_height, 1), dtype=np.float32) num_nonconvergences = 0 if detector_row_range is None: image_row_number_range = range(self.image_height) else: - image_row_number_range = range(detector_row_range[0], detector_row_range[1] + 1) + image_row_number_range = range( + detector_row_range[0], detector_row_range[1] + 1 + ) for image_row_number in image_row_number_range: - #if (image_row_number % 10 == 0): - if (image_row_number % 1 == 0): + # if (image_row_number % 10 == 0): + if image_row_number % 1 == 0: print("image row number =", image_row_number) - #print(image_row_number) - image_row = self.image[image_row_number,:] + # print(image_row_number) + image_row = self.image[image_row_number, :] masked_rsp_func = self.response_function if self.image_mask is not None: - mask_row = self.image_mask[image_row_number,:] + mask_row = self.image_mask[image_row_number, :] mask_pixels = np.where(mask_row == 0) if len(mask_pixels) > 0: image_row[mask_pixels] = 0.0 - #image_row[mask_pixels] = 1e-26 + # image_row[mask_pixels] = 1e-26 masked_rsp_func = self.response_function.copy() masked_rsp_func[mask_pixels, :] = 0.0 - #masked_rsp_func[mask_pixels, :] = 1e-26 + # masked_rsp_func[mask_pixels, :] = 1e-26 if self.sample_weights is not None: - sample_weights_row = self.sample_weights[image_row_number,:] + sample_weights_row = self.sample_weights[image_row_number, :] else: sample_weights_row = None with warnings.catch_warnings(): - warnings.filterwarnings("error", category=ConvergenceWarning, module="sklearn") + warnings.filterwarnings( + "error", category=ConvergenceWarning, module="sklearn" + ) try: - em, data_out = model.invert(masked_rsp_func, image_row, sample_weights_row) + em, data_out = model.invert( + masked_rsp_func, image_row, sample_weights_row + ) for slit_num in range(self.num_slits): - if self.smooth_over == 'dependence': - slit_em = em[slit_num * self.num_deps:(slit_num + 1) * self.num_deps] + if self.smooth_over == "dependence": + slit_em = em[ + slit_num + * self.num_deps : (slit_num + 1) + * self.num_deps + ] else: - slit_em = em[slit_num::self.num_slits] + slit_em = em[slit_num :: self.num_slits] em_data_cube[image_row_number, slit_num, :] = slit_em inverted_data[image_row_number, :] = data_out if score: - score_data[image_row_number, :] = model.get_score(masked_rsp_func, image_row) - #print("Row", image_row_number, "converged.") + score_data[image_row_number, :] = model.get_score( + masked_rsp_func, image_row + ) + # print("Row", image_row_number, "converged.") except Exception: num_nonconvergences += 1 print("Row", image_row_number, "did not converge!") @@ -409,41 +526,43 @@ def invert(self, model, output_dir: str, # Save EM data cube. 
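        # The cube below is written as
        # "<output_dir><prefix>_em_data_cube_<postfix>.fits" (underscores are
        # added only when missing), transposed to (dependence, y, x) so ImageJ
        # reads it naturally, with a binary-table HDU mapping each dependence
        # index to its value.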
base_filename = output_file_prefix - if len(output_file_prefix) > 0 and output_file_prefix[-1] != '_': - base_filename += '_' - base_filename += 'em_data_cube' - if len(output_file_postfix) > 0 and output_file_postfix[0] != '_': - base_filename += '_' + if len(output_file_prefix) > 0 and output_file_prefix[-1] != "_": + base_filename += "_" + base_filename += "em_data_cube" + if len(output_file_postfix) > 0 and output_file_postfix[0] != "_": + base_filename += "_" base_filename += output_file_postfix - em_data_cube_file = output_dir + base_filename + '.fits' + em_data_cube_file = output_dir + base_filename + ".fits" # Transpose data (wavelength, y, x). Readable by ImageJ. em_data_cube = np.transpose(em_data_cube, axes=(2, 0, 1)) em_data_cube_header = self.image_hdul[0].header.copy() - em_data_cube_header['LEVEL'] = (level, 'Level') - em_data_cube_header['UNITS'] = ('1e26 cm-5', 'Units') + em_data_cube_header["LEVEL"] = (level, "Level") + em_data_cube_header["UNITS"] = ("1e26 cm-5", "Units") self.__add_fits_keywords(em_data_cube_header) model.add_fits_keywords(em_data_cube_header) - hdu = fits.PrimaryHDU(data = em_data_cube, header = em_data_cube_header) + hdu = fits.PrimaryHDU(data=em_data_cube, header=em_data_cube_header) # Add binary table. - col1 = fits.Column(name='index', format='1I', array=self.dep_index_list) - col2 = fits.Column(name=self.rsp_dep_name, format=self.rsp_dep_desc_fmt, array=self.dep_list) + col1 = fits.Column(name="index", format="1I", array=self.dep_index_list) + col2 = fits.Column( + name=self.rsp_dep_name, format=self.rsp_dep_desc_fmt, array=self.dep_list + ) table_hdu = fits.BinTableHDU.from_columns([col1, col2]) hdulist = fits.HDUList([hdu, table_hdu]) hdulist.writeto(em_data_cube_file, overwrite=True) # Save model predicted data. base_filename = output_file_prefix - if len(output_file_prefix) > 0 and output_file_prefix[-1] != '_': - base_filename += '_' - base_filename += 'model_predicted_data' - if len(output_file_postfix) > 0 and output_file_postfix[0] != '_': - base_filename += '_' + if len(output_file_prefix) > 0 and output_file_prefix[-1] != "_": + base_filename += "_" + base_filename += "model_predicted_data" + if len(output_file_postfix) > 0 and output_file_postfix[0] != "_": + base_filename += "_" base_filename += output_file_postfix data_file = output_dir + base_filename + ".fits" model_predicted_data_hdul = self.image_hdul.copy() model_predicted_data_hdul[0].data = inverted_data - model_predicted_data_hdul[0].header['LEVEL'] = (level, 'Level') - model_predicted_data_hdul[0].header['UNITS'] = 'Electron s-1' + model_predicted_data_hdul[0].header["LEVEL"] = (level, "Level") + model_predicted_data_hdul[0].header["UNITS"] = "Electron s-1" self.__add_fits_keywords(model_predicted_data_hdul[0].header) model.add_fits_keywords(model_predicted_data_hdul[0].header) model_predicted_data_hdul.writeto(data_file, overwrite=True) @@ -451,37 +570,43 @@ def invert(self, model, output_dir: str, if score: # Save score. 
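A minimal sketch of the FITS layout written above, with dummy arrays: the cube is transposed to put the dependence axis first (so ImageJ reads it as an image stack) and the dependence grid travels in a binary-table extension. The column name "logt" stands in for `self.rsp_dep_name`:

```python
import numpy as np
from astropy.io import fits

em_data_cube = np.zeros((100, 5, 12), dtype=np.float32)  # (rows, slits, deps)
dep_list = np.linspace(5.6, 6.7, 12, dtype=np.float32)   # illustrative logT grid

# (rows, slits, deps) -> (deps, rows, slits), matching axes=(2, 0, 1) above.
primary = fits.PrimaryHDU(data=np.transpose(em_data_cube, axes=(2, 0, 1)))
primary.header["UNITS"] = ("1e26 cm-5", "Units")

col1 = fits.Column(name="index", format="1I", array=np.arange(len(dep_list)))
col2 = fits.Column(name="logt", format="1E", array=dep_list)
table_hdu = fits.BinTableHDU.from_columns([col1, col2])
fits.HDUList([primary, table_hdu]).writeto("em_data_cube.fits", overwrite=True)
```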
base_filename = output_file_prefix - if len(output_file_prefix) > 0 and output_file_prefix[-1] != '_': - base_filename += '_' - base_filename += 'model_score_data' - if len(output_file_postfix) > 0 and output_file_postfix[0] != '_': - base_filename += '_' + if len(output_file_prefix) > 0 and output_file_prefix[-1] != "_": + base_filename += "_" + base_filename += "model_score_data" + if len(output_file_postfix) > 0 and output_file_postfix[0] != "_": + base_filename += "_" base_filename += output_file_postfix data_file = output_dir + base_filename + ".fits" - hdu = fits.PrimaryHDU(data = score_data) + hdu = fits.PrimaryHDU(data=score_data) hdulist = fits.HDUList([hdu]) hdulist.writeto(data_file, overwrite=True) - def multiprocessing_invert_image_row(self, image_row_number: np.int32, chunk_index: int, score = False): + def multiprocessing_invert_image_row( + self, image_row_number: np.int32, chunk_index: int, score=False + ): model = self.models[chunk_index] print(f"Inverting image row {image_row_number:>4}", end="\r") - image_row = self.image[image_row_number,:] + image_row = self.image[image_row_number, :] masked_rsp_func = self.response_function if self.image_mask is not None: - mask_row = self.image_mask[image_row_number,:] + mask_row = self.image_mask[image_row_number, :] mask_pixels = np.where(mask_row == 0) if len(mask_pixels) > 0: image_row[mask_pixels] = 0 masked_rsp_func = self.response_function.copy() masked_rsp_func[mask_pixels, :] = 0.0 if self.sample_weights is not None: - sample_weights_row = self.sample_weights[image_row_number,:] + sample_weights_row = self.sample_weights[image_row_number, :] else: sample_weights_row = None with warnings.catch_warnings(): - warnings.filterwarnings("error", category=ConvergenceWarning, module="sklearn") + warnings.filterwarnings( + "error", category=ConvergenceWarning, module="sklearn" + ) try: - em, data_out = model.invert(masked_rsp_func, image_row, sample_weights_row) + em, data_out = model.invert( + masked_rsp_func, image_row, sample_weights_row + ) except Exception: print("Row", image_row_number, "did not converge!") em = np.zeros((self.num_slits * self.num_deps), dtype=np.float32) @@ -490,17 +615,23 @@ def multiprocessing_invert_image_row(self, image_row_number: np.int32, chunk_ind if score: score_data = model.get_score(masked_rsp_func, image_row) return [image_row_number, em, data_out, score_data] - else: + else: # noqa: RET505 return [image_row_number, em, data_out] - def multiprocessing_invert(self, model_config, alpha, rho, output_dir: str, - output_file_prefix: str = '', - output_file_postfix: str = '', - level: str = '2.0', - num_threads: int = 1, - detector_row_range: tp.Union[list, None] = None, - score = False): - ''' + def multiprocessing_invert( + self, + model_config, + alpha, + rho, + output_dir: str, + output_file_prefix: str = "", + output_file_postfix: str = "", + level: str = "2.0", + num_threads: int = 1, + detector_row_range: tp.Union[list, None] = None, + score=False, + ): + """ Invert image. Parameters @@ -524,11 +655,15 @@ def multiprocessing_invert(self, model_config, alpha, rho, output_dir: str, ------- None. - ''' + """ # Verify input data has been initialized. 
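The base-filename assembly repeated for each output product above is equivalent to this small helper; `build_base_filename` is a hypothetical name, not part of the package:

```python
def build_base_filename(prefix: str, product: str, postfix: str) -> str:
    """Join prefix, product name, and postfix with single underscores."""
    name = prefix
    if len(prefix) > 0 and not prefix.endswith("_"):
        name += "_"
    name += product
    if len(postfix) > 0 and not postfix.startswith("_"):
        name += "_"
    name += postfix
    return name


# build_base_filename("eccco", "em_data_cube", "x4") -> "eccco_em_data_cube_x4"
```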
# assert self.image_width != 0 and self.image_height != 0 - self.mp_em_data_cube = np.zeros((self.image_height, self.num_slits, self.num_deps), dtype=np.float32) - self.mp_inverted_data = np.zeros((self.image_height, self.image_width), dtype=np.float32) + self.mp_em_data_cube = np.zeros( + (self.image_height, self.num_slits, self.num_deps), dtype=np.float32 + ) + self.mp_inverted_data = np.zeros( + (self.image_height, self.image_width), dtype=np.float32 + ) if score: self.mp_score_data = np.zeros((self.image_height, 1), dtype=np.float32) self.mp_model = model @@ -540,53 +675,66 @@ def multiprocessing_invert(self, model_config, alpha, rho, output_dir: str, self.detector_row_min = 0 self.detector_row_max = self.image_height - 1 - - np.arange(self.detector_row_min, - self.detector_row_max, - (self.detector_row_max - self.detector_row_min) / num_threads) - starts = np.arange(self.detector_row_min, - self.detector_row_max, - (self.detector_row_max - self.detector_row_min)/num_threads).astype(int) + np.arange( + self.detector_row_min, + self.detector_row_max, + (self.detector_row_max - self.detector_row_min) / num_threads, + ) + starts = np.arange( + self.detector_row_min, + self.detector_row_max, + (self.detector_row_max - self.detector_row_min) / num_threads, + ).astype(int) ends = np.append(starts[1:], self.detector_row_max) futures = [] executors = [] self.models = [] for chunk_index, (start, end) in enumerate(zip(starts, ends)): - executors.append(concurrent.futures.ThreadPoolExecutor(max_workers=1)) - enet_model = ElasticNet(alpha=alpha, - l1_ratio=rho, - tol=model_config['tol'], - max_iter=model_config['max_iter'], - precompute=False, # setting this to true slows down performance dramatically - positive=True, - copy_X=False, - fit_intercept=False, - selection=model_config['selection'], - warm_start=model_config['warm_start']) - self.models.append(model(enet_model)) - - futures.extend([executors[-1].submit(self.multiprocessing_invert_image_row, row, chunk_index, score) - for row in range(start, end + 1)]) + executors.append(concurrent.futures.ThreadPoolExecutor(max_workers=1)) + enet_model = ElasticNet( + alpha=alpha, + l1_ratio=rho, + tol=model_config["tol"], + max_iter=model_config["max_iter"], + precompute=False, # setting this to true slows down performance dramatically + positive=True, + copy_X=False, + fit_intercept=False, + selection=model_config["selection"], + warm_start=model_config["warm_start"], + ) + self.models.append(model(enet_model)) + + futures.extend( + [ + executors[-1].submit( + self.multiprocessing_invert_image_row, row, chunk_index, score + ) + for row in range(start, end + 1) + ] + ) # Wait for all tasks to complete and retrieve the results for future in concurrent.futures.as_completed(futures): result = future.result() - #print(result) + # print(result) for slit_num in range(self.num_slits): - if self.smooth_over == 'dependence': - slit_em = result[1][slit_num * self.num_deps:(slit_num + 1) * self.num_deps] + if self.smooth_over == "dependence": + slit_em = result[1][ + slit_num * self.num_deps : (slit_num + 1) * self.num_deps + ] else: - slit_em = result[1][slit_num::self.num_slits] + slit_em = result[1][slit_num :: self.num_slits] self.mp_em_data_cube[result[0], slit_num, :] = slit_em self.mp_inverted_data[result[0], :] = result[2] if score: self.mp_score_data[result[0]] = result[3] - #print("before shutdown") + # print("before shutdown") for executor in executors: executor.shutdown() - #print("after shutdown") + # print("after shutdown") print("Finished with 
tasks") @@ -595,42 +743,44 @@ def multiprocessing_invert(self, model_config, alpha, rho, output_dir: str, # Save EM data cube. base_filename = output_file_prefix - if len(output_file_prefix) > 0 and output_file_prefix[-1] != '_': - base_filename += '_' - base_filename += 'em_data_cube' - if len(output_file_postfix) > 0 and output_file_postfix[0] != '_': - base_filename += '_' + if len(output_file_prefix) > 0 and output_file_prefix[-1] != "_": + base_filename += "_" + base_filename += "em_data_cube" + if len(output_file_postfix) > 0 and output_file_postfix[0] != "_": + base_filename += "_" base_filename += output_file_postfix - em_data_cube_file = output_dir + base_filename + '.fits' + em_data_cube_file = output_dir + base_filename + ".fits" # Transpose data (wavelength, y, x). Readable by ImageJ. em_data_cube = np.transpose(self.mp_em_data_cube, axes=(2, 0, 1)) em_data_cube_header = self.image_hdul[0].header.copy() - em_data_cube_header['LEVEL'] = (level, 'Level') - em_data_cube_header['UNITS'] = ('1e26 cm-5', 'Units') + em_data_cube_header["LEVEL"] = (level, "Level") + em_data_cube_header["UNITS"] = ("1e26 cm-5", "Units") self.__add_fits_keywords(em_data_cube_header) self.models[-1].add_fits_keywords(em_data_cube_header) - hdu = fits.PrimaryHDU(data = em_data_cube, header = em_data_cube_header) + hdu = fits.PrimaryHDU(data=em_data_cube, header=em_data_cube_header) # Add binary table. - col1 = fits.Column(name='index', format='1I', array=self.dep_index_list) - col2 = fits.Column(name=self.rsp_dep_name, format=self.rsp_dep_desc_fmt, array=self.dep_list) + col1 = fits.Column(name="index", format="1I", array=self.dep_index_list) + col2 = fits.Column( + name=self.rsp_dep_name, format=self.rsp_dep_desc_fmt, array=self.dep_list + ) table_hdu = fits.BinTableHDU.from_columns([col1, col2]) hdulist = fits.HDUList([hdu, table_hdu]) hdulist.writeto(em_data_cube_file, overwrite=True) # Save model predicted data. base_filename = output_file_prefix - if len(output_file_prefix) > 0 and output_file_prefix[-1] != '_': - base_filename += '_' - base_filename += 'model_predicted_data' - if len(output_file_postfix) > 0 and output_file_postfix[0] != '_': - base_filename += '_' + if len(output_file_prefix) > 0 and output_file_prefix[-1] != "_": + base_filename += "_" + base_filename += "model_predicted_data" + if len(output_file_postfix) > 0 and output_file_postfix[0] != "_": + base_filename += "_" base_filename += output_file_postfix data_file = output_dir + base_filename + ".fits" - #print("data file", data_file) + # print("data file", data_file) model_predicted_data_hdul = self.image_hdul.copy() model_predicted_data_hdul[0].data = self.mp_inverted_data - model_predicted_data_hdul[0].header['LEVEL'] = (level, 'Level') - model_predicted_data_hdul[0].header['UNITS'] = 'Electron s-1' + model_predicted_data_hdul[0].header["LEVEL"] = (level, "Level") + model_predicted_data_hdul[0].header["UNITS"] = "Electron s-1" self.__add_fits_keywords(model_predicted_data_hdul[0].header) self.models[-1].add_fits_keywords(model_predicted_data_hdul[0].header) model_predicted_data_hdul.writeto(data_file, overwrite=True) @@ -638,22 +788,22 @@ def multiprocessing_invert(self, model_config, alpha, rho, output_dir: str, if score: # Save score. 
            base_filename = output_file_prefix
-            if len(output_file_prefix) > 0 and output_file_prefix[-1] != '_':
-                base_filename += '_'
-            base_filename += 'model_score_data'
-            if len(output_file_postfix) > 0 and output_file_postfix[0] != '_':
-                base_filename += '_'
+            if len(output_file_prefix) > 0 and output_file_prefix[-1] != "_":
+                base_filename += "_"
+            base_filename += "model_score_data"
+            if len(output_file_postfix) > 0 and output_file_postfix[0] != "_":
+                base_filename += "_"
             base_filename += output_file_postfix
             score_data_file = output_dir + base_filename + ".fits"
-            #print("score", data_file)
-            hdu = fits.PrimaryHDU(data = self.mp_score_data)
+            # print("score", data_file)
+            hdu = fits.PrimaryHDU(data=self.mp_score_data)
             hdulist = fits.HDUList([hdu])
             hdulist.writeto(score_data_file, overwrite=True)

         return em_data_cube_file

     def __add_fits_keywords(self, header):
-        '''
+        """
         Add FITS keywords to FITS header.

         Parameters
@@ -665,29 +815,43 @@ def invert(self, model, output_dir: str,
         -------
         None.

-        '''
-        header['INV_DATE'] = (self.inv_date, 'Inversion Date')
-        header['RSPFUNC'] = (self.rsp_func_date, 'Response Functions Filename')
-        header['RSP_DATE'] = (self.rsp_func_cube_filename, 'Response Functions Creation Date')
-        header['ABUNDANC'] = (self.abundance, 'Abundance')
-        header['ELECDIST'] = (self.electron_distribution, 'Electron Distribution')
-        header['CHIANT_V'] = (self.chianti_version, 'Chianti Version')
-        header['INVIMG'] = (self.input_image, 'Inversion Image Filename')
-        header['INVMASK'] = (self.image_mask_filename, 'Inversion Mask Filename')
-        header['SLTNFOV'] = (self.solution_fov_width, 'Solution FOV Width')
-        header['DEPNAME'] = (self.rsp_dep_name, 'Dependence Name')
-        header['SMTHOVER'] = (self.smooth_over, 'Smooth Over')
-        header['LOGT_MIN'] = (f"{self.dep_list[0]:.2f}", 'Minimum Logt')
-        header['LOGT_DLT'] = (f"{self.max_dep_list_delta:.2f}", 'Delta Logt')
-        header['LOGT_NUM'] = (len(self.dep_list), 'Number Logts')
-        header['FA_MIN'] = (f"{self.field_angle_range_list[0]:.3f}", 'Minimum Field Angle')
-        header['FA_DLT'] = (f"{self.max_field_angle_list_delta:.3f}", 'Delta Field Angle')
-        header['FA_NUM'] = (self.num_field_angles, 'Number Field Angles')
-        header['FA_CDELT'] = (f"{self.solution_fov_width * self.max_field_angle_list_delta:.3f}", 'Field Angle CDELT')
-        header['DROW_MIN'] = (self.detector_row_min, 'Minimum Detector Row')
-        header['DROW_MAX'] = (self.detector_row_max, 'Maximum Detector Row')
-
-    def create_dependence_images(self, em_data_cube_file: str, output_dir: str, image_mask_file: str = None):
+        """
+        header["INV_DATE"] = (self.inv_date, "Inversion Date")
+        header["RSPFUNC"] = (
+            self.rsp_func_cube_filename,
+            "Response Functions Filename",
+        )
+        header["RSP_DATE"] = (self.rsp_func_date, "Response Functions Creation Date")
+        header["ABUNDANC"] = (self.abundance, "Abundance")
+        header["ELECDIST"] = (self.electron_distribution, "Electron Distribution")
+        header["CHIANT_V"] = (self.chianti_version, "Chianti Version")
+        header["INVIMG"] = (self.input_image, "Inversion Image Filename")
+        header["INVMASK"] = (self.image_mask_filename, "Inversion Mask Filename")
+        header["SLTNFOV"] = (self.solution_fov_width, "Solution FOV Width")
+        header["DEPNAME"] = (self.rsp_dep_name, "Dependence Name")
+        header["SMTHOVER"] = (self.smooth_over, "Smooth Over")
+        header["LOGT_MIN"] = (f"{self.dep_list[0]:.2f}", "Minimum Logt")
+        header["LOGT_DLT"] = (f"{self.max_dep_list_delta:.2f}", "Delta Logt")
+        header["LOGT_NUM"] = (len(self.dep_list), "Number Logts")
+        header["FA_MIN"] = (
f"{self.field_angle_range_list[0]:.3f}", + "Minimum Field Angle", + ) + header["FA_DLT"] = ( + f"{self.max_field_angle_list_delta:.3f}", + "Delta Field Angle", + ) + header["FA_NUM"] = (self.num_field_angles, "Number Field Angles") + header["FA_CDELT"] = ( + f"{self.solution_fov_width * self.max_field_angle_list_delta:.3f}", + "Field Angle CDELT", + ) + header["DROW_MIN"] = (self.detector_row_min, "Minimum Detector Row") + header["DROW_MAX"] = (self.detector_row_max, "Maximum Detector Row") + + def create_dependence_images( + self, em_data_cube_file: str, output_dir: str, image_mask_file: str = None + ): assert len(self.dep_list) >= 2 # Read EM data cube em_data_cube_hdul = fits.open(em_data_cube_file) @@ -704,56 +868,79 @@ def create_dependence_images(self, em_data_cube_file: str, output_dir: str, imag image_mask = None dep_image = np.zeros((self.image_height, self.image_width), dtype=np.float32) - dep_image_cube = np.zeros((len(self.dep_list), self.image_height, self.image_width), dtype=np.float32) + dep_image_cube = np.zeros( + (len(self.dep_list), self.image_height, self.image_width), dtype=np.float32 + ) if image_mask is None: - dep_image = np.zeros((self.image_height, self.image_width), dtype=np.float32) + dep_image = np.zeros( + (self.image_height, self.image_width), dtype=np.float32 + ) response_count = 0 for index, dep in zip(range(len(self.dep_list)), self.dep_list): - dep_image[:,:] = 0.0 - if self.smooth_over == 'dependence': + dep_image[:, :] = 0.0 + if self.smooth_over == "dependence": # Smooth over dependence. slit_count = 0 for slit_num in range(self.num_slits): - dep_image += np.dot(em_data_cube[index, :, slit_num][:, None], self.response_function[:, (self.num_deps * slit_count) + response_count][None, :]) + dep_image += np.dot( + em_data_cube[index, :, slit_num][:, None], + self.response_function[ + :, (self.num_deps * slit_count) + response_count + ][None, :], + ) slit_count += 1 response_count += 1 else: - self.smooth_over = 'spatial' + self.smooth_over = "spatial" # Smooth over spatial. for slit_num in range(self.num_slits): - dep_image += np.dot(em_data_cube[index, :, slit_num][:, None], self.response_function[:, response_count][None, :]) + dep_image += np.dot( + em_data_cube[index, :, slit_num][:, None], + self.response_function[:, response_count][None, :], + ) response_count += 1 dep_image_cube[index, :, :] = dep_image else: dep_image = np.zeros(self.image_width, dtype=np.float32) for image_row_number in range(self.image_height): - #print("1", image_row_number) - mask_row = self.image_mask[image_row_number,:] + # print("1", image_row_number) + mask_row = self.image_mask[image_row_number, :] mask_pixels = np.where(mask_row == 0) masked_rsp_func = self.response_function.copy() masked_rsp_func[mask_pixels, :] = 0.0 response_count = 0 for index, dep in zip(range(len(self.dep_list)), self.dep_list): - #print("2", index, dep) + # print("2", index, dep) dep_image[:] = 0.0 - if self.smooth_over == 'dependence': + if self.smooth_over == "dependence": # Smooth over dependence. slit_count = 0 for slit_num in range(self.num_slits): - dep_image[:] += np.dot(em_data_cube[index, image_row_number, slit_num], masked_rsp_func[:, (self.num_deps * slit_count) + response_count]) + dep_image[:] += np.dot( + em_data_cube[index, image_row_number, slit_num], + masked_rsp_func[ + :, (self.num_deps * slit_count) + response_count + ], + ) slit_count += 1 response_count += 1 else: - self.smooth_over = 'spatial' + self.smooth_over = "spatial" # Smooth over spatial. 
for slit_num in range(self.num_slits): - #print("3", slit_num) - dep_image[:] += np.dot(em_data_cube[index, image_row_number, slit_num], masked_rsp_func[:, response_count]) + # print("3", slit_num) + dep_image[:] += np.dot( + em_data_cube[index, image_row_number, slit_num], + masked_rsp_func[:, response_count], + ) response_count += 1 dep_image_cube[index, image_row_number, :] = dep_image - dep_image_filename = output_dir + f"dep_image_cube_{self.rsp_dep_name}_{self.dep_list[0]:.2}_{self.dep_list[len(self.dep_list)-1]:.2}.fits" + dep_image_filename = ( + output_dir + + f"dep_image_cube_{self.rsp_dep_name}_{self.dep_list[0]:.2}_{self.dep_list[len(self.dep_list)-1]:.2}.fits" + ) em_data_cube_hdul[0].data = dep_image_cube em_data_cube_hdul.writeto(dep_image_filename, overwrite=True) diff --git a/overlappogram/inversion_field_angles_logts_ions.py b/overlappogram/inversion_field_angles_logts_ions.py index db0519c..a5dadbb 100644 --- a/overlappogram/inversion_field_angles_logts_ions.py +++ b/overlappogram/inversion_field_angles_logts_ions.py @@ -12,7 +12,7 @@ @dataclass(order=True) class Inversion: - ''' + """ Inversion for overlap-a-gram data. Attributes @@ -32,12 +32,14 @@ class Inversion: ------- None. - ''' + """ + rsp_func_cube_file: str rsp_func_selection: tuple solution_fov_width: np.int32 = 1 - smooth_over: str = 'spatial' + smooth_over: str = "spatial" field_angle_range: list = None + def __post_init__(self): self.image_height = 0 self.image_width = 0 @@ -45,57 +47,65 @@ def __post_init__(self): # Open response function cube file. rsp_func_hdul = fits.open(self.rsp_func_cube_file) rsp_func_cube = rsp_func_hdul[0].data - self.num_ions, self.num_logts, num_field_angles, rsp_func_width = np.shape(rsp_func_cube) - #print(self.num_ions, self.num_logts, num_field_angles, rsp_func_width) + self.num_ions, self.num_logts, num_field_angles, rsp_func_width = np.shape( + rsp_func_cube + ) + # print(self.num_ions, self.num_logts, num_field_angles, rsp_func_width) self.rsp_func_cube_filename = os.path.basename(self.rsp_func_cube_file) - self.inv_date = datetime.datetime.now().isoformat(timespec='milliseconds').replace("+00:00", "Z") + self.inv_date = ( + datetime.datetime.now() + .isoformat(timespec="milliseconds") + .replace("+00:00", "Z") + ) try: - self.rsp_func_date = rsp_func_hdul[0].header['DATE'] + self.rsp_func_date = rsp_func_hdul[0].header["DATE"] except KeyError: - self.rsp_func_date = '' + self.rsp_func_date = "" try: - self.abundance = rsp_func_hdul[0].header['ABUNDANC'] + self.abundance = rsp_func_hdul[0].header["ABUNDANC"] except KeyError: - self.abundance = '' + self.abundance = "" try: - self.electron_distribution = rsp_func_hdul[0].header['ELECDIST'] + self.electron_distribution = rsp_func_hdul[0].header["ELECDIST"] except KeyError: - self.electron_distribution = '' + self.electron_distribution = "" try: - self.chianti_version = rsp_func_hdul[0].header['CHIANT_V'] + self.chianti_version = rsp_func_hdul[0].header["CHIANT_V"] except KeyError: - self.chianti_version = '' + self.chianti_version = "" # Field Angles (field_angle) - self.field_angle_list = rsp_func_hdul[1].data['field_angle'] - #self.field_angle_list = rsp_func_hdul[2].data['field_angle'] + self.field_angle_list = rsp_func_hdul[1].data["field_angle"] + # self.field_angle_list = rsp_func_hdul[2].data['field_angle'] self.field_angle_list = np.round(self.field_angle_list, decimals=2) # Logt (logt) - logt_list = rsp_func_hdul[2].data['logt'] - #logt_list = rsp_func_hdul[1].data['logt'] + logt_list = 
rsp_func_hdul[2].data["logt"] + # logt_list = rsp_func_hdul[1].data['logt'] logt_list = np.round(logt_list, decimals=2) print(logt_list) - #Ion (ion) + # Ion (ion) print(rsp_func_hdul[3].header) print(rsp_func_hdul[3].data) - ion_list = rsp_func_hdul[3].data['ion'] - #ion_list = rsp_func_hdul[4].data['ION_PRESSURE'] #density - #ion_list = rsp_func_hdul[3].data['ionlist'] - #ion_list = np.round(ion_list, decimals=2) + ion_list = rsp_func_hdul[3].data["ion"] + # ion_list = rsp_func_hdul[4].data['ION_PRESSURE'] #density + # ion_list = rsp_func_hdul[3].data['ionlist'] + # ion_list = np.round(ion_list, decimals=2) print(ion_list) logt_ion_table = np.zeros((len(logt_list), len(ion_list)), dtype=np.uint8) for index in range(len(self.rsp_func_selection[0])): - ion_index, = np.where(ion_list == self.rsp_func_selection[0][index]) + (ion_index,) = np.where(ion_list == self.rsp_func_selection[0][index]) print(self.rsp_func_selection[0][index]) assert len(ion_index == 1) - logts = np.arange(self.rsp_func_selection[2][index], - self.rsp_func_selection[3][index] + self.rsp_func_selection[1][index], - self.rsp_func_selection[1][index]) + logts = np.arange( + self.rsp_func_selection[2][index], + self.rsp_func_selection[3][index] + self.rsp_func_selection[1][index], + self.rsp_func_selection[1][index], + ) logts = np.round(logts, decimals=2) for logt in logts: - logt_index, = np.where(np.isclose(logt_list, logt)) + (logt_index,) = np.where(np.isclose(logt_list, logt)) assert len(logt_index == 1) logt_ion_table[logt_index, ion_index[0]] = 1 @@ -104,14 +114,14 @@ def __post_init__(self): print(inv_selection, type(inv_selection)) self.num_selections = len(inv_selection[0]) self.inverted_selection = deepcopy(list(inv_selection)) - #print(self.inverted_selection, type(self.inverted_selection)) + # print(self.inverted_selection, type(self.inverted_selection)) inv_logt_index_list = [*set(inv_selection[0])] inv_logt_index_list = np.array(inv_logt_index_list, dtype=np.int32) inv_logt_index_list.sort() self.inv_logt_list = logt_list[inv_logt_index_list] print(self.inv_logt_list) for count, value in enumerate(inv_logt_index_list): - #print(count, value) + # print(count, value) self.inverted_selection[0][np.where(inv_selection[0] == value)] = count inv_ion_index_list = [*set(inv_selection[1])] inv_ion_index_list = np.array(inv_ion_index_list, dtype=np.int32) @@ -119,31 +129,35 @@ def __post_init__(self): self.inv_ion_list = ion_list[inv_ion_index_list] print(self.inv_ion_list) for count, value in enumerate(inv_ion_index_list): - #print(count, value) + # print(count, value) self.inverted_selection[1][np.where(inv_selection[1] == value)] = count - #print(inv_selection, type(inv_selection)) + # print(inv_selection, type(inv_selection)) print(self.inverted_selection, type(self.inverted_selection)) self.rsp_func_width = rsp_func_width - max_num_field_angles = num_field_angles field_angle_list_deltas = abs(np.diff(self.field_angle_list)) self.max_field_angle_list_delta = max(field_angle_list_deltas) - #print(self.max_field_angle_list_delta) + # print(self.max_field_angle_list_delta) if self.field_angle_range is None: begin_slit_index = np.int64(0) end_slit_index = np.int64(len(self.field_angle_list) - 1) print("begin index", begin_slit_index, ", end index", end_slit_index) self.field_angle_range_index_list = [begin_slit_index, end_slit_index] - self.field_angle_range_list = self.field_angle_list[self.field_angle_range_index_list] + self.field_angle_range_list = self.field_angle_list[ + self.field_angle_range_index_list + ] 
else: assert len(self.field_angle_range) == 2 angle_index_list = [] for angle in self.field_angle_range: delta_angle_list = abs(self.field_angle_list - angle) angle_index = np.argmin(delta_angle_list) - if abs(self.field_angle_list[angle_index] - angle) < self.max_field_angle_list_delta: - #print(angle, angle_index, self.field_angle_list[angle_index]) + if ( + abs(self.field_angle_list[angle_index] - angle) + < self.max_field_angle_list_delta + ): + # print(angle, angle_index, self.field_angle_list[angle_index]) angle_index_list = np.append(angle_index_list, angle_index) print(angle_index_list) new_index_list = [*set(angle_index_list)] @@ -161,85 +175,195 @@ def __post_init__(self): if calc_half_fields_angles[1] == 0.0: end_slit_index = end_slit_index - 1 self.field_angle_range_index_list[1] = end_slit_index - self.field_angle_range_list[1] = self.field_angle_list[end_slit_index] + self.field_angle_range_list[1] = self.field_angle_list[end_slit_index] num_field_angles = (end_slit_index - begin_slit_index) + 1 calc_num_slits = divmod(num_field_angles, self.solution_fov_width) - self.num_slits = int(calc_num_slits[0]) + self.num_slits = int(calc_num_slits[0]) # Check if number of slits is even. calc_half_num_slits = divmod(self.num_slits, 2) if calc_half_num_slits[1] == 0.0: self.num_slits -= 1 - #self.num_slits = num_field_angles * self.solution_fov_width + # self.num_slits = num_field_angles * self.solution_fov_width assert self.num_slits >= 3 - #print("number slits =", self.num_slits) - #self.center_slit = divmod(num_field_angles, 2) + # print("number slits =", self.num_slits) + # self.center_slit = divmod(num_field_angles, 2) self.half_slits = divmod(self.num_slits, 2) # if self.half_slits[0] * self.solution_fov_width > self.center_slit[0]: # self.num_slits = self.num_slits - 2 # self.half_slits = divmod(self.num_slits, 2) self.half_fov = divmod(self.solution_fov_width, 2) - #assert self.half_fov[1] == 1 - + # assert self.half_fov[1] == 1 - #print("old center slit", self.center_slit) - #self.center_slit = self.center_slit + begin_slit_index - self.center_slit = divmod(end_slit_index - begin_slit_index, 2) + begin_slit_index + # print("old center slit", self.center_slit) + # self.center_slit = self.center_slit + begin_slit_index + self.center_slit = ( + divmod(end_slit_index - begin_slit_index, 2) + begin_slit_index + ) print("center slit", self.center_slit, self.num_slits, self.half_slits) # Check if even FOV. 
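The field-angle selection earlier in this hunk maps each requested boundary angle to the nearest grid entry and accepts it only when the residual is below the largest grid spacing. A minimal sketch with an illustrative grid:

```python
import numpy as np

field_angle_list = np.round(np.arange(-10.0, 10.5, 0.5), 2)  # illustrative grid
max_delta = max(abs(np.diff(field_angle_list)))


def nearest_angle_index(angle: float):
    """Return the index of the closest grid angle, or None if off the grid."""
    index = int(np.argmin(abs(field_angle_list - angle)))
    if abs(field_angle_list[index] - angle) < max_delta:
        return index
    return None


begin_index = nearest_angle_index(-5.2)  # accepted, snaps to -5.0
end_index = nearest_angle_index(5.2)     # accepted, snaps to +5.0
```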
# if self.half_fov[1] == 0: - # begin_slit_index = self.center_slit[0] - (self.half_fov[0] - 1) - (self.half_slits[0] * self.solution_fov_width) + # begin_slit_index = self.center_slit[0] - (self.half_fov[0] - 1) + # - (self.half_slits[0] * self.solution_fov_width) # else: # begin_slit_index = self.center_slit[0] - self.half_fov[0] - (self.half_slits[0] * self.solution_fov_width) - begin_slit_index = self.center_slit[0] - self.half_fov[0] - (self.half_slits[0] * self.solution_fov_width) - end_slit_index = self.center_slit[0] + self.half_fov[0] + (self.half_slits[0] * self.solution_fov_width) + begin_slit_index = ( + self.center_slit[0] + - self.half_fov[0] + - (self.half_slits[0] * self.solution_fov_width) + ) + end_slit_index = ( + self.center_slit[0] + + self.half_fov[0] + + (self.half_slits[0] * self.solution_fov_width) + ) # assert begin_slit_index >= 0 and end_slit_index <= (max_num_field_angles - 1) - print("begin_slit_index =", begin_slit_index, "end_slit_index =", end_slit_index) - #print(self.center_slit, (self.half_slits[0], self.solution_fov_width)) - #begin_slit_index = self.center_slit - (self.half_slits[0] * self.solution_fov_width) - #end_slit_ + print( + "begin_slit_index =", begin_slit_index, "end_slit_index =", end_slit_index + ) + # print(self.center_slit, (self.half_slits[0], self.solution_fov_width)) + # begin_slit_index = self.center_slit - (self.half_slits[0] * self.solution_fov_width) + # end_slit_ index = self.center_slit + (self.half_slits[0] * self.solution_fov_width) - #print(begin_slit_index, end_slit_index) + # print(begin_slit_index, end_slit_index) num_field_angles = (end_slit_index - begin_slit_index) + 1 self.field_angle_range_index_list = [begin_slit_index, end_slit_index] - self.field_angle_range_list = self.field_angle_list[self.field_angle_range_index_list] + self.field_angle_range_list = self.field_angle_list[ + self.field_angle_range_index_list + ] self.num_field_angles = num_field_angles response_count = 0 - self.response_function = np.zeros((self.num_selections * self.num_slits, self.rsp_func_width), dtype=np.float32) - if self.smooth_over == 'dependence': + self.response_function = np.zeros( + (self.num_selections * self.num_slits, self.rsp_func_width), + dtype=np.float32, + ) + if self.smooth_over == "dependence": # Smooth over dependence. - #for slit_num in range(self.num_slits): - for slit_num in range(self.center_slit[0] - (self.half_slits[0] * self.solution_fov_width), self.center_slit[0] + ((self.half_slits[0] * self.solution_fov_width) + 1), self.solution_fov_width): + # for slit_num in range(self.num_slits): + for slit_num in range( + self.center_slit[0] - (self.half_slits[0] * self.solution_fov_width), + self.center_slit[0] + + ((self.half_slits[0] * self.solution_fov_width) + 1), + self.solution_fov_width, + ): for index in range(self.num_selections): if self.solution_fov_width == 1: - self.response_function[response_count, :] = rsp_func_cube[inv_selection[1][index], inv_selection[0][index], slit_num, :] + self.response_function[response_count, :] = rsp_func_cube[ + inv_selection[1][index], + inv_selection[0][index], + slit_num, + :, + ] else: # Check if even FOV. 
if self.half_fov[1] == 0: - self.response_function[response_count, :] = rsp_func_cube[inv_selection[1][index], inv_selection[0][index], slit_num - (self.half_fov[0] - 1):slit_num + (self.half_fov[0] - 1) + 1, :].sum(axis=0) + (rsp_func_cube[inv_selection[1][index], inv_selection[0][index], slit_num - self.half_fov[0], :] * 0.5) + (rsp_func_cube[inv_selection[1][index], inv_selection[0][index], slit_num + self.half_fov[0], :] * 0.5) + self.response_function[response_count, :] = ( + rsp_func_cube[ + inv_selection[1][index], + inv_selection[0][index], + slit_num + - (self.half_fov[0] - 1) : slit_num + + (self.half_fov[0] - 1) + + 1, + :, + ].sum(axis=0) + + ( + rsp_func_cube[ + inv_selection[1][index], + inv_selection[0][index], + slit_num - self.half_fov[0], + :, + ] + * 0.5 + ) + + ( + rsp_func_cube[ + inv_selection[1][index], + inv_selection[0][index], + slit_num + self.half_fov[0], + :, + ] + * 0.5 + ) + ) else: - self.response_function[response_count, :] = rsp_func_cube[inv_selection[1][index], inv_selection[0][index], slit_num - self.half_fov[0]:slit_num + self.half_fov[0] + 1, :].sum(axis=0) + self.response_function[response_count, :] = rsp_func_cube[ + inv_selection[1][index], + inv_selection[0][index], + slit_num + - self.half_fov[0] : slit_num + + self.half_fov[0] + + 1, + :, + ].sum(axis=0) response_count += 1 else: - self.smooth_over = 'spatial' + self.smooth_over = "spatial" # Smooth over spatial. for index in range(self.num_selections): - #for slit_num in range(self.num_slits): - for slit_num in range(self.center_slit[0] - (self.half_slits[0] * self.solution_fov_width), self.center_slit[0] + ((self.half_slits[0] * self.solution_fov_width) + 1), self.solution_fov_width): + # for slit_num in range(self.num_slits): + for slit_num in range( + self.center_slit[0] + - (self.half_slits[0] * self.solution_fov_width), + self.center_slit[0] + + ((self.half_slits[0] * self.solution_fov_width) + 1), + self.solution_fov_width, + ): if self.solution_fov_width == 1: - self.response_function[response_count, :] = rsp_func_cube[inv_selection[1][index], inv_selection[0][index], slit_num, :] + self.response_function[response_count, :] = rsp_func_cube[ + inv_selection[1][index], + inv_selection[0][index], + slit_num, + :, + ] else: # Check if even FOV. 
if self.half_fov[1] == 0: - self.response_function[response_count, :] = rsp_func_cube[inv_selection[1][index], inv_selection[0][index], slit_num - (self.half_fov[0] - 1):slit_num + (self.half_fov[1] - 1) + 1, :].sum(axis=0) + (rsp_func_cube[inv_selection[1][index], inv_selection[0][index], slit_num - self.half_fov[0], :] * 0.5) + (rsp_func_cube[inv_selection[1][index], inv_selection[0][index], slit_num + self.half_fov[0], :] * 0.5) + self.response_function[response_count, :] = ( + rsp_func_cube[ + inv_selection[1][index], + inv_selection[0][index], + slit_num + - (self.half_fov[0] - 1) : slit_num + + (self.half_fov[1] - 1) + + 1, + :, + ].sum(axis=0) + + ( + rsp_func_cube[ + inv_selection[1][index], + inv_selection[0][index], + slit_num - self.half_fov[0], + :, + ] + * 0.5 + ) + + ( + rsp_func_cube[ + inv_selection[1][index], + inv_selection[0][index], + slit_num + self.half_fov[0], + :, + ] + * 0.5 + ) + ) else: - self.response_function[response_count, :] = rsp_func_cube[inv_selection[1][index], inv_selection[0][index], slit_num - self.half_fov[0]:slit_num + self.half_fov[0] + 1, :].sum(axis=0) + self.response_function[response_count, :] = rsp_func_cube[ + inv_selection[1][index], + inv_selection[0][index], + slit_num + - self.half_fov[0] : slit_num + + self.half_fov[0] + + 1, + :, + ].sum(axis=0) response_count += 1 - #print("response count =", response_count) + # print("response count =", response_count) self.response_function = self.response_function.transpose() print(np.shape(self.response_function)) @@ -247,7 +371,7 @@ def get_response_function(self): return self.response_function def initialize_input_data(self, input_image: str, image_mask: str = None): - ''' + """ Initialize input image and optional mask. Parameters @@ -261,7 +385,7 @@ def initialize_input_data(self, input_image: str, image_mask: str = None): ------- None. - ''' + """ # Read image image_hdul = fits.open(input_image) image = image_hdul[0].data @@ -272,14 +396,14 @@ def initialize_input_data(self, input_image: str, image_mask: str = None): self.image = image try: - image_exposure_time = image_hdul[0].header['IMG_EXP'] + image_exposure_time = image_hdul[0].header["IMG_EXP"] except KeyError: image_exposure_time = 1.0 self.image /= image_exposure_time self.image[np.where(self.image < 0.0)] = 0.0 self.image_hdul = image_hdul - #print("image (h, w) =", image_height, image_width) + # print("image (h, w) =", image_height, image_width) self.image_width = image_width self.image_height = image_height self.input_image = os.path.basename(input_image) @@ -292,19 +416,23 @@ def initialize_input_data(self, input_image: str, image_mask: str = None): if len(np.where(self.image_mask == 0)) == 0: self.image_mask = None else: - #self.image_mask = np.ones((image_height, image_width), dtype=np.float32) + # self.image_mask = np.ones((image_height, image_width), dtype=np.float32) self.image_mask = None if self.image_mask is not None: self.image_mask_filename = os.path.basename(image_mask) else: - self.image_mask_filename = '' - - def invert(self, model, output_dir: str, - output_file_prefix: str = '', - output_file_postfix: str = '', - level: str = '2.0', - detector_row_range: tp.Union[list, None] = None): - ''' + self.image_mask_filename = "" + + def invert( + self, + model, + output_dir: str, + output_file_prefix: str = "", + output_file_postfix: str = "", + level: str = "2.0", + detector_row_range: tp.Union[list, None] = None, + ): + """ Invert image. Parameters @@ -326,7 +454,7 @@ def invert(self, model, output_dir: str, ------- None. 
- ''' + """ # Verify input data has been initialized. # assert self.image_width != 0 and self.image_height != 0 if detector_row_range is not None: @@ -338,66 +466,107 @@ def invert(self, model, output_dir: str, else: self.detector_row_min = 0 self.detector_row_max = self.image_height - 1 - em_data_cube = np.zeros((self.image_height, self.num_slits, len(self.inv_logt_list), len(self.inv_ion_list)), dtype=np.float32) + em_data_cube = np.zeros( + ( + self.image_height, + self.num_slits, + len(self.inv_logt_list), + len(self.inv_ion_list), + ), + dtype=np.float32, + ) em_data_cube[:, :, :, :] = -1.0 - inverted_data = np.zeros((self.image_height, self.image_width), dtype=np.float32) + inverted_data = np.zeros( + (self.image_height, self.image_width), dtype=np.float32 + ) num_nonconvergences = 0 if detector_row_range is None: image_row_number_range = range(self.image_height) else: - image_row_number_range = range(detector_row_range[0], detector_row_range[1] + 1) + image_row_number_range = range( + detector_row_range[0], detector_row_range[1] + 1 + ) - scorelist=[] + scorelist = [] for image_row_number in image_row_number_range: - #if (image_row_number % 10 == 0): - if (image_row_number % 100 == 0): + # if (image_row_number % 10 == 0): + if image_row_number % 100 == 0: print("image row number =", image_row_number) - #print(image_row_number) - image_row = self.image[image_row_number,:] + # print(image_row_number) + image_row = self.image[image_row_number, :] masked_rsp_func = self.response_function if self.image_mask is not None: - mask_row = self.image_mask[image_row_number,:] + mask_row = self.image_mask[image_row_number, :] mask_pixels = np.where(mask_row == 0) if len(mask_pixels) > 0: image_row[mask_pixels] = 0.0 - #image_row[mask_pixels] = 1e-26 + # image_row[mask_pixels] = 1e-26 masked_rsp_func = self.response_function.copy() masked_rsp_func[mask_pixels, :] = 0.0 - #masked_rsp_func[mask_pixels, :] = 1e-26 + # masked_rsp_func[mask_pixels, :] = 1e-26 with warnings.catch_warnings(): - warnings.filterwarnings("error", category=ConvergenceWarning, module="sklearn") + warnings.filterwarnings( + "error", category=ConvergenceWarning, module="sklearn" + ) try: - em, data_out,score = model.invert(masked_rsp_func, image_row) + em, data_out, score = model.invert(masked_rsp_func, image_row) scorelist.append(score) response_count = 0 - if self.smooth_over == 'dependence': + if self.smooth_over == "dependence": for slit_num in range(self.num_slits): for index in range(self.num_selections): - em_data_cube[image_row_number, slit_num, self.inverted_selection[0][index], self.inverted_selection[1][index]] = em[response_count] + em_data_cube[ + image_row_number, + slit_num, + self.inverted_selection[0][index], + self.inverted_selection[1][index], + ] = em[response_count] if image_row_number == 512: - print("*", image_row_number, slit_num, self.inverted_selection[0][index], self.inverted_selection[1][index], response_count) + print( + "*", + image_row_number, + slit_num, + self.inverted_selection[0][index], + self.inverted_selection[1][index], + response_count, + ) response_count += 1 else: for index in range(self.num_selections): for slit_num in range(self.num_slits): - em_data_cube[image_row_number, slit_num, self.inverted_selection[0][index], self.inverted_selection[1][index]] = em[response_count] + em_data_cube[ + image_row_number, + slit_num, + self.inverted_selection[0][index], + self.inverted_selection[1][index], + ] = em[response_count] response_count += 1 inverted_data[image_row_number, :] = data_out - 
#print("Row", image_row_number, "converged.") + # print("Row", image_row_number, "converged.") except Exception as e: num_nonconvergences += 1 print(e) print("Row", image_row_number, "did not converge!") response_count = 0 - if self.smooth_over == 'dependence': + if self.smooth_over == "dependence": for slit_num in range(self.num_slits): for index in range(self.num_selections): - em_data_cube[image_row_number, slit_num, self.inverted_selection[0][index], self.inverted_selection[1][index]] = 0.0 + em_data_cube[ + image_row_number, + slit_num, + self.inverted_selection[0][index], + self.inverted_selection[1][index], + ] = 0.0 response_count += 1 else: for index in range(self.num_selections): for slit_num in range(self.num_slits): - em_data_cube[image_row_number, slit_num, self.inverted_selection[0][index], self.inverted_selection[1][index]] = 0.0 + em_data_cube[ + image_row_number, + slit_num, + self.inverted_selection[0][index], + self.inverted_selection[1][index], + ] = 0.0 response_count += 1 print("Number Nonconvergences", num_nonconvergences) @@ -407,56 +576,57 @@ def invert(self, model, output_dir: str, # Save EM data cube. base_filename = output_file_prefix - if len(output_file_prefix) > 0 and output_file_prefix[-1] != '_': - base_filename += '_' - base_filename += 'em_data_cube' - if len(output_file_postfix) > 0 and output_file_postfix[0] != '_': - base_filename += '_' + if len(output_file_prefix) > 0 and output_file_prefix[-1] != "_": + base_filename += "_" + base_filename += "em_data_cube" + if len(output_file_postfix) > 0 and output_file_postfix[0] != "_": + base_filename += "_" base_filename += output_file_postfix - em_data_cube_file = output_dir + base_filename + '.fits' + em_data_cube_file = output_dir + base_filename + ".fits" # Transpose data (wavelength, y, x). Readable by ImageJ. em_data_cube = np.transpose(em_data_cube, axes=(3, 2, 0, 1)) em_data_cube_header = self.image_hdul[0].header.copy() - em_data_cube_header['LEVEL'] = (level, 'Level') - em_data_cube_header['UNITS'] = ('1e26 cm-5', 'Units') + em_data_cube_header["LEVEL"] = (level, "Level") + em_data_cube_header["UNITS"] = ("1e26 cm-5", "Units") self.__add_fits_keywords(em_data_cube_header) model.add_fits_keywords(em_data_cube_header) - hdu = fits.PrimaryHDU(data = em_data_cube, header = em_data_cube_header) + hdu = fits.PrimaryHDU(data=em_data_cube, header=em_data_cube_header) # Add binary table (logt). index_list = range(len(self.inv_logt_list)) - col1 = fits.Column(name='index', format='1I', array=index_list) - col2 = fits.Column(name='logt', format='1E', array=self.inv_logt_list) + col1 = fits.Column(name="index", format="1I", array=index_list) + col2 = fits.Column(name="logt", format="1E", array=self.inv_logt_list) logt_hdu = fits.BinTableHDU.from_columns([col1, col2]) # Add binary table (ion). index_list = range(len(self.inv_ion_list)) - col1 = fits.Column(name='index', format='1I', array=index_list) - col2 = fits.Column(name='ion', format='10A', array=self.inv_ion_list) + col1 = fits.Column(name="index", format="1I", array=index_list) + col2 = fits.Column(name="ion", format="10A", array=self.inv_ion_list) ion_hdu = fits.BinTableHDU.from_columns([col1, col2]) hdulist = fits.HDUList([hdu, logt_hdu, ion_hdu]) hdulist.writeto(em_data_cube_file, overwrite=True) # Save model predicted data. 
base_filename = output_file_prefix - if len(output_file_prefix) > 0 and output_file_prefix[-1] != '_': - base_filename += '_' - base_filename += 'model_predicted_data' - if len(output_file_postfix) > 0 and output_file_postfix[0] != '_': - base_filename += '_' + if len(output_file_prefix) > 0 and output_file_prefix[-1] != "_": + base_filename += "_" + base_filename += "model_predicted_data" + if len(output_file_postfix) > 0 and output_file_postfix[0] != "_": + base_filename += "_" base_filename += output_file_postfix data_file = output_dir + base_filename + ".fits" model_predicted_data_hdul = self.image_hdul.copy() model_predicted_data_hdul[0].data = inverted_data - model_predicted_data_hdul[0].header['LEVEL'] = (level, 'Level') - model_predicted_data_hdul[0].header['UNITS'] = 'Electron s-1' + model_predicted_data_hdul[0].header["LEVEL"] = (level, "Level") + model_predicted_data_hdul[0].header["UNITS"] = "Electron s-1" self.__add_fits_keywords(model_predicted_data_hdul[0].header) model.add_fits_keywords(model_predicted_data_hdul[0].header) model_predicted_data_hdul.writeto(data_file, overwrite=True) # save scores - f = open(output_dir+'/inversion_scores.txt','w') + f = open(output_dir + "/inversion_scores.txt", "w") for row in range(len(scorelist)): - f.write(str(row)+' '+str(scorelist[row])+'\n ') + f.write(str(row) + " " + str(scorelist[row]) + "\n ") f.close() + # def multiprocessing_callback(self, result): # # image_row_number = result[0] # # em = result[1] @@ -479,7 +649,7 @@ def invert(self, model, output_dir: str, # self.mp_inverted_data[result[0], :] = result[2] - # def multiprocessing_invert_image_row(self, image_row_number: np.int32, model): + # def multiprocessing_invert_image_row(self, image_row_number: np.int32, model): # #print("Inverting image row", image_row_number) # image_row = self.image[image_row_number,:] # masked_rsp_func = self.response_function @@ -520,7 +690,6 @@ def invert(self, model, output_dir: str, # # put the item in the queue # await queue.put((image_row_number, masked_rsp_func, image_row, model)) - # async def consume(self, queue, answer, i): # print(i) # while True: @@ -600,8 +769,10 @@ def invert(self, model, output_dir: str, # # #with mp.Pool(processes=4) as pool: # # with mp.Pool(processes=os.cpu_count()) as pool: # # for i in range(self.image_height): - # # pool.apply_async(self.multiprocessing_invert_image_row, args = (i, model), callback = self.multiprocessing_callback) - # # #pool.apply_async(self.multiprocessing_invert_image_row, args = (i, ), callback = self.multiprocessing_callback) + # # pool.apply_async(self.multiprocessing_invert_image_row, + # args = (i, model), callback = self.multiprocessing_callback) + # # #pool.apply_async(self.multiprocessing_invert_image_row, + # args = (i, ), callback = self.multiprocessing_callback) # # pool.close() # # pool.join() @@ -665,7 +836,7 @@ def invert(self, model, output_dir: str, # model_predicted_data_hdul.writeto(data_file, overwrite=True) def __add_fits_keywords(self, header): - ''' + """ Add FITS keywords to FITS header. Parameters @@ -677,26 +848,40 @@ def __add_fits_keywords(self, header): ------- None. 
-        '''
-        header['INV_DATE'] = (self.inv_date, 'Inversion Date')
-        header['RSPFUNC'] = (self.rsp_func_date, 'Response Functions Filename')
-        header['RSP_DATE'] = (self.rsp_func_cube_filename, 'Response Functions Creation Date')
-        header['ABUNDANC'] = (self.abundance, 'Abundance')
-        header['ELECDIST'] = (self.electron_distribution, 'Electron Distribution')
-        header['CHIANT_V'] = (self.chianti_version, 'Chianti Version')
-        header['INVIMG'] = (self.input_image, 'Inversion Image Filename')
-        header['INVMASK'] = (self.image_mask_filename, 'Inversion Mask Filename')
-        header['SLTNFOV'] = (self.solution_fov_width, 'Solution FOV Width')
-        #header['DEPNAME'] = (self.rsp_dep_name, 'Dependence Name')
-        header['SMTHOVER'] = (self.smooth_over, 'Smooth Over')
-        header['FA_MIN'] = (f"{self.field_angle_range_list[0]:.3f}", 'Minimum Field Angle')
-        header['FA_DLT'] = (f"{self.max_field_angle_list_delta:.3f}", 'Delta Field Angle')
-        header['FA_NUM'] = (self.num_field_angles, 'Number Field Angles')
-        header['FA_CDELT'] = (f"{self.solution_fov_width * self.max_field_angle_list_delta:.3f}", 'Field Angle CDELT')
-        header['DROW_MIN'] = (self.detector_row_min, 'Minimum Detector Row')
-        header['DROW_MAX'] = (self.detector_row_max, 'Maximum Detector Row')
-
-    def create_forward_model(self, em_data_cube_file: str, output_dir: str, image_mask_file: str = None):
+        """
+        header["INV_DATE"] = (self.inv_date, "Inversion Date")
+        header["RSPFUNC"] = (
+            self.rsp_func_cube_filename,
+            "Response Functions Filename",
+        )
+        header["RSP_DATE"] = (self.rsp_func_date, "Response Functions Creation Date")
+        header["ABUNDANC"] = (self.abundance, "Abundance")
+        header["ELECDIST"] = (self.electron_distribution, "Electron Distribution")
+        header["CHIANT_V"] = (self.chianti_version, "Chianti Version")
+        header["INVIMG"] = (self.input_image, "Inversion Image Filename")
+        header["INVMASK"] = (self.image_mask_filename, "Inversion Mask Filename")
+        header["SLTNFOV"] = (self.solution_fov_width, "Solution FOV Width")
+        # header['DEPNAME'] = (self.rsp_dep_name, 'Dependence Name')
+        header["SMTHOVER"] = (self.smooth_over, "Smooth Over")
+        header["FA_MIN"] = (
+            f"{self.field_angle_range_list[0]:.3f}",
+            "Minimum Field Angle",
+        )
+        header["FA_DLT"] = (
+            f"{self.max_field_angle_list_delta:.3f}",
+            "Delta Field Angle",
+        )
+        header["FA_NUM"] = (self.num_field_angles, "Number Field Angles")
+        header["FA_CDELT"] = (
+            f"{self.solution_fov_width * self.max_field_angle_list_delta:.3f}",
+            "Field Angle CDELT",
+        )
+        header["DROW_MIN"] = (self.detector_row_min, "Minimum Detector Row")
+        header["DROW_MAX"] = (self.detector_row_max, "Maximum Detector Row")
+
+    def create_forward_model(
+        self, em_data_cube_file: str, output_dir: str, image_mask_file: str = None
+    ):
         print(em_data_cube_file)
         assert self.num_selections >= 1
         # Read EM data cube
@@ -715,47 +900,79 @@ def create_forward_model(self, em_data_cube_file: str, output_dir: str, image_ma
         else:
             image_mask = None

-        #forward_model_image = np.zeros((self.image_height, self.image_width), dtype=np.float32)
+        # forward_model_image = np.zeros((self.image_height, self.image_width), dtype=np.float32)
         forward_model_image = np.zeros((em_rows, self.rsp_func_width), dtype=np.float32)

         if image_mask is None:
             response_count = 0
-            if self.smooth_over == 'dependence':
+            if self.smooth_over == "dependence":
                 # Smooth over dependence. 
for slit_num in range(self.num_slits): for index in range(self.num_selections): - forward_model_image += np.dot(em_data_cube[self.inverted_selection[1][index], self.inverted_selection[0][index], :, slit_num][:, None], self.response_function[:, response_count][None, :]) + forward_model_image += np.dot( + em_data_cube[ + self.inverted_selection[1][index], + self.inverted_selection[0][index], + :, + slit_num, + ][:, None], + self.response_function[:, response_count][None, :], + ) response_count += 1 else: - self.smooth_over = 'spatial' + self.smooth_over = "spatial" # Smooth over spatial. for index in range(self.num_selections): for slit_num in range(self.num_slits): - forward_model_image += np.dot(em_data_cube[self.inverted_selection[1][index], self.inverted_selection[0][index], :, slit_num][:, None], self.response_function[:, response_count][None, :]) + forward_model_image += np.dot( + em_data_cube[ + self.inverted_selection[1][index], + self.inverted_selection[0][index], + :, + slit_num, + ][:, None], + self.response_function[:, response_count][None, :], + ) response_count += 1 else: - #for image_row_number in range(self.image_height): - for image_row_number in range(499,551):#em_rows): - #print("1", image_row_number) - mask_row = self.image_mask[image_row_number,:] + # for image_row_number in range(self.image_height): + for image_row_number in range(499, 551): # em_rows): + # print("1", image_row_number) + mask_row = self.image_mask[image_row_number, :] mask_pixels = np.where(mask_row == 0) masked_rsp_func = self.response_function.copy() masked_rsp_func[mask_pixels, :] = 0.0 response_count = 0 - if self.smooth_over == 'dependence': + if self.smooth_over == "dependence": # Smooth over dependence. for slit_num in range(self.num_slits): for index in range(self.num_selections): - forward_model_image[image_row_number, :] += np.dot(em_data_cube[self.inverted_selection[1][index], self.inverted_selection[0][index], image_row_number, slit_num], masked_rsp_func[:, response_count]) + forward_model_image[image_row_number, :] += np.dot( + em_data_cube[ + self.inverted_selection[1][index], + self.inverted_selection[0][index], + image_row_number, + slit_num, + ], + masked_rsp_func[:, response_count], + ) response_count += 1 else: - self.smooth_over = 'spatial' + self.smooth_over = "spatial" # Smooth over spatial. 
for index in range(self.num_selections): for slit_num in range(self.num_slits): - forward_model_image[image_row_number, :] += np.dot(em_data_cube[self.inverted_selection[1][index], self.inverted_selection[0][index], image_row_number, slit_num], masked_rsp_func[:, response_count]) + forward_model_image[image_row_number, :] += np.dot( + em_data_cube[ + self.inverted_selection[1][index], + self.inverted_selection[0][index], + image_row_number, + slit_num, + ], + masked_rsp_func[:, response_count], + ) response_count += 1 - result = em_data_cube_file.find('em_data_cube') + result = em_data_cube_file.find("em_data_cube") if result == -1: forward_model_file = output_dir + "forward_model.fits" else: diff --git a/overlappogram/reconstruct_inverted_image.py b/overlappogram/reconstruct_inverted_image.py index 9acbf88..c8ab25b 100644 --- a/overlappogram/reconstruct_inverted_image.py +++ b/overlappogram/reconstruct_inverted_image.py @@ -5,9 +5,13 @@ from astropy.io import fits -def reconstruct_inverted_image(em_data_cube_slot_data: str, rsp_dep_file_fmt: str, - output_dir_path: str, rsp_dep_list: np.ndarray = None): - ''' +def reconstruct_inverted_image( + em_data_cube_slot_data: str, + rsp_dep_file_fmt: str, + output_dir_path: str, + rsp_dep_list: np.ndarray = None, +): + """ Creates an image from the EM data cube. If the response dependence list is None, a data cube is created for all dependences in binary table. @@ -26,28 +30,28 @@ def reconstruct_inverted_image(em_data_cube_slot_data: str, rsp_dep_file_fmt: st ------- None. - ''' + """ image_hdul = fits.open(em_data_cube_slot_data) em_data_cube = image_hdul[0].data print(np.shape(em_data_cube)) num_rows, num_slits, num_deps = np.shape(em_data_cube) try: - pixel_fov_width = image_hdul[0].header['PIXELFOV'] - solution_fov_width = image_hdul[0].header['SLTNFOV'] + pixel_fov_width = image_hdul[0].header["PIXELFOV"] + solution_fov_width = image_hdul[0].header["SLTNFOV"] calc_shift_width = divmod(solution_fov_width, pixel_fov_width) slit_shift_width = int(round(calc_shift_width[0])) - except: + except: # noqa: E722 # TODO figure out what exception was expected slit_shift_width = 1 print("slit shift width =", slit_shift_width) binary_table_exists = True try: - #dep_name = image_hdul[0].header['DEPNAME'] - #print("dep name =", dep_name) - dep_indices = image_hdul[1].data['index'] - #dep_list = image_hdul[1].data[dep_name] - dep_list = image_hdul[1].data['ion'] + # dep_name = image_hdul[0].header['DEPNAME'] + # print("dep name =", dep_name) + dep_indices = image_hdul[1].data["index"] + # dep_list = image_hdul[1].data[dep_name] + dep_list = image_hdul[1].data["ion"] print("1", dep_indices, dep_list) if rsp_dep_list is not None: dep_mask = np.isin(dep_list, rsp_dep_list) @@ -60,10 +64,10 @@ def reconstruct_inverted_image(em_data_cube_slot_data: str, rsp_dep_file_fmt: st if len(new_dep_list) > 0: dep_indices = new_dep_indices dep_list = new_dep_list - #except Exception as e: - except: + # except Exception as e: + except: # noqa: E722 # TODO figure out what exception was expected binary_table_exists = False - #print(repr(e)) + # print(repr(e)) image_allocated = False @@ -80,13 +84,19 @@ def reconstruct_inverted_image(em_data_cube_slot_data: str, rsp_dep_file_fmt: st for slit_index in range(num_slits): slit_shift = (slit_index - num_half_slits) * slit_shift_width if slit_shift < 0: - slit_rsp = np.pad(dep_rsp, (0, -slit_shift), mode='constant')[-slit_shift:] + slit_rsp = np.pad(dep_rsp, (0, -slit_shift), mode="constant")[ + -slit_shift: + ] elif slit_shift > 0: 
diff --git a/overlappogram/ridge_model.py b/overlappogram/ridge_model.py
index 69556bb..89d15ee 100644
--- a/overlappogram/ridge_model.py
+++ b/overlappogram/ridge_model.py
@@ -22,8 +22,8 @@ def invert(self, response_function, data, sample_weights=None):
     def add_fits_keywords(self, header):
         params = self.model.get_params()
         # print(params)
-        header['INVMDL'] = ('Elastic Net', 'Inversion Model')
-        header['ALPHA'] = (params['alpha'], 'Inversion Model Alpha')
+        header["INVMDL"] = ("Ridge", "Inversion Model")
+        header["ALPHA"] = (params["alpha"], "Inversion Model Alpha")
         # header['RHO'] = (params['l1_ratio'], 'Inversion Model Rho')
 
     def get_score(self, response_function, data):
diff --git a/overlappogram/sgd_model.py b/overlappogram/sgd_model.py
index fd0f403..0faeb5c 100644
--- a/overlappogram/sgd_model.py
+++ b/overlappogram/sgd_model.py
@@ -20,7 +20,8 @@ def invert(self, response_function, data, sample_weights=None):
     # return em, data_out, score
 
     def add_fits_keywords(self, header):
-        params = self.model.get_params()
+        pass
+        # params = self.model.get_params()
         # # print(params)
         # header['INVMDL'] = ('Elastic Net', 'Inversion Model')
         # header['ALPHA'] = (params['alpha'], 'Inversion Model Alpha')
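Both model wrappers stamp their hyperparameters into the output FITS header using astropy's `(value, comment)` card assignment, the same convention `add_fits_keywords` follows above; a minimal sketch of that convention:

```python
from astropy.io import fits

header = fits.Header()
# Assigning a (value, comment) tuple sets both the card value and its comment.
header["INVMDL"] = ("Ridge", "Inversion Model")
header["ALPHA"] = (0.1, "Inversion Model Alpha")
# repr(header) then shows cards along the lines of:
#   INVMDL  = 'Ridge   '           / Inversion Model
#   ALPHA   =                  0.1 / Inversion Model Alpha
print(repr(header))
```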
diff --git a/overlappogram/train.py b/overlappogram/train.py
index 809d37f..0827461 100644
--- a/overlappogram/train.py
+++ b/overlappogram/train.py
@@ -9,7 +9,7 @@
 
 @dataclass(order=True)
 class Train:
-    '''
+    """
     Inversion for overlappogram data.
 
     Attributes
@@ -37,15 +37,17 @@ class Train:
     -------
     None.
 
-    '''
+    """
+
     pixel_fov_width: np.float64
     solution_fov_width: np.float64
     slit_fov_width: np.float64
     rsp_dep_name: str
     rsp_dep_list: list
     rsp_dep_file_fmt: str
-    rsp_dep_desc_fmt: str = ''
-    smooth_over: str = 'spatial'
+    rsp_dep_desc_fmt: str = ""
+    smooth_over: str = "spatial"
+
     def __post_init__(self):
         # Calculate number of slits
         calc_num_slits = divmod(self.slit_fov_width, self.solution_fov_width)
@@ -54,22 +56,24 @@ def __post_init__(self):
             self.num_slits += 1
         if self.num_slits % 2 == 0.0:
             self.num_slits += 1
-        #print("number slits =", self.num_slits)
+        # print("number slits =", self.num_slits)
         self.half_slits = divmod(self.num_slits, 2)
-        #print("half slits =", self.half_slits)
+        # print("half slits =", self.half_slits)
         # calc_shift_width = divmod(self.solution_fov_width, self.pixel_fov_width)
         # self.slit_shift_width = int(round(calc_shift_width[0]))
-        self.slit_shift_width = int(round(self.solution_fov_width / self.pixel_fov_width))
-        #print("slit shift width =", self.slit_shift_width)
+        self.slit_shift_width = int(
+            round(self.solution_fov_width / self.pixel_fov_width)
+        )
+        # print("slit shift width =", self.slit_shift_width)
         self.image_height = 0
         self.image_width = 0
         # Read response files and create response matrix
         response_files = [self.rsp_dep_file_fmt.format(i) for i in self.rsp_dep_list]
-        #print("Response files =", response_files)
+        # print("Response files =", response_files)
         self.num_response_files = len(response_files)
-        assert(self.num_response_files > 0)
-        #print("num rsp files =", self.num_response_files)
+        assert self.num_response_files > 0
+        # print("num rsp files =", self.num_response_files)
         self.groups = np.zeros(self.num_slits * self.num_response_files, dtype=int)
         response_count = 0
         for index in range(len(response_files)):
@@ -79,32 +83,46 @@ def __post_init__(self):
             self.pixels = dep_em_data.iloc[:, 0].values
             self.wavelengths = dep_em_data.iloc[:, 1].values
             self.wavelength_width = len(self.wavelengths)
-            self.response_function = np.zeros((self.num_response_files * self.num_slits, self.wavelength_width))
+            self.response_function = np.zeros(
+                (self.num_response_files * self.num_slits, self.wavelength_width)
+            )
             em = dep_em_data.iloc[:, 2].values
-            if self.smooth_over == 'dependence':
+            if self.smooth_over == "dependence":
                 # Smooth over dependence.
                 slit_count = 0
                 for slit_num in range(-self.half_slits[0], self.half_slits[0] + 1):
                     slit_shift = slit_num * self.slit_shift_width
                     if slit_shift < 0:
-                        slit_em = np.pad(em, (0, -slit_shift), mode='constant')[-slit_shift:]
+                        slit_em = np.pad(em, (0, -slit_shift), mode="constant")[
+                            -slit_shift:
+                        ]
                     elif slit_shift > 0:
-                        slit_em = np.pad(em, (slit_shift, 0), mode='constant')[:-slit_shift]
+                        slit_em = np.pad(em, (slit_shift, 0), mode="constant")[
+                            :-slit_shift
+                        ]
                     else:
                         slit_em = em
-                    self.response_function[(self.num_response_files * slit_count) + response_count, :] = slit_em
-                    self.groups[(self.num_response_files * slit_count) + response_count] = index
+                    self.response_function[
+                        (self.num_response_files * slit_count) + response_count, :
+                    ] = slit_em
+                    self.groups[
+                        (self.num_response_files * slit_count) + response_count
+                    ] = index
                     slit_count += 1
                 response_count += 1
             else:
-                self.smooth_over = 'spatial'
+                self.smooth_over = "spatial"
                 # Smooth over spatial.
                 for slit_num in range(-self.half_slits[0], self.half_slits[0] + 1):
                     slit_shift = slit_num * self.slit_shift_width
                     if slit_shift < 0:
-                        slit_em = np.pad(em, (0, -slit_shift), mode='constant')[-slit_shift:]
+                        slit_em = np.pad(em, (0, -slit_shift), mode="constant")[
+                            -slit_shift:
+                        ]
                     elif slit_shift > 0:
-                        slit_em = np.pad(em, (slit_shift, 0), mode='constant')[:-slit_shift]
+                        slit_em = np.pad(em, (slit_shift, 0), mode="constant")[
+                            :-slit_shift
+                        ]
                     else:
                         slit_em = em
                     self.response_function[response_count, :] = slit_em
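`__post_init__` forces the slit count to be odd so a central slit exists, with `half_slits[0]` neighbors on each side feeding the `range(-half_slits[0], half_slits[0] + 1)` loops above. The lines between the two hunks are elided by the diff, so the remainder handling below is inferred; a sketch of the arithmetic with made-up widths:

```python
def compute_num_slits(slit_fov_width: float, solution_fov_width: float) -> int:
    """Slit count covering the slit FOV, forced odd so a central slit exists."""
    quotient, remainder = divmod(slit_fov_width, solution_fov_width)
    num_slits = int(quotient)
    if remainder > 0.0:  # inferred from the elided context lines
        num_slits += 1   # cover the leftover fraction of the FOV
    if num_slits % 2 == 0:
        num_slits += 1   # guarantee a central slit
    return num_slits

num_slits = compute_num_slits(40.0, 2.4)
half_slits = divmod(num_slits, 2)
print(num_slits, half_slits)  # 17 (8, 1)
```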
@@ -112,15 +130,15 @@ def __post_init__(self):
                     response_count += 1
 
         print("groups =", self.groups)
-        #print("response count =", response_count)
+        # print("response count =", response_count)
         self.response_function = self.response_function.transpose()
-        if self.rsp_dep_desc_fmt == '':
+        if self.rsp_dep_desc_fmt == "":
             max_dep_len = len(max(self.rsp_dep_list, key=len))
-            self.rsp_dep_desc_fmt = str(max_dep_len) + 'A'
+            self.rsp_dep_desc_fmt = str(max_dep_len) + "A"
 
     def initialize_input_data(self, input_image: str, image_mask: str = None):
-        '''
+        """
         Initialize input image and optional mask.
 
         Parameters
@@ -134,14 +152,14 @@ def initialize_input_data(self, input_image: str, image_mask: str = None):
         -------
         None.
 
-        '''
+        """
         # Read image
         image_hdul = fits.open(input_image)
         image_height, image_width = np.shape(image_hdul[0].data)
         # Verify image width equals number of wavelengths in dependence files.
         assert image_width == self.wavelength_width
         self.image = image_hdul[0].data
-        #print("image (h, w) =", image_height, image_width)
+        # print("image (h, w) =", image_height, image_width)
         self.image_width = image_width
         self.image_height = image_height
         self.input_image = os.path.basename(input_image)
@@ -154,11 +172,11 @@ def initialize_input_data(self, input_image: str, image_mask: str = None):
             if len(np.where(image_mask == 0)) == 0:
                 self.image_mask = None
         else:
-            #self.image_mask = np.ones((image_height, image_width))
+            # self.image_mask = np.ones((image_height, image_width))
             self.image_mask = None
 
     def invert(self):
-        '''
+        """
         Invert image.
 
         Parameters
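One behavior this reformatting pass leaves untouched: `len(np.where(image_mask == 0))` is the length of the tuple `np.where` returns (one index array per dimension), not a pixel count, so for a 2-D mask it is always 2 and the condition never fires. If the intent is "no masked pixels", `np.count_nonzero` says it directly; a small demonstration:

```python
import numpy as np

image_mask = np.ones((4, 4))
print(len(np.where(image_mask == 0)))     # 2: tuple length equals ndim, even with no zeros
print(np.count_nonzero(image_mask == 0))  # 0: the actual number of masked pixels

image_mask[1, 2] = 0
print(np.count_nonzero(image_mask == 0))  # 1 once a pixel is masked
```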
@@ -176,16 +194,16 @@ def invert(self):
         -------
         None.
 
-        '''
-        #for image_row_number in range(self.image_height):
-        #for image_row_number in range(180, 184):
+        """
+        # for image_row_number in range(self.image_height):
+        # for image_row_number in range(180, 184):
         for image_row_number in range(240, 250):
-        #for image_row_number in range(30, 34):
-        #for image_row_number in range(530, 534):
+            # for image_row_number in range(30, 34):
+            # for image_row_number in range(530, 534):
             # if (image_row_number % 10 == 0):
             #     print("image row number =", image_row_number)
             print("image row number =", image_row_number)
-            image_row = self.image[image_row_number,:]
+            image_row = self.image[image_row_number, :]
             # masked_rsp_func = self.response_function
             # if self.image_mask is not None:
             #     mask_row = self.image_mask[image_row_number,:]
@@ -195,19 +213,25 @@ def invert(self):
            #     masked_rsp_func = self.response_function.copy()
            #     masked_rsp_func[mask_pixels, :] = 0
 
-            #alphas = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 0.1]
-            #alphas = np.arange(0.1, 1.0, 0.1)
-            #ratios = [0.05, 0.1, 0.15, 0.2]
-            model = LassoLarsCV(max_iter=50000, precompute=True, normalize=True, positive=True, fit_intercept=True)
+            # alphas = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 0.1]
+            # alphas = np.arange(0.1, 1.0, 0.1)
+            # ratios = [0.05, 0.1, 0.15, 0.2]
+            model = LassoLarsCV(
+                max_iter=50000,
+                precompute=True,
+                normalize=True,
+                positive=True,
+                fit_intercept=True,
+            )
             model.fit(self.response_function, image_row)
-            #print("model =", model)
-            print('alpha: %f' % model.alpha_)
-            #print('l1_ratio: %f' % model.l1_ratio_)
-            print('intercept: %f' % model.intercept_)
-            print('n_iter: %f' % model.n_iter_)
+            # print("model =", model)
+            print("alpha: %f" % model.alpha_)
+            # print('l1_ratio: %f' % model.l1_ratio_)
+            print("intercept: %f" % model.intercept_)
+            print("n_iter: %f" % model.n_iter_)
 
     def __add_fits_keywords(self, header):
-        '''
+        """
         Add FITS keywords to FITS header.
 
         Parameters
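A portability note on the fit above: `normalize=True` was deprecated for the Lars-family estimators in scikit-learn 1.0 and has since been removed, so on current releases the scaling has to be done explicitly (a pipeline with a scaler is the usual substitute, though not bit-identical to the old behavior). A self-contained sketch on synthetic data, with shapes and values made up:

```python
import numpy as np
from sklearn.linear_model import LassoLarsCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
X = rng.random((100, 20))  # stand-in for the transposed response matrix
y = X @ rng.random(20) + 0.01 * rng.standard_normal(100)  # stand-in for one image row

model = make_pipeline(
    # Scale only; keeping features nonnegative preserves the positive=True constraint.
    StandardScaler(with_mean=False),
    LassoLarsCV(max_iter=50000, precompute=True, positive=True, fit_intercept=True),
)
model.fit(X, y)
lasso = model[-1]
print("alpha: %f" % lasso.alpha_)
print("n_iter: %f" % lasso.n_iter_)
```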
@@ -219,10 +243,12 @@ def __add_fits_keywords(self, header):
         -------
         None.
 
-        '''
-        header.append(('INPUTIMG', self.input_image, 'Input Image'), end=True)
-        header.append(('PIXELFOV', self.pixel_fov_width, 'Pixel FOV Width'), end=True)
-        header.append(('SLTNFOV', self.solution_fov_width, 'Solution FOV Width'), end=True)
-        header.append(('SLITFOV', self.slit_fov_width, 'Slit FOV Width'), end=True)
-        header.append(('DEPNAME', self.rsp_dep_name, 'Dependence Name'), end=True)
-        header.append(('SMTHOVER', self.smooth_over, 'Smooth Over'), end=True)
+        """
+        header.append(("INPUTIMG", self.input_image, "Input Image"), end=True)
+        header.append(("PIXELFOV", self.pixel_fov_width, "Pixel FOV Width"), end=True)
+        header.append(
+            ("SLTNFOV", self.solution_fov_width, "Solution FOV Width"), end=True
+        )
+        header.append(("SLITFOV", self.slit_fov_width, "Slit FOV Width"), end=True)
+        header.append(("DEPNAME", self.rsp_dep_name, "Dependence Name"), end=True)
+        header.append(("SMTHOVER", self.smooth_over, "Smooth Over"), end=True)
diff --git a/pyproject.toml b/pyproject.toml
index 4d42e6b..f08447e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -35,7 +35,7 @@ classifiers = [
 ]
 
 [project.optional-dependencies]
-test = ["pytest", "pytest-doctestplus", "pytest-cov", "ruff"]
+test = ["pytest", "pytest-doctestplus", "pytest-cov", "ruff", "coverage"]
 docs = ["sphinx", "sphinx-automodapi"]
 
 [project.urls]
@@ -49,7 +49,7 @@ Changelog = "https://github.com/jmbhughes/overlappogram/blob/main/CHANGELOG.md"
 packages = ["overlappogram"]
 
 [tool.codespell]
-skip = "*.fts,*.fits,venv,*.pro,*.asdf"
+skip = "*.fts,*.fits,venv,*.pro,*.asdf,*.ipynb"
 
 [tool.ruff]
 target-version = "py39"
diff --git a/run_multiion_inversion.py b/run_multiion_inversion.py
index b0ed378..5464616 100644
--- a/run_multiion_inversion.py
+++ b/run_multiion_inversion.py
@@ -10,37 +10,49 @@
 
 import toml  # noqa: E402
 
-from overlappogram.inversion_field_angles import Inversion  # noqa: E402
 from magixs_data_products import MaGIXSDataProducts  # noqa: E402
+from overlappogram.inversion_field_angles import Inversion  # noqa: E402
 
 
 def run_inversion(image_path, config: dict):
-    inversion = Inversion(rsp_func_cube_file=config['paths']['response'],
-                          rsp_dep_name=config['inversion']['response_dependency_name'],
-                          rsp_dep_list=config['inversion']['response_dependency_list'],
-                          solution_fov_width=config['inversion']['solution_fov_width'],
-                          smooth_over=config['inversion']['smooth_over'],
-                          field_angle_range=config['inversion']['field_angle_range'])
+    inversion = Inversion(
+        rsp_func_cube_file=config["paths"]["response"],
+        rsp_dep_name=config["inversion"]["response_dependency_name"],
+        rsp_dep_list=config["inversion"]["response_dependency_list"],
+        solution_fov_width=config["inversion"]["solution_fov_width"],
+        smooth_over=config["inversion"]["smooth_over"],
+        field_angle_range=config["inversion"]["field_angle_range"],
+    )
 
-    inversion.initialize_input_data(image_path,
-                                    None,
-                                    config['paths']['weights'])
+    inversion.initialize_input_data(image_path, None, config["paths"]["weights"])
 
     em_cube_paths = []
-    for alpha in config['model']['alphas']:
-        for rho in config['model']['rhos']:
+    for alpha in config["model"]["alphas"]:
+        for rho in config["model"]["rhos"]:
             start = time.time()
-            postfix = 'x'+str(config['inversion']['solution_fov_width'])+'_'+str(rho*10)+'_'+str(alpha)+'_wpsf'
-            em_cube_paths.append(inversion.multiprocessing_invert(config['model'],
-                                                                  alpha,
-                                                                  rho,
-                                                                  config['output']['directory'],
-                                                                  num_threads=config['execution']['num_threads'],
-                                                                  output_file_prefix=config['output']['prefix'],
-                                                                  output_file_postfix=postfix,
-                                                                  detector_row_range=config['inversion']['detector_row_range'],
-                                                                  score=True))
+            postfix = (
+                "x"
+                + str(config["inversion"]["solution_fov_width"])
+                + "_"
+                + str(rho * 10)
+                + "_"
+                + str(alpha)
+                + "_wpsf"
+            )
+            em_cube_paths.append(
+                inversion.multiprocessing_invert(
+                    config["model"],
+                    alpha,
+                    rho,
+                    config["output"]["directory"],
+                    num_threads=config["execution"]["num_threads"],
+                    output_file_prefix=config["output"]["prefix"],
+                    output_file_postfix=postfix,
+                    detector_row_range=config["inversion"]["detector_row_range"],
+                    score=True,
+                )
+            )
             end = time.time()
             print("Inversion Time =", end - start)
 
@@ -48,10 +60,10 @@
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='Inverts overlappograms')
+    parser = argparse.ArgumentParser(description="Inverts overlappograms")
 
     parser.add_argument("path")
-    parser.add_argument('config')
+    parser.add_argument("config")
 
     args = parser.parse_args()
     with open(args.config) as f:
@@ -59,9 +71,11 @@ def run_inversion(image_path, config: dict):
 
     em_cube_paths = run_inversion(args.path, config)
 
-    if config['output']['make_spectral']:
+    if config["output"]["make_spectral"]:
         mdp = MaGIXSDataProducts()
-        mdp.create_level2_0_spectrally_pure_images(em_cube_paths,
-                                                   config['paths']['gnt'],
-                                                   config['inversion']['response_dependency_list'],
-                                                   config['output']['directory'])
+        mdp.create_level2_0_spectrally_pure_images(
+            em_cube_paths,
+            config["paths"]["gnt"],
+            config["inversion"]["response_dependency_list"],
+            config["output"]["directory"],
+        )
diff --git a/tests/test_inversion.py b/tests/test_inversion.py
index e69de29..80d2166 100644
--- a/tests/test_inversion.py
+++ b/tests/test_inversion.py
@@ -0,0 +1,2 @@
+def test_empty():
+    assert True
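For reference, `run_inversion` only requires that the TOML file decode to nested tables with the keys read above. A hypothetical minimal config, written as the dict `toml.load(f)` would produce; every value here is a placeholder, and `config["model"]` is handed to `multiprocessing_invert` wholesale, so a real file may need additional model keys:

```python
config = {
    "paths": {
        "response": "./response_cube.fits",  # placeholder paths
        "weights": "./weights.fits",
        "gnt": "./gnt.fits",
    },
    "inversion": {
        "response_dependency_name": "logt",
        "response_dependency_list": [5.8, 6.0, 6.2],
        "solution_fov_width": 2,
        "smooth_over": "spatial",
        "field_angle_range": [-1260, 1260],
        "detector_row_range": [0, 1024],
    },
    "model": {"alphas": [0.1], "rhos": [0.1]},
    "execution": {"num_threads": 8},
    "output": {"directory": "./output", "prefix": "inverted", "make_spectral": False},
}
```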