Add logging

Author: rasmusvt
Date:   2022-06-20 19:16:20 +02:00
Parent: 7214746af1
Commit: cc80a48259


@@ -4,18 +4,23 @@ import os
 import numpy as np
 import nafuma.auxillary as aux
 from nafuma.xanes.calib import find_element
+from datetime import datetime

-def split_scan_data(data: dict, options={}):
+def split_scan_data(data: dict, options={}) -> list:
     ''' Splits a XANES-file from BM31 into different files depending on the edge. Has the option to add intensities of all scans of the same edge into the same file.
     As of now only picks out xmap_rois (fluorescence mode), and only for the Mn, Fe, Co and Ni K-edges.'''

-    required_options = ['save', 'save_folder', 'replace', 'add_rois']
+    required_options = ['log', 'logfile', 'save', 'save_folder', 'replace', 'add_rois']
     default_options = {
-        'save': False,
-        'save_folder': '.',
-        'replace': False,
-        'add_rois': False
+        'log': False,
+        'logfile': f'{datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}_split_edges.log',
+        'save': False,         # whether to save the files or not
+        'save_folder': '.',    # root folder of where to save the files
+        'replace': False,      # whether to replace the files if they already exist
+        'add_rois': False      # whether to add the rois of individual scans of the same edge together
     }

     options = aux.update_options(options=options, required_options=required_options, default_options=default_options)
@@ -30,9 +35,15 @@ def split_scan_data(data: dict, options={}):
         data['path'] = [data['path']]

     all_scans = []

+    if options['log']:
+        aux.write_log(message='Starting file splitting...', options=options)
+
     for filename in data['path']:

+        if options['log']:
+            aux.write_log(message=f'Reading {filename}...', options=options)
+
         with open(filename, 'r') as f:
             lines = f.readlines()
@@ -44,6 +55,9 @@ def split_scan_data(data: dict, options={}):
             # Header line starts with #L - reads headers, and toggles data read-in on
             if line[0:2] == "#L":
                 header, read_data = line[2:].split(), True
+                if options['log']:
+                    aux.write_log(message='... Found scan data. Starting read-in...', options=options)
+
                 continue

             # First line after data started with #C - stops data read-in
@@ -69,27 +83,46 @@ def split_scan_data(data: dict, options={}):
         for i, scan_data in enumerate(scan_datas):
             xanes_df = pd.DataFrame(scan_data).apply(pd.to_numeric)
             xanes_df.columns = headers[i]
+            edge = find_element({'xanes_data_original': xanes_df})
+
+            if options['log']:
+                aux.write_log(message=f'Starting data clean-up ({edge}-edge)... ({i+1}/{len(scan_datas)})', options=options)

             if not ('xmap_roi00' in headers[i]) and (not 'xmap_roi01' in headers[i]):
+                if options['log']:
+                    aux.write_log(message='... Did not find fluorescence data. Skipping...', options=options)
                 continue

-            edge = find_element({'xanes_data_original': xanes_df})
             edges[edge].append(xanes_df)

         if options['add']:
+            if options['log']:
+                aux.write_log(message=f'Addition of rois enabled. Starting addition...', options=options)
+
             added_edges = {'Mn': [], 'Fe': [], 'Co': [], 'Ni': []}

             for edge, scans in edges.items():
+                if options['log']:
+                    aux.write_log(message=f'... Adding rois of the {edge}-edge...', options=options)
+
                 if scans:
                     xanes_df = scans[0]

                     for i, scan in enumerate(scans):
                         if i > 0:
+                            if options['log']:
+                                aux.write_log(message=f'... ... Adding {i}/{len(scans)}', options=options)
+
                             if 'xmap_roi00' in xanes_df.columns:
                                 xanes_df['xmap_roi00'] += scan['xmap_roi00']
                             if 'xmap_roi01' in xanes_df.columns:
@@ -100,7 +133,14 @@ def split_scan_data(data: dict, options={}):
             edges = added_edges

         if options['save']:

+            if options['log']:
+                aux.write_log(message=f'Saving data to {options["save_folder"]}', options=options)
+
             if not os.path.isdir(options['save_folder']):
+                if options['log']:
+                    aux.write_log(message=f'... {options["save_folder"]} does not exist. Creating folder.', options=options)
+
                 os.makedirs(options['save_folder'])
@@ -110,10 +150,26 @@ def split_scan_data(data: dict, options={}):
                 for i, scan in enumerate(scans):
                     count = '' if options['add'] else '_'+str(i).zfill(4)
                     path = os.path.join(options['save_folder'], f'{filename}_{edge}{count}.dat')
-                    scan.to_csv(path)
+
+                    if not os.path.isfile(path):
+                        scan.to_csv(path)
+                        if options['log']:
+                            aux.write_log(message=f'... Scan saved to {path}', options=options)
+
+                    elif options['replace'] and os.path.isfile(path):
+                        scan.to_csv(path)
+                        if options['log']:
+                            aux.write_log(message=f'... File already exists. Overwriting {path}', options=options)
+
+                    elif not options['replace'] and os.path.isfile(path):
+                        if options['log']:
+                            aux.write_log(message=f'... File already exists. Skipping...', options=options)

         all_scans.append(edges)

+    if options['log']:
+        aux.write_log(message=f'All done!', options=options)
+
     return all_scans
@@ -124,9 +180,9 @@ def read_data(data: dict, options={}) -> pd.DataFrame:
     # FIXME Handle the case when dataseries are not the same size
-    required_options = []
+    required_options = ['adjust']
     default_options = {
+        'adjust': 0
     }

     options = aux.update_options(options=options, required_options=required_options, default_options=default_options)
@@ -135,6 +191,7 @@ def read_data(data: dict, options={}) -> pd.DataFrame:
     # Initialise DataFrame with only ZapEnergy-column
     xanes_data = pd.read_csv(data['path'][0])[['ZapEnergy']]
+    xanes_data['ZapEnergy'] += options['adjust']

     if not isinstance(data['path'], list):
         data['path'] = [data['path']]
@@ -157,6 +214,7 @@ def read_data(data: dict, options={}) -> pd.DataFrame:
 def determine_active_roi(scan_data):
     # FIXME For Co-edge, this gave a wrong scan
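For context, a minimal usage sketch of split_scan_data with the logging options introduced in this commit. The import path nafuma.xanes.io and the input file names are assumptions (the diff does not name the module being edited). Note also that the function body reads options['add'] while the defaults define 'add_rois', so the sketch sets both keys.

# Hypothetical usage of split_scan_data with the new logging options.
# The module path and file names below are assumptions, not taken from the commit.
import nafuma.xanes.io as xio

data = {'path': ['BM31_xanes_0001.dat', 'BM31_xanes_0002.dat']}   # raw BM31 scan files

options = {
    'log': True,                    # enable the aux.write_log() calls added in this commit
    'logfile': 'split_edges.log',   # overrides the timestamped default
    'save': True,                   # write one .dat file per edge into save_folder
    'save_folder': 'split',         # created automatically if it does not exist
    'replace': False,               # existing output files are skipped, not overwritten
    'add_rois': True,               # key defined in default_options for summing rois
    'add': True,                    # key the function body actually checks (see options['add'] above)
}

# Returns a list with one {edge: scans} dict per input file.
all_scans = xio.split_scan_data(data=data, options=options)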
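The commit relies on aux.write_log(message=..., options=...) but its implementation is not part of this diff. A minimal sketch of what such a helper could look like, assuming it appends timestamped lines to the file named in options['logfile']; the actual nafuma.auxillary.write_log may behave differently.

from datetime import datetime

def write_log(message: str, options={}):
    # Sketch only: append a timestamped message to the log file given in options['logfile'].
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    with open(options['logfile'], 'a') as log:
        log.write(f'[{timestamp}] {message}\n')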
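The last hunks add an 'adjust' option to read_data, applied as a constant offset to the ZapEnergy column right after read-in. A usage sketch under the same assumptions as above (module path, file names and the size of the shift are placeholders; the diff does not state the unit of ZapEnergy):

# Hypothetical call: read split edge files and shift the energy axis by a constant offset.
import nafuma.xanes.io as xio

data = {'path': ['split/BM31_xanes_Mn_0000.dat', 'split/BM31_xanes_Mn_0001.dat']}

# 'adjust' (default 0) is added to the ZapEnergy column, e.g. to correct a calibration offset.
xanes_data = xio.read_data(data=data, options={'adjust': -0.5})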