From 649196570bff3106a9c99e0d7dfc467145af6633 Mon Sep 17 00:00:00 2001 From: rasmusvt Date: Mon, 11 Jul 2022 13:39:50 +0200 Subject: [PATCH 01/14] Add generic backup file function --- nafuma/auxillary.py | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/nafuma/auxillary.py b/nafuma/auxillary.py index 59eff5a..9879a19 100644 --- a/nafuma/auxillary.py +++ b/nafuma/auxillary.py @@ -1,6 +1,10 @@ import json import numpy as np import os +import shutil + +import time +from datetime import datetime def update_options(options, required_options, default_options): ''' Takes a dictionary of options along with a list of required options and dictionary of default options, and sets all keyval-pairs of options that is not already defined to the default values''' @@ -73,7 +77,7 @@ def floor(a, roundto=1): def write_log(message, options={}): - from datetime import datetime + required_options = ['logfile'] default_options = { @@ -111,4 +115,23 @@ def move_list_element_last(filenames,string): if string in file: del filenames[i] filenames.append(file) - return filenames \ No newline at end of file + return filenames + + + +def backup_file(filename, backup_dir): + # Creates backup-folder if it does not exist + if not os.path.isdir(backup_dir): + os.makedirs(backup_dir) + + + # Get a list of all previous backup files with the same basename as well as the creation time for the + prev_backup_files = [file for file in os.listdir(backup_dir) if os.path.basename(filename.split('.')[0]) in file] + creation_time = datetime.strptime(time.ctime(os.path.getctime(filename)), '%a %b %d %H:%M:%S %Y').strftime("%Y-%m-%d_%H-%M-%S") + ext = '.' + filename.split('.')[-1] + + dst_basename = creation_time + '_' + filename.split('.')[0] + '_' + f'{len(prev_backup_files)}'.zfill(4) + ext + dst = os.path.join(backup_dir, dst_basename) + + + shutil.copy(filename, dst) \ No newline at end of file From cf9499a988edf0afe967edeb6baf71524d828eb3 Mon Sep 17 00:00:00 2001 From: rasmusvt Date: Mon, 11 Jul 2022 13:40:24 +0200 Subject: [PATCH 02/14] Initial commit of refinement submodule --- nafuma/xrd/__init__.py | 2 +- nafuma/xrd/refinement.py | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) create mode 100644 nafuma/xrd/refinement.py diff --git a/nafuma/xrd/__init__.py b/nafuma/xrd/__init__.py index e0e052c..d89f20e 100644 --- a/nafuma/xrd/__init__.py +++ b/nafuma/xrd/__init__.py @@ -1 +1 @@ -from . import io, plot \ No newline at end of file +from . import io, plot, refinement \ No newline at end of file diff --git a/nafuma/xrd/refinement.py b/nafuma/xrd/refinement.py new file mode 100644 index 0000000..64a98e8 --- /dev/null +++ b/nafuma/xrd/refinement.py @@ -0,0 +1,34 @@ +import os +import shutil +import time +import datetime + +import nafuma.auxillary as aux + +def make_big_inp(data: dict, options={}): + + required_options = ['output', 'overwrite', 'backup', 'backup_path'] + + default_options = { + 'output': 'big.inp', # Name of the output .INP file + 'overwrite': False, # Toggles overwrite on / off + 'backup': True, # Toggles backup on / off. Makes a backup of the file if it already exists. Only runs if overwrite is enabled. 
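(The copy itself is handled by the backup_file helper added in the patch above: a call like aux.backup_file(filename='big.inp', backup_dir='backup') (file names purely illustrative) places a copy named along the lines of 2022-07-11_13-39-50_big_0000.inp in backup/, built from the file's timestamp, its basename and a zero-padded counter over earlier backups of the same file.)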
+ 'backup_dir': 'backup' # Specifies the path where the backup files should be located + } + + options = aux.update_options(options=options, required_options=required_options, default_options=default_options) + + + # Raises exception if files exists and overwrite is not enabled + if not options['overwrite']: + if os.path.exists(options['output']): + raise Exception(f'Overwrite disabled and file already exists: {options["output"]}') + + + # Makes a backup of file + elif options['backup'] and os.path.exists(options['output']): + aux.backup_file(filename=options['output'], backup_dir=options['backup_dir']) + + + + \ No newline at end of file From 7ff917bdcd73f56d74286b1c65d44da807f8c61f Mon Sep 17 00:00:00 2001 From: rasmusvt Date: Mon, 11 Jul 2022 17:38:13 +0200 Subject: [PATCH 03/14] Change timestamp to modified instead of creation --- nafuma/auxillary.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nafuma/auxillary.py b/nafuma/auxillary.py index 9879a19..96b9ce2 100644 --- a/nafuma/auxillary.py +++ b/nafuma/auxillary.py @@ -127,7 +127,7 @@ def backup_file(filename, backup_dir): # Get a list of all previous backup files with the same basename as well as the creation time for the prev_backup_files = [file for file in os.listdir(backup_dir) if os.path.basename(filename.split('.')[0]) in file] - creation_time = datetime.strptime(time.ctime(os.path.getctime(filename)), '%a %b %d %H:%M:%S %Y').strftime("%Y-%m-%d_%H-%M-%S") + creation_time = datetime.strptime(time.ctime(os.path.getmtime(filename)), '%a %b %d %H:%M:%S %Y').strftime("%Y-%m-%d_%H-%M-%S") ext = '.' + filename.split('.')[-1] dst_basename = creation_time + '_' + filename.split('.')[0] + '_' + f'{len(prev_backup_files)}'.zfill(4) + ext From 71f3940c12677e56089acb64266aba4e204d8dff Mon Sep 17 00:00:00 2001 From: rasmusvt Date: Mon, 11 Jul 2022 17:38:40 +0200 Subject: [PATCH 04/14] Add creation of .INP-file for multiple refinements --- nafuma/xrd/refinement.py | 115 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 111 insertions(+), 4 deletions(-) diff --git a/nafuma/xrd/refinement.py b/nafuma/xrd/refinement.py index 64a98e8..8637a2c 100644 --- a/nafuma/xrd/refinement.py +++ b/nafuma/xrd/refinement.py @@ -1,24 +1,45 @@ import os import shutil import time +import re import datetime +import warnings import nafuma.auxillary as aux def make_big_inp(data: dict, options={}): + ''' Generates a big .INP-file with all filenames found in data["path"]. Uses a template .INP-file (which has to be generated manually from an initial refinement in TOPAS) and appends this to a large .INP-file + while changing the filenames. ''' - required_options = ['output', 'overwrite', 'backup', 'backup_path'] + required_options = ['template', 'output', 'overwrite', 'backup', 'backup_dir', 'include', 'topas_options', 'save_dir', 'log', 'logfile'] default_options = { - 'output': 'big.inp', # Name of the output .INP file + 'template': 'start.inp', # Name of the template .INP-file + 'output': 'big.inp', # Name of the output .INP-file 'overwrite': False, # Toggles overwrite on / off 'backup': True, # Toggles backup on / off. Makes a backup of the file if it already exists. Only runs if overwrite is enabled. 
- 'backup_dir': 'backup' # Specifies the path where the backup files should be located + 'backup_dir': 'backup', # Specifies the path where the backup files should be located + 'include': [], + 'topas_options': { + 'bootstrap_errors': None, + 'A_matrix_memory_allowed_in_Mbytes': None, + 'approximate_A': False, + 'conserve_memory': False, + 'do_errors': False, + 'continue_after_convergence': False, + 'num_runs': None, + }, + 'save_dir': 'results', + 'log': False, + 'logfile': f'{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}_generate_big_inp.log', } options = aux.update_options(options=options, required_options=required_options, default_options=default_options) + if not os.path.exists(options['template']): + raise Exception(f'Template file not found: {options["template"]}') + # Raises exception if files exists and overwrite is not enabled if not options['overwrite']: if os.path.exists(options['output']): @@ -27,8 +48,94 @@ def make_big_inp(data: dict, options={}): # Makes a backup of file elif options['backup'] and os.path.exists(options['output']): + + if options['log']: + aux.write_log(message=f'File {options["output"]} already exists. Creating backup in {options["backup_dir"]}.') + aux.backup_file(filename=options['output'], backup_dir=options['backup_dir']) - \ No newline at end of file + runlist = os.path.join(os.path.dirname(options['output']), 'runlist.txt') + options['include'].append(runlist) + + with open(options['template'], 'r') as template, open(options['output'], 'w', newline='\n') as output, open(runlist, 'w', newline='\n') as runlist: + + write_headers(output, options) + template = template.read() + + for i, path in enumerate(data['path']): + + s = change_labels_and_paths(template, path, i, options) + output.write(s) + + runlist.write('#define \tUSE_'+f'{i}'.zfill(4) + '\n') + + + +def write_headers(fout, options): + + for file in options['include']: + fout.write(f'#include {file} \n') + + fout.write('\n') + + for option, value in options['topas_options'].items(): + if value: + fout.write(f'{option} {value} \n') + + + +def change_labels_and_paths(template, path, i, options): + + temp_xdd = find_xdd_in_inp(options['template']) + + # Replace diffractogram-path + s = template.replace(temp_xdd, path).replace('XXXX', f'{i}'.zfill(4)) + + basename = os.path.basename(path).split(".")[0] + + # Define regular expressions for output lines + regs = [r'Out_Riet\([\S]*\)', + r'Out_CIF_STR\([\S]*\)', + r'Out_CIF_ADPs\([\S]*\)', + r'Out_CIF_Bonds_Angles\([\S]*\)', + r'Out_FCF\([\S]*\)', + r'Create_hklm_d_Th2_Ip_file\([\S]*\)', + r'out(.*?)append'] + + # Define substitute strings for output lines + subs = [f'Out_Riet({options["save_dir"]}/{basename}_riet.xy)', + f'Out_CIF_STR({options["save_dir"]}/{basename}.cif))', + f'Out_CIF_ADPs({options["save_dir"]}/{basename}.cif))', + f'Out_CIF_Bonds_Angles({options["save_dir"]}/{basename}.cif))', + f'Out_FCF({options["save_dir"]}/{basename}.fcf)', + f'Create_hklm_d_Th2_Ip_file({options["save_dir"]}/{basename}_hkl.dat)', + f'out \t {options["save_dir"]}/{basename}_refined_params.dat'] + + # Substitute strings in output lines + for reg, sub in zip(regs, subs): + s = re.sub(reg, sub, s) + + + return s + + + +def find_xdd_in_inp(path): + ''' Finds the path to the .xy / .xye scan in a given .INP-file. 
Assumes only one occurence of xdd and will return the last one no matter what, but outputs a UserWarning if more than one xdd is found.''' + + with open(path, 'r') as f: + lines = f.readlines() + + + xdds = 0 + for line in lines: + if 'xdd' in line: + xdd = line.split()[-1].strip('"') + xdds += 1 + + if xdds > 1: + warnings.warn(f'More than one path was found in {path}. Returning last occurence - please make sure this is what you want!') + + return xdd From d32b40409c9387e8c2193202dfe397009a02c334 Mon Sep 17 00:00:00 2001 From: rasmusvt Date: Tue, 12 Jul 2022 17:51:08 +0200 Subject: [PATCH 05/14] Add intial version of refinement function --- nafuma/xrd/refinement.py | 176 ++++++++++++++++++++++++++++++++++----- nafuma/xrd/topas.conf | 1 + 2 files changed, 156 insertions(+), 21 deletions(-) create mode 100644 nafuma/xrd/topas.conf diff --git a/nafuma/xrd/refinement.py b/nafuma/xrd/refinement.py index 8637a2c..6a8fb1e 100644 --- a/nafuma/xrd/refinement.py +++ b/nafuma/xrd/refinement.py @@ -1,17 +1,22 @@ +from email.policy import default import os import shutil -import time +import subprocess import re + +import time import datetime + import warnings import nafuma.auxillary as aux + def make_big_inp(data: dict, options={}): ''' Generates a big .INP-file with all filenames found in data["path"]. Uses a template .INP-file (which has to be generated manually from an initial refinement in TOPAS) and appends this to a large .INP-file while changing the filenames. ''' - required_options = ['template', 'output', 'overwrite', 'backup', 'backup_dir', 'include', 'topas_options', 'save_dir', 'log', 'logfile'] + required_options = ['template', 'output', 'overwrite', 'backup', 'backup_dir', 'include', 'topas_options', 'save_results', 'save_dir', 'log', 'logfile'] default_options = { 'template': 'start.inp', # Name of the template .INP-file @@ -20,7 +25,19 @@ def make_big_inp(data: dict, options={}): 'backup': True, # Toggles backup on / off. Makes a backup of the file if it already exists. Only runs if overwrite is enabled. 
'backup_dir': 'backup', # Specifies the path where the backup files should be located 'include': [], - 'topas_options': { + 'topas_options': None, + 'save_results': True, + 'save_dir': 'results', + 'log': False, + 'logfile': f'{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}_generate_big_inp.log', + } + + options = aux.update_options(options=options, required_options=required_options, default_options=default_options) + + required_topas_options = ['iters', 'chi2_convergence_criteria', 'bootstrap_errors', 'A_matrix_memory_allowed_in_Mbytes', 'approximate_A', 'conserve_memory', 'do_errors', 'continue_after_convergence', 'num_runs'] + default_topas_options = { + 'iters': 100000, + 'chi2_convergence_criteria': 0.001, 'bootstrap_errors': None, 'A_matrix_memory_allowed_in_Mbytes': None, 'approximate_A': False, @@ -28,13 +45,9 @@ def make_big_inp(data: dict, options={}): 'do_errors': False, 'continue_after_convergence': False, 'num_runs': None, - }, - 'save_dir': 'results', - 'log': False, - 'logfile': f'{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}_generate_big_inp.log', - } + } - options = aux.update_options(options=options, required_options=required_options, default_options=default_options) + options['topas_options'] = aux.update_options(options=options['topas_options'], required_options=required_topas_options, default_options=default_topas_options) if not os.path.exists(options['template']): @@ -55,45 +68,101 @@ def make_big_inp(data: dict, options={}): aux.backup_file(filename=options['output'], backup_dir=options['backup_dir']) - runlist = os.path.join(os.path.dirname(options['output']), 'runlist.txt') options['include'].append(runlist) with open(options['template'], 'r') as template, open(options['output'], 'w', newline='\n') as output, open(runlist, 'w', newline='\n') as runlist: - + write_headers(output, options) + template = template.read() for i, path in enumerate(data['path']): - s = change_labels_and_paths(template, path, i, options) + s = make_inp_entry(template=template, xdd=path, num=i, options=options) output.write(s) + output.write('\n\n') runlist.write('#define \tUSE_'+f'{i}'.zfill(4) + '\n') def write_headers(fout, options): + # FIXME Could modify this to make sure that certain combinations of options is not written, such as both do_errors and bootstrap_errors. 
for file in options['include']: fout.write(f'#include {file} \n') fout.write('\n') + if options['save_results']: + fout.write('#define output \n') + + fout.write('\n') + for option, value in options['topas_options'].items(): if value: - fout.write(f'{option} {value} \n') + if isinstance(value, bool): + fout.write(f'{option} \n') + else: + fout.write(f'{option} {value} \n') +def get_headers(inp): -def change_labels_and_paths(template, path, i, options): + with open(inp, 'r') as inp: + headers = [] + + line = inp.readline() + + while not all(keyword in line for keyword in ['out', 'append']): + line = inp.readline() + + # Jump down to lines + line = inp.readline() + line = inp.readline() + + while not 'Out_String' in line: + + if line.split(): + header = line.split()[1] + if all(keyword in header for keyword in ['Get', '(', ')']): + header = header[4:-1] + + headers.append(header) + + line = inp.readline() + + + return headers + + +def get_paths(inp): + + paths = [] + + with open(inp, 'r') as inp: + lines = inp.readlines() + + for line in lines: + if all(keyword in line for keyword in ['out', 'append']): + paths.append(line.split()[1]) + + + return paths + +def make_inp_entry(template: str, xdd: str, num: int, options: dict) -> str: + ''' Takes a template and creates an entry with xdd as path to file and with number num.''' temp_xdd = find_xdd_in_inp(options['template']) + + + num_str = f'{num}'.zfill(4) # Replace diffractogram-path - s = template.replace(temp_xdd, path).replace('XXXX', f'{i}'.zfill(4)) + s = template.replace(temp_xdd, xdd).replace('XXXX', num_str) - basename = os.path.basename(path).split(".")[0] + basename = os.path.basename(xdd).split(".")[0] # Define regular expressions for output lines regs = [r'Out_Riet\([\S]*\)', @@ -106,23 +175,24 @@ def change_labels_and_paths(template, path, i, options): # Define substitute strings for output lines subs = [f'Out_Riet({options["save_dir"]}/{basename}_riet.xy)', - f'Out_CIF_STR({options["save_dir"]}/{basename}.cif))', - f'Out_CIF_ADPs({options["save_dir"]}/{basename}.cif))', - f'Out_CIF_Bonds_Angles({options["save_dir"]}/{basename}.cif))', + f'Out_CIF_STR({options["save_dir"]}/{basename}.cif)', + f'Out_CIF_ADPs({options["save_dir"]}/{basename}.cif)', + f'Out_CIF_Bonds_Angles({options["save_dir"]}/{basename}.cif)', f'Out_FCF({options["save_dir"]}/{basename}.fcf)', f'Create_hklm_d_Th2_Ip_file({options["save_dir"]}/{basename}_hkl.dat)', - f'out \t {options["save_dir"]}/{basename}_refined_params.dat'] + f'out \t {options["save_dir"]}/{basename}_refined_params.dat \t append'] # Substitute strings in output lines for reg, sub in zip(regs, subs): s = re.sub(reg, sub, s) + return s -def find_xdd_in_inp(path): +def find_xdd_in_inp(path: str) -> str: ''' Finds the path to the .xy / .xye scan in a given .INP-file. Assumes only one occurence of xdd and will return the last one no matter what, but outputs a UserWarning if more than one xdd is found.''' with open(path, 'r') as f: @@ -139,3 +209,67 @@ def find_xdd_in_inp(path): warnings.warn(f'More than one path was found in {path}. Returning last occurence - please make sure this is what you want!') return xdd + + +def refine(data: dict, options={}): + ''' Calls TOPAS from options['topas_path'], which should point to tc.exe in the TOPAS-folder. 
If not explicitly passed, will try to use what is in topas.conf.''' + + required_options = ['topas_path', 'topas_log', 'topas_logfile', 'log', 'logfile', 'overwrite'] + + default_options = { + 'topas_path': None, + 'topas_log': False, + 'topas_logfile': f'{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}_topas.out', + 'log': False, + 'logfile': f'{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}_generate_big_inp.log', + 'overwrite': False + } + + options = aux.update_options(options=options, required_options=required_options, default_options=default_options) + + if not options['topas_path']: + import nafuma.xrd as xrd + + # Open topas.conf in the nafuma/xrd + with open(os.path.join(os.path.dirname(xrd.__file__), 'topas.conf'), 'r') as f: + topas_base = f.read() + + options['topas_path'] = os.path.join(topas_base, 'tc.exe') + + + # Check to see if the executable exists + if not os.path.exists(options['topas_path']): + raise Exception('TOPAS executable not found! Please explicitly pass path to tc.exe directly in options["topas_path"] or change base folder in topas.conf to the correct one.') + + + # Create folders if they don't exist + paths, headers = get_paths(data['inp']), get_headers(data['inp']) + + for path in paths: + dirname = os.path.dirname(path) + + if dirname and not os.path.isdir(dirname): + os.makedirs(dirname) + + if not os.path.exists(path) or options['overwrite']: + with open(path, 'w') as results: + for header in headers: + results.write(header+'\t') + + results.write('\n') + else: + raise Exception(f'Results file already exists: {path}') + + + # Create shell command + command = ' '.join([options['topas_path'], data['inp']]) + + # Append output if logging is enabled + if options['topas_log']: + command = ' '.join([command, f'>{options["topas_logfile"]}']) + + if os.path.dirname(options['topas_logfile']) and not os.path.isdir(os.path.dirname(options['topas_logfile'])): + os.makedirs(os.path.dirname(options['topas_logfile'])) + + + subprocess.call(command, shell=True) \ No newline at end of file diff --git a/nafuma/xrd/topas.conf b/nafuma/xrd/topas.conf new file mode 100644 index 0000000..3c6612a --- /dev/null +++ b/nafuma/xrd/topas.conf @@ -0,0 +1 @@ +C:/TOPAS6/ \ No newline at end of file From 51e29284726cc9b4f8a1af079ee328d7a800e8ac Mon Sep 17 00:00:00 2001 From: rasmusvt Date: Wed, 13 Jul 2022 17:26:29 +0200 Subject: [PATCH 06/14] Add cif-reader and inital INP-generator --- nafuma/xrd/refinement.py | 270 +++++++++++++++++++++++++++++++++- nafuma/xrd/snippets.json | 14 ++ nafuma/xrd/topas_default.json | 15 ++ 3 files changed, 297 insertions(+), 2 deletions(-) create mode 100644 nafuma/xrd/snippets.json create mode 100644 nafuma/xrd/topas_default.json diff --git a/nafuma/xrd/refinement.py b/nafuma/xrd/refinement.py index 6a8fb1e..94f20fc 100644 --- a/nafuma/xrd/refinement.py +++ b/nafuma/xrd/refinement.py @@ -6,12 +6,266 @@ import re import time import datetime - import warnings +import json + +from ase import io import nafuma.auxillary as aux + +def make_initial_inp(data: dict, options={}): + + required_options = ['filename', 'overwrite', 'include', 'save_results', 'topas_options', 'background', 'capillary', 'start', 'finish', 'exclude'] + + default_options = { + 'filename': 'start.inp', + 'overwrite': False, + 'save_results': False, + 'include': [], # Any files that should be included with #include + 'topas_options': {}, + 'background': 7, + 'capillary': False, + 'interval': [None, None], # Start and finish values that TOPAS should refine on. 
Overrides 'start' and 'finish' + 'start': None, # Start value only. Overridden by 'interval' if this is set. + 'finish': None, # Finish value only. Overridden by 'interval' if this is set. + 'exlude': [] # Excluded regions. List of lists. + } + + options = aux.update_options(options=options, required_options=required_options, default_options=default_options) + options['topas_options'] = update_topas_options(options=options) + + + if not os.path.exists(options['filename']) or options['overwrite']: + with open(options['filename'], 'w') as fout: + write_headers(fout=fout, options=options) + + fout.write('\n\'------------------------------------------------------------------------------------------------------------------------------------------\n') + fout.write('rw_p 0 r_exp 0 r_p 0 r_wp_dash 0 r_p_dash 0 r_exp_dash 0 weighted_Durbin_Watson 0 gof 0') + fout.write('\n\'------------------------------------------------------------------------------------------------------------------------------------------\n') + + write_xdd(fout=fout, data=data, options=options) + + for str in data['str']: + fout.write('\n\n') + write_str(fout=fout, str=str, options=options) + + + +def write_xdd(fout, data, options): + import nafuma.xrd as xrd + basedir = os.path.dirname(xrd.__file__) + + with open (os.path.join(basedir, 'snippets.json'), 'r') as snip: + snippets = json.load(snip) + + topas_options = options['topas_options'] + + # Write initial parameters + fout.write(f'xdd "{data["xdd"]}"\n') + fout.write('\t'+snippets['calculation_step'].format(topas_options['convolution_step'])+'\n') + + # Write background + fout.write('\tbkg @ ') + for i in range(options['background']): + fout.write('0 ') + + # Write wavelength and LP-factor + fout.write('\n\n') + + fout.write('\t'+snippets['wavelength'].format(data['wavelength'])+'\n') + fout.write('\t'+snippets['lp_factor'].format(topas_options['lp_factor'])+'\n') + + if options['capillary']: + fout.write('\n') + for i, line in enumerate(snippets['capillary']): + if i == 0: + line.format(topas_options['packing_density']) + if i == 1: + line.format(topas_options['capdia']) + + fout.write('\t'+line+'\n') + + + fout.write('\n') + if options['interval'][0] or options['start']: + options['start'] = options['interval'][0] if options['interval'][0] else options['start'] + fout.write(f'\tstart_X {options["start"]}\n') + + if options['interval'][1] or options['finish']: + options['finish'] = options['interval'][1] if options['interval'][1] else options['finish'] + fout.write(f'\tfinish_X {options["finish"]}\n') + + if options['exclude']: + for exclude in options['exclude']: + fout.write(f'\texclude {exclude[0]} {exclude[1]}\n') + + + +def write_str(fout, str, options): + + atoms = read_cif(str) + print(atoms["atoms"]["Fe1"].keys()) + + a = float(atoms['_cell_length_a'].split('(')[0]) + b = float(atoms['_cell_length_b'].split('(')[0]) + c = float(atoms['_cell_length_c'].split('(')[0]) + alpha = float(atoms['_cell_angle_alpha'].split('(')[0]) + beta = float(atoms['_cell_angle_beta'].split('(')[0]) + gamma = float(atoms['_cell_angle_gamma'].split('(')[0]) + + + fout.write('#ifdef start_values\n') + fout.write(f'local lpa \t{a} \t;:\n') + fout.write(f'local lpb \t{b} \t;:\n') + fout.write(f'local lpc \t{c} \t;:\n') + fout.write(f'local lpal \t{alpha} \t;:\n') + fout.write(f'local lpbe \t{beta} \t;:\n') + fout.write(f'local lpga \t{gamma} \t;:\n\n') + + fout.write('#else\n') + fout.write(f'local lpa \t{a}\n') + fout.write(f'local lpb \t{b}\n') + fout.write(f'local lpc \t{c}\n') + 
fout.write(f'local lpal \t{alpha}\n') + fout.write(f'local lpbe \t{beta}\n') + fout.write(f'local lpga \t{gamma}\n') + fout.write('#endif\n\n') + + sites = list(atoms['atoms'].keys()) + + attrs = {'_atom_site_fract_x': 'x', '_atom_site_fract_y': 'y', '_atom_site_fract_z': 'z', '_atom_site_B_iso_or_equiv': 'beq'} + + for attr in attrs: + for site in sites: + if attr in atoms["atoms"][site].keys(): + value = atoms["atoms"][site][attr].split("(")[0] + value = value if value != '.' else 0. + + fout.write(f'local {attrs[attr]}_{site}\t\t =\t {value} \t ;= \n') + + + fout.write('\n') + + + fout.write('\n\'------------------------------------------------------------------------------------------------------------------------------------------\n') + fout.write(atoms['_chemical_name_common'] + f'({atoms["_space_group_name_H-M_alt"]})') + fout.write('\n\'------------------------------------------------------------------------------------------------------------------------------------------\n') + + + fout.write('\tstr\n') + fout.write(f'\t\tphase_name "{atoms["_chemical_name_common"]} ({atoms["_space_group_name_H-M_alt"]})"\n') + fout.write(f'\t\tspace_group {atoms["_space_group_IT_number"]}\n') + fout.write('\t\ta = lpa ;\n') + fout.write('\t\tb = lpb ;\n') + fout.write('\t\tc = lpc ;\n') + fout.write('\t\tal = lpal ;\n') + fout.write('\t\tbe = lpbe ;\n') + fout.write('\t\tga = lpga ;\n') + fout.write('\n') + fout.write(f'\t\tcell_volume vol_XXXX {atoms["_cell_volume"]}\n') + + for atom in atoms['atoms'].keys(): + atom_label = atom if len(atom) == 3 else atom+' ' + + fout.write(f'\t\tsite {atom_label}\t x = x_{atom_label} ;\t y = y_{atom_label} ;\t z = z_{atom_label} ; occ {atoms["atoms"][atom]["_atom_site_occupancy"]}\t beq = beq_{atom_label} \t ;\n') + + + fout.write('\n') + fout.write('\t\tscale @ 0.0') + fout.write('\t\t r_bragg 1.0') + + + + + +def read_cif(path): + + data = {'atoms': {}} # Initialise dictionary + read = True # Initialise read toggle + + # Lists attributes to get out of the .CIF-file. This will correspond to what VESTA writes out, not necessarily what you will find in ICSD + attrs = ['_chemical_name_common', '_cell', '_space_group_name_H-M_alt', '_space_group_IT_number'] + + # Open file + with open(path, 'r') as cif: + line = cif.readline() + + # Read until encountering #End + while read: + + # Break loop if #End is reached + if not line or line.startswith('#End'): + read = False + break + + # Handle loops + if line.lstrip().startswith('loop_'): + loop = [] + line = cif.readline() + + # Only handle loops with attributes that starts with _atom. Other loops are incompatible with below code due to slight differences in formatting (e.g. lineshifts) + while line.lstrip().startswith('_atom'): + loop.append(line) # add the attributes of the loop to a list (every keywords starting with _atom) + line = cif.readline() # Read next line + + + # If there were any attributes that started with _atom - if this is empty is just means there were other attributes in this loop + if loop: + # Read every line after the attribute listing has ended - until it encounters a new attribute, loop or #End tag + # FIXME WHat a horrible condition statement - need to fix this! 
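(In words: keep consuming atom rows until the next line is empty, blank, opens another loop_, starts a new '_' attribute, or is the '#End' tag.)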
+ while line and not line.lstrip().startswith('_') and not line.lstrip().startswith('loop_') and not line.lstrip().startswith('#End') and not line=='\n': + # Initialise empty dictionary for a given atom if it has not already been created in another loop + if line.split()[0] not in data['atoms'].keys(): + data["atoms"][line.split()[0]] = {} + + # Add all the attribute / value pairs for the current loop + for i, attr in enumerate(loop): + data["atoms"][line.split()[0]][attr[:-1].lstrip()] = line.split()[i] + + # Read new line + line = cif.readline() + + # If loop list was empty, keep going to the next line + else: + line = cif.readline() + + + + # Handle free-standing attributes + elif any([line.lstrip().startswith(i) for i in attrs]): + #line.startswith() or line.startswith('_symmetry_space_group_name') or line.startswith('_symmetry_Int_Tables'): + # + # + attr, *value = line.split() + + value = ' '.join([str(i) for i in value]) + + data[attr] = value + line = cif.readline() + + else: + line = cif.readline() + + + + return data + + + + +def update_topas_options(options): + import nafuma.xrd as xrd + basedir = os.path.dirname(xrd.__file__) + + with open(os.path.join(basedir, 'topas_default.json'), 'r') as default: + topas_defaults = json.load(default) + + options['topas_options'] = aux.update_options(options=options['topas_options'], required_options=topas_defaults.keys(), default_options=topas_defaults) + + return options['topas_options'] + def make_big_inp(data: dict, options={}): ''' Generates a big .INP-file with all filenames found in data["path"]. Uses a template .INP-file (which has to be generated manually from an initial refinement in TOPAS) and appends this to a large .INP-file while changing the filenames. ''' @@ -90,6 +344,18 @@ def make_big_inp(data: dict, options={}): def write_headers(fout, options): # FIXME Could modify this to make sure that certain combinations of options is not written, such as both do_errors and bootstrap_errors. 
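(The headers list below acts as a whitelist: now that topas_options is merged from topas_default.json, only the keywords in this list are written to the top of the .INP, so purely local values such as lp_factor, capdia or packing_density never end up as global TOPAS headers.)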
+ headers = [ + "A_matrix_memory_allowed_in_Mbytes", + "approximate_A", + "bootstrap_errors", + "conserve_memory", + "continue_after_convergence", + "do_errors", + "chi2_convergence_criteria", + "iters", + "num_runs" ] + + for file in options['include']: fout.write(f'#include {file} \n') @@ -101,7 +367,7 @@ def write_headers(fout, options): fout.write('\n') for option, value in options['topas_options'].items(): - if value: + if value and option in headers: if isinstance(value, bool): fout.write(f'{option} \n') else: diff --git a/nafuma/xrd/snippets.json b/nafuma/xrd/snippets.json new file mode 100644 index 0000000..8636a1d --- /dev/null +++ b/nafuma/xrd/snippets.json @@ -0,0 +1,14 @@ +{ + "calculation_step": "x_calculation_step = Yobs_dx_at(Xo); convolution_step {}", + "capillary": [ + "local !packing_density {} min 0.1 max 1.0 'typically 0.2 to 0.5", + "local !capdia {} 'capillary diameter in mm", + "local !linab = Get(mixture_MAC) Get(mixture_density_g_on_cm3);: 'in cm-1", + "local muR = (capdia/20)*linab*packing_density;", + "Cylindrical_I_Correction(muR)" + ], + + "lp_factor": "LP_Factor({}) 'change the LP correction or lh value if required", + "wavelength": "lam ymin_on_ymax 0.0001 la 1.0 lo {} lh 0.1", + "zero_error": "Zero_Error(zero, 0)" +} \ No newline at end of file diff --git a/nafuma/xrd/topas_default.json b/nafuma/xrd/topas_default.json new file mode 100644 index 0000000..4ab6a81 --- /dev/null +++ b/nafuma/xrd/topas_default.json @@ -0,0 +1,15 @@ +{ + "A_matrix_memory_allowed_in_Mbytes": null, + "approximate_A": false, + "bootstrap_errors": null, + "capdia": 0.5, + "chi2_convergence_criteria": 0.001, + "conserve_memory": false, + "continue_after_convergence": false, + "convolution_step": 4, + "do_errors": false, + "iters": 100000, + "lp_factor": 90, + "num_runs": null, + "packing_density": 0.5 +} \ No newline at end of file From ace44cb48f2743fd9ae3778c498d539d61705f8a Mon Sep 17 00:00:00 2001 From: rasmusvt Date: Thu, 14 Jul 2022 19:15:39 +0200 Subject: [PATCH 07/14] Add initial INP-generation --- nafuma/xrd/refinement.py | 291 +++++++++++++++++++++++++++++----- nafuma/xrd/snippets.json | 2 +- nafuma/xrd/topas_default.json | 2 +- 3 files changed, 251 insertions(+), 44 deletions(-) diff --git a/nafuma/xrd/refinement.py b/nafuma/xrd/refinement.py index 94f20fc..d250172 100644 --- a/nafuma/xrd/refinement.py +++ b/nafuma/xrd/refinement.py @@ -17,12 +17,13 @@ import nafuma.auxillary as aux def make_initial_inp(data: dict, options={}): - required_options = ['filename', 'overwrite', 'include', 'save_results', 'topas_options', 'background', 'capillary', 'start', 'finish', 'exclude'] + required_options = ['filename', 'overwrite', 'include', 'save_results', 'save_dir', 'topas_options', 'background', 'capillary', 'start', 'finish', 'exclude'] default_options = { 'filename': 'start.inp', 'overwrite': False, 'save_results': False, + 'save_dir': 'results/', 'include': [], # Any files that should be included with #include 'topas_options': {}, 'background': 7, @@ -41,15 +42,34 @@ def make_initial_inp(data: dict, options={}): with open(options['filename'], 'w') as fout: write_headers(fout=fout, options=options) + fout.write('\n\'------------------------------------------------------------------------------------------------------------------------------------------\n') - fout.write('rw_p 0 r_exp 0 r_p 0 r_wp_dash 0 r_p_dash 0 r_exp_dash 0 weighted_Durbin_Watson 0 gof 0') + fout.write('\'START OF INP - XXXX') + 
fout.write('\n\'------------------------------------------------------------------------------------------------------------------------------------------\n') + fout.write('\n\'------------------------------------------------------------------------------------------------------------------------------------------\n') + fout.write('r_wp 0 r_exp 0 r_p 0 r_wp_dash 0 r_p_dash 0 r_exp_dash 0 weighted_Durbin_Watson 0 gof 0') fout.write('\n\'------------------------------------------------------------------------------------------------------------------------------------------\n') write_xdd(fout=fout, data=data, options=options) - for str in data['str']: + fout.write('\n\'------------------------------------------------------------------------------------------------------------------------------------------\n') + fout.write('\'PARAMETER DEFINITONS') + fout.write('\n\'------------------------------------------------------------------------------------------------------------------------------------------\n') + + for i, str in enumerate(data['str']): fout.write('\n\n') - write_str(fout=fout, str=str, options=options) + write_params(fout=fout, data=data, options=options, index=i) + + + + fout.write('\n\'------------------------------------------------------------------------------------------------------------------------------------------\n') + fout.write('\'STR DEFINITIONS') + fout.write('\n\'------------------------------------------------------------------------------------------------------------------------------------------\n') + + for i, str in enumerate(data['str']): + fout.write('\n\n') + write_str(fout=fout, data=data, options=options, index=i) + @@ -66,6 +86,19 @@ def write_xdd(fout, data, options): fout.write(f'xdd "{data["xdd"]}"\n') fout.write('\t'+snippets['calculation_step'].format(topas_options['convolution_step'])+'\n') + for include in options['include']: + if 'peak_width' in include: + with open(include, 'r') as f: + lines = f.readlines() + peak_width_params = [] + for line in lines: + param = line.split()[1][1:] + peak_width_params.append(param) + + + + fout.write('\t'+snippets['gauss_fwhm'].format(peak_width_params[0], peak_width_params[1], peak_width_params[2])+'\n') + # Write background fout.write('\tbkg @ ') for i in range(options['background']): @@ -76,14 +109,15 @@ def write_xdd(fout, data, options): fout.write('\t'+snippets['wavelength'].format(data['wavelength'])+'\n') fout.write('\t'+snippets['lp_factor'].format(topas_options['lp_factor'])+'\n') + fout.write('\t'+snippets['zero_error']+'\n') if options['capillary']: fout.write('\n') for i, line in enumerate(snippets['capillary']): if i == 0: - line.format(topas_options['packing_density']) + line = line.format(topas_options['packing_density']) if i == 1: - line.format(topas_options['capdia']) + line = line.format(topas_options['capdia']) fout.write('\t'+line+'\n') @@ -103,10 +137,19 @@ def write_xdd(fout, data, options): -def write_str(fout, str, options): - - atoms = read_cif(str) - print(atoms["atoms"]["Fe1"].keys()) + +def write_params(fout, data, options, index=0): + + atoms = read_cif(data['str'][index]) + if 'labels' in data.keys(): + label = data['labels'][index] + else: + label = index + + + fout.write('\n\'------------------------------------------------------------------------------------------------------------------------------------------\n') + fout.write(atoms['_chemical_name_common'] + f'({atoms["_space_group_name_H-M_alt"]}) - Parameters') + 
fout.write('\n\'------------------------------------------------------------------------------------------------------------------------------------------\n') a = float(atoms['_cell_length_a'].split('(')[0]) b = float(atoms['_cell_length_b'].split('(')[0]) @@ -116,67 +159,231 @@ def write_str(fout, str, options): gamma = float(atoms['_cell_angle_gamma'].split('(')[0]) - fout.write('#ifdef start_values\n') - fout.write(f'local lpa \t{a} \t;:\n') - fout.write(f'local lpb \t{b} \t;:\n') - fout.write(f'local lpc \t{c} \t;:\n') - fout.write(f'local lpal \t{alpha} \t;:\n') - fout.write(f'local lpbe \t{beta} \t;:\n') - fout.write(f'local lpga \t{gamma} \t;:\n\n') + # WRITE LATTICE PARAMETERS + # If start_values is defined: + fout.write(f'#ifdef start_values_{label}\n') + lpa = f'local lpa_{label} {a} ;: {a}' + lpb = f'local lpb_{label} {b} ;: {b}' + lpc = f'local lpc_{label} {c} ;: {c}' + fout.write('{: <55} {: <55} {: <55}\n'.format(lpa, lpb, lpc)) + lpal = f'local lpal_{label} {alpha} ;: {alpha}' + lpbe = f'local lpbe_{label} {beta} ;: {beta}' + lpga = f'local lpga_{label} {gamma} ;: {gamma}' + fout.write('{: <55} {: <55} {: <55}\n'.format(lpal, lpbe, lpga)) + # Otherwise + fout.write('\n') fout.write('#else\n') - fout.write(f'local lpa \t{a}\n') - fout.write(f'local lpb \t{b}\n') - fout.write(f'local lpc \t{c}\n') - fout.write(f'local lpal \t{alpha}\n') - fout.write(f'local lpbe \t{beta}\n') - fout.write(f'local lpga \t{gamma}\n') + lpa = f'local lpa_{label} {a}' + lpb = f'local lpb_{label} {b}' + lpc = f'local lpc_{label} {c}' + fout.write('{: <55} {: <55} {: <55}\n'.format(lpa, lpb, lpc)) + lpal = f'local lpal_{label} {alpha}' + lpbe = f'local lpbe_{label} {beta}' + lpga = f'local lpga_{label} {gamma}' + fout.write('{: <55} {: <55} {: <55}\n'.format(lpal, lpbe, lpga)) fout.write('#endif\n\n') sites = list(atoms['atoms'].keys()) - attrs = {'_atom_site_fract_x': 'x', '_atom_site_fract_y': 'y', '_atom_site_fract_z': 'z', '_atom_site_B_iso_or_equiv': 'beq'} + attrs = { + '_atom_site_fract_x': 'x', + '_atom_site_fract_y': 'y', + '_atom_site_fract_z': 'z', + '_atom_site_occupancy': '!occ', + '_atom_site_B_iso_or_equiv': '!beq' + } - for attr in attrs: - for site in sites: + + # WRITE SITE PARAMETERS + for site in sites: + + params = [] + for attr in attrs: if attr in atoms["atoms"][site].keys(): value = atoms["atoms"][site][attr].split("(")[0] value = value if value != '.' else 0. 
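('.' is the CIF placeholder for an undefined value, hence the fallback to 0 before the parameter line is written.)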
- fout.write(f'local {attrs[attr]}_{site}\t\t =\t {value} \t ;= \n') + params.append('{: <20} {: <20}'.format(f'local {attrs[attr]}_{site}_{label}', f' = {value} ;: {value}')) + #fout.write(f'local {attrs[attr]}_{site}_{label}\t\t =\t {value} \t ;= \t\t\t\t\t') + + fout.write('{: <55} {: <55} {: <55} {: <55} {: <55}\n'.format(*params)) + + fout.write('\n') + + fout.write('{: <55} {: <55} {: <55} {: <55}\n'.format( + f'local csgc_{label}_XXXX = 200 ;: 200', + f'local cslc_{label}_XXXX = 200 ;: 200', + f'local sgc_{label}_XXXX = 0 ;: 0', + f'local slc_{label}_XXXX = 0 ;: 0', + )) - fout.write('\n') + # fout.write(f'local csgc_{label} ;:\t\t\t\t\t') + # fout.write(f'local cslc_{label} ;:\t\t\t\t\t') + # fout.write(f'local sgc_{label} ;:\t\t\t\t\t') + # fout.write(f'local slc_{label} ;:\n') + + +def write_str(fout, data, options, index=0): + + atoms = read_cif(data['str'][index]) + if 'labels' in data.keys(): + label = data['labels'][index] + else: + label = index + + fout.write('\n\'------------------------------------------------------------------------------------------------------------------------------------------\n') - fout.write(atoms['_chemical_name_common'] + f'({atoms["_space_group_name_H-M_alt"]})') + fout.write('\'' + atoms['_chemical_name_common'].strip('\'') + ' ' + f'({atoms["_space_group_name_H-M_alt"]})'.replace("\'", "")) fout.write('\n\'------------------------------------------------------------------------------------------------------------------------------------------\n') fout.write('\tstr\n') - fout.write(f'\t\tphase_name "{atoms["_chemical_name_common"]} ({atoms["_space_group_name_H-M_alt"]})"\n') + fout.write(f'\t\tphase_name "{atoms["_chemical_name_common"]} ({atoms["_space_group_name_H-M_alt"]})"\n'.replace('\'', '')) fout.write(f'\t\tspace_group {atoms["_space_group_IT_number"]}\n') - fout.write('\t\ta = lpa ;\n') - fout.write('\t\tb = lpb ;\n') - fout.write('\t\tc = lpc ;\n') - fout.write('\t\tal = lpal ;\n') - fout.write('\t\tbe = lpbe ;\n') - fout.write('\t\tga = lpga ;\n') + fout.write(f'\t\ta = lpa_{label} ;\n') + fout.write(f'\t\tb = lpb_{label} ;\n') + fout.write(f'\t\tc = lpc_{label} ;\n') + fout.write(f'\t\tal = lpal_{label} ;\n') + fout.write(f'\t\tbe = lpbe_{label} ;\n') + fout.write(f'\t\tga = lpga_{label} ;\n') fout.write('\n') - fout.write(f'\t\tcell_volume vol_XXXX {atoms["_cell_volume"]}\n') + fout.write(f'\t\tcell_volume\t vol_{label}_XXXX {atoms["_cell_volume"]}\n') + fout.write(f'\t\tcell_mass\t mass_{label}_XXXX 1\n') + fout.write(f'\t\tweight_percent\t wp_{label}_XXXX 100\n\n') + fout.write('\n') + fout.write('\t\tscale @ 1.0\n') + fout.write('\t\tr_bragg 1.0\n\n') + + fout.write(f'\t\t#ifdef crystallite_size_gaussian_{label}\n') + fout.write(f'\t\tCS_G(csgc_{label}_XXXX)\n') + fout.write('\t\t#endif\n') + fout.write(f'\t\t#ifdef crystallite_size_lorentzian_{label}\n') + fout.write(f'\t\tCS_L(cslc_{label}_XXXX)\n') + fout.write('\t\t#endif\n\n') + + fout.write(f'\t\t#ifdef strain_gaussian_{label}\n') + fout.write(f'\t\tStrain_G(sgc_{label}_XXXX)\n') + fout.write('\t\t#endif\n') + fout.write(f'\t\t#ifdef strain_lorentzian_{label}\n') + fout.write(f'\t\tStrain_L(slc_{label}_XXXX)\n') + fout.write('\t\t#endif\n\n') + + + + for atom in atoms['atoms'].keys(): - atom_label = atom if len(atom) == 3 else atom+' ' - fout.write(f'\t\tsite {atom_label}\t x = x_{atom_label} ;\t y = y_{atom_label} ;\t z = z_{atom_label} ; occ {atoms["atoms"][atom]["_atom_site_occupancy"]}\t beq = beq_{atom_label} \t ;\n') + specie = '{}{}'.format( + 
atoms["atoms"][atom]["_atom_site_type_symbol"], + data['oxidation_states'][index][atoms["atoms"][atom]["_atom_site_type_symbol"]] + ) + site = f'site {atom}' + x = f'\t\tx = x_{atom}_{label}' + y = f'\t\ty = y_{atom}_{label}' + z = f'\t\tz = z_{atom}_{label}' + occ = '\t\tocc {: <4} = occ_{}_{}'.format(specie, atom, label) + #occ = f'\t\tocc {atoms["atoms"][atom]["_atom_site_type_symbol"]} occ_{atom}_{label}' + beq = f'\t\tbeq = beq_{atom}_{label}' + + # FIXME Fix alignment here at some point + #fout.write(f'\t\tsite {atom}\t x = x_{atom}_{label} ;\t y = y_{atom}_{label} ;\t z = z_{atom}_{label} ;\t occ {atoms["atoms"][atom]["_atom_site_type_symbol"]} occ_{atom}_{label} \t beq = beq_{atom}_{label} \t ;\n') + fout.write('\t\t{: <9} {: <30}; {: <30}; {: <30}; {: <30}; {: <30};'.format(site, x, y, z, occ, beq)) + fout.write('\n') + + + + if options['save_results']: + fout.write('\n\n') + write_output(fout=fout, data=data, options=options, index=index) + + + +def write_output(fout, data, options, index=0): + + + filename = os.path.basename(data['xdd']).split('.')[0] + + atoms = read_cif(data['str'][index]) + + if 'labels' in data.keys(): + label = data['labels'][index] + else: + label = index + + + fout.write('#ifdef output\n') + fout.write(f'\t\tOut_Riet({options["save_dir"]}/{label}_XXXX_riet.xy)\n') + fout.write(f'\t\tOut_CIF_STR({options["save_dir"]}/{label}_XXXX.cif)\n') + fout.write(f'\t\tOut_CIF_ADPs({options["save_dir"]}/{label}_XXXX.cif)\n') + fout.write(f'\t\tOut_CIF_Bonds_Angles({options["save_dir"]}/{label}_XXXX.cif)\n') + fout.write(f'\t\tCreate_hklm_d_Th2_Ip_file({options["save_dir"]}/{label}_XXXX_hkl.dat)\n') fout.write('\n') - fout.write('\t\tscale @ 0.0') - fout.write('\t\t r_bragg 1.0') - + fout.write(f'out {options["save_dir"]}/{filename}_{label}.dat append\n') + fout.write(f'\t\tOut_String("XXXX")\n') + + fout.write('\t\t{: <40} {: <40} {: <40} {: <40} {: <40} {: <40}'.format( + f'Out(Get(r_wp), "%11.5f")', + f'Out(Get(r_exp), "%11.5f")', + f'Out(Get(r_p), "%11.5f")', + f'Out(Get(r_p_dash), "%11.5f")', + f'Out(Get(r_exp_dash), "%11.5f")', + f'Out(Get(gof), "%11.5f")', + ) + ) + + fout.write('\n') + + + fout.write('\t\t{: <40} {: <40} {: <40}'.format( + f'Out(vol_{label}_XXXX, "%11.5f")', + f'Out(mass_{label}_XXXX, "%11.5f")', + f'Out(wp_{label}_XXXX, "%11.5f")', + + ) + ) + + + fout.write('\n') + + + fout.write('\t\t{: <40} {: <40} {: <40} {: <40} {: <40} {: <40}'.format( + f'Out(lpa_{label}, "%11.5f")', + f'Out(lpb_{label}, "%11.5f")', + f'Out(lpc_{label}, "%11.5f")', + f'Out(lpal_{label}, "%11.5f")', + f'Out(lpbe_{label}, "%11.5f")', + f'Out(lpga_{label}, "%11.5f")', + + ) + ) + + fout.write('\n\n') + + for atom in atoms['atoms']: + fout.write('\t\t{: <40} {: <40} {: <40} {: <40} {: <40}'.format( + f'Out(x_{atom}_{label}, "%11.5f")', + f'Out(y_{atom}_{label}, "%11.5f")', + f'Out(z_{atom}_{label}, "%11.5f")', + f'Out(occ_{atom}_{label}, "%11.5f")', + f'Out(beq_{atom}_{label}, "%11.5f")', + ) + ) + + fout.write('\n') + + fout.write('\n\n') + + + + fout.write('#endif') diff --git a/nafuma/xrd/snippets.json b/nafuma/xrd/snippets.json index 8636a1d..5b0f97d 100644 --- a/nafuma/xrd/snippets.json +++ b/nafuma/xrd/snippets.json @@ -7,7 +7,7 @@ "local muR = (capdia/20)*linab*packing_density;", "Cylindrical_I_Correction(muR)" ], - + "gauss_fwhm": "gauss_fwhm = Sqrt({} Cos(2 * Th)^4 + {} Cos(2 * Th)^2 + {});", "lp_factor": "LP_Factor({}) 'change the LP correction or lh value if required", "wavelength": "lam ymin_on_ymax 0.0001 la 1.0 lo {} lh 0.1", "zero_error": "Zero_Error(zero, 
0)" diff --git a/nafuma/xrd/topas_default.json b/nafuma/xrd/topas_default.json index 4ab6a81..c92e8a4 100644 --- a/nafuma/xrd/topas_default.json +++ b/nafuma/xrd/topas_default.json @@ -6,7 +6,7 @@ "chi2_convergence_criteria": 0.001, "conserve_memory": false, "continue_after_convergence": false, - "convolution_step": 4, + "convolution_step": 1, "do_errors": false, "iters": 100000, "lp_factor": 90, From 92075fbb66f3a48fa4f6e05e3f1563bd3dde83cb Mon Sep 17 00:00:00 2001 From: rasmusvt Date: Thu, 14 Jul 2022 19:47:36 +0200 Subject: [PATCH 08/14] Fix filenames in make_big_inp and add fixmes --- nafuma/xrd/refinement.py | 67 ++++++++++++++++++++++++++-------------- 1 file changed, 43 insertions(+), 24 deletions(-) diff --git a/nafuma/xrd/refinement.py b/nafuma/xrd/refinement.py index d250172..57bfa72 100644 --- a/nafuma/xrd/refinement.py +++ b/nafuma/xrd/refinement.py @@ -9,8 +9,6 @@ import datetime import warnings import json -from ase import io - import nafuma.auxillary as aux @@ -329,6 +327,8 @@ def write_output(fout, data, options, index=0): fout.write(f'out {options["save_dir"]}/{filename}_{label}.dat append\n') fout.write(f'\t\tOut_String("XXXX")\n') + + # FIXME Does not write out weighted_Durbin_Watson, TOPAS complained about this fout.write('\t\t{: <40} {: <40} {: <40} {: <40} {: <40} {: <40}'.format( f'Out(Get(r_wp), "%11.5f")', f'Out(Get(r_exp), "%11.5f")', @@ -477,6 +477,9 @@ def make_big_inp(data: dict, options={}): ''' Generates a big .INP-file with all filenames found in data["path"]. Uses a template .INP-file (which has to be generated manually from an initial refinement in TOPAS) and appends this to a large .INP-file while changing the filenames. ''' + + # FIXME Strip headers from initial INP file before copying it. + required_options = ['template', 'output', 'overwrite', 'backup', 'backup_dir', 'include', 'topas_options', 'save_results', 'save_dir', 'log', 'logfile'] default_options = { @@ -635,29 +638,29 @@ def make_inp_entry(template: str, xdd: str, num: int, options: dict) -> str: # Replace diffractogram-path s = template.replace(temp_xdd, xdd).replace('XXXX', num_str) - basename = os.path.basename(xdd).split(".")[0] + # basename = os.path.basename(xdd).split(".")[0] - # Define regular expressions for output lines - regs = [r'Out_Riet\([\S]*\)', - r'Out_CIF_STR\([\S]*\)', - r'Out_CIF_ADPs\([\S]*\)', - r'Out_CIF_Bonds_Angles\([\S]*\)', - r'Out_FCF\([\S]*\)', - r'Create_hklm_d_Th2_Ip_file\([\S]*\)', - r'out(.*?)append'] + # # Define regular expressions for output lines + # regs = [r'Out_Riet\([\S]*\)', + # r'Out_CIF_STR\([\S]*\)', + # r'Out_CIF_ADPs\([\S]*\)', + # r'Out_CIF_Bonds_Angles\([\S]*\)', + # r'Out_FCF\([\S]*\)', + # r'Create_hklm_d_Th2_Ip_file\([\S]*\)', + # r'out(.*?)append'] - # Define substitute strings for output lines - subs = [f'Out_Riet({options["save_dir"]}/{basename}_riet.xy)', - f'Out_CIF_STR({options["save_dir"]}/{basename}.cif)', - f'Out_CIF_ADPs({options["save_dir"]}/{basename}.cif)', - f'Out_CIF_Bonds_Angles({options["save_dir"]}/{basename}.cif)', - f'Out_FCF({options["save_dir"]}/{basename}.fcf)', - f'Create_hklm_d_Th2_Ip_file({options["save_dir"]}/{basename}_hkl.dat)', - f'out \t {options["save_dir"]}/{basename}_refined_params.dat \t append'] + # # Define substitute strings for output lines + # subs = [f'Out_Riet({options["save_dir"]}/{basename}_riet.xy)', + # f'Out_CIF_STR({options["save_dir"]}/{basename}.cif)', + # f'Out_CIF_ADPs({options["save_dir"]}/{basename}.cif)', + # f'Out_CIF_Bonds_Angles({options["save_dir"]}/{basename}.cif)', + # 
f'Out_FCF({options["save_dir"]}/{basename}.fcf)', + # f'Create_hklm_d_Th2_Ip_file({options["save_dir"]}/{basename}_hkl.dat)', + # f'out \t {options["save_dir"]}/{basename}_refined_params.dat \t append'] - # Substitute strings in output lines - for reg, sub in zip(regs, subs): - s = re.sub(reg, sub, s) + # # Substitute strings in output lines + # for reg, sub in zip(regs, subs): + # s = re.sub(reg, sub, s) @@ -716,7 +719,12 @@ def refine(data: dict, options={}): # Create folders if they don't exist - paths, headers = get_paths(data['inp']), get_headers(data['inp']) + + # FIXME Since the big INP files now have the same filename for all iterations, we need to adjust the code to only get unique values from the get_paths function + # FIXME get_headers() is also not working now. Needs to be adjusted to the new way of writing the Out-parameters + paths = get_paths(data['inp']) + headers = get_headers(data['inp']) + for path in paths: dirname = os.path.dirname(path) @@ -745,4 +753,15 @@ def refine(data: dict, options={}): os.makedirs(os.path.dirname(options['topas_logfile'])) - subprocess.call(command, shell=True) \ No newline at end of file + subprocess.call(command, shell=True) + + + + + +def read_results(): + # FIXME Write the function + + return None + + From 4e579bd07b1cf3013f4af88094ab5a6f6d022274 Mon Sep 17 00:00:00 2001 From: halvorhv Date: Thu, 14 Jul 2022 21:44:36 +0200 Subject: [PATCH 09/14] dirty fix to enable read cif w/o cell_volume --- nafuma/xrd/refinement.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nafuma/xrd/refinement.py b/nafuma/xrd/refinement.py index 57bfa72..b94e1fb 100644 --- a/nafuma/xrd/refinement.py +++ b/nafuma/xrd/refinement.py @@ -249,7 +249,11 @@ def write_str(fout, data, options, index=0): fout.write(f'\t\tbe = lpbe_{label} ;\n') fout.write(f'\t\tga = lpga_{label} ;\n') fout.write('\n') - fout.write(f'\t\tcell_volume\t vol_{label}_XXXX {atoms["_cell_volume"]}\n') + #FIXME fix the if-statement below, so that cell-volume is the correct formula, based on lattice params and angles. 
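For reference, the general (triclinic) cell volume the FIXME asks for is V = a*b*c*sqrt(1 - cos(alpha)^2 - cos(beta)^2 - cos(gamma)^2 + 2*cos(alpha)*cos(beta)*cos(gamma)). A minimal numpy sketch (assuming numpy is imported as np; the CIF angles are in degrees, and np.cos expects radians):

    al, be, ga = np.radians([alpha, beta, gamma])
    volume = a * b * c * np.sqrt(1 - np.cos(al)**2 - np.cos(be)**2 - np.cos(ga)**2
                                 + 2 * np.cos(al) * np.cos(be) * np.cos(ga))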
+ if '_cell_volume' not in atoms.keys(): + atoms['_cell_volume'] = 0 + else: + fout.write(f'\t\tcell_volume\t vol_{label}_XXXX {atoms["_cell_volume"]}\n') fout.write(f'\t\tcell_mass\t mass_{label}_XXXX 1\n') fout.write(f'\t\tweight_percent\t wp_{label}_XXXX 100\n\n') fout.write('\n') @@ -456,7 +460,7 @@ def read_cif(path): line = cif.readline() - + print(data.keys()) return data From 7ea27abf3a41abc4ec86966c80fe4bae767164fa Mon Sep 17 00:00:00 2001 From: rasmusvt Date: Fri, 15 Jul 2022 11:25:27 +0200 Subject: [PATCH 10/14] Add function to get unique entries in list --- nafuma/auxillary.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/nafuma/auxillary.py b/nafuma/auxillary.py index 96b9ce2..4734802 100644 --- a/nafuma/auxillary.py +++ b/nafuma/auxillary.py @@ -134,4 +134,15 @@ def backup_file(filename, backup_dir): dst = os.path.join(backup_dir, dst_basename) - shutil.copy(filename, dst) \ No newline at end of file + shutil.copy(filename, dst) + + +def get_unique(full_list): + + unique_list = [] + + for entry in full_list: + if not entry in unique_list: + unique_list.append(entry) + + return unique_list \ No newline at end of file From c987fc689f404340d1ab77bc68dc1a096e46c6b7 Mon Sep 17 00:00:00 2001 From: rasmusvt Date: Fri, 15 Jul 2022 11:26:57 +0200 Subject: [PATCH 11/14] Fix get_headers to read new formatting correctly --- nafuma/xrd/refinement.py | 92 +++++++++++++++++++++++----------------- 1 file changed, 52 insertions(+), 40 deletions(-) diff --git a/nafuma/xrd/refinement.py b/nafuma/xrd/refinement.py index b94e1fb..f519b43 100644 --- a/nafuma/xrd/refinement.py +++ b/nafuma/xrd/refinement.py @@ -1,4 +1,3 @@ -from email.policy import default import os import shutil import subprocess @@ -9,6 +8,8 @@ import datetime import warnings import json +import pandas as pd + import nafuma.auxillary as aux @@ -160,25 +161,25 @@ def write_params(fout, data, options, index=0): # WRITE LATTICE PARAMETERS # If start_values is defined: fout.write(f'#ifdef start_values_{label}\n') - lpa = f'local lpa_{label} {a} ;: {a}' - lpb = f'local lpb_{label} {b} ;: {b}' - lpc = f'local lpc_{label} {c} ;: {c}' + lpa = f'local !lpa_{label} {a} ;: {a}' + lpb = f'local !lpb_{label} {b} ;: {b}' + lpc = f'local !lpc_{label} {c} ;: {c}' fout.write('{: <55} {: <55} {: <55}\n'.format(lpa, lpb, lpc)) - lpal = f'local lpal_{label} {alpha} ;: {alpha}' - lpbe = f'local lpbe_{label} {beta} ;: {beta}' - lpga = f'local lpga_{label} {gamma} ;: {gamma}' + lpal = f'local !lpal_{label} {alpha} ;: {alpha}' + lpbe = f'local !lpbe_{label} {beta} ;: {beta}' + lpga = f'local !lpga_{label} {gamma} ;: {gamma}' fout.write('{: <55} {: <55} {: <55}\n'.format(lpal, lpbe, lpga)) # Otherwise fout.write('\n') fout.write('#else\n') - lpa = f'local lpa_{label} {a}' - lpb = f'local lpb_{label} {b}' - lpc = f'local lpc_{label} {c}' + lpa = f'local !lpa_{label} {a}' + lpb = f'local !lpb_{label} {b}' + lpc = f'local !lpc_{label} {c}' fout.write('{: <55} {: <55} {: <55}\n'.format(lpa, lpb, lpc)) - lpal = f'local lpal_{label} {alpha}' - lpbe = f'local lpbe_{label} {beta}' - lpga = f'local lpga_{label} {gamma}' + lpal = f'local !lpal_{label} {alpha}' + lpbe = f'local !lpbe_{label} {beta}' + lpga = f'local !lpga_{label} {gamma}' fout.write('{: <55} {: <55} {: <55}\n'.format(lpal, lpbe, lpga)) fout.write('#endif\n\n') @@ -188,8 +189,8 @@ def write_params(fout, data, options, index=0): '_atom_site_fract_x': 'x', '_atom_site_fract_y': 'y', '_atom_site_fract_z': 'z', - '_atom_site_occupancy': '!occ', - 
'_atom_site_B_iso_or_equiv': '!beq' + '_atom_site_occupancy': 'occ', + '_atom_site_B_iso_or_equiv': 'beq' } @@ -202,7 +203,7 @@ def write_params(fout, data, options, index=0): value = atoms["atoms"][site][attr].split("(")[0] value = value if value != '.' else 0. - params.append('{: <20} {: <20}'.format(f'local {attrs[attr]}_{site}_{label}', f' = {value} ;: {value}')) + params.append('{: <20} {: <20}'.format(f'local !{attrs[attr]}_{site}_{label}', f' = {value} ;: {value}')) #fout.write(f'local {attrs[attr]}_{site}_{label}\t\t =\t {value} \t ;= \t\t\t\t\t') fout.write('{: <55} {: <55} {: <55} {: <55} {: <55}\n'.format(*params)) @@ -210,10 +211,10 @@ def write_params(fout, data, options, index=0): fout.write('\n') fout.write('{: <55} {: <55} {: <55} {: <55}\n'.format( - f'local csgc_{label}_XXXX = 200 ;: 200', - f'local cslc_{label}_XXXX = 200 ;: 200', - f'local sgc_{label}_XXXX = 0 ;: 0', - f'local slc_{label}_XXXX = 0 ;: 0', + f'local !csgc_{label}_XXXX = 200 ;: 200', + f'local !cslc_{label}_XXXX = 200 ;: 200', + f'local !sgc_{label}_XXXX = 0 ;: 0', + f'local !slc_{label}_XXXX = 0 ;: 0', )) @@ -382,14 +383,13 @@ def write_output(fout, data, options, index=0): ) fout.write('\n') - + + fout.write('\t\tOut_String("\\n")\n') + fout.write('#endif') fout.write('\n\n') - fout.write('#endif') - - def read_cif(path): @@ -588,32 +588,40 @@ def write_headers(fout, options): fout.write(f'{option} {value} \n') -def get_headers(inp): +def get_headers(inp, path): with open(inp, 'r') as inp: - headers = [] + headers = ['index'] line = inp.readline() - while not all(keyword in line for keyword in ['out', 'append']): + while not path in line: line = inp.readline() # Jump down to lines line = inp.readline() line = inp.readline() - while not 'Out_String' in line: - + while not '#endif' in line: if line.split(): - header = line.split()[1] - if all(keyword in header for keyword in ['Get', '(', ')']): - header = header[4:-1] + + regx = r"\([\S]*" + headers_line = re.findall(regx, line) - headers.append(header) + for i, header in enumerate(headers_line): + header = header[1:-1] + + if all(keyword in header for keyword in ['Get', '(', ')']): + header = header[4:-1] + + headers_line[i] = header + + for header in headers_line: + if header != '"\\n"': + headers.append(header) line = inp.readline() - return headers @@ -726,11 +734,14 @@ def refine(data: dict, options={}): # FIXME Since the big INP files now have the same filename for all iterations, we need to adjust the code to only get unique values from the get_paths function # FIXME get_headers() is also not working now. 
    # Needs to be adjusted to the new way of writing the Out-parameters

-    paths = get_paths(data['inp'])
-    headers = get_headers(data['inp'])
-
+    paths = get_paths(data['inp'])
+    paths = aux.get_unique(paths)
+
+
     for path in paths:

+        headers = get_headers(data['inp'], path)
+
         dirname = os.path.dirname(path)

         if dirname and not os.path.isdir(dirname):
@@ -763,9 +774,10 @@ def refine(data: dict, options={}):



-def read_results():

-    # FIXME Write the function
+def read_results(path):

-    return None
+    results = pd.read_csv(path, delim_whitespace=True, index_col=0)
+
+    return results



From ed5d96bf4e84750d21294b9051177a8359566406 Mon Sep 17 00:00:00 2001
From: halvorhv
Date: Fri, 15 Jul 2022 11:35:04 +0200
Subject: [PATCH 12/14] read_cif now can read cifs w/o cell_volume

---
 nafuma/xrd/refinement.py | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)

diff --git a/nafuma/xrd/refinement.py b/nafuma/xrd/refinement.py
index b94e1fb..1a5808d 100644
--- a/nafuma/xrd/refinement.py
+++ b/nafuma/xrd/refinement.py
@@ -3,7 +3,7 @@ import os
 import shutil
 import subprocess
 import re
-
+import numpy as np
 import time
 import datetime
 import warnings
 import json

@@ -249,11 +249,7 @@ def write_str(fout, data, options, index=0):
         fout.write(f'\t\tbe = lpbe_{label} ;\n')
         fout.write(f'\t\tga = lpga_{label} ;\n')
         fout.write('\n')
-        #FIXME fix the if-statement below, so that cell-volume is the correct formula, based on lattice params and angles.
-        if '_cell_volume' not in atoms.keys():
-            atoms['_cell_volume'] = 0
-        else:
-            fout.write(f'\t\tcell_volume\t vol_{label}_XXXX {atoms["_cell_volume"]}\n')
+        fout.write(f'\t\tcell_volume\t vol_{label}_XXXX {atoms["_cell_volume"]}\n')
         fout.write(f'\t\tcell_mass\t mass_{label}_XXXX 1\n')
         fout.write(f'\t\tweight_percent\t wp_{label}_XXXX 100\n\n')
         fout.write('\n')
@@ -393,7 +389,7 @@ def write_output(fout, data, options, index=0):

 def read_cif(path):

-    data = {'atoms': {}} # Initialise dictionary
+    atoms = {'atoms': {}} # Initialise dictionary
     read = True # Initialise read toggle

     # Lists attributes to get out of the .CIF-file. This will correspond to what VESTA writes out, not necessarily what you will find in ICSD
@@ -428,12 +424,12 @@ def read_cif(path):

            # FIXME What a horrible condition statement - need to fix this!
while line and not line.lstrip().startswith('_') and not line.lstrip().startswith('loop_') and not line.lstrip().startswith('#End') and not line=='\n': # Initialise empty dictionary for a given atom if it has not already been created in another loop - if line.split()[0] not in data['atoms'].keys(): - data["atoms"][line.split()[0]] = {} + if line.split()[0] not in atoms['atoms'].keys(): + atoms["atoms"][line.split()[0]] = {} # Add all the attribute / value pairs for the current loop for i, attr in enumerate(loop): - data["atoms"][line.split()[0]][attr[:-1].lstrip()] = line.split()[i] + atoms["atoms"][line.split()[0]][attr[:-1].lstrip()] = line.split()[i] # Read new line line = cif.readline() @@ -453,15 +449,22 @@ def read_cif(path): value = ' '.join([str(i) for i in value]) - data[attr] = value + atoms[attr] = value line = cif.readline() else: line = cif.readline() + if '_cell_volume' not in atoms.keys(): + a = float(atoms['_cell_length_a'].split('(')[0]) + b = float(atoms['_cell_length_b'].split('(')[0]) + c = float(atoms['_cell_length_c'].split('(')[0]) + alpha = float(atoms['_cell_angle_alpha'].split('(')[0]) + beta = float(atoms['_cell_angle_beta'].split('(')[0]) + gamma = float(atoms['_cell_angle_gamma'].split('(')[0]) - print(data.keys()) - return data + atoms['_cell_volume'] = a * b * c * np.sqrt(1-np.cos(alpha)**2 - np.cos(beta)**2 - np.cos(gamma)**2 + 2 * np.cos(alpha) * np.cos(beta) * np. cos(gamma)) + return atoms From fb040aa0e54f1a2d09d2626cc63d5fb154e099f5 Mon Sep 17 00:00:00 2001 From: rasmusvt Date: Fri, 15 Jul 2022 11:51:43 +0200 Subject: [PATCH 13/14] Strip headers from INPs when making big file --- nafuma/xrd/refinement.py | 60 ++++++++++++++++++++++------------------ 1 file changed, 33 insertions(+), 27 deletions(-) diff --git a/nafuma/xrd/refinement.py b/nafuma/xrd/refinement.py index f519b43..55150a1 100644 --- a/nafuma/xrd/refinement.py +++ b/nafuma/xrd/refinement.py @@ -539,7 +539,11 @@ def make_big_inp(data: dict, options={}): runlist = os.path.join(os.path.dirname(options['output']), 'runlist.txt') options['include'].append(runlist) - with open(options['template'], 'r') as template, open(options['output'], 'w', newline='\n') as output, open(runlist, 'w', newline='\n') as runlist: + with open(options['template'], 'r') as template: + strip_headers(template) + + + with open('tmp_template.inp', 'r') as template, open(options['output'], 'w', newline='\n') as output, open(runlist, 'w', newline='\n') as runlist: write_headers(output, options) @@ -553,6 +557,34 @@ def make_big_inp(data: dict, options={}): runlist.write('#define \tUSE_'+f'{i}'.zfill(4) + '\n') + os.remove('tmp_template.inp') + + +def strip_headers(fin): + + line = fin.readline() + newlines = [] + + while 'r_wp' not in line: + if line[0] != '\'': + line = fin.readline() + else: + newlines.append(line) + line = fin.readline() + + newlines.append(line) + newlines = newlines + fin.readlines() + + with open('tmp_template.inp', 'w') as fout: + for line in newlines: + fout.write(line) + + + + + + + def write_headers(fout, options): @@ -650,32 +682,6 @@ def make_inp_entry(template: str, xdd: str, num: int, options: dict) -> str: # Replace diffractogram-path s = template.replace(temp_xdd, xdd).replace('XXXX', num_str) - # basename = os.path.basename(xdd).split(".")[0] - - # # Define regular expressions for output lines - # regs = [r'Out_Riet\([\S]*\)', - # r'Out_CIF_STR\([\S]*\)', - # r'Out_CIF_ADPs\([\S]*\)', - # r'Out_CIF_Bonds_Angles\([\S]*\)', - # r'Out_FCF\([\S]*\)', - # r'Create_hklm_d_Th2_Ip_file\([\S]*\)', - 
-    #         r'out(.*?)append']
-
-    # # Define substitute strings for output lines
-    # subs = [f'Out_Riet({options["save_dir"]}/{basename}_riet.xy)',
-    #         f'Out_CIF_STR({options["save_dir"]}/{basename}.cif)',
-    #         f'Out_CIF_ADPs({options["save_dir"]}/{basename}.cif)',
-    #         f'Out_CIF_Bonds_Angles({options["save_dir"]}/{basename}.cif)',
-    #         f'Out_FCF({options["save_dir"]}/{basename}.fcf)',
-    #         f'Create_hklm_d_Th2_Ip_file({options["save_dir"]}/{basename}_hkl.dat)',
-    #         f'out \t {options["save_dir"]}/{basename}_refined_params.dat \t append']
-
-    # # Substitute strings in output lines
-    # for reg, sub in zip(regs, subs):
-    #     s = re.sub(reg, sub, s)
-
-
-
     return s


From 7e952a4556921cfcf2cd1727dc7fd5d3658fd332 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Halvor=20H=C3=B8en=20Hval?=
Date: Wed, 20 Jul 2022 18:00:33 +0200
Subject: [PATCH 14/14] Testing from laptop

---
 test.txt | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 test.txt

diff --git a/test.txt b/test.txt
new file mode 100644
index 0000000..e69de29
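
Note on the cell-volume fallback introduced in PATCH 12/14: when a .CIF-file lacks _cell_volume, read_cif now derives it from the lattice parameters via V = a*b*c*sqrt(1 - cos^2(alpha) - cos^2(beta) - cos^2(gamma) + 2*cos(alpha)*cos(beta)*cos(gamma)). CIF angles are stated in degrees, while numpy's trigonometric functions expect radians, so a conversion step is likely needed before taking the cosines. A minimal illustrative sketch (not code from the patches above; the function name is invented for illustration):

    import numpy as np

    def triclinic_cell_volume(a, b, c, alpha, beta, gamma):
        # alpha, beta, gamma are assumed to be in degrees, as read from a .CIF-file
        ca, cb, cg = np.cos(np.radians([alpha, beta, gamma]))
        # General (triclinic) unit-cell volume; reduces to a*b*c for orthogonal cells
        return a * b * c * np.sqrt(1 - ca**2 - cb**2 - cg**2 + 2 * ca * cb * cg)

    # A cubic 4 Angstrom cell should give 64 cubic Angstrom
    print(triclinic_cell_volume(4.0, 4.0, 4.0, 90.0, 90.0, 90.0))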
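
The refined-parameter tables written through the 'out ... append' blocks are plain whitespace-separated text files, and PATCH 11/14 adds read_results() as a thin wrapper around pandas for loading them. A minimal usage sketch (the file name below is hypothetical, and the available columns depend on the Out_String definitions in the .INP-file):

    import pandas as pd

    # Equivalent to nafuma.xrd.refinement.read_results('refined_params.dat')
    results = pd.read_csv('refined_params.dat', delim_whitespace=True, index_col=0)

    print(results.columns)   # the headers collected by get_headers()
    print(results.head())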