Compare commits

..

396 commits

Author SHA1 Message Date
8a044e73e1 Merge remote-tracking branch 'github/master'
Some checks are pending
Automated testing / build-linux (push) Waiting to run
2024-07-19 18:06:47 +02:00
rasmusthog
7dfc0444f0 Allow different multiplication factor for each scan 2022-11-03 20:35:51 +01:00
rasmusthog
1fc003c755 Add highlight and better colour mixing 2022-10-31 20:56:05 +01:00
rasmusthog
d662d3567d Add function to strip regions from .xy-files 2022-10-28 16:03:06 +02:00
rasmusthog
996b53195c Repair xticks and xlim for heatmaps 2022-10-27 19:23:11 +02:00
rasmusthog
9c87ed0346 Refactor slightly 2022-10-25 20:04:04 +02:00
rasmusthog
eb5116fb1d Add force reset of ylim when plotting backgrounds 2022-10-25 15:15:35 +02:00
rasmusthog
2b57b2ce86 Make small adjustment to Fe-interval 2022-10-25 15:15:05 +02:00
rasmusthog
8d1bee56be Add functions to assign tickmarks 2022-10-24 20:58:39 +02:00
rasmusthog
8fafdefc71 Change which chamber is used for transmission 2022-10-24 20:58:22 +02:00
Rasmus Vester Thøgersen
fd73dc577f
Fix test issues 2022-10-23 18:59:00 +00:00
Rasmus Vester Thøgersen
e6243d4d38
Remove old version of swap_values 2022-10-23 18:57:42 +00:00
rasmusvt
adfed84526 Add possibilty to append ion count 2022-10-23 20:24:59 +02:00
rasmusvt
686ef6ce28 Fix index return type of exact match and add isnan 2022-10-23 20:24:31 +02:00
rasmusvt
cefd7a2edd Add correlation between ions extracted and time 2022-10-23 20:23:59 +02:00
rasmusthog
f7ba19b103 Fix small bug in colour generation 2022-10-23 18:04:56 +02:00
rasmusthog
27f9d2d2e6 Fix bug that calculated wrong molecular weight 2022-10-23 13:48:45 +02:00
rasmusthog
97ac3505de Generalise colours of fill_between 2022-10-23 13:48:26 +02:00
rasmusthog
d617e50cda Allow passing df that has no idxmax or idxmin 2022-10-23 13:48:02 +02:00
rasmusthog
90168e4f1f Start correlation of ions to timestamps 2022-10-23 13:47:26 +02:00
halvorhv
0c8730d5c2 Merge branch 'master' of github.com:rasmusthog/nafuma 2022-10-19 17:06:20 +02:00
halvorhv
ecfe7106f1 Add func for making general background in q 2022-10-19 17:06:15 +02:00
rasmusthog
f23d140453 Allow passing fig and ax to plot_cv() 2022-10-18 19:23:24 +02:00
rasmusthog
940e794a1c Improve handling of colour and other minor fixes 2022-10-17 19:36:16 +02:00
rasmusthog
c02b575b54 Allow looping through cycles if max is 0 2022-10-17 19:35:48 +02:00
rasmusthog
2e9b5e5bc0 Add colour mixer 2022-10-17 19:34:39 +02:00
rasmusthog
97a82353a6 Remove print-statement 2022-10-17 19:34:23 +02:00
rasmusthog
ed2bb8158e Allow specification of colours for several cycles 2022-10-16 19:19:37 +02:00
rasmusthog
f8f5e2ebb2 Reset index of cycles to start at 0 2022-10-15 17:16:54 +02:00
rasmusthog
2265b2a69e Update to use new version of update_options 2022-10-15 17:16:34 +02:00
rasmusthog
317805f4c3 Add functions to read and plot EDS spectrum 2022-10-15 17:16:04 +02:00
rasmusthog
8775a20ba5 Add function to find index closest to a val in df 2022-10-15 17:14:31 +02:00
halvorhv
82b0981ada Initial func to make manual background data in inp 2022-10-14 17:09:42 +02:00
halvorhv
366768ecbf removing random print-functions 2022-10-14 13:47:13 +02:00
halvorhv
7501ac528e Inclusion of manual background in write_xdd 2022-10-14 13:24:39 +02:00
halvorhv
fb524e64d1 Merge branch 'master' of github.com:rasmusthog/nafuma 2022-10-14 10:21:32 +02:00
halvorhv
b483d8e5fb Generalized read_xy, so that WL can be float 2022-10-14 10:21:26 +02:00
rasmusthog
d3af382de3 Merge branch 'master' of github.com:rasmusthog/nafuma 2022-10-13 20:53:47 +02:00
rasmusthog
4cc6f4557f Improve refinement plotting function 2022-10-13 20:53:42 +02:00
rasmusthog
36c6482928 Change way PPMS-data is read 2022-10-13 20:52:58 +02:00
rasmusthog
c6842f6196 Allow fetching of equi data if one dosn't converge 2022-10-13 20:52:37 +02:00
rasmusthog
536160666e Fix bug that didn't plot text in correct Axes-obj 2022-10-13 20:51:54 +02:00
halvorhv
3d493a69f8 Merge branch 'master' of github.com:rasmusthog/nafuma 2022-10-12 21:13:22 +02:00
halvorhv
aa6fa7875d Added "" so also files with "dash" can be refined 2022-10-12 21:13:17 +02:00
rasmusthog
33012c6035 Allow user to skip read-in of error columns 2022-10-12 21:07:44 +02:00
rasmusthog
c2477bfd99 Update COOP-plotting function 2022-10-12 21:07:22 +02:00
rasmusthog
2deb640b3d Add option to exclude (make region = 0) in diffs 2022-10-11 19:57:05 +02:00
rasmusthog
2a2f092f8b Add make_animation() 2022-10-11 19:56:32 +02:00
rasmusthog
4496c34fd2 Implement Lorentzian, PV and double peak fit 2022-10-10 20:32:37 +02:00
rasmusthog
f30426e95d Add arctan fit function to pre-edge background 2022-10-10 17:42:39 +02:00
rasmusthog
fd662e3cbd Change filename behaviour for integration 2022-10-10 17:42:29 +02:00
rasmusthog
b98fd25a5c Add errors to writeouts for TOPAS 2022-10-09 18:42:16 +02:00
rasmusthog
2e469909d7 Add multiply and drawdown to plotting 2022-10-09 18:42:05 +02:00
rasmusthog
8b0a5bcff7 Add multiplication and drawdown for diffs 2022-10-09 18:41:30 +02:00
rasmusthog
76cf0f2275 Draft of write_data function 2022-10-09 18:40:54 +02:00
rasmusthog
576ce0301b Add pre-edge feature fitting function 2022-10-09 18:40:11 +02:00
rasmusthog
9e46504ac9 Initial add of PPMS-analysis module 2022-10-09 18:39:55 +02:00
rasmusthog
11592301fe Allow passing of own Axes-object 2022-10-09 18:39:37 +02:00
rasmusthog
65de5ecf45 Expand Battsmall functionality 2022-10-09 18:39:28 +02:00
rasmusthog
3998005334 Updates to EDS-scripts 2022-10-09 18:39:09 +02:00
rasmusthog
4ccc7421f7 Small changes to inset 2022-10-09 18:38:46 +02:00
rasmusthog
0e7a566f01 Rewrite read_pdos 2022-10-09 18:38:25 +02:00
rasmusthog
f72bd4e77f Rewrite plot_pdos 2022-10-09 18:38:00 +02:00
rasmusthog
3f1d1e4d1f Add inset axes creation and bg drawing 2022-10-09 18:37:42 +02:00
rasmusthog
4424d3d6c1 Remove need for required_options 2022-10-09 18:37:14 +02:00
rasmusvt
f1ec9df9b4 Add a bunch of stuff to refinement 2022-09-27 10:22:06 +02:00
rasmusvt
83f4f6a155 Get HTXRD-plotter to a working state 2022-09-27 10:21:39 +02:00
rasmusvt
6247ce24bf Initial add of EDS module 2022-09-27 10:20:13 +02:00
rasmusvt
61cc0343e0 Add POSCAR writer and str generator 2022-09-27 10:19:51 +02:00
rasmusvt
8aa8164a80 Small adjustment to legend plotter 2022-09-27 10:19:33 +02:00
rasmusvt
07f304ce97 Add new snippets 2022-09-20 20:25:30 +02:00
rasmusvt
006cddaaa3 Add new params and add define statements 2022-09-20 20:25:18 +02:00
rasmusvt
4fd68eab55 Add plotting of refinement data 2022-09-20 20:24:32 +02:00
rasmusvt
caca4e0c1f Allow for plotting EoS even when one fails 2022-09-20 20:24:16 +02:00
rasmusvt
02367581ce Initial add of phonon scripts 2022-09-20 20:23:22 +02:00
rasmusvt
39f59e472c Merge branch 'master' of github.com:rasmusthog/nafuma 2022-09-18 15:50:59 +02:00
rasmusvt
686f74a929 Add initial script files for DFT processing 2022-09-18 15:50:54 +02:00
halvorhv
716e4acb82 Also hh:mm:ss is usable when selecting spectra 2022-09-01 14:09:03 +02:00
halvorhv
8714e86753 Also hh:mm:ss is importable as time stamp from xas 2022-09-01 14:08:26 +02:00
halvorhv
8c3861c984 Fixed split_scan_data for trans data w/o roi 2022-08-31 20:36:31 +02:00
rasmusvt
9ed675b8cb Short term fix for heatmap generation with beamline data 2022-08-26 13:24:17 +02:00
rasmusvt
7285f47db3 Strip headers from exported EVA-files also 2022-08-26 13:23:32 +02:00
rasmusvt
42cddb8fa4 Add GIF-animation for 2D scans 2022-08-25 15:16:14 +02:00
rasmusvt
ab1752e179 Add batch integration 2022-08-25 15:15:44 +02:00
rasmusvt
f76a742fff Add metadata and expand image array read-in 2022-08-24 16:15:56 +02:00
rasmusvt
f3bf6f88d0 Add combined averaging and dark subtraction 2022-08-24 13:27:44 +02:00
rasmusvt
f92636370a Fix required options 2022-08-24 13:26:15 +02:00
rasmusvt
0e5f7dba55 Add unit_tables to xanes-module 2022-08-22 17:03:17 +02:00
rasmusvt
1461d71b99 Convert timestamps in metadata based on ref time 2022-08-22 17:03:06 +02:00
rasmusvt
9a3efbf505 Determine decimal point when reading batsmall data 2022-08-22 17:02:42 +02:00
Rasmus Vester Thøgersen
eb2666b85d
Merge pull request #14 from rasmusthog/rasmus_electrochemistry
Merge new electrochemistry functionality into main
2022-08-22 08:45:11 +00:00
Rasmus Vester Thøgersen
fb63451fdd
Merge pull request #13 from rasmusthog/rasmus_xanes
Merge new XANES-functionality into main
2022-08-22 08:44:34 +00:00
rasmusvt
9f2199e69a Make colour gradient work as intended for subsets 2022-08-18 09:39:14 +02:00
rasmusvt
8ca73e4687 Estimate edge position from xanes_data instead of xanes_data_original 2022-08-16 12:09:37 +02:00
rasmusvt
f31849b08b Add function to pick out scans based on timestamps 2022-08-16 12:08:52 +02:00
rasmusvt
a77eb23a38 Make sure e0_diff is loaded as float, not str 2022-08-16 11:43:56 +02:00
rasmusvt
24a7b12299 Extend save / load functions to include e0 2022-08-16 11:16:20 +02:00
rasmusvt
cd0eaff25b Make BATSMALL-read more general and import decimal = , 2022-08-15 17:35:01 +02:00
rasmusvt
0a6d682649 Add working plot_xanes function 2022-08-09 17:35:41 +02:00
rasmusvt
e67846d6f2 Initial commit of plot_xanes 2022-08-09 16:41:53 +02:00
rasmusvt
b40b023cbc Let load_data conform to data/options-standard 2022-08-09 16:41:39 +02:00
rasmusvt
731ea70f77 Add functions to save and load data 2022-08-09 16:26:49 +02:00
rasmusvt
9cda7694d0 Add choice to load transmission data 2022-08-08 16:40:45 +02:00
rasmusvt
df5190667e Ad plotting of CV-data 2022-08-08 15:45:09 +02:00
rasmusvt
9a73f57a82 Allow reading CV data from BioLogic 2022-08-08 15:44:55 +02:00
rasmusvt
b69a4b9521 Plot efficiencies without changing options 2022-08-05 11:01:09 +02:00
rasmusvt
790048098a Make plotting match new format of summary df 2022-08-05 10:35:59 +02:00
rasmusvt
b8ce2b64cc Add efficiency calculations and new format of summary df 2022-08-05 10:35:35 +02:00
rasmusvt
745522ed82 Calculate specific energy in Neware data 2022-08-05 09:51:28 +02:00
rasmusvt
642f166d71 Constrain size for GIFs, fix fps and incomp cycles 2022-08-04 18:57:27 +02:00
rasmusvt
b6780e8a90 Add option to make animation of GC plots 2022-08-02 21:55:55 +02:00
rasmusvt
4269ac5d46 Fix formatting error in unit conversion 2022-08-02 21:55:42 +02:00
rasmusvt
43663331f1 Remove print statement 2022-08-02 13:49:09 +02:00
rasmusvt
8c20c029ae Fix bug 2022-08-02 13:48:53 +02:00
rasmusvt
e6a4e2c81f Allow deletion of certain datapoints from the datasets 2022-08-02 13:48:29 +02:00
rasmusvt
116e65e0e1 Make 'which_cycles' work for summaries 2022-08-02 13:24:55 +02:00
rasmusvt
3a6a000b14 Allow older formats and manual incrementing of cycles 2022-08-02 13:24:39 +02:00
rasmusvt
130e120690 Chg and dchg of summary can be plotted independent 2022-08-02 10:04:49 +02:00
rasmusvt
0e053ea1e2 Make ticks face in by default 2022-08-02 10:03:49 +02:00
rasmusvt
20111a7457 Allow plotting of summary through plot_gc() 2022-08-01 17:00:26 +02:00
rasmusvt
224d05e0e9 Return summary as separte chg / dchg dataframes 2022-08-01 17:00:10 +02:00
rasmusvt
0699399d1a Mix intervals and ints in update_cycles 2022-08-01 16:37:37 +02:00
rasmusvt
9ebab7d6ee Add splice cycles for Neware (summary + cycles) 2022-08-01 15:47:46 +02:00
rasmusvt
574f633db0 Add correct unit conversion of Neware summaries 2022-08-01 14:50:10 +02:00
rasmusvt
c67629bef6 Merge branch 'master' of github.com:rasmusthog/nafuma 2022-08-01 11:05:05 +02:00
rasmusvt
9f9d364a2f Allow loading of Neware datasets from parent dirs 2022-08-01 11:04:59 +02:00
Halvor Høen Hval
7e952a4556 tester fra laptop 2022-07-20 18:00:33 +02:00
Rasmus Vester Thøgersen
f0acae491f
Merge pull request #12 from rasmusthog/rasmus_refinements
Merge refinements into master
2022-07-16 07:36:26 +00:00
rasmusvt
772bd25019 Merge branch 'rasmus_refinements' of github.com:rasmusthog/nafuma into rasmus_refinements 2022-07-15 11:51:47 +02:00
rasmusvt
fb040aa0e5 Strip headers from INPs when making big file 2022-07-15 11:51:43 +02:00
halvorhv
dec40c217c Merge branch 'rasmus_refinements' of github.com:rasmusthog/nafuma into rasmus_refinements 2022-07-15 11:35:09 +02:00
halvorhv
ed5d96bf4e read_cif now can read cifs w/o cell_volume 2022-07-15 11:35:04 +02:00
rasmusvt
c987fc689f Fix get_headers to read new formatting correctly 2022-07-15 11:26:57 +02:00
rasmusvt
7ea27abf3a Add function to get unique entries in list 2022-07-15 11:25:27 +02:00
halvorhv
4e579bd07b dirty fix to enable read cif w/o cell_volume 2022-07-14 21:44:36 +02:00
rasmusvt
92075fbb66 Fix filenames in make_big_inp and add fixmes 2022-07-14 19:47:36 +02:00
rasmusvt
ace44cb48f Add initial INP-generation 2022-07-14 19:15:39 +02:00
rasmusvt
51e2928472 Add cif-reader and inital INP-generator 2022-07-13 17:26:29 +02:00
rasmusvt
d32b40409c Add intial version of refinement function 2022-07-12 17:51:08 +02:00
rasmusvt
71f3940c12 Add creation of .INP-file for multiple refinements 2022-07-11 17:38:40 +02:00
rasmusvt
7ff917bdcd Change timestamp to modified instead of creation 2022-07-11 17:38:13 +02:00
rasmusvt
cf9499a988 Initial commit of refinement submodule 2022-07-11 13:40:24 +02:00
rasmusvt
649196570b Add generic backup file function 2022-07-11 13:39:50 +02:00
rasmusvt
132bc7274b Update version number 2022-07-08 17:46:15 +02:00
Rasmus Vester Thøgersen
03b1e4719b
Merge pull request #11 from rasmusthog/rasmus_htxrd
Rasmus htxrd
2022-07-08 17:44:42 +02:00
rasmusvt
26c8813eae Add comment I will be happy for in the future 2022-07-08 17:43:44 +02:00
rasmusvt
c9b620f166 Get HT-XRD plotting to a working state 2022-07-08 17:41:13 +02:00
rasmusvt
c6b5dc4627 Update with new version number 2022-07-07 15:15:07 +02:00
Rasmus Vester Thøgersen
b755c1e55d
Merge pull request #10 from rasmusthog/rasmus_htxrd
Rasmus htxrd
2022-07-07 15:01:27 +02:00
Rasmus Vester Thøgersen
c479575c85
Merge pull request #9 from rasmusthog/halvor_xanes
Halvor xanes
2022-07-07 14:58:08 +02:00
Rasmus Vester Thøgersen
28b807c9f9
Merge branch 'master' into halvor_xanes 2022-07-07 14:57:48 +02:00
rasmusvt
7f61617d9a Add HTXRD-reading with saving in individual files 2022-07-07 14:52:56 +02:00
halvorhv
f2a5a25aaf Merge branch 'halvor_xanes' of github.com:rasmusthog/nafuma into halvor_xanes 2022-07-07 14:52:00 +02:00
halvorhv
8096cf3bd5 no change 2022-07-07 14:51:57 +02:00
halvorhv
f0c547f889 Adding function to rearrange filenames-array 2022-07-07 14:49:27 +02:00
rasmusvt
b1b705f28f Ignore makedirs-command if file in base folder 2022-07-07 13:07:22 +02:00
rasmusvt
0ed85e1398 Write timestamp during split and add reading of metadata 2022-07-07 11:50:15 +02:00
rasmusvt
8939bb8479 Remove annoying print in normalise() 2022-07-07 11:49:58 +02:00
halvorhv
4cec8d2756 added FIXMEs 2022-07-07 10:24:04 +02:00
rasmusvt
6eb45772d1 Fix normalisation issues 2022-07-06 17:33:38 +02:00
rasmusvt
327cef5b51 Log updates and let normalise and flatten save plots 2022-07-05 16:37:42 +02:00
rasmusvt
5e2cef2cde Let save_options create folder if it doesn't exist 2022-07-05 16:37:31 +02:00
rasmusvt
da7099a924 Add manual choice of active roi 2022-07-05 10:55:39 +02:00
rasmusvt
b5cac158a4 Fix active_roi determination 2022-07-04 18:18:31 +02:00
rasmusvt
1a06e7b4fc Poor attempts at fixing bug in roi choice 2022-07-01 16:09:43 +02:00
rasmusvt
7336af061f Only plot one dataset if in interactive mode 2022-07-01 14:29:13 +02:00
rasmusvt
eb8660d71d Fix determination of active roi if only one exists 2022-06-30 17:08:28 +02:00
rasmusvt
1757445f89 Add new functions and fix certain bugs 2022-06-30 17:08:14 +02:00
rasmusvt
8702cdfa00 Make logfile-directory if not already exists 2022-06-30 17:07:31 +02:00
rasmusvt
b84cecaf84 Update documentation 2022-06-29 16:40:36 +02:00
rasmusvt
254becff69 Make sure filenames is a list before reading 2022-06-29 16:13:19 +02:00
Rasmus Vester Thøgersen
c0af1dc84c
Merge pull request #8 from rasmusthog/rasmus_xanes_interactive
Rasmus xanes interactive
2022-06-29 15:27:43 +02:00
rasmusvt
faf41db41f Add tabulated K-edge values 2022-06-29 15:26:51 +02:00
rasmusvt
6bbd6776b8 Tweaks based on workflow testing 2022-06-29 15:26:43 +02:00
rasmusvt
c522b73ca4 Add K-edges from ITC Vol C 2022-06-28 12:10:46 +02:00
rasmusvt
cc3c4dc5b6 Add interactive to smoothing + documentation updates 2022-06-27 20:46:01 +02:00
rasmusvt
1e147854a7 Update documentation for determination of edge position 2022-06-27 16:43:42 +02:00
rasmusvt
8c2723ee55 Plot full scan with computed edge position 2022-06-27 16:16:46 +02:00
rasmusvt
537c7b3c5a Add masks to pre and post edge fitting 2022-06-27 13:34:18 +02:00
rasmusvt
8e0d8f4861 ADd interactive mode for pre and post edge fitting 2022-06-27 12:20:49 +02:00
halvorhv
931b3e42ae adding a fixme for split_scan_data-function 2022-06-24 19:39:11 +02:00
halvorhv
726535c66f fixing the "add_rois"-option 2022-06-24 19:28:55 +02:00
nanowhale
b2b19086a5
Merge pull request #7 from rasmusthog/rasmus_xanes
Rasmus xanes
2022-06-24 17:00:11 +02:00
rasmusvt
2b14a64c4b Attempt to get flattening and normalisation to behave properly 2022-06-23 15:32:29 +02:00
rasmusvt
ec1fba1c82 Refactor normalisation and flattening functions 2022-06-23 11:46:06 +02:00
rasmusvt
4d501adb72 Complete smooth and get determine_edge_position going 2022-06-22 15:56:34 +02:00
rasmusvt
9e39135f00 Update smoothing function 2022-06-21 19:04:04 +02:00
rasmusvt
1cf949e36b Start clean-up of smoothing 2022-06-21 18:02:08 +02:00
rasmusvt
054311ca10 Small adjustments to logging 2022-06-21 18:01:53 +02:00
rasmusvt
cc80a48259 Add logging 2022-06-20 19:16:20 +02:00
rasmusvt
7214746af1 Refactor split_scans 2022-06-20 16:08:36 +02:00
rasmusvt
9c6a7d5991 Refactor post_edge_fit 2022-06-17 16:59:37 +02:00
nanowhale
d497217ae3
Merge pull request #6 from rasmusthog/rasmus_xanes
Rasmus xanes
2022-06-17 16:17:54 +02:00
rasmusvt
672d5549fe Fix lint issue 2022-06-17 16:14:21 +02:00
rasmusvt
ba349a5892 Refactor estimation of edge position and automatise pre edge limit setting 2022-06-17 15:58:39 +02:00
rasmusvt
cb2c7532e6 Make sure data['path'] is in a list 2022-06-17 15:46:33 +02:00
rasmusvt
880722d778 Load correct xmap_roi for more cases 2022-06-17 15:35:24 +02:00
rasmusvt
e7a95d65ed Refactor read_data and move get_filenames 2022-06-16 17:56:08 +02:00
rasmusvt
0b89524ef1 Clear up small bugs encountered during testing 2022-06-16 17:55:42 +02:00
rasmusvt
303704c357 Add filter 2022-06-16 17:54:51 +02:00
rasmusvt
0d757ce365 Move get_filenames to auxillary and generalise 2022-06-16 16:26:41 +02:00
rasmusvt
a49fc8b0d2 Refactor read_data 2022-06-16 16:18:22 +02:00
rasmusvt
2baa765806 Quasi-fixed linting issue causing automatic test to fail 2022-06-16 15:55:21 +02:00
rasmusvt
bac137042e Refactor pre edge subtraction 2022-06-16 15:42:50 +02:00
rasmusvt
e0b71a85b7 Add save fit function to pre edge fit function 2022-06-16 14:58:41 +02:00
halvorhv
7485adef07 Adding sketch for normalization and flattening 2022-06-15 16:00:47 +02:00
rasmusvt
d17e715d82 Separating and refactoring pre_edge_normalisation 2022-06-15 14:50:32 +02:00
rasmusvt
909c616c50 Add function to write out log messages 2022-06-15 14:28:50 +02:00
rasmusvt
d88a302d2a Add Fe and Co to find_element and refactor 2022-06-15 13:44:42 +02:00
halvorhv
7676bd06af Adding FIXME's 2022-06-15 13:21:26 +02:00
halvorhv
8ce1557439 finding e0 2022-06-15 10:00:13 +02:00
rasmusvt
ebc77c1b9e Extract data from .brml heatscans and save as .xy 2022-06-13 13:50:24 +02:00
Rasmus Vester Thøgersen
a7f6abe0b9
Merge pull request #5 from rasmusthog/rasmus_pyfai
Add option to use mask with pyfai-integrations
2022-06-13 09:06:15 +00:00
rasmusvt
a1106ac88d Add option to use mask with pyfai-integrations 2022-05-20 17:41:54 +02:00
Rasmus Vester Thøgersen
9490c338c1
Merge pull request #4 from rasmusthog/rasmus_small_improvements
Rasmus small improvements
2022-05-19 19:33:02 +00:00
rasmusvt
da8907083b Fix arcsin issue and allow plotting w/o reflection data 2022-05-19 21:32:10 +02:00
rasmusvt
55b22d5bf1 Small improvements to integrate function 2022-05-12 19:17:43 +02:00
rasmusvt
23f037c0ef Add function to strip headers from .xy 2022-05-11 18:35:11 +02:00
rasmusvt
8f94fa4dc6 Remove outcommented lines in splice_cycles 2022-05-10 17:30:00 +02:00
rasmusvt
8d8cad966d Allow plotting of multiple beamline scans (quickfix) 2022-05-10 17:29:22 +02:00
halvorhv
c9660109cb adding a smoothing function 2022-04-27 10:37:02 +02:00
rasmusvt
d30c9c3b16 Add change of x-values in interactive mode 2022-04-22 16:45:28 +02:00
rasmusvt
7af1dc4228 Add initial interactive capabilities to GC-plots 2022-04-22 16:31:04 +02:00
Rasmus Vester Thøgersen
2ed2117eac
Merge pull request #3 from rasmusthog/rasmus_ec_standard_functions
Change EC-functions to same format as XRD
2022-04-22 13:51:00 +00:00
rasmusvt
5735d011aa Add correct formatting of x- and y-labels for EC-plots 2022-04-22 15:49:02 +02:00
rasmusvt
514a20604b Standardise data flow 2022-04-22 15:19:36 +02:00
rasmusvt
95e411ac21 Fix bugs giving errors with plot_gc() 2022-04-22 13:03:31 +02:00
Rasmus Vester Thøgersen
b586f46979
Merge pull request #2 from rasmusthog/rasmus_docs
Add sphinx-documentation for xrd-module
2022-04-22 09:11:59 +00:00
rasmusvt
cd5cbe5dd4 Add first draft of docs built with sphinx 2022-04-08 18:16:57 +02:00
rasmusvt
f52f00a0b7 Clean up files 2022-04-08 15:32:55 +02:00
rasmusvt
80cc8f6779 Clean up files 2022-04-08 15:31:40 +02:00
rasmusvt
4b74ea4592 Fixing another lint problem 2022-04-08 14:47:35 +02:00
rasmusvt
67ba03dc0c Fix lint problem in xanes.io.py 2022-04-08 14:30:05 +02:00
Rasmus Vester Thøgersen
092ecfa380
Merge pull request #1 from rasmusthog/halvor_xanes
Halvor xanes
2022-04-08 13:36:30 +02:00
rasmusvt
b242602eba Merge halvor_xanes to master 2022-04-08 13:35:13 +02:00
halvorhv
7962b3fc5c Fixing functions 2022-04-08 13:28:47 +02:00
rasmusvt
de2616067d Rename automated test workflow 2022-04-07 17:19:26 +02:00
rasmusvt
f53527fe0b Clean up test files 2022-04-07 17:17:28 +02:00
rasmusvt
08276301f2 Update imports to match new package name 2022-04-07 17:11:14 +02:00
rasmusvt
27c911cf54 Rename package to nafuma from beamtime 2022-04-07 17:05:52 +02:00
rasmusvt
049f30d96b Fix last remaining liniting issue 2022-04-07 15:47:26 +02:00
rasmusvt
3d99af9a7a Fix linting issues 2022-04-07 15:39:07 +02:00
rasmusvt
de8c0ab8d5 Add install of package to workflow 2022-04-07 15:28:49 +02:00
rasmusvt
601a5b0619 Add conda-forge to channels 2022-04-07 15:19:57 +02:00
Rasmus Vester Thøgersen
2c697be9da
New attempt at setting up workflow 2022-04-07 15:18:17 +02:00
rasmusvt
19918b3207 Remove old workflow 2022-04-07 15:17:39 +02:00
rasmusvt
ceb9d1ab46 Upload new environment 2022-04-07 15:15:12 +02:00
rasmusvt
89ea45a9bf Make manual environment file 2022-04-07 14:50:09 +02:00
Rasmus Vester Thøgersen
bae40fcabd
Set up third attempt at autotesting 2022-04-07 14:41:29 +02:00
rasmusvt
521ee4731d Merge branch 'master' of github.com:rasmusthog/nafuma 2022-04-07 14:40:48 +02:00
rasmusvt
c8f1f64af8 Update environment.yml without build 2022-04-07 14:40:38 +02:00
Rasmus Vester Thøgersen
e8bc4d4bc4
Delete second attempt at setting up automated testing 2022-04-07 14:39:03 +02:00
rasmusvt
c10317c1d3 Update requirements with --no-build tag 2022-04-07 14:35:41 +02:00
Rasmus Vester Thøgersen
c9885976f6
Change python version in automated testing 2022-04-07 14:24:57 +02:00
Rasmus Vester Thøgersen
739b197e9a
Set up automated testing with conda 2022-04-07 14:23:03 +02:00
Rasmus Vester Thøgersen
b5f4f98070
Delete first attempt at automated testing 2022-04-07 14:22:28 +02:00
rasmusvt
024f1aa822 Merge branch 'master' of github.com:rasmusthog/nafuma 2022-04-07 14:17:39 +02:00
rasmusvt
5c9c93fbb6 Exporting new requirements and environment 2022-04-07 14:14:38 +02:00
Rasmus Vester Thøgersen
239ea9f61e
Create automated testing 2022-04-07 14:10:25 +02:00
rasmusvt
bf68988665 Add more tests for plotting.py and made more general 2022-04-07 14:05:40 +02:00
halvorhv
baa253ab3e tester fra vscode 2022-04-07 12:34:49 +02:00
halvorhv
ab6bf23100 testing from terminal 2022-04-07 12:30:32 +02:00
rasmusvt
aafcc5a1ed Test of push in VS Code 2022-04-07 12:25:31 +02:00
rasmusvt
e8ae6ba122 Testing push on new repo again 2022-04-07 12:21:43 +02:00
rasmusvt
a84bd065b2 Testing push on new repo 2022-04-07 12:20:08 +02:00
rasmusvt
1db489c21d First commit on new repo 2022-04-07 12:02:13 +02:00
halvorhv
135d577b4b Adjusting post edge processing 2022-04-07 11:34:44 +02:00
halvorhv
872ae759b2 Optimizing code and splitting into smaller functions 2022-04-06 21:10:40 +02:00
Rasmus Vester Thøgersen
4ea0fcce4b Merge pull request #4 from rasmusvt/rasmus_pytest
Merge setup of pytest into master
2022-04-06 17:30:34 +02:00
rasmusvt
e3b0e2bc14 Move some files to root folder 2022-04-06 17:29:45 +02:00
rasmusvt
d702875ab6 Ignore DeprecationWarning 2022-04-06 17:28:04 +02:00
rasmusvt
4587322a9b Add tests for plotting.py 2022-04-06 17:25:46 +02:00
rasmusvt
657276eb91 Add tests for auxillary.py 2022-04-06 15:57:30 +02:00
rasmusvt
e07acdb4bb Remove packages not used 2022-04-06 15:06:03 +02:00
rasmusvt
1146c04a38 Add first tests 2022-04-06 15:02:38 +02:00
rasmusvt
bdfc319013 Initial commit of test 2022-04-06 14:43:07 +02:00
Rasmus Vester Thøgersen
3b1d068e14 Merge pull request #3 from rasmusvt/rasmus_heatmap
Rasmus heatmap
2022-04-06 14:27:32 +02:00
rasmusvt
6372603324 Translate relfections to heatmap x-coords 2022-04-06 13:42:34 +02:00
rasmusvt
dd7f2d9dea Fix bug making last commit not work as intended 2022-04-06 12:44:33 +02:00
rasmusvt
876e0f8d3d Allow rescaling ylim with interactive diff plot 2022-04-05 16:12:19 +02:00
rasmusvt
c0449f2e18 Correct switch of ylim between diff and heatmap 2022-04-05 13:52:27 +02:00
rasmusvt
5541f44a58 Add automatic change of xlim range with heatmaps 2022-04-04 16:47:01 +02:00
rasmusvt
59629fcb61 Add plotting of heatmaps with true xlim 2022-04-04 14:50:20 +02:00
rasmusvt
b0629de9a3 Add functions to round up and down to nearest dec 2022-04-04 14:48:59 +02:00
rasmusvt
567282b80b Fix bug where extract_folder had no default value 2022-04-04 14:48:29 +02:00
rasmusvt
6e851b494b Add heatmap, lacks mapping between x-value and 2th 2022-03-31 17:29:10 +02:00
halvorhv
b0130d49b8 Working on the calibration of the XANES-data, subtracting background and defining post-edge 2022-03-31 17:05:32 +02:00
Halvor Høen Hval
bdce18d195 Merge pull request #2 from rasmusvt/rasmus_interactive_offset
Rasmus interactive offset
2022-03-31 14:07:59 +02:00
rasmusvt
6f9fefae08 Add interactive offset_x 2022-03-31 14:02:04 +02:00
rasmusvt
f504c7dd69 Add interactive offset_y 2022-03-31 13:52:59 +02:00
Rasmus Vester Thøgersen
c637bdce6a Merge pull request #1 from rasmusvt/rasmus_multidiff
Integrating multiple diffractograms into master branch
2022-03-31 11:08:36 +02:00
halvorhv
7c95135c33 Initial commit to XANES-module 2022-03-31 11:04:20 +02:00
rasmusvt
3e4c0a9fc2 Add slider for offset_y (not working yet) 2022-03-30 17:50:41 +02:00
rasmusvt
223be18c3e Change default behaviour with 10+ plots 2022-03-30 17:40:06 +02:00
rasmusvt
0fbfd20a74 Enable reading of CoupledTwoTheta-scans 2022-03-30 16:11:34 +02:00
rasmusvt
606bfc180d Add first stab at ylim widget, should be improved 2022-03-23 16:03:34 +01:00
rasmusvt
2424d89156 Add offset of x- and y-values for stacked diffractograms 2022-03-23 15:31:47 +01:00
rasmusvt
0fb8883d19 Add normalisation of diffractograms (default: True= 2022-03-23 14:26:43 +01:00
rasmusvt
06753ab6b2 Move minmax-determiniation to own function 2022-03-23 14:20:19 +01:00
rasmusvt
d31adb9585 Add feature list 2022-03-22 20:26:38 +01:00
rasmusvt
86a2ae2379 Fix bug to iterate correctly through filenames 2022-03-22 20:25:53 +01:00
rasmusvt
b2ee68859a removing test-file 2022-03-22 20:08:11 +01:00
rasmusvt
0631399d77 removing test-file 2022-03-22 20:07:43 +01:00
rasmusvt
05c06351c0 test2.txt 2022-03-22 20:06:26 +01:00
rasmusvt
f26153757a testing this branching stuff 2022-03-22 20:03:23 +01:00
rasmusvt
da2b2f855b Change plotting method to allow for multiple diffractograms 2022-03-22 19:57:21 +01:00
rasmusvt
9bb52fe819 Add translation of wavelengths and xlim updates 2022-03-18 16:55:43 +01:00
rasmusvt
a96210cf07 Add change modes in interactive mode 2022-03-16 16:16:05 +01:00
rasmusvt
6f2d96005d Fix read_xy() 2022-03-16 15:36:40 +01:00
rasmusvt
17404a36e4 Fix integrate_1d() 2022-03-16 15:23:55 +01:00
rasmusvt
decb69a599 Fix find_wavelength 2022-03-16 15:21:05 +01:00
rasmusvt
0e4f14a6e3 Add translation functions 2022-03-16 14:16:41 +01:00
rasmusvt
f2902aed3a Speed up replotting and add xlim slider 2022-03-15 21:17:15 +01:00
rasmusvt
ebb98debff Open beamline data straight from raw file 2022-03-15 17:13:17 +01:00
rasmusvt
1126809c5a Plot multiple simulated diffractograms 2022-03-15 15:51:39 +01:00
rasmusvt
545c8212c5 Add indices function to xrd plot 2022-03-13 13:58:28 +01:00
rasmusvt
e6c48c1e54 Speed up reflection table plotting 2022-03-12 22:50:09 +01:00
rasmusvt
a5c845fa54 Add reflection table and interactive mode to xrd 2022-03-12 22:26:06 +01:00
rasmusvt
67ea048380 Generalise prepare_plot and adjust_plot 2022-03-11 17:52:01 +01:00
rasmusvt
d993663c7c Make change to prettify_plot() 2022-03-11 11:58:58 +01:00
rasmusvt
e378e63971 Add updated requirements 2022-03-11 10:00:44 +01:00
rasmusvt
cffc0a8f6a Add aux and plotting functions 2022-03-11 10:00:29 +01:00
rasmusvt
b4a8eb5eec Add plotting of multiple diffractograms 2021-11-10 13:49:10 +01:00
rasmusvt
2b1b9b0d9b Add plot functionality for single diffractograms 2021-11-08 17:24:58 +01:00
rasmusvt
d6734184cc Merge branch 'master' of https://github.uio.no/rasmusvt/beamtime 2021-11-05 20:13:29 +01:00
rasmusvt
b8cd8e81af Add splice_cycles to electrochemistry module 2021-11-05 20:13:22 +01:00
rasmusvt
92fb8988fb Add differentiation between stillscan and 2th scan 2021-10-31 09:58:18 +01:00
rasmusvt
d95b670af7 Corrected some bugs 2021-10-28 16:47:07 +02:00
rasmusvt
acdc9399eb Add read brml functionality 2021-10-27 15:33:56 +02:00
rasmusvt
5c977cc387 Add placeholder functions to xrd/io.py 2021-10-25 08:33:58 +02:00
rasmusvt
65a5497970 test 2021-10-24 14:08:52 +02:00
rasmusvt
efca9edaab test 2021-10-24 13:49:56 +02:00
rasmusvt
ca14345088 Test 2021-10-24 13:10:23 +02:00
rasmusvt
fe477ca569 Merge branch 'master' of github.uio.no:rasmusvt/beamtime 2021-10-23 18:38:26 +02:00
rasmusvt
fa9f641c3f Add XRD functionality 2021-10-23 18:37:21 +02:00
Rasmus Vester Thøgersen
75acc9cfec Update README.md
Add installation instructions and instructions on basic use of electrochemistry module
2021-10-21 18:57:03 +02:00
rasmusvt
0e3e0e8a4f tstsdtdftg 2021-10-21 15:18:08 +02:00
rasmusvt
d19d3f78ac dfgsdfgsdfgasdfgfdeagrfds 2021-10-21 15:12:27 +02:00
rasmusvt
2f8f87c8f0 Update requirements file with --from-history tag 2021-10-21 15:03:41 +02:00
rasmusvt
11a344c985 Add xrd-functions 2021-10-21 14:41:10 +02:00
rasmusvt
df45d1025f Merge branch 'master' of https://github.uio.no/rasmusvt/beamtime 2021-10-21 14:38:02 +02:00
rasmusvt
d2b1c213c4 Update requirements with --no-builds prefix 2021-10-21 14:37:50 +02:00
rasmusvt
0048546767 Merge branch 'master' of github.uio.no:rasmusvt/beamtime 2021-10-21 14:21:27 +02:00
rasmusvt
b66b7d8ea0 Add requirements 2021-10-14 15:10:55 +02:00
rasmusvt
e5bc509a6a Merge branch 'master' of https://github.uio.no/rasmusvt/beamtime 2021-10-14 15:10:41 +02:00
rasmusvt
be7d153a9d Update options 2021-10-14 15:10:37 +02:00
halvorhv
9a2aa7e3ab Merge branch 'master' of https://github.uio.no/rasmusvt/beamtime 2021-10-14 14:18:53 +02:00
halvorhv
3f3486049b working on split_xanes_scan 2021-10-14 14:18:39 +02:00
rasmusvt
43e6ef27c8 Add plot functionality to electrochemistry 2021-10-13 18:06:56 +02:00
rasmusvt
4f255fd9d5 Add BioLogic-functions to io.py 2021-10-12 15:53:48 +02:00
rasmusvt
26bd7d8a15 Update io.py 2021-10-08 14:02:13 +02:00
rasmusvt
4a987808fc Update unit conversion functions 2021-10-07 16:25:08 +02:00
halvorhv
5241776df7 dette er koedd 2021-10-06 15:28:29 +02:00
rasmusvt
ed5504449f Change unit conversion function and add time string conversions 2021-10-05 14:31:32 +02:00
rasmusvt
2e910a2afb Move plot functions to plot.py 2021-10-05 13:07:22 +02:00
rasmusvt
1bf609e318 test 2021-09-15 13:42:14 +02:00
rasmusvt
3179ac46b0 test 2021-09-15 13:20:03 +02:00
rasmusvt
bd722d572b test 2021-09-15 12:53:28 +02:00
rasmusvt
553681fa41 test 2021-09-15 12:32:28 +02:00
Rasmus Vester Thøgersen
a189c20604 test 2021-09-15 12:19:41 +02:00
Rasmus Vester Thøgersen
c5caddf8c9 Test again 2021-09-15 09:53:56 +02:00
Rasmus Vester Thøgersen
69fcb5ff87 Remove test.txt 2021-09-15 09:52:45 +02:00
Rasmus Vester Thøgersen
1ab2efad0d Test push from laptop 2021-09-15 09:50:16 +02:00
rasmusvt
994a7d8c54 Add plot-function to ec-module 2021-09-14 16:22:24 +02:00
rasmusvt
795c111807 Merge branch 'master' of https://github.uio.no/rasmusvt/beamtime 2021-09-10 16:53:33 +02:00
rasmusvt
bdf5264bcf Remove uncessary test prints from io.py 2021-09-10 16:53:28 +02:00
rasmusvt
d4b2832697 Add working unit conversion function for electrochemistry module 2021-09-10 16:53:07 +02:00
rasmusvt
e7f3bca376 Clean up __init__.py's 2021-09-10 16:52:27 +02:00
halvorhv
7b02924902 fikifiksi 2021-09-10 16:29:31 +02:00
halvorhv
bd3ca65827 rasmus' splitting fuction 2021-09-10 15:38:27 +02:00
halvorhv
78df31fe4c second 2021-09-10 15:07:43 +02:00
rasmusvt
d6e8589f13 Merge branch 'master' of https://github.uio.no/rasmusvt/beamtime 2021-09-10 15:06:03 +02:00
rasmusvt
ea3eb8c2ab Add unit conversion matrices 2021-09-10 15:05:58 +02:00
halvorhv
6a78d2f270 hm 2021-09-10 15:00:49 +02:00
halvorhv
9bc0eb44b5 testttttt 2021-09-10 14:38:35 +02:00
rasmusvt
86c49af908 Add plot.py to electrochemistry 2021-09-10 14:30:04 +02:00
rasmusvt
7b81b9f004 Clean up 2021-09-10 14:29:35 +02:00
rasmusvt
27f0245183 Remove test-folder 2021-09-10 13:39:05 +02:00
halvorhv
039718207e Test 5 2021-09-10 13:24:57 +02:00
rasmusvt
59c4249b07 Test 3 2021-09-10 13:19:55 +02:00
rasmusvt
d200b25b7a Test 2 2021-09-10 13:17:03 +02:00
Rasmus Vester Thøgersen
322d73a241 Test 2021-09-10 13:16:00 +02:00
Rasmus Vester Thøgersen
aac9491d16 Add io.py 2021-09-10 13:05:38 +02:00
Rasmus Vester Thøgersen
dc2a7ccd59 Add electrochemistry module 2021-09-10 10:51:00 +02:00
Rasmus Vester Thøgersen
7825871631 Test imports of pandas and numpy 2021-09-09 16:07:07 +02:00
Rasmus Vester Thøgersen
b9d810aafd Delete test.py 2021-09-09 16:02:59 +02:00
nanowhale
add466456a calibration script 2021-09-09 15:44:19 +02:00
Rasmus Vester Thøgersen
776d52e1ff Move io.py into right folder 2021-09-09 15:39:00 +02:00
Rasmus Vester Thøgersen
98a390e88e Merge branch 'master' of https://github.uio.no/rasmusvt/beamtime 2021-09-09 15:37:23 +02:00
Rasmus Vester Thøgersen
2c306003bb Add new subfolder to match package structure 2021-09-09 15:36:22 +02:00
nanowhale
977ec020f3 add io function 2021-09-09 15:32:56 +02:00
Rasmus Vester Thøgersen
04c8b05df8 Add subpackage xrd 2021-09-09 14:52:07 +02:00
Rasmus Vester Thøgersen
c1a4db460a Add subpackage xanes 2021-09-09 14:51:50 +02:00
Rasmus Vester Thøgersen
eaabacb34f Add subpackage pdf 2021-09-09 14:51:28 +02:00
Rasmus Vester Thøgersen
b0b5acdb14 Create __init__.py 2021-09-09 14:39:36 +02:00
Rasmus Vester Thøgersen
a897596ca3 Initial commit 2021-09-09 14:37:40 +02:00
54 changed files with 13868 additions and 2 deletions

35
.github/workflows/automated-testing.yml vendored Normal file
View file

@ -0,0 +1,35 @@
name: Automated testing
on: [push]
jobs:
build-linux:
runs-on: ubuntu-latest
strategy:
max-parallel: 5
steps:
- uses: actions/checkout@v3
- name: Set up Python 3.10
uses: actions/setup-python@v3
with:
python-version: 3.10.4
- name: Add conda to system path
run: |
# $CONDA is an environment variable pointing to the root of the miniconda directory
echo $CONDA/bin >> $GITHUB_PATH
- name: Install dependencies
run: |
conda env update --file environment.yml --name base
pip install .
- name: Lint with flake8
run: |
conda install flake8
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: |
conda install pytest
pytest

129
.gitignore vendored Normal file
View file

@ -0,0 +1,129 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/

View file

@ -1,2 +0,0 @@
# nafuma

20
docs/Makefile Normal file
View file

@ -0,0 +1,20 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

9
docs/about.md Normal file
View file

@ -0,0 +1,9 @@
# About
This package contains data processing, analysis and viewing tools written in Python for several different activities related to inorganic materials chemistry conducted in the NAFUMA-group at the University of Oslo. It is written with the intention of creating a reproducible workflow for documentation purposes, with a focus on interactivity in the data exploration process.
As of now (08-04-22), the intention is to include tools for XRD-, XANES- and electrochemistry-analysis, however other modules might be added as well.

57
docs/conf.py Normal file
View file

@ -0,0 +1,57 @@
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'NAFUMA'
copyright = '2022, Rasmus Vester Thøgersen & Halvor Høen Hval'
author = 'Rasmus Vester Thøgersen & Halvor Høen Hval'
# The full version, including alpha/beta/rc tags
release = '0.2'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['myst_parser']
source_suffix = ['.rst', '.md']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_sidebars = {'**': ['globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html']}

22
docs/index.rst Normal file
View file

@ -0,0 +1,22 @@
.. NAFUMA documentation master file, created by
sphinx-quickstart on Fri Apr 8 15:32:14 2022.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to NAFUMA's documentation!
==================================
.. toctree::
:maxdepth: 2
:caption: Contents:
about
installation
modules/modules
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

25
docs/installation.md Normal file
View file

@ -0,0 +1,25 @@
# Installation
This package is not available on any package repositories, but can be installed by cloning the repository from GitHub and installing via ```pip install``` from the root folder:
```
$ git clone git@github.com:rasmusthog/nafuma.git
$ cd nafuma
$ pip install .
```
If you are planning on making changes to the code base, you might want to consider installing it in develop-mode in order for changes to take effect without reinstalling by including the ```-e``` flag:
```
pip install -e .
```
As of now (v0.2, 08-04-22), the installer will not install any dependencies. It is recommended that you use `conda` to create an environment from `environment.yml` in the root folder:
```
$ conda env create --name <your_environment_name_here> --file environment.yml
$ conda activate <your_environment_name_here>
```
(remember to also get rid of <> when substituting your environment name).
This should get you up and running!

35
docs/make.bat Normal file
View file

@ -0,0 +1,35 @@
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.https://www.sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
:end
popd

View file

@ -0,0 +1,3 @@
# Electrochemistry
This is a placeholder

12
docs/modules/modules.rst Normal file
View file

@ -0,0 +1,12 @@
Modules
==================================
.. toctree::
:maxdepth: 1
:caption: Contents
xrd.md
xanes.md
electrochemistry.md

1
docs/modules/xanes.md Normal file
View file

@ -0,0 +1 @@
# XANES

130
docs/modules/xrd.md Normal file
View file

@ -0,0 +1,130 @@
# XRD
This module contains functions to view diffractogram data from several different sources. Some features include:
- Allows the user to plot the data in wavelength independent parameters (d, 1/d, q, q{math}`^2`, q{math}`^4`), or translated to CuK{math}`\alpha` or MoK{math}`\alpha` allowing comparison between diffractograms obtained with different wavelengths
- Plotting in interactive mode within Jupyter Notebook using the `ipywidgets`-package allowing real-time change of (certain) parameters
- Plotting reflection ticks and/or reflection indices from multiple simulated reflection tables (generated by VESTA) for comparison
- Plotting series of diffractograms in stacked mode (including ability to rotate the view for a 3D-view) or as a heatmap
## 1 Compatible file formats
The module is partially built as a wrapper around [pyFAI](https://github.com/silx-kit/pyFAI) (Fast Azimuthal Integrator) developed at the ESRF for integrating 2D diffractograms from the detectors they have. Given a suitable calibration file (`.poni`), the XRD-module will automatically integrate any file pyFAI can integrate. Upon running in interactive mode, the integration is only done once, but it is advised to perform integration of many diffractograms in a separate processing step and saving the results as `.xy`-files, as the integration will run again each time the function is called.
In addition to this, it can also read the `.brml`-files produced by Bruker-instruments in the RECX-lab at the University of Oslo.
## 2 Basic usage
Plotting diffractograms is done by calling the `xrd.plot.plot_diffractogram()`-function, which takes two dictionaries as arguments: `data`, containing all data specific information and `options` which allows customisation of a range of different parameters. The `options`-argument is optional, and the function contains a set of default values to make as good a plot as possible to begin with.
**Example #1: Single diffractogram**
```py
import nafuma.xrd as xrd
data = {
'path': 'path/to/data/diffractogram.brml'
}
options = {
'reflections_data': [
{'path': 'reflections_phase_1.txt', 'min_alpha': 0.1, 'reflection_indices': 4, 'label': 'Phase 1', 'text_colour': 'black'},
{'path': 'reflections_phase_2.txt', 'min_alpha': 0.1, 'reflections_indices': 4, 'label': 'Phase 2', 'text_colour': 'red'}
],
'hide_y_ticklabels': True,
'hide_y_ticks': True
}
diff, fig, ax = xrd.plot.plot_diffractogram(data=data, options=options)
```
The return value `diff` is a list containing one `pandas.DataFrame` per diffractogram passed, in the above example only one. `fig` and `ax` are `matplotlib.pyplot.Figure`- and `matplotlib.pyplot.Axes`-objects, respectively.
**Example #2: 2D diffractogram from ESRF requiring integration**
```py
import nafuma.xrd as xrd
data = {
'path': 'path/to/data/2d_diffractogram.edf',
'calibrant': 'path/to/calibrant/calibrant.poni',
'nbins': 3000
}
diff, _ = xrd.plot.plot_diffractogram(data=data, options=options)
```
In this case we did not specify any options and will thus only use default values, and we stored both `fig` and `ax` in the variable `_` as we do not intend to use these.
**Example #3: Plotting with interactive mode**
This can be done within a Jupyter Notebook, and will allow the user to tweak certain parameters in real time instead of having to call the function again every time.
```py
import nafuma.xrd as xrd
data = {
'path': 'path/to/data/diffractogram.brml'
}
options = {
'interactive': True
}
diff, _ = xrd.plot.plot_diffractogram(data=data, options=options)
```
**Example #4: Plotting multiple diffractograms as stacked plots**
Instead of passing just a string, you can pass a list of filenames. These will be plotted sequentially, with offsets if desired (`offset_x` and `offset_y`). The default value of `offset_y` is 1 if fewer than 10 diffractograms have been passed, and 0.1 if 10 or more diffractograms are passed. When plotting series data (e.g. from *in situ* or *operando* measurements), a smaller offset is suitable. Keep in mind that these values only make sense when the diffractograms are normalised (`'normalise': True`) - if not, the default offsets will be way too small to be noticeable.
```py
import nafuma.xrd as xrd
data = {
'path': ['path/to/data/diffractogram_1.brml', 'path/to/data/diffractogram_2.brml']
}
options = {
'offset_y': 0.1,
'offset_x': 0.05,
}
diff, _ = xrd.plot.plot_diffractogram(data=data, options=options)
```
**Example #5: Plotting series data as heatmap**
This differs very little from the above, except that heatmaps are probably nonsense if not used on series data, and that you don't want an offset in heatmaps.
```py
import nafuma.xrd as xrd
list_of_data = ['data_1.brml', 'data_2.brml', ..., 'data_n.brml']
data = {
'path': list_of_data
}
options = {
'heatmap': True
}
diff, _ = xrd.plot.plot_diffractogram(data=data, options=options)
```

16
environment.yml Normal file
View file

@ -0,0 +1,16 @@
name: nafuma
channels:
- diffpy
- defaults
- conda-forge
dependencies:
- ipywidgets
- seaborn
- sympy
- matplotlib
- pytest
- numpy
- pandas
- palettable
- pyfai
prefix: C:\Users\rasmusvt\Anaconda3\envs\nafuma

1
nafuma/__init__.py Normal file
View file

@ -0,0 +1 @@

188
nafuma/auxillary.py Normal file
View file

@ -0,0 +1,188 @@
import json
import numpy as np
import os
import shutil
import time
from datetime import datetime
def update_options(options, default_options, required_options=None):
    ''' Fills in every key of default_options that is missing from options, leaving
    keys the caller already set untouched. Mutates and returns options.'''
    #FIXME This has been updated so that required_options is not needed. But lots of scripts still passes required_options, so for now it is still accepted, but has a default value and remains unused. Needs to go through all scripts to stop passing of this variable to remove it.
    for key, default in default_options.items():
        options.setdefault(key, default)
    return options
def save_options(options, path, ignore=None):
    ''' Saves any options dictionary to a JSON-file in the specified path.
    Keys listed in ignore are replaced by the string 'Removed' in the saved
    copy; the caller's dictionary is never modified.'''
    serialisable = dict(options)
    if ignore:
        ignore_keys = ignore if isinstance(ignore, list) else [ignore]
        for key in ignore_keys:
            serialisable[key] = 'Removed'
    # Create the parent directory if the path has one and it does not exist yet
    parent = os.path.dirname(path)
    if parent and not os.path.isdir(parent):
        os.makedirs(parent)
    with open(path, 'w') as f:
        json.dump(serialisable, f, skipkeys=True, indent=4)
def load_options(path):
    ''' Loads a JSON options file and returns its contents as a dictionary.'''
    with open(path) as f:
        return json.load(f)
def ceil(a, roundto=1):
    ''' Rounds a up to the nearest multiple of roundto (e.g. roundto=0.5 rounds up to halves).'''
    factor = 1 / roundto
    return np.ceil(a * factor) / factor
def floor(a, roundto=1):
    ''' Rounds a down to the nearest multiple of roundto (e.g. roundto=0.5 rounds down to halves).'''
    factor = 1 / roundto
    return np.floor(a * factor) / factor
def write_log(message, options=None):
    ''' Appends a timestamped message to a logfile.

    message: the text to log
    options: optional dict; may contain 'logfile' (path of the log file).
             Defaults to a timestamped filename in the current directory.
    '''
    # BUGFIX: a mutable default argument ({}) was mutated by update_options,
    # so the default dict accumulated a 'logfile' entry across calls.
    if options is None:
        options = {}
    required_options = ['logfile']
    default_options = {
        'logfile': f'{datetime.now().strftime("%Y-%m-%d-%H-%M-%S.log")}'
    }
    options = update_options(options=options, required_options=required_options, default_options=default_options)
    # BUGFIX: only create the parent directory when the path actually has one;
    # os.makedirs('') raises FileNotFoundError for a bare filename (the default).
    log_dir = os.path.dirname(options['logfile'])
    if log_dir and not os.path.isdir(log_dir):
        os.makedirs(log_dir)
    now = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
    message = f'[{now}] {message} \n'
    with open(options['logfile'], 'a') as f:
        f.write(message)
# Collects files from a folder; originally written for .dat-files from xanes-measurements
def get_filenames(path, ext, filter=''):
    ''' Collects all filenames from specified path with a specified extension.

    Input:
    path: path to find all filenames (relative or absolute)
    ext: extension (including ".")
    filter: optional substring the filename must contain'''
    matches = []
    for name in os.listdir(path):
        full_path = os.path.join(path, name)
        if os.path.isfile(full_path) and name.endswith(ext) and filter in name:
            matches.append(full_path)
    return matches
def move_list_element_last(filenames, string):
    ''' Moves every entry of filenames that contains string to the end of the
    list (in place), preserving the relative order of all entries.

    BUGFIX: the original deleted elements from the list while iterating over it
    with enumerate, which skipped the element immediately following each match
    and scrambled the order of the moved entries.
    '''
    matches = [f for f in filenames if string in f]
    for f in matches:
        filenames.remove(f)
    filenames.extend(matches)
    return filenames
def backup_file(filename, backup_dir):
    ''' Copies filename into backup_dir under a name built from the file's
    modification time, its base name (without extension) and a running
    four-digit index counting previous backups of the same file.'''
    # Creates backup-folder if it does not exist
    if not os.path.isdir(backup_dir):
        os.makedirs(backup_dir)
    # BUGFIX: the original used filename.split('.')[0] when building the
    # destination name, which kept directory components (and broke for paths
    # with dots in directory names), so the destination could point into a
    # nonexistent subdirectory of backup_dir. Use basename + splitext instead.
    stem, ext = os.path.splitext(os.path.basename(filename))
    # Previous backups of this file, used for the running index
    prev_backup_files = [file for file in os.listdir(backup_dir) if stem in file]
    creation_time = datetime.strptime(time.ctime(os.path.getmtime(filename)), '%a %b %d %H:%M:%S %Y').strftime("%Y-%m-%d_%H-%M-%S")
    dst_basename = creation_time + '_' + stem + '_' + f'{len(prev_backup_files)}'.zfill(4) + ext
    dst = os.path.join(backup_dir, dst_basename)
    shutil.copy(filename, dst)
def get_unique(full_list):
    ''' Returns the unique entries of full_list, keeping first-occurrence order.
    Works for unhashable entries too (membership test, not a set).'''
    unique = []
    for item in full_list:
        if item not in unique:
            unique.append(item)
    return unique
def swap_values(options: dict, key1, key2):
    ''' Swaps the values stored under key1 and key2 in options, in place.
    key1/key2 may each be a single key or equal-length lists of keys
    (swapped pairwise). Returns the mutated dict.'''
    keys1 = key1 if isinstance(key1, list) else [key1]
    keys2 = key2 if isinstance(key2, list) else [key2]
    assert len(keys1) == len(keys2)
    for a, b in zip(keys1, keys2):
        options[a], options[b] = options[b], options[a]
    return options
def find_neighbours(value, df, colname, start=0, end=-1):
    ''' Finds closest match to a given value in colname of df. If there is an
    exact match, returns the index of this value. Otherwise returns a list
    [lower_index, upper_index] of the nearest neighbours (np.nan when no
    neighbour exists on that side).'''
    # NOTE(review): with the default end=-1 the slice df.iloc[start:-1]
    # excludes the LAST row from the search — confirm this is intentional.
    window = df.iloc[start:end]
    exact = window[window[colname] == value]
    if not exact.empty:
        return exact.index.values[0]
    below = window[window[colname] < value][colname]
    above = window[window[colname] > value][colname]
    lowerneighbour_ind = below.idxmax() if not below.empty else np.nan
    upperneighbour_ind = above.idxmin() if not above.empty else np.nan
    return [lowerneighbour_ind, upperneighbour_ind]
def isnan(value):
    ''' Type-agnostic NaN check: NaN is the only value that compares unequal
    to itself, so this also works for values numpy.isnan would reject.'''
    return value != value

1
nafuma/dft/__init__.py Normal file
View file

@ -0,0 +1 @@
from . import electrons, io, structure, phonons

1228
nafuma/dft/electrons.py Normal file

File diff suppressed because it is too large Load diff

1021
nafuma/dft/io.py Normal file

File diff suppressed because it is too large Load diff

1700
nafuma/dft/phonons.py Normal file

File diff suppressed because it is too large Load diff

935
nafuma/dft/structure.py Normal file
View file

@ -0,0 +1,935 @@
import math
import re
import pandas as pd
import numpy as np
from scipy.optimize import curve_fit
import warnings
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,AutoMinorLocator)
import importlib
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
from mpl_toolkits.axisartist.axislines import Subplot
from cycler import cycler
import itertools
from ase import Atoms
from ase.io.trajectory import Trajectory
from ase import io
from ase.units import kJ
from ase.eos import EquationOfState
import os
import os.path
import nafuma.auxillary as aux
import nafuma.plotting as btp
def read_eos_data(path, options):
    ''' Reads volume and energy data from a energy-volume run and fits the data to an equation of state. Outputs a list with one pandas DataFrame containing the data points from the DFT-calculations,
    one DataFrame containing the fitted curve data points, one dictionary with equilibrium volume, equilibrium energy and bulk modulus in GPa, and the dataset label.

    path: Path to the folder containing the energ.dat and POSCAR files. energ.dat must have columns with configuration, volume and energy separated by whitespace.
    options may contain:
        atoms_per_fu: Number of atoms per formula unit, used to scale the values per formula unit (-1 disables scaling).
        reference: Reference energy subtracted from all energies (defaults to 0).
        eos: Type of equation of state to fit to (same keywords as ASE).

    Returns [None, None, None, label] (with a warning) when the EoS fit fails.
    '''
    required_options = ['atoms_per_fu', 'reference', 'eos']
    default_options = {
        'atoms_per_fu': -1,       # Scaling factor to output energy per f.u.
        'reference': 0,           # Energy reference (typically the lowest energy)
        'eos': 'birchmurnaghan',  # Options: murnaghan, birch, birchmurnaghan, vinet, pouriertarantola
    }
    options = update_options(options=options, required_options=required_options, default_options=default_options)

    # Make paths for the energ.dat and POSCAR files.
    energ_path = os.path.join(path, 'energ.dat')
    poscar_path = os.path.join(path, 'POSCAR')

    # Read POSCAR and calculate the scale factor to give values per formula unit
    at = io.read(poscar_path)
    if options['atoms_per_fu'] == -1:
        scale_factor = 1
    else:
        scale_factor = options['atoms_per_fu'] / len(at)

    # The dataset is labelled by its directory name
    label = os.path.basename(path)

    # Reads the energ.dat file, structures the data into a DataFrame and scales the values.
    dft_df = pd.read_csv(energ_path, delim_whitespace=True, header=None)
    dft_df.columns = ['Configuration', 'Volume', 'Energy']
    dft_df['Energy'] = dft_df['Energy'] * scale_factor
    dft_df['Volume'] = dft_df['Volume'] * scale_factor
    dft_df['Energy'] = dft_df['Energy'] - options['reference']  # reference defaults to 0, so this is a no-op unless provided

    # Fit data to Equation of State using ASE's EquationOfState object.
    eos = EquationOfState(dft_df['Volume'].values, dft_df['Energy'].values, eos=options['eos'])
    try:
        v0, e0, B = eos.fit()
        # BUGFIX/cleanup: call getplotdata() once instead of twice
        plotdata = eos.getplotdata()
        eos_df = pd.DataFrame(data={'Volume': plotdata[4], 'Energy': plotdata[5]})
        equilibrium_constants = {'v0': v0, 'e0': e0, 'B': B / kJ * 1.0e24}  # bulk modulus converted to GPa
        return [dft_df, eos_df, equilibrium_constants, label]
    except Exception:
        # BUGFIX: a bare `except:` would also swallow KeyboardInterrupt/SystemExit
        warnings.warn(f'WARNING: Unable to fit EoS curve for {label}')
        return [None, None, None, label]
def read_eos_datas(path, options):
    ''' Reads EoS datasets (via read_eos_data) from every subdirectory of path,
    or only those listed in options['subset']. Datasets whose fit failed are
    dropped; the rest are optionally sorted by options['sort_by'].'''
    required_options = ['subset', 'sort_by']
    default_options = {
        'subset': None,   # list with directory names of what you want to include
        'sort_by': 'e0',  # sort key ('e0', 'v0', 'B'); falsy disables sorting
    }
    options = update_options(options=options, required_options=required_options, default_options=default_options)

    # Without an explicit subset, take every non-hidden subdirectory of path
    if options['subset']:
        dirs = options['subset']
    else:
        dirs = [d for d in os.listdir(path)
                if os.path.isdir(os.path.join(path, d)) and not d.startswith('.')]

    datas = []
    for d in dirs:
        data = read_eos_data(os.path.join(path, d), options)
        # read_eos_data returns [None, None, None, label] when the fit failed
        if isinstance(data[0], pd.DataFrame):
            datas.append(data)

    if options['sort_by']:
        datas = sort_data(datas, options['sort_by'])
    return datas
def get_summarised_data(path, options):
    ''' Builds a DataFrame summarising every dataset under path: label,
    equilibrium energy E0, energy above the most stable structure dE0,
    equilibrium volume V0 and bulk modulus B.'''
    datas = read_eos_datas(path=path, options=options)
    rows = [[data[3], data[2]['e0'], data[2]['v0'], data[2]['B']] for data in datas]
    df = pd.DataFrame(rows, columns=['Label', 'E0', 'V0', 'B'])
    # Energy relative to the lowest-energy structure
    df['dE0'] = df['E0'] - df['E0'].min()
    return df[['Label', 'E0', 'dE0', 'V0', 'B']]
def plot_eos_data(path, options):
    ''' Plots the data from the energy-volume curve runs. Allows plotting of the energy-volume curves ('EoScurve') or a bar plot of the equilibrium energies ('EoSbars').

    path: path to a directory with one subdirectory per structure, each containing an energ.dat and a POSCAR file.
    options of note:
        plot_kind: 'EoScurve' or 'EoSbars'
        highlight: list of dataset names (or display labels) whose bars should be highlighted. Only relevant to EoSbars.
        reference: reference energy; when non-zero, bars are drawn relative (sunk by 100).
        ylim: y-limits; datasets whose minimum energy exceeds ylim[1] are skipped.
        label_map / legend_map: dict mapping directory names to display labels (either key accepted; raw names used as fallback).

    Returns (datas, fig, ax).'''
    # FIXME A lot of refactoring required to tidy this up
    required_options = ['plot_kind', 'highlight',
                        'reference',
                        'eos', 'sort_by',
                        'colours',
                        'xlabel', 'ylabel',
                        'xunit', 'yunit',
                        'palettes',
                        'markers',
                        'ylim',
                        'legend_map',
                        'rc_params',
                        'legend']
    default_options = {
        'plot_kind': 'EoScurve',  # EoScurve or EoSbars
        'highlight': None,        # list with directory names of which bars to highlight. Only relevant to EoSbars
        'reference': 0,           # Whether the energy should be relative to some reference energy (typically lowest energy)
        'eos': 'birchmurnaghan',  # murnaghan, birch, birchmurnaghan, vinet, pouriertarantola
        'sort_by': 'e0',          # sort key; affects bar order and legend order
        'colours': None,
        'xlabel': 'Volume', 'ylabel': 'Energy',
        'xunit': 'Å$^3$', 'yunit': 'eV',
        'palettes': [('qualitative', 'Dark2_8'), ('qualitative', 'Paired_12')],  # palettable colour cycles
        'markers': ('o', '*', '^', 'v', 'd', 'H', '8', '>', 'P', 'X'),           # marker styles for the EoScurve plot
        'ylim': None,             # y-limits (list)
        'legend': True,
        'legend_map': None,       # dict mapping folder names to legend entries
        'rc_params': None         # dictionary of run commands to update plot style
    }
    options = update_options(options=options, required_options=required_options, default_options=default_options)

    # BUGFIX: the body read options['label_map'] while the defaults only define
    # 'legend_map', raising KeyError unless the caller happened to pass
    # 'label_map'. Accept either key and fall back to the raw directory name.
    label_map = options.get('label_map') or options.get('legend_map') or {}

    datas = read_eos_datas(path=path, options=options)

    ### PLOT THE ENERGY-VOLUME CURVES
    if options['plot_kind'] == 'EoScurve':
        fig, ax = btp.prepare_plot(options=options)

        # Cyclic iterable of markers for the calculated data points
        marker_cycle = itertools.cycle(options['markers'])

        # Concatenate all colours from the requested palettable palettes and cycle them
        colour_collection = []
        for cycle in options['palettes']:
            mod = importlib.import_module("palettable.colorbrewer.%s" % cycle[0])
            colour_collection += getattr(mod, cycle[1]).mpl_colors
        colour_cycle = itertools.cycle(colour_collection)

        labels, colours, markers = [], [], []
        for data in datas:
            dft_df, eos_df, label = data[0], data[1], data[3]
            # If ylim is passed, only plot curves whose minimum lies below the upper limit
            plot = dft_df['Energy'].min() < options['ylim'][1] if options['ylim'] else True
            if plot:
                labels.append(label_map.get(label, label))
                colours.append(next(colour_cycle))
                markers.append(next(marker_cycle))
                dft_df.plot.scatter(x=1, y=2, ax=ax, marker=markers[-1], color=colours[-1], s=20)
                eos_df.plot(x=0, y=1, ax=ax, color=colours[-1], label='_', ls='--')

        options['labels'] = labels
        if options['legend']:
            options['legend_content'] = [labels, colours, markers]

    ### PLOT THE BAR PLOTS
    elif options['plot_kind'] == 'EoSbars':
        fig, ax = btp.prepare_plot(options=options)

        # First colour of each palette: [0] = highlighted bars, [1] = normal bars
        # (NB! These colours are not passed as arguments, but could be in future)
        bar_colours = []
        for cycle in options['palettes']:
            mod = importlib.import_module("palettable.colorbrewer.%s" % cycle[0])
            bar_colours.append(getattr(mod, cycle[1]).mpl_colors[0])

        e0, labels, colours = [], [], []
        for data in datas:
            plot = data[2]['e0'] < options['ylim'][1] if options['ylim'] else True
            if plot:
                # BUGFIX: was `100 if options['reference'] != 0 else 100` — both
                # branches identical. Only raise the bars by 100 in relative mode,
                # matching the `bottom = -100 ... else 0` below. Debug prints removed.
                adjustment = 100 if options['reference'] != 0 else 0
                e0.append(data[2]['e0'] + adjustment)
                labels.append(label_map.get(data[3], data[3]))
                if options['highlight'] is not None:
                    # BUGFIX: the original second `elif ... type(options['highlight'][0] == str)`
                    # branch was unreachable (first branch already matched `is not None`)
                    # and its condition was always truthy. Highlight the bar when either
                    # its directory name or its display label is listed.
                    if data[3] in options['highlight'] or labels[-1] in options['highlight']:
                        colours.append(bar_colours[0])
                    else:
                        colours.append(bar_colours[1])
                else:
                    colours.append(bar_colours[0])

        # Bars are sunk by 100 in relative mode so the adjustment above cancels out
        bottom = -100 if options['reference'] != 0 else 0
        plt.bar(range(len(e0)), e0, color=colours, bottom=bottom)
        plt.xticks(range(len(e0)), labels, rotation=90)

    fig, ax = btp.adjust_plot(fig=fig, ax=ax, options=options)
    return datas, fig, ax
def sort_data(datas, sort_by='e0'):
    ''' Sorts the datasets in place in ascending order of the equilibrium
    constant sort_by ('e0', 'v0' or 'B'), stored in each dataset's third
    element. Stable, like the original bubble sort. Returns the same list.'''
    datas.sort(key=lambda data: data[2][sort_by])
    return datas
def prepare_plot(options={}):
    ''' Creates a figure and axes sized for publication-style columns.

    Resets matplotlib's run commands, applies any caller-supplied
    'rc_params', fills in sizing defaults, and builds the figure from the
    resulting width/height (in inches) and dpi.
    '''
    # Start from a clean matplotlib configuration
    plt.rcdefaults()

    # Apply caller overrides of run commands, if any were given
    if 'rc_params' in options:
        update_rc_params(options['rc_params'])

    default_options = {
        'single_column_width': 8.3,
        'double_column_width': 17.1,
        'column_type': 'single',
        'width_ratio': '1:1',
        'aspect_ratio': '1:1',
        'compress_width': 1,
        'compress_height': 1,
        'upscaling_factor': 1.0,
        'dpi': 600}

    required_options = ['single_column_width', 'double_column_width', 'column_type', 'width_ratio', 'aspect_ratio', 'compress_width', 'compress_height', 'upscaling_factor', 'dpi']

    options = update_options(options, required_options, default_options)

    # Figure dimensions: width from the column layout, height from the
    # aspect ratio, then global scaling/compression factors applied.
    width = determine_width(options)
    height = determine_height(options, width)
    width, height = scale_figure(options=options, width=width, height=height)

    return plt.subplots(figsize=(width, height), dpi=options['dpi'])
def update_rc_params(rc_params):
    ''' Applies the given run-command parameters to matplotlib's global configuration.

    A falsy argument (None or an empty dict) is a no-op.
    '''
    # rcParams.update() accepts a whole mapping at once, so no per-key loop is needed.
    if rc_params:
        plt.rcParams.update(rc_params)
def update_options(options, required_options, default_options):
    ''' Fills in the default value for every required option the caller did not supply.

    Mutates and returns the same dict; caller-supplied values always win.
    '''
    for key in required_options:
        options.setdefault(key, default_options[key])
    return options
def determine_width(options):
    ''' Computes the figure width in inches from the column layout options.

    Args:
        options: dict with 'column_type' ('single' or 'double'),
            'single_column_width'/'double_column_width' (in cm), and
            'width_ratio' (string 'a:b').

    Returns:
        Width in inches.

    Raises:
        ValueError: if 'column_type' is neither 'single' nor 'double'
            (previously this surfaced as a confusing NameError).
    '''
    conversion_cm_inch = 0.3937008 # cm to inch

    if options['column_type'] == 'single':
        column_width = options['single_column_width']
    elif options['column_type'] == 'double':
        column_width = options['double_column_width']
    else:
        raise ValueError("column_type must be 'single' or 'double', got {!r}".format(options['column_type']))

    column_width *= conversion_cm_inch

    width_ratio = [float(num) for num in options['width_ratio'].split(':')]
    width = column_width * width_ratio[0]/width_ratio[1]

    return width
def determine_height(options, width):
    ''' Derives the figure height from the width and the 'aspect_ratio' option (string 'a:b'). '''
    ratio = [float(part) for part in options['aspect_ratio'].split(':')]
    return width / (ratio[0] / ratio[1])
def scale_figure(options, width, height):
    ''' Applies the global upscaling factor plus per-axis compression to the figure size. '''
    factor = options['upscaling_factor']
    scaled_width = width * factor * options['compress_width']
    scaled_height = height * factor * options['compress_height']
    return scaled_width, scaled_height
def prepare_plot_old(width=None, height=None, dpi=None, energyunit='eV', volumeunit=r'Å$^3$', mode='curves', width_ratio=[1, 1], square=True, pad_bottom=None, scale=1, format_params=None):
    '''Prepares pyplot figure and axes objects. (Deprecated: superseded by prepare_plot().)

    Args:
        width, height: figure size in inches; width defaults to 20 and height
            to the width (square=True) or width * golden ratio otherwise.
        dpi: figure resolution.
        mode: 'curves', 'bars' (one axes) or 'both' (two axes side by side).
        width_ratio: relative widths of the two axes in 'both' mode.
        scale: global scaling factor for line/axes widths.
        energyunit, volumeunit, pad_bottom, format_params: unused here; kept
            for signature compatibility with existing callers.

    Raises:
        ValueError: for an unknown mode (previously this fell through to an
            UnboundLocalError on the return statement).
    '''
    linewidth = 3*scale
    axeswidth = 3*scale

    plt.rc('lines', linewidth=linewidth)
    plt.rc('axes', linewidth=axeswidth)

    if not width:
        width = 20

    if square:
        # Square figures always mirror the width, even if a height was passed
        height = width
    elif not height:
        golden_ratio = (math.sqrt(5) - 1) / 2
        height = width*golden_ratio

    if mode in ('curves', 'bars'):
        # The two single-axes modes used identical duplicated branches; merged.
        fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(width, height), facecolor='w', dpi=dpi)
    elif mode == 'both':
        fig, ax = plt.subplots(1, 2, figsize=(width, height), gridspec_kw={'width_ratios': width_ratio})
    else:
        raise ValueError("mode must be 'curves', 'bars' or 'both', got {!r}".format(mode))

    return fig, ax
def prettify_plot(fig, ax, options):
    '''Applies labels, tick locators, legend and axis limits to an EoS figure.

    Args:
        fig, ax: matplotlib figure and axes as returned by prepare_plot().
        options: see default_options below. 'legend' toggles drawing the
            legend; it previously had no default and raised KeyError when the
            caller did not pass it.

    Returns:
        (fig, ax), modified in place.
    '''
    required_options = ['plot_kind', 'hide_x_labels', 'hide_y_labels', 'xunit', 'yunit', 'legend', 'legend_content', 'legend_position', 'x_tick_locators', 'y_tick_locators', 'tick_directions', 'subplots_adjust', 'xlim', 'ylim']

    default_options = {
        'plot_kind': 'EoScurve', # EoScurve or EoSbars
        'hide_x_labels': False, # Whether x labels should be hidden
        'hide_y_labels': False, # whether y labels should be hidden
        'xunit': r'Å$^3$', # The unit of the x-values in the curve plot
        'yunit': r'eV f.u.$^{-1}$', # The unit of the y-values in the curve and bar plots
        'xlim': None,
        'ylim': None,
        'legend': False, # Whether to draw the legend (needs legend_content); previously missing, causing KeyError
        'legend_content': None,
        'legend_position': ['upper center', (1.10, 0.90)], # the position of the legend passed as arguments to loc and bbox_to_anchor respectively
        'x_tick_locators': [10, 5], # Major and minor tick locators
        'y_tick_locators': [.1, .05], # Major and minor tick locators
        'tick_directions': 'in', # in or out
        'subplots_adjust': [0.1, 0.1, 0.9, 0.9]
    }

    options = update_options(options=options, required_options=required_options, default_options=default_options)

    if options['plot_kind'] == 'EoScurve':

        # Set labels on x- and y-axes
        ax.set_xlabel('Volume [{}]'.format(options['xunit']))

        if not options['hide_y_labels']:
            ax.set_ylabel('Energy [{}]'.format(options['yunit']))
        else:
            ax.set_ylabel('')
            ax.tick_params(labelleft=False)

        ax.xaxis.set_major_locator(MultipleLocator(options['x_tick_locators'][0]))
        ax.xaxis.set_minor_locator(MultipleLocator(options['x_tick_locators'][1]))
        ax.yaxis.set_major_locator(MultipleLocator(options['y_tick_locators'][0]))
        ax.yaxis.set_minor_locator(MultipleLocator(options['y_tick_locators'][1]))

        # Remove any auto-generated legend before (optionally) drawing our own
        if ax.get_legend():
            ax.get_legend().remove()

        # Guarding on legend_content prevents a TypeError when the legend is
        # requested without any content to draw.
        if options['legend'] and options['legend_content'] is not None:
            labels = options['legend_content'][0]
            colours = options['legend_content'][1]
            markers = options['legend_content'][2]

            entries = []
            for i in range(len(options['legend_content'][0])):
                entries.append(mlines.Line2D([], [], label=labels[i], color=colours[i], marker=markers[i], linestyle='None'))

            fig.legend(handles=entries, loc=options['legend_position'][0], bbox_to_anchor=options['legend_position'][1], frameon=False)

    if options['plot_kind'] == 'EoSbars':
        if not options['hide_y_labels']:
            ax.set_ylabel('Energy [{}]'.format(options['yunit']))

        ax.yaxis.set_major_locator(MultipleLocator(options['y_tick_locators'][0]))
        ax.yaxis.set_minor_locator(MultipleLocator(options['y_tick_locators'][1]))

        ax.tick_params(axis='x', which='minor', bottom=False, top=False)

    # Adjust where the axes start within the figure. Default value is 10% in from the left and bottom edges. Used to make room for the plot within the figure size (to avoid using bbox_inches='tight' in the savefig-command, as this screws with plot dimensions)
    plt.subplots_adjust(left=options['subplots_adjust'][0], bottom=options['subplots_adjust'][1], right=options['subplots_adjust'][2], top=options['subplots_adjust'][3])

    # If limits for x- and y-axes is passed, sets these.
    if options['xlim'] is not None:
        ax.set_xlim(options['xlim'])

    if options['ylim'] is not None:
        ax.set_ylim(options['ylim'])

    return fig, ax
def prettify_plot_old(fig, ax, energyunit='eV', volumeunit=r'Å$^3$', mode='curves', legend_content=None, pad_bottom=None, scale=1, hide_ylabels=False, xpad=None, ypad=None):
    '''Styles an EoS figure. (Deprecated: superseded by prettify_plot().)

    Args:
        fig, ax: matplotlib figure and axes; in mode='both' ax is a pair.
        energyunit, volumeunit: unit strings for the axis labels.
        mode: 'curves', 'bars' or 'both'.
        legend_content: [labels, colours, markers] lists for a custom legend.
        pad_bottom: extra bottom padding via an invisible host axes.
        scale: global scaling factor for all sizes.
        hide_ylabels: hides the y-axis label and tick labels.
        xpad, ypad: tick/label padding; default to 4.

    Returns:
        (fig, ax), modified in place.
    '''
    # Set sizes of ticks, labels etc. (unused locals removed)
    ticksize = 30*scale
    labelsize = 30*scale
    legendsize = 15*scale
    axeswidth = 3*scale
    markersize = 15*scale
    majorticklength = 20*scale
    minorticklength = 10*scale

    xpad = 4 if not xpad else xpad
    ypad = 4 if not ypad else ypad

    if mode == 'curves':
        # Set labels on x- and y-axes
        ax.set_xlabel('Volume [{}]'.format(volumeunit), size=labelsize, labelpad=xpad)

        if not hide_ylabels:
            ax.set_ylabel('Energy [{}]'.format(energyunit), size=labelsize, labelpad=ypad)
        else:
            ax.set_ylabel('')

        # Set tick parameters
        ax.tick_params(axis='both', direction='in', which='major', length=majorticklength, width=axeswidth, right=True, top=True, labelsize=ticksize)
        ax.tick_params(axis='both', direction='in', which='minor', length=minorticklength, width=axeswidth, right=True, top=True, labelsize=ticksize)
        ax.tick_params(axis='x', pad=xpad)
        ax.tick_params(axis='y', pad=ypad)

        if hide_ylabels:
            ax.tick_params(labelleft=False)

        plt.xticks(fontsize=ticksize)
        plt.yticks(fontsize=ticksize)

        ax.xaxis.set_major_locator(MultipleLocator(10))
        ax.xaxis.set_minor_locator(MultipleLocator(5))
        ax.yaxis.set_major_locator(MultipleLocator(.1))
        ax.yaxis.set_minor_locator(MultipleLocator(.05))

        # Bug fix: remove() used to be called unconditionally and raised
        # AttributeError when the axes had no legend.
        if ax.get_legend():
            ax.get_legend().remove()

        if legend_content:
            labels = legend_content[0]
            colours = legend_content[1]
            markers = legend_content[2]

            entries = []
            for ind, label in enumerate(legend_content[0]):
                entries.append(mlines.Line2D([], [], color=colours[ind], marker=markers[ind], linestyle='None',
                                             markersize=markersize, label=labels[ind]))

            fig.legend(handles=entries, loc='upper center', bbox_to_anchor=(1.10, 0.90), fontsize=legendsize, frameon=False)

        if pad_bottom is not None:
            # Invisible full-figure axes used purely to pad the bottom edge
            bigax = fig.add_subplot(111)
            bigax.set_facecolor([1,1,1,0])
            bigax.spines['top'].set_visible(False)
            bigax.spines['bottom'].set_visible(True)
            bigax.spines['left'].set_visible(False)
            bigax.spines['right'].set_visible(False)
            bigax.tick_params(labelcolor='w', color='w', direction='in', top=False, bottom=True, left=False, right=False, labelleft=False, pad=pad_bottom)

    if mode == 'bars':
        ax.tick_params(axis='both', direction='in', which='major', length=majorticklength, width=axeswidth, right=True, top=True)
        ax.tick_params(axis='both', direction='in', which='minor', length=minorticklength, width=axeswidth, right=True, top=True)

        if not hide_ylabels:
            ax.set_ylabel('Energy [{}]'.format(energyunit), size=labelsize, labelpad=ypad)

        ax.yaxis.set_major_locator(MultipleLocator(.1))
        ax.yaxis.set_minor_locator(MultipleLocator(.05))

        ax.tick_params(axis='x', pad=xpad)
        ax.tick_params(axis='y', pad=ypad)

        plt.xticks(fontsize=ticksize)
        plt.yticks(fontsize=ticksize)

        if pad_bottom is not None:
            # Invisible full-figure axes used purely to pad the bottom edge
            bigax = fig.add_subplot(111)
            bigax.set_facecolor([1,1,1,0])
            bigax.spines['top'].set_visible(False)
            bigax.spines['bottom'].set_visible(True)
            bigax.spines['left'].set_visible(False)
            bigax.spines['right'].set_visible(False)
            bigax.tick_params(labelcolor='w', color='w', direction='in', top=False, bottom=True, left=False, right=False, labelleft=False, pad=pad_bottom)

    if mode == 'both':
        # Set labels on x- and y-axes
        ax[0].set_xlabel('Volume [{}]'.format(volumeunit), size=labelsize, labelpad=xpad)
        ax[0].set_ylabel('Energy [{}]'.format(energyunit), size=labelsize, labelpad=ypad)

        # Set tick parameters
        ax[0].tick_params(axis='both', direction='in', which='major', length=majorticklength, width=axeswidth, right=True, left=True, top=True, labelsize=ticksize)
        ax[0].tick_params(axis='both', direction='in', which='minor', length=minorticklength, width=axeswidth, right=True, left=True, top=True, labelsize=ticksize)
        ax[0].tick_params(axis='x', pad=xpad)
        ax[0].tick_params(axis='y', pad=ypad)

        ax[0].xaxis.set_major_locator(MultipleLocator(10))
        ax[0].xaxis.set_minor_locator(MultipleLocator(5))
        ax[0].yaxis.set_major_locator(MultipleLocator(.1))
        ax[0].yaxis.set_minor_locator(MultipleLocator(.05))

        plt.xticks(fontsize=ticksize)
        plt.yticks(fontsize=ticksize)

        ax[1].yaxis.set_major_locator(MultipleLocator(.2))
        ax[1].yaxis.set_minor_locator(MultipleLocator(.1))

        ax[1].yaxis.set_label_position('right')
        ax[1].yaxis.tick_right()
        # Bug fix: set_ylabel() has no `ypad` keyword (it raised TypeError);
        # labelpad is the correct argument, as used everywhere else above.
        ax[1].set_ylabel('Energy [{}]'.format(energyunit), size=labelsize, labelpad=ypad)

        ax[1].tick_params(axis='both', direction='in', which='major', length=majorticklength, width=axeswidth, left=True, right=True, top=True)
        ax[1].tick_params(axis='both', direction='in', which='minor', length=minorticklength, width=axeswidth, left=True, right=True, top=True)
        ax[1].tick_params(axis='x', pad=xpad)
        ax[1].tick_params(axis='y', pad=ypad)

        plt.xticks(fontsize=ticksize)
        plt.yticks(fontsize=ticksize)

    return fig, ax
def parabola(V, a, b, c):
    """Second-order polynomial in V, used to seed the equation-of-state fits.

    A quadratic is deliberate: a 4th-order fit proved too wiggly on noisy
    data, while 2nd order guarantees a single minimum."""
    return a * V ** 2 + b * V + c
def murnaghan(V, E0, V0, B0, BP):
    'Murnaghan equation of state, from PRB 28, 5480 (1983)'
    ratio = V0 / V
    return E0 + (B0 * V / BP) * (ratio ** BP / (BP - 1) + 1) - V0 * B0 / (BP - 1)
def birch(V, E0, V0, B0, BP):
    """
    Birch equation of state (the n = 0 case).

    From Intermetallic compounds: Principles and Practice, Vol. I: Principles,
    Chapter 9 pages 195-210 by M. Mehl, B. Klein, D. Papaconstantopoulos
    (paper downloaded from the Web).
    """
    # Eulerian finite strain term; zero at V = V0
    strain = (V0 / V) ** (2 / 3) - 1
    return E0 + 9 / 8 * B0 * V0 * strain ** 2 + 9 / 16 * B0 * V0 * (BP - 4) * strain ** 3
def birchmurnaghan(V, E0, V0, B0, BP):
    """
    Birch-Murnaghan equation from PRB 70, 224107.

    Eq. (3) in the paper; note that the paper contains a typo and uses the
    inverted expression for eta.
    """
    eta = (V0 / V) ** (1 / 3)
    x = eta ** 2 - 1
    return E0 + 9 * B0 * V0 / 16 * x ** 2 * (6 + BP * x - 4 * eta ** 2)
def vinet(V, E0, V0, B0, BP):
    'Vinet equation from PRB 70, 224107'
    eta = (V / V0) ** (1 / 3)
    prefactor = 2 * B0 * V0 / (BP - 1) ** 2
    # Exponentially damped strain term; vanishes at V = V0 (eta = 1)
    inner = (5 + 3 * BP * (eta - 1) - 3 * eta) * np.exp(-3 * (BP - 1) * (eta - 1) / 2)
    return E0 + prefactor * (2 - inner)
def pouriertarantola(V, E0, V0, B0, BP):
    'Poirier-Tarantola (logarithmic) equation from PRB 70, 224107'
    eta = (V / V0) ** (1 / 3)
    squiggle = -3 * np.log(eta)
    return E0 + B0 * V0 * squiggle ** 2 / 6 * (3 + squiggle * (BP - 2))
def get_initial_guesses(volume, energy):
    ''' Produces starting values [e0, v0, b0, bp] for the equation-of-state fit
    from a quadratic fit of energy vs. volume. '''
    a, b, c = np.polyfit(volume, energy, deg=2)

    # dE/dV = 2aV + b = 0  =>  V0 = -b / 2a
    v0 = -b / (2 * a)

    # E0: the quadratic evaluated at its minimum V0 (parabola() inlined here)
    e0 = (a * v0 ** 2) + (b * v0) + c

    # B0 ~ V0 * d^2E/dV^2, with d^2E/dV^2 = 2a
    b0 = 2 * a * v0

    # Just a reasonable starting value for B'
    bp = 4

    return [e0, v0, b0, bp]
def fit_eos_curve(volume, energy, p0, eos):
    ''' Fits the named equation of state to the (volume, energy) data.

    Args:
        volume, energy: data arrays.
        p0: initial parameter guesses [e0, v0, b0, bp].
        eos: one of 'murnaghan', 'birch', 'birchmurnaghan', 'vinet',
            'pouriertarantola'.

    Returns:
        The optimised parameters [E0, V0, B0, BP].
    '''
    eos_functions = {
        'murnaghan': murnaghan,
        'birch': birch,
        'birchmurnaghan': birchmurnaghan,
        'vinet': vinet,
        'pouriertarantola': pouriertarantola,
    }

    popt, _ = curve_fit(eos_functions[eos], volume, energy, p0)
    return [popt[0], popt[1], popt[2], popt[3]]
def get_plotdata(volume, energy, equilibrium_values, eos):
    ''' Evaluates the fitted equation of state on a dense volume grid for plotting.

    Args:
        volume: array of volumes; only min/max are used to set the grid range.
        energy: unused here; kept for signature compatibility with callers.
        equilibrium_values: fitted [E0, V0, B0, BP] from fit_eos_curve().
        eos: name of the equation of state to evaluate.

    Returns:
        (E, V): energies evaluated on a 100-point volume grid, and the grid.
    '''
    eos_dict = {'murnaghan': murnaghan, 'birch': birch, 'birchmurnaghan': birchmurnaghan, 'vinet': vinet, 'pouriertarantola': pouriertarantola}

    V = np.linspace(volume.min(), volume.max(), 100)
    E0, V0, B0, BP = equilibrium_values[0], equilibrium_values[1], equilibrium_values[2], equilibrium_values[3]

    # Debug print statements removed; they polluted stdout on every call.
    func = eos_dict[eos]
    E = func(V, E0, V0, B0, BP)

    return E, V
def get_atoms(poscar):
    ''' Parses the species line (line 6) and counts line (line 7) of a VASP
    POSCAR file.

    Returns:
        (atoms, atom_num, atoms_dict): species symbols, their counts, and a
        symbol -> count mapping.
    '''
    with open(poscar, 'r') as f:
        lines = f.readlines()

    atoms = lines[5].split()
    atom_num = [int(count) for count in lines[6].split()]
    atoms_dict = dict(zip(atoms, atom_num))

    return atoms, atom_num, atoms_dict
def get_equilibrium_data(path, atoms_per_formula_unit, eos=None):
    ''' Fits an equation of state to each subdirectory's energy-volume data and
    tabulates the equilibrium values per formula unit.

    Each subdirectory of `path` must contain a POSCAR and an energ.dat file
    (whitespace-separated columns: index, volume, energy).

    Args:
        path: directory whose subdirectories each hold one data set.
        atoms_per_formula_unit: normalises volume/energy per formula unit.
        eos: equation-of-state name; defaults to 'murnaghan'.

    Returns:
        DataFrame with columns ['Label', 'E0', 'dE', 'V0', 'B0', 'Bp'] sorted
        by E0; dE is the energy above the lowest E0.
    '''
    if not eos:
        eos = 'murnaghan'

    dirs = [os.path.join(path, dir) for dir in os.listdir(path)]

    data = []
    for dir in dirs:
        atoms, atom_num, atoms_dict = get_atoms(os.path.join(dir, 'POSCAR'))
        scaling_factor = sum(atom_num) / atoms_per_formula_unit
        label = os.path.basename(dir)

        dft_df = pd.read_csv(os.path.join(dir, 'energ.dat'), header=None, delim_whitespace=True, index_col=0)
        dft_df.reset_index(drop=True, inplace=True)
        dft_df.columns = ['Volume', 'Energy']

        volume = dft_df["Volume"].to_numpy() / scaling_factor
        energy = dft_df["Energy"].to_numpy() / scaling_factor

        p0 = get_initial_guesses(volume, energy)

        try:
            equilibrium_constants = fit_eos_curve(volume, energy, p0, eos)
            e0, v0, b0, bp = equilibrium_constants[0], equilibrium_constants[1], equilibrium_constants[2], equilibrium_constants[3]
            # NOTE(review): `kJ` is assumed to come from an earlier import
            # (e.g. ase.units) — confirm; converts B0 to GPa-like units.
            data.append([label, e0, v0, b0/kJ*1e24, bp])
        except Exception:
            # A failed fit (e.g. curve_fit not converging) records an empty
            # row instead of aborting the whole tabulation. Was a bare
            # `except:`, which also swallowed KeyboardInterrupt/SystemExit.
            data.append([label, None, None, None, None])

    df = pd.DataFrame(data)
    df.columns = ['Label', 'E0', 'V0', 'B0', 'Bp']
    df.sort_values(by='E0', ascending=True, inplace=True)
    df.reset_index(inplace=True)

    # Energies relative to the most stable structure
    E_min = df['E0'].min()
    df['dE'] = df['E0'] - E_min

    # Reorder columns (also drops the 'index' column left by reset_index)
    df = df[['Label', 'E0', 'dE', 'V0', 'B0', 'Bp']]

    return df

1
nafuma/eds/__init__.py Normal file
View file

@ -0,0 +1 @@
from . import io, plot

152
nafuma/eds/io.py Normal file
View file

@ -0,0 +1,152 @@
from PIL import Image
import numpy as np
import cv2
import pandas as pd
def read_image(path, weight=None, colour=None, crop=None, resize=None, brightness=None):
    ''' Loads an image and optionally recolours, brightens, crops, resizes and
    weights it, in that order.

    Args:
        path: image file readable by PIL.
        weight: multiplicative factor for the RGB channels (scale_image).
        colour: target RGB colour passed to change_colour().
        crop: crop factor passed to crop_image().
        resize: resize factor passed to resize_image().
        brightness: additive brightness passed to increase_brightness().

    Returns:
        The processed image as a numpy array.
    '''
    img = np.array(Image.open(path))

    if colour is not None:
        img = change_colour(img, colour)

    if brightness is not None:
        # Bug fix: was `increase=brightness`, but increase_brightness() has no
        # parameter named `increase`, so any brightness value raised TypeError.
        img = increase_brightness(img, brightness=brightness)

    if crop is not None:
        img = crop_image(img, crop)

    if resize is not None:
        img = resize_image(img, resize)

    if weight is not None:
        img = scale_image(img, weight)

    return img
def scale_image(image, factor):
    ''' Multiplies the first three (RGB) channels of the image by `factor`, in place.

    Any further channels (e.g. alpha) are left untouched. Values are cast back
    to the array's dtype on assignment, exactly as the original per-pixel loop
    did.

    Returns:
        The same array, modified in place.
    '''
    # Vectorised: replaces a per-pixel Python loop over every row and column.
    image[:, :, :3] = image[:, :, :3] * factor
    return image
def crop_image(image, factor):
    ''' Keeps the top-left `factor` fraction of the image, then stretches the
    crop back up to the original resolution with bicubic interpolation. '''
    height, width = image.shape[:2]
    cropped = image[:int(height * factor), :int(width * factor)]
    return cv2.resize(cropped, dsize=(width, height), interpolation=cv2.INTER_CUBIC)
def resize_image(image, factor):
    ''' Rescales the image by `factor` in both dimensions using bicubic interpolation. '''
    height, width = image.shape[:2]
    new_size = (int(width * factor), int(height * factor))
    return cv2.resize(image, dsize=new_size, interpolation=cv2.INTER_CUBIC)
def increase_brightness(image, brightness):
    ''' Adds `brightness` to the first three (RGB) channels of the image, in place.

    Values are cast back to the array's dtype on assignment, matching the
    original per-pixel loop's behaviour.

    Returns:
        The same array, modified in place.
    '''
    # Vectorised: replaces a per-pixel Python loop over every row and column.
    image[:, :, :3] = image[:, :, :3] + brightness
    return image
def add_images(image1, image2):
    ''' Returns the per-pixel, per-channel sum of two equally-shaped images.

    The result is a new float64 array (the dtype np.zeros produced in the
    original loop), with the inputs summed as integers so uint8 inputs cannot
    wrap around.
    '''
    assert image1.shape == image2.shape

    # Vectorised: cast to int64 before summing to avoid uint8 overflow (the
    # original cast each channel to Python int), then back to float64 to keep
    # the original output dtype.
    compound_image = (image1.astype(np.int64) + image2.astype(np.int64)).astype(np.float64)

    return compound_image
def get_colour(image):
    ''' Returns the per-channel maximum over all pixels as a length-3 array —
    effectively the base colour of a single-colour EDS element map. '''
    # Vectorised per-channel max over the image plane; replaces a per-pixel
    # Python loop. Only the first three (RGB) channels are considered, as in
    # the original.
    colour = image[:, :, :3].max(axis=(0, 1))
    return np.array(colour)
def change_colour(image, new_colour):
    ''' Recolours the image to `new_colour` in place, preserving each pixel's
    relative intensity (pixel max divided by the image-wide max).

    Returns:
        The same array, modified in place (values cast back to the image
        dtype on assignment, as in the original per-pixel loop).
    '''
    new_colour = np.array(new_colour)
    old_colour = get_colour(image)

    # Per-pixel intensity factor relative to the brightest channel value in
    # the whole image. Vectorised: replaces a per-pixel Python loop.
    factors = image.max(axis=2) / old_colour.max()
    image[:, :, :] = new_colour.astype(float)[np.newaxis, np.newaxis, :] * factors[:, :, np.newaxis]

    return image
def read_spectrum(path):
    ''' Reads an exported EDS spectrum into a DataFrame, skipping the header
    lines above the row that starts with 'Energy'. '''
    header_count = find_start(path)
    return pd.read_csv(path, skiprows=header_count, delim_whitespace=True)
def find_start(path):
    ''' Returns the number of lines preceding the header line (the first line
    starting with 'Energy') in an exported spectrum file.

    Raises:
        ValueError: if no line starts with 'Energy'. Previously this case
        looped forever, because readline() returns '' at EOF, which never
        matches the while condition.
    '''
    with open(path, 'r') as f:
        for i, line in enumerate(f):
            if line.startswith('Energy'):
                return i
    raise ValueError("No line starting with 'Energy' found in {}".format(path))

135
nafuma/eds/plot.py Normal file
View file

@ -0,0 +1,135 @@
import nafuma.auxillary as aux
import nafuma.plotting as btp
import nafuma.eds.io as io
import numpy as np
def show_image(data, options={}):
    ''' Loads one or more EDS element-map images, blends them into a single
    image, and (optionally) displays the result.

    Args:
        data: dict with 'path' (one path or a list of image paths). 'image'
            and 'weights' entries are created if absent; the loaded images are
            stored back into data['image'], and the blended image is appended
            when more than one path was given.
        options: plotting options; see default_options below.

    Returns:
        (data['image'], fig, ax) when the image is shown, otherwise
        (data['image'], None, None).
    '''
    # Defaults hide all axis decoration, as is usual for image plots
    default_options = {
        'hide_x_labels': True,
        'hide_y_labels': True,
        'hide_x_ticklabels': True,
        'hide_y_ticklabels': True,
        'hide_x_ticks': True,
        'hide_y_ticks': True,
        'colours': None,
        'brightness': None,
        'show_image': True,
        'resize': None,
        'crop': None,
        'ax': None,
        'fig': None,
    }

    options = aux.update_options(options=options, required_options=default_options.keys(), default_options=default_options)

    # Normalise 'path' to a list so single images follow the same code path
    if not isinstance(data['path'], list):
        data['path'] = [data['path']]

    if not 'image' in data.keys():
        data['image'] = [None for _ in range(len(data['path']))]

    if not 'weights' in data.keys():
        data['weights'] = [1.0 for _ in range(len(data['path']))]

    if not options['colours']:
        options['colours'] = [None for _ in range(len(data['path']))]

    # Load each image with its weight / colour / resize / crop applied
    for i, (path, weight, colour) in enumerate(zip(data['path'], data['weights'], options['colours'])):
        data['image'][i] = io.read_image(path=path, weight=weight, colour=colour, resize=options['resize'], crop=options['crop'])

    images = []
    for i, image in enumerate(data['image']):
        images.append(image)

    # Average the (weighted) images and rescale to [0, 1] for imshow.
    # NOTE(review): assumes the loaded images are 0-255 valued — confirm.
    final_image = np.mean(images, axis=0) / 255

    if options['brightness']:
        final_image = io.increase_brightness(final_image, brightness=options['brightness'])

    # Keep the blended image alongside the individual ones
    if len(data['path']) > 1:
        data['image'].append(final_image)

    if options['show_image']:
        # Reuse a caller-supplied figure/axes pair when one is given
        if not options['fig'] and not options['ax']:
            fig, ax = btp.prepare_plot(options)
        else:
            fig, ax = options['fig'], options['ax']

        ax.imshow(final_image)
        btp.adjust_plot(fig=fig, ax=ax, options=options)

        return data['image'], fig, ax

    else:
        return data['image'], None, None
def plot_spectrum(data: dict, options={}):
    ''' Plots an EDS spectrum, optionally with deconvoluted peaks filled in and
    labelled emission-line markers.

    Args:
        data: dict with 'path' to the spectrum file (read by io.read_spectrum).
        options: 'deconvolutions' (path or list of paths to deconvoluted
            spectra), 'lines' (dict mapping label -> energy for vertical
            markers), 'colours' (one per deconvolution), plus axis options.

    Returns:
        (spectrum DataFrame, fig, ax).
    '''
    default_options = {
        'deconvolutions': None,
        'lines': None,
        'colours': None,
        'xlabel': 'Energy', 'xunit': 'keV', 'xlim': None,
        'ylabel': 'Counts', 'yunit': 'arb. u.', 'ylim': None, 'hide_y_ticklabels': True, 'hide_y_ticks': True,
    }

    # NOTE(review): unlike btp.update_options, aux.update_options is called
    # without required_options here — presumably it derives them from
    # default_options; confirm against nafuma.auxillary.
    options = aux.update_options(options=options, default_options=default_options)

    fig, ax = btp.prepare_plot(options=options)

    spectrum = io.read_spectrum(data['path'])

    if options['deconvolutions']:
        deconvolutions = []

        # Accept a single path as well as a list
        if not isinstance(options['deconvolutions'], list):
            options['deconvolutions'] = [options['deconvolutions']]

        # The colour list must match the number of deconvolutions; otherwise
        # fall back to matplotlib's default colours
        if options['colours'] and (len(options['colours']) != len(options['deconvolutions'])):
            options['colours'] = None

        for deconv in options['deconvolutions']:
            df = io.read_spectrum(deconv)
            deconvolutions.append(df)

    spectrum.plot(x='Energy', y='Counts', ax=ax, color='black')

    # Shade the deconvoluted peaks under the measured curve
    if options['deconvolutions']:
        if options['colours']:
            for deconv, colour in zip(deconvolutions, options['colours']):
                ax.fill_between(x=deconv['Energy'], y1=deconv['Counts'], y2=0, color=colour, alpha=0.4)
        else:
            for deconv in deconvolutions:
                ax.fill_between(x=deconv['Energy'], y1=deconv['Counts'], y2=0, alpha=0.4)

    # Default to the full energy range and 10 % headroom above the max count
    if not options['xlim']:
        options['xlim'] = [spectrum['Energy'].min(), spectrum['Energy'].max()]

    if not options['ylim']:
        options['ylim'] = [0, 1.1*spectrum['Counts'].max()]

    if options['lines']:
        # Dashed vertical markers with labels staggered down from the top
        for i, (line, energy) in enumerate(options['lines'].items()):
            ax.axvline(x=energy, ls='--', lw=0.5, c='black')
            ax.text(s=line, x=energy, y=(0.9-0.1*i)*options['ylim'][1], fontsize=8)

    fig, ax = btp.adjust_plot(fig=fig, ax=ax, options=options)

    return spectrum, fig, ax

View file

@ -0,0 +1 @@
from . import io, plot, unit_tables

View file

@ -0,0 +1,871 @@
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import nafuma.auxillary as aux
from sympy import re
# FIXME This is not good practice, but a temporary fix as I don't have time to understand what causes the SettingWithCopyWarning.
# Read this: https://www.dataquest.io/blog/settingwithcopywarning/
pd.set_option('mode.chained_assignment', None)
def read_data(data, options={}):
    ''' Dispatches reading and processing of electrochemistry data by cycler type.

    Args:
        data: dict with 'kind' ('neware', 'batsmall' or 'biologic') and 'path'.
        options: passed through to the kind-specific reader/processor.

    Returns:
        A list of per-cycle (charge, discharge) DataFrame tuples.

    Raises:
        ValueError: for an unrecognised 'kind' (previously this fell through
            to an UnboundLocalError on `cycles`).
    '''
    if data['kind'] == 'neware':
        df = read_neware(data['path'], options=options)
        cycles = process_neware_data(df=df, options=options)
    elif data['kind'] == 'batsmall':
        df = read_batsmall(data['path'])
        cycles = process_batsmall_data(df=df, options=options)
    elif data['kind'] == 'biologic':
        df = read_biologic(data['path'])
        cycles = process_biologic_data(df=df, options=options)
    else:
        raise ValueError("Unknown data kind: {!r} (expected 'neware', 'batsmall' or 'biologic')".format(data['kind']))

    return cycles
def read_neware(path, options={}):
    ''' Reads electrochemistry data from the Neware battery cycler.

    An .xlsx file (Neware's backup format) is first converted to .csv to make
    read time faster; the summary sheet (sheet 3) or details sheet (sheet 4)
    is then loaded depending on options['summary']. A .csv file is read as-is,
    and options['summary'] is ignored.

    Args:
        path: path to a .xlsx or .csv Neware export.
        options: must contain 'summary' (bool) when an .xlsx file is passed.

    Returns:
        DataFrame with the raw cycler data.

    Raises:
        ValueError: if the file is neither .xlsx nor .csv (previously this
            fell through to an UnboundLocalError on `df`).
    '''
    from xlsx2csv import Xlsx2csv

    # FIXME Do a check if a .csv-file already exists even if the .xlsx is passed

    if path.endswith('xlsx'):
        # Convert from .xlsx to .csv to make readtime faster
        csv_details = ''.join(path[:-5]) + '_details.csv'
        csv_summary = os.path.abspath(''.join(path[:-5]) + '_summary.csv')

        if not os.path.isfile(csv_summary):
            Xlsx2csv(path, outputencoding="utf-8").convert(os.path.abspath(csv_summary), sheetid=3)
        if not os.path.isfile(csv_details):
            Xlsx2csv(path, outputencoding="utf-8").convert(csv_details, sheetid=4)

        if options['summary']:
            df = pd.read_csv(csv_summary)
        else:
            df = pd.read_csv(csv_details)

    elif path.endswith('csv'):
        df = pd.read_csv(path)

    else:
        raise ValueError('Unsupported file type (expected .xlsx or .csv): {}'.format(path))

    return df
def read_batsmall(path):
    ''' Reads BATSMALL-data into a DataFrame.

    The decimal separator ('.' or ',') is sniffed from the second column of
    the 11th line of the file before parsing.

    Input:
    path (required): string with path to datafile

    Output:
    df: pandas DataFrame containing the data as-is, but without additional NaN-columns.

    Raises:
    ValueError: if the decimal separator cannot be determined (previously
    this surfaced later as a NameError on `decimal_point`).
    '''
    decimal_point = None

    # Determine if decimal point is . or , by inspecting line 10 (0-indexed);
    # the scan now stops there instead of iterating the rest of the file.
    with open(path, 'r') as f:
        for i, line in enumerate(f):
            if i == 10:
                values = line.split()
                if len(values[1].split('.')) == 2:
                    decimal_point = '.'
                elif len(values[1].split(',')) == 2:
                    decimal_point = ','
                break

    if decimal_point is None:
        raise ValueError('Could not determine decimal separator in {}'.format(path))

    df = pd.read_csv(path, skiprows=2, sep='\t', decimal=decimal_point)

    # Drop the trailing all-NaN columns that BATSMALL exports produce
    df = df.loc[:, ~df.columns.str.contains('^Unnamed')]

    return df
def read_biologic(path):
    ''' Reads Bio-Logic-data into a DataFrame.

    The number of header lines to skip is declared on the file's second line
    (e.g. "Nb header lines : 57").

    Input:
    path (required): string with path to datafile

    Output:
    df: pandas DataFrame containing the data as-is, but without additional NaN-columns.'''
    # The declared count includes the column-name row itself, hence the -1
    with open(path, 'rb') as f:
        header_lines = int(f.readlines()[1].split()[-1]) - 1

    df = pd.read_csv(path, sep='\t', skiprows=header_lines, encoding='cp1252')
    df.dropna(inplace=True, axis=1)

    return df
def process_batsmall_data(df, options=None):
    ''' Takes BATSMALL-data in the form of a DataFrame and cleans the data up and converts units into desired units.
    Splits up into individual charge and discharge DataFrames per cycle, and outputs a list where each element is a tuple with the Chg and DChg-data. E.g. cycles[10][0] gives the charge data for the 11th cycle.
    For this to work, the cycling program must be set to use the counter.

    Input:
    df (required): A pandas DataFrame containing BATSMALL-data, as obtained from read_batsmall().
    options (optional): dict of processing options; see default_options below.

    Output:
    cycles: A list of (charge DataFrame, discharge DataFrame) tuples, one per cycle.
    '''
    default_options = {
        'splice_cycles': False,
        'append': False, # Add max of ions and specific_capacity of previous run #TODO Generalise
        'append_gap': 0, # Add a gap between cyclces - only used if append == True.
        'molecular_weight': None,
        'reverse_discharge': False,
        'units': None,
    }

    # NOTE(review): options is mutated in place; the return value of
    # aux.update_options is deliberately not reassigned here.
    aux.update_options(options=options, default_options=default_options)

    options['kind'] = 'batsmall'

    # Complete set of new units and get the units used in the dataset, and convert values in the DataFrame from old to new.
    set_units(options)
    options['old_units'] = get_old_units(df, options)
    df = add_columns(df=df, options=options) # adds columns to the DataFrame if active material weight and/or molecular weight has been passed in options
    df = unit_conversion(df=df, options=options)

    if options['splice_cycles']:
        df = splice_cycles(df=df, options=options)

    # Replace NaN with empty string in the Comment-column and then remove all steps where the program changes - this is due to inconsistent values for current
    df[["comment"]] = df[["comment"]].fillna(value={'comment': ''})
    df = df[df["comment"].str.contains("program")==False]

    # Creates masks for charge and discharge curves
    chg_mask = df['current'] >= 0
    dchg_mask = df['current'] < 0

    # Initiate cycles list
    cycles = []

    # Loop through all the cycling steps, change the current and capacities in the
    # discharge rows to positive values, and split each cycle into Chg/DChg parts.
    for i in range(df["count"].max()):
        # NOTE(review): the masks were built on the full df; .loc-indexing a
        # sub-frame with them relies on pandas index alignment — confirm intended.
        sub_df = df.loc[df['count'] == i+1].copy()

        # Flip the sign of discharge current/capacity/ions so both halves are positive
        sub_df.loc[dchg_mask, 'current'] *= -1
        sub_df.loc[dchg_mask, 'specific_capacity'] *= -1
        sub_df.loc[dchg_mask, 'ions'] *= -1

        chg_df = sub_df.loc[chg_mask]
        dchg_df = sub_df.loc[dchg_mask]

        # Continue to next iteration if the charge and discharge DataFrames are empty (i.e. no current)
        if chg_df.empty and dchg_df.empty:
            continue

        # Offset each cycle's ion count by the previous cycle's maximum so the
        # cycles line up end-to-end (plus an optional visual gap)
        if options['append']:
            if cycles:
                chg_df.loc[chg_mask, 'ions'] += cycles[-1][1]['ions'].max() + options['append_gap']
                dchg_df.loc[dchg_mask, 'ions'] += chg_df['ions'].max() + options['append_gap']

        # Mirror the discharge curves so they run from max capacity down to zero
        if options['reverse_discharge']:
            max_capacity = dchg_df['capacity'].max()
            dchg_df['capacity'] = np.abs(dchg_df['capacity'] - max_capacity)

            if 'specific_capacity' in df.columns:
                max_capacity = dchg_df['specific_capacity'].max()
                dchg_df['specific_capacity'] = np.abs(dchg_df['specific_capacity'] - max_capacity)

            if 'ions' in df.columns:
                max_capacity = dchg_df['ions'].max()
                dchg_df['ions'] = np.abs(dchg_df['ions'] - max_capacity)

        cycles.append((chg_df, dchg_df))

    return cycles
def splice_cycles(df, options: dict) -> pd.DataFrame:
    ''' Splices two cycles together - if e.g. one charge cycle are split into several cycles due to change in parameters.
    Incomplete, only accomodates BatSmall so far, and only for charge.

    Args:
        df: cycler DataFrame (BatSmall or Neware layout, selected by options['kind']).
        options: dict with 'kind' and, for Neware, 'summary'.

    Returns:
        The same DataFrame with the split charge segments' capacities merged.
    '''
    if options['kind'] == 'batsmall':
        # Creates masks for charge and discharge curves
        chg_mask = df['current'] >= 0

        # Loop through all the cycling steps, change the current and capacities in the
        for i in range(df["count"].max()):
            sub_df = df.loc[df['count'] == i+1]
            sub_df_chg = sub_df.loc[chg_mask]

            # get indices where the program changed
            chg_indices = sub_df_chg[sub_df_chg["comment"].str.contains("program")==True].index.to_list()

            # Delete first item if first cycle after rest (this will just be the start of the cycling)
            if i+1 == 1:
                del chg_indices[0]

            if chg_indices:
                last_chg = chg_indices.pop()

                # For every remaining program change, carry the accumulated
                # capacity forward into the following segment.
                # NOTE(review): the inner loop variable shadows the outer `i`;
                # harmless in Python (the for statement rebinds it) but worth renaming.
                if chg_indices:
                    for i in chg_indices:
                        add = df['specific_capacity'].iloc[i-1]
                        df['specific_capacity'].iloc[i:last_chg] = df['specific_capacity'].iloc[i:last_chg] + add

    if options['kind'] == 'neware':
        if options['summary']:
            # Summary sheet: one row per step, so duplicate 'CC Chg' rows within
            # a cycle are merged by summing into the last one and dropping the first.
            for i in range(df['cycle'].max()):
                sub_df = df.loc[df['cycle'] == i+1].copy()

                if sub_df['status'].loc[sub_df['status'] == 'CC Chg'].count() > 1:
                    indices = sub_df.index[sub_df['status'] == 'CC Chg']

                    add_columns = ['capacity', 'specific_capacity', 'ions', 'energy', 'cycle_time']
                    for column in add_columns:
                        if column in df.columns:
                            df[column].iloc[indices[-1]] = df[column].iloc[indices[-1]] + df[column].iloc[indices[0]]

                    df.drop(index=indices[0], inplace=True)
                    df.reset_index(inplace=True, drop=True)

        else:
            # Details sheet: a split charge shows up as two distinct step
            # numbers within one cycle; add the first segment's final values
            # onto the whole second segment.
            for i in range(df['cycle'].max()):
                sub_df = df.loc[df['cycle'] == i+1].copy()
                sub_chg_df = sub_df.loc[sub_df['status'] == 'CC Chg'].copy()

                steps_indices = sub_chg_df['steps'].unique()

                if len(steps_indices) > 1:
                    add_columns = ['capacity', 'specific_capacity', 'ions', 'energy', 'cycle_time']
                    for column in add_columns:
                        if column in df.columns:
                            # Extract the maximum value from the first of the two cycles by accessing the column value of the highest index of the first cycle
                            add = df[column].iloc[df.loc[df['steps'] == steps_indices[0]].index.max()]
                            df[column].loc[df['steps'] == steps_indices[1]] += add

    return df
def process_neware_data(df, options=None):
    """ Takes data from NEWARE in a DataFrame as read by read_neware() and converts units, adds columns and splits into cycles.

    Input:
    df: pandas DataFrame containing NEWARE data as read by read_neware()
    options: dictionary of processing options:
        units: dictionary containing the desired units. keywords: capacity, current, voltage, mass, energy, time
        splice_cycles: tuple containing index of cycles that should be spliced. Specifically designed to add two charge steps during the formation cycle with two different max voltages
        active_material_weight: weight of the active material (in mg) used in the cell.
        molecular_weight: the molar mass (in g mol^-1) of the active material, to calculate the number of ions extracted. Assumes one electron per Li+/Na+-ion
        summary: if True, df is a per-cycle summary table and a single summary DataFrame is returned

    Output:
    If options['summary'] is falsy: a list of (charge_df, discharge_df) tuples, one per cycle.
    If options['summary'] is truthy: one summary DataFrame with per-cycle capacities, energies and efficiencies. """

    # Fix: the default used to be the mutable `options={}`, which aux.update_options
    # mutates in place — state ('kind', 'old_units', ...) leaked between successive
    # calls made without an explicit options dict.
    if options is None:
        options = {}

    required_options = ['units', 'active_material_weight', 'molecular_weight', 'reverse_discharge', 'splice_cycles', 'increment_cycles_from', 'delete_datapoints', 'summary']

    default_options = {
        'units': None,
        'active_material_weight': None,
        'molecular_weight': None,
        'reverse_discharge': False,
        'splice_cycles': None,
        'increment_cycles_from': None,  # index from which all subsequent cycle numbers are incremented by one
        'delete_datapoints': None,  # list of indices to drop from the DataFrame
        'summary': False,  # added default: this key was read below but never defaulted, raising KeyError if absent
    }

    aux.update_options(options=options, required_options=required_options, default_options=default_options)

    options['kind'] = 'neware'

    if not options['summary']:
        # Complete set of new units and get the units used in the dataset, and convert values in the DataFrame from old to new.
        set_units(options=options)  # sets options['units']
        options['old_units'] = get_old_units(df=df, options=options)

        df = add_columns(df=df, options=options)  # adds columns to the DataFrame if active material weight and/or molecular weight has been passed in options
        df = unit_conversion(df=df, options=options)  # converts all units from the old units to the desired units

        if options['increment_cycles_from']:
            # Fix: write through df.loc instead of chained `df['cycle'].iloc[...] += 1`,
            # which may operate on a copy (SettingWithCopyWarning) and silently do nothing.
            df.loc[df.index[options['increment_cycles_from']:], 'cycle'] += 1

        if options['delete_datapoints']:
            for datapoint in options['delete_datapoints']:
                df.drop(index=datapoint, inplace=True)

        if options['splice_cycles']:
            df = splice_cycles(df=df, options=options)

        # Creates masks for charge and discharge curves
        chg_mask = df['status'] == 'CC Chg'
        dchg_mask = df['status'] == 'CC DChg'

        # Initiate cycles list
        cycles = []

        # Loop through all the cycles and split each into its charge and discharge halves
        for i in range(df["cycle"].max()):
            sub_df = df.loc[df['cycle'] == i+1].copy()

            # NOTE(review): chg_mask/dchg_mask are indexed on the full df; pandas aligns
            # them to sub_df by label, which works because sub_df's index is a subset.
            chg_df = sub_df.loc[chg_mask]
            dchg_df = sub_df.loc[dchg_mask]

            # Continue to next iteration if the charge and discharge DataFrames are empty (i.e. no current)
            if chg_df.empty and dchg_df.empty:
                continue

            # Reverses the discharge curve if specified, so capacity runs back towards zero
            if options['reverse_discharge']:
                max_capacity = dchg_df['capacity'].max()
                dchg_df['capacity'] = np.abs(dchg_df['capacity'] - max_capacity)

                if 'specific_capacity' in df.columns:
                    max_capacity = dchg_df['specific_capacity'].max()
                    dchg_df['specific_capacity'] = np.abs(dchg_df['specific_capacity'] - max_capacity)

                if 'ions' in df.columns:
                    max_capacity = dchg_df['ions'].max()
                    dchg_df['ions'] = np.abs(dchg_df['ions'] - max_capacity)

            if not chg_df.empty:
                chg_df.reset_index(inplace=True)
            if not dchg_df.empty:
                dchg_df.reset_index(inplace=True)

            cycles.append((chg_df, dchg_df))

        return cycles

    elif options['summary']:
        set_units(options=options)
        options['old_units'] = get_old_units(df=df, options=options)

        df = add_columns(df=df, options=options)
        df = unit_conversion(df=df, options=options)

        if options['splice_cycles']:
            df = splice_cycles(df=df, options=options)

        chg_df = df.loc[df['status'] == 'CC Chg']
        chg_df.reset_index(inplace=True)
        dchg_df = df.loc[df['status'] == 'CC DChg']
        dchg_df.reset_index(inplace=True)

        # Construct new summary DataFrame with one row per cycle
        new_df = pd.DataFrame(chg_df["cycle"])
        new_df.insert(1,'charge_capacity',chg_df['capacity'])
        new_df.insert(1,'charge_specific_capacity',chg_df['specific_capacity'])
        new_df.insert(1,'discharge_capacity',dchg_df['capacity'])
        new_df.insert(1,'discharge_specific_capacity',dchg_df['specific_capacity'])
        new_df.insert(1,'charge_energy',chg_df['energy'])
        new_df.insert(1,'charge_specific_energy',chg_df['specific_energy'])
        new_df.insert(1,'discharge_energy',dchg_df['energy'])
        new_df.insert(1,'discharge_specific_energy',dchg_df['specific_energy'])

        new_df = calculate_efficiency(df=new_df, options=options)

        return new_df
def process_biologic_data(df, options=None):
    """ Takes data from a Bio-Logic potentiostat as read by the corresponding reader and converts units,
    adds columns and splits the data into cycles.

    Input:
    df: pandas DataFrame with Bio-Logic data (GC or CV).
    options: dictionary of processing options (units, active_material_weight, molecular_weight,
        reverse_discharge, splice_cycles, mode). If 'mode' is not given it is inferred from df['mode'].

    Output:
    A list of (charge_df, discharge_df) tuples, one per cycle. """

    # Fix: options defaults to None but options.keys() was dereferenced below,
    # so calling this function without options crashed with AttributeError.
    if options is None:
        options = {}

    required_options = ['units', 'active_material_weight', 'molecular_weight', 'reverse_discharge', 'splice_cycles']

    default_options = {
        'units': None,
        'active_material_weight': None,
        'molecular_weight': None,
        'reverse_discharge': False,
        'splice_cycles': None}

    # Check if the DataFrame contains GC or CV data.
    # FIXME This might not be a very rigorous method of checking. E.g. Rest has mode == 3, so if loading a short GC with many Rest-datapoints, the mean will be 2 and it will be treated as CV. For now manual override is sufficient
    if not 'mode' in options.keys():
        options['mode'] = 'GC' if int(df['mode'].mean()) == 1 else 'CV'

    aux.update_options(options=options, required_options=required_options, default_options=default_options)

    options['kind'] = 'biologic'

    # Pick out necessary columns; GC and CV files carry different headers
    headers = [
        'Ns changes', 'Ns', 'time/s', 'Ewe/V', 'Energy charge/W.h', 'Energy discharge/W.h', '<I>/mA', 'Capacity/mA.h', 'cycle number' ] if options['mode'] == 'GC' else [
        'ox/red', 'time/s', 'control/V', 'Ewe/V', '<I>/mA', 'cycle number', '(Q-Qo)/C', 'P/W'
    ]

    df = df[headers].copy()

    # Complete set of new units and get the units used in the dataset, and convert values in the DataFrame from old to new.
    set_units(options)
    options['old_units'] = get_old_units(df=df, options=options)

    df = add_columns(df=df, options=options)
    df = unit_conversion(df=df, options=options)

    # Creates masks for charge and discharge curves
    if options['mode'] == 'GC':
        chg_mask = (df['status'] == 1) & (df['status_change'] != 1)
        dchg_mask = (df['status'] == 2) & (df['status_change'] != 1)
    elif options['mode'] == 'CV':
        chg_mask = (df['status'] == 1)  # oxidation
        dchg_mask = (df['status'] == 0)  # reduction

    # Initiate cycles list
    cycles = []

    # Bio-Logic numbers cycles from 0; treat a max of 0 as a single cycle
    if df['cycle'].max() == 0:
        no_cycles = 1
    else:
        no_cycles = int(df['cycle'].max())

    # Loop through all the cycles and split each into its charge and discharge halves
    for i in range(no_cycles):
        sub_df = df.loc[df['cycle'] == i].copy()

        #sub_df.loc[dchg_mask, 'current'] *= -1
        #sub_df.loc[dchg_mask, 'capacity'] *= -1

        chg_df = sub_df.loc[chg_mask]
        dchg_df = sub_df.loc[dchg_mask]

        # Continue to next iteration if the charge and discharge DataFrames are empty (i.e. no current)
        if chg_df.empty and dchg_df.empty:
            continue

        # Reverses the discharge curve if specified (GC only)
        if options['mode'] == 'GC' and options['reverse_discharge']:
            max_capacity = dchg_df['capacity'].max()
            dchg_df['capacity'] = np.abs(dchg_df['capacity'] - max_capacity)

            if 'specific_capacity' in df.columns:
                max_capacity = dchg_df['specific_capacity'].max()
                dchg_df['specific_capacity'] = np.abs(dchg_df['specific_capacity'] - max_capacity)

            if 'ions' in df.columns:
                max_capacity = dchg_df['ions'].max()
                dchg_df['ions'] = np.abs(dchg_df['ions'] - max_capacity)

        # Order CV sweeps by voltage so the anodic/cathodic branches plot cleanly
        if options['mode'] == 'CV':
            chg_df = chg_df.sort_values(by='voltage').reset_index(drop=True)
            dchg_df = dchg_df.sort_values(by='voltage', ascending=False).reset_index(drop=True)

        cycles.append((chg_df, dchg_df))

    return cycles
def add_columns(df, options):
    """ Add derived columns to a cycling DataFrame, depending on options['kind']
    ('neware', 'biologic' or 'batsmall'):

    - specific capacity / specific energy, when options['active_material_weight'] (mg) is given
    - 'IonsExtracted', when options['molecular_weight'] (g mol^-1) is given
      (assumes one electron per ion)

    Returns the (mutated) DataFrame. """
    from . import unit_tables

    # Faraday's constant expressed in mAh mol^-1; computed once here instead of
    # being duplicated in every branch (same value as before).
    faradays_constant = 96485.3365  # [F] = C mol^-1 = As mol^-1
    seconds_per_hour = 3600  # s h^-1
    f = faradays_constant / seconds_per_hour * 1000.0  # [f] = mAh mol^-1

    if options['kind'] == 'neware':
        if options['summary']:
            # Summary files only report net discharge energy; mirror it as a positive 'Energy' column
            df[f'Energy({options["old_units"]["energy"]})'] = np.abs(df[f'Net discharge energy({options["old_units"]["energy"]})'])

        if options['active_material_weight']:
            df[f"SpecificCapacity({options['old_units']['capacity']}/mg)"] = df["Capacity({})".format(options['old_units']['capacity'])] / (options['active_material_weight'])
            df[f"SpecificEnergy({options['old_units']['energy']}/mg)"] = df["Energy({})".format(options['old_units']['energy'])] / (options['active_material_weight'])

        if options['molecular_weight']:
            df["IonsExtracted"] = (df["SpecificCapacity({}/mg)".format(options['old_units']['capacity'])]*options['molecular_weight'])*1000/f

    if options['kind'] == 'biologic':
        if options['active_material_weight']:
            # Bio-Logic writes capacity units with a dot (e.g. 'mA.h') while old_units stores 'mAh'
            capacity = options['old_units']['capacity'].split('h')[0] + '.h'
            df["SpecificCapacity({}/mg)".format(options['old_units']["capacity"])] = df["Capacity/{}".format(capacity)] / (options['active_material_weight'])

        if options['molecular_weight']:
            df["IonsExtracted"] = (df["SpecificCapacity({}/mg)".format(options['old_units']['capacity'])]*options['molecular_weight'])*1000/f

    if options['kind'] == 'batsmall':
        if options['active_material_weight']:
            # Convert the sample mass (given in mg) into the dataset's mass unit
            active_material_weight = options['active_material_weight'] * unit_tables.mass()['mg'].loc[options['units']['mass']]
            # (removed an unused local: `capacity = options['old_units']['capacity']`)
            df[f'Capacity [{options["old_units"]["capacity"]}]'] = df[f'C [{options["old_units"]["capacity"]}/{options["old_units"]["mass"]}]'] * active_material_weight

        if options['molecular_weight']:
            # Convert molar mass to the dataset's mass unit before computing ions extracted
            molecular_weight = options['molecular_weight'] * unit_tables.mass()['g'].loc[options['old_units']['mass']]
            df["IonsExtracted"] = (df[f'C [{options["old_units"]["capacity"]}/{options["old_units"]["mass"]}]'] * molecular_weight)/f
            #df['reaction_coordinate'] = (df[f'TT [{options["old_units"]["time"]}]'] * unit_tables.time()[options['old_units']["time"]].loc["h"]) / np.abs(df[f'I [{options["old_units"]["current"]}]'] * unit_tables.current()[options['old_units']["current"]].loc['A'])

    return df
def calculate_efficiency(df: pd.DataFrame, options: dict) -> pd.DataFrame:
    """Append capacity-fade and efficiency columns (all in percent) to a per-cycle summary DataFrame.

    Requires columns: charge_capacity, discharge_capacity, charge_energy, discharge_energy.
    options['reference_index'] selects the row used as the 100 % reference for the fade columns.
    Mutates df in place and also returns it.
    """
    default_options = {
        'reference_index': 0
    }
    options = aux.update_options(options=options, required_options=default_options.keys(), default_options=default_options)

    reference = options['reference_index']

    # Capacity retention of each cycle relative to the reference cycle
    for mode in ('charge', 'discharge'):
        df[f'{mode}_capacity_fade'] = (df[f'{mode}_capacity'] / df[f'{mode}_capacity'].iloc[reference])*100

    # Per-cycle efficiencies: discharge output relative to charge input
    df['coulombic_efficiency'] = (df['discharge_capacity'] / df['charge_capacity'])*100
    df['energy_efficiency'] = (df['discharge_energy'] / df['charge_energy'])*100

    return df
def unit_conversion(df, options):
    """ Convert every unit-bearing column of df from options['old_units'] to options['units'],
    drop vendor bookkeeping columns, and rename the remaining columns to the internal
    canonical names ('time', 'voltage', 'current', 'capacity', ...).

    Handles the three supported data kinds ('batsmall', 'neware', 'biologic'); for neware,
    the summary and per-datapoint layouts are treated separately.
    Returns the converted, renamed DataFrame. """
    from . import unit_tables

    if options['kind'] == 'batsmall':
        df["TT [{}]".format(options['old_units']["time"])] = df["TT [{}]".format(options['old_units']["time"])] * unit_tables.time()[options['old_units']["time"]].loc[options['units']['time']]
        df["U [{}]".format(options['old_units']["voltage"])] = df["U [{}]".format(options['old_units']["voltage"])] * unit_tables.voltage()[options['old_units']["voltage"]].loc[options['units']['voltage']]
        df["I [{}]".format(options['old_units']["current"])] = df["I [{}]".format(options['old_units']["current"])] * unit_tables.current()[options['old_units']["current"]].loc[options['units']['current']]
        df["C [{}/{}]".format(options['old_units']["capacity"], options['old_units']["mass"])] = df["C [{}/{}]".format(options['old_units']["capacity"], options['old_units']["mass"])] * (unit_tables.capacity()[options['old_units']["capacity"]].loc[options['units']["capacity"]] / unit_tables.mass()[options['old_units']["mass"]].loc[options['units']["mass"]])

        columns = ['time', 'voltage', 'current', 'count', 'specific_capacity', 'comment']

        # Add column labels for capacity and ions if they exist
        if f'Capacity [{options["old_units"]["capacity"]}]' in df.columns:
            df[f'Capacity [{options["old_units"]["capacity"]}]'] = df[f'Capacity [{options["old_units"]["capacity"]}]'] * unit_tables.capacity()[options['old_units']['capacity']].loc[options['units']['capacity']]
            columns.append('capacity')
        if 'IonsExtracted' in df.columns:
            columns.append('ions')
            #columns.append('reaction_coordinate')

        df.columns = columns

    if options['kind'] == 'neware':
        # Column names differ between NEWARE export versions; detect which variant is present
        record_number = 'Data serial number' if 'Data serial number' in df.columns else 'Record number'
        relative_time = 'Relative Time(h:min:s.ms)' if 'Relative Time(h:min:s.ms)' in df.columns else 'Relative Time'
        continuous_time = 'Continuous Time(h:min:s.ms)' if 'Continuous Time(h:min:s.ms)' in df.columns else 'Continuous Time'
        real_time = 'Real Time(h:min:s.ms)' if 'Real Time(h:min:s.ms)' in df.columns else 'Real Time'

        if options['summary']:
            df[f'Energy({options["old_units"]["energy"]})'] = df[f'Energy({options["old_units"]["energy"]})'] * unit_tables.energy()[options['old_units']['energy']].loc[options['units']['energy']]
            df[f'Starting current({options["old_units"]["current"]})'] = df[f'Starting current({options["old_units"]["current"]})'] * unit_tables.current()[options['old_units']['current']].loc[options['units']['current']]
            df[f'Start Volt({options["old_units"]["voltage"]})'] = df[f'Start Volt({options["old_units"]["voltage"]})'] * unit_tables.voltage()[options['old_units']['voltage']].loc[options['units']['voltage']]
            df[f'Capacity({options["old_units"]["capacity"]})'] = df[f'Capacity({options["old_units"]["capacity"]})'] * unit_tables.capacity()[options['old_units']['capacity']].loc[options['units']['capacity']]
            # Fix: the Energy column was converted a second time here (identical statement to the
            # first conversion above), which squared the conversion factor whenever old and new
            # energy units differed. The duplicate has been removed.
            df[f'CycleTime({options["units"]["time"]})'] = df.apply(lambda row : convert_time_string(row[relative_time], unit=options['units']['time']), axis=1)
            df[f'RunTime({options["units"]["time"]})'] = df.apply(lambda row : convert_datetime_string(row[real_time], reference=df[real_time].iloc[0], ref_time=df[f'CycleTime({options["units"]["time"]})'].iloc[0],unit=options['units']['time']), axis=1)

            droplist = [
                'Chnl',
                'Original step',
                f'End Volt({options["old_units"]["voltage"]})',
                f'Termination current({options["old_units"]["current"]})',
                relative_time,
                real_time,
                continuous_time,
                f'Net discharge capacity({options["old_units"]["capacity"]})',
                f'Chg Cap({options["old_units"]["capacity"]})',
                f'DChg Cap({options["old_units"]["capacity"]})',
                f'Net discharge energy({options["old_units"]["energy"]})',
                f'Chg Eng({options["old_units"]["energy"]})',
                f'DChg Eng({options["old_units"]["energy"]})'
            ]

            # Drop all undesireable columns
            for drop in droplist:
                if drop in df.columns:
                    df.drop(drop, axis=1, inplace=True)

            columns = ['cycle', 'steps', 'status', 'voltage', 'current', 'capacity', 'energy']

            # Add column labels for specific capacity and ions if they exist
            if 'SpecificCapacity({}/mg)'.format(options['old_units']['capacity']) in df.columns:
                df['SpecificCapacity({}/mg)'.format(options['old_units']['capacity'])] = df['SpecificCapacity({}/mg)'.format(options['old_units']['capacity'])] * unit_tables.capacity()[options['old_units']['capacity']].loc[options['units']['capacity']] / unit_tables.mass()['mg'].loc[options['units']["mass"]]
                columns.append('specific_capacity')
            if f'SpecificEnergy({options["old_units"]["energy"]}/mg)' in df.columns:
                df[f'SpecificEnergy({options["old_units"]["energy"]}/mg)'] = df[f'SpecificEnergy({options["old_units"]["energy"]}/mg)'] * unit_tables.energy()[options['old_units']['energy']].loc[options['units']['energy']] / unit_tables.mass()['mg'].loc[options['units']["mass"]]
                columns.append('specific_energy')
            if 'IonsExtracted' in df.columns:
                columns.append('ions')

            # Append time column labels here as they were the last columns to be generated
            columns.append('cycle_time')
            columns.append('runtime')

            # Apply new column labels
            df.columns = columns

        else:
            df['Current({})'.format(options['old_units']['current'])] = df['Current({})'.format(options['old_units']['current'])] * unit_tables.current()[options['old_units']['current']].loc[options['units']['current']]
            df['Voltage({})'.format(options['old_units']['voltage'])] = df['Voltage({})'.format(options['old_units']['voltage'])] * unit_tables.voltage()[options['old_units']['voltage']].loc[options['units']['voltage']]
            df['Capacity({})'.format(options['old_units']['capacity'])] = df['Capacity({})'.format(options['old_units']['capacity'])] * unit_tables.capacity()[options['old_units']['capacity']].loc[options['units']['capacity']]
            df['Energy({})'.format(options['old_units']['energy'])] = df['Energy({})'.format(options['old_units']['energy'])] * unit_tables.energy()[options['old_units']['energy']].loc[options['units']['energy']]
            df['CycleTime({})'.format(options['units']['time'])] = df.apply(lambda row : convert_time_string(row[relative_time], unit=options['units']['time']), axis=1)
            df['RunTime({})'.format(options['units']['time'])] = df.apply(lambda row : convert_datetime_string(row[real_time], reference=df[real_time].iloc[0], ref_time=df[f'CycleTime({options["units"]["time"]})'].iloc[0], unit=options['units']['time']), axis=1)

            columns = ['status', 'jump', 'cycle', 'steps', 'current', 'voltage', 'capacity', 'energy']

            if 'SpecificCapacity({}/mg)'.format(options['old_units']['capacity']) in df.columns:
                df['SpecificCapacity({}/mg)'.format(options['old_units']['capacity'])] = df['SpecificCapacity({}/mg)'.format(options['old_units']['capacity'])] * unit_tables.capacity()[options['old_units']['capacity']].loc[options['units']['capacity']] / unit_tables.mass()['mg'].loc[options['units']["mass"]]
                columns.append('specific_capacity')
            if f'SpecificEnergy({options["old_units"]["energy"]}/mg)' in df.columns:
                df[f'SpecificEnergy({options["old_units"]["energy"]}/mg)'] = df[f'SpecificEnergy({options["old_units"]["energy"]}/mg)'] * unit_tables.energy()[options['old_units']['energy']].loc[options['units']['energy']] / unit_tables.mass()['mg'].loc[options['units']["mass"]]
                columns.append('specific_energy')
            if 'IonsExtracted' in df.columns:
                columns.append('ions')

            columns.append('cycle_time')
            columns.append('time')

            droplist = [record_number, relative_time, real_time]
            for drop in droplist:
                if drop in df.columns:
                    df.drop(drop, axis=1, inplace=True)

            df.columns = columns

    if options['kind'] == 'biologic':
        # NOTE(review): only time/voltage/current/capacity columns are converted here;
        # the 'Energy charge/discharge' columns pass through unconverted — confirm intent.
        for column in df.columns:
            if 'time' in column:
                df['time/{}'.format(options['old_units']['time'])] = df["time/{}".format(options['old_units']["time"])] * unit_tables.time()[options['old_units']["time"]].loc[options['units']['time']]
            if 'Ewe' in column:
                df["Ewe/{}".format(options['old_units']["voltage"])] = df["Ewe/{}".format(options['old_units']["voltage"])] * unit_tables.voltage()[options['old_units']["voltage"]].loc[options['units']['voltage']]
            if '<I>' in column:
                df["<I>/{}".format(options['old_units']["current"])] = df["<I>/{}".format(options['old_units']["current"])] * unit_tables.current()[options['old_units']["current"]].loc[options['units']['current']]
            if 'Capacity' in column:
                capacity = options['old_units']['capacity'].split('h')[0] + '.h'
                df["Capacity/{}".format(capacity)] = df["Capacity/{}".format(capacity)] * (unit_tables.capacity()[options['old_units']["capacity"]].loc[options['units']["capacity"]])

        columns = [
            'status_change', 'status', 'time', 'voltage', 'energy_charge', 'energy_discharge', 'current', 'capacity', 'cycle'] if options['mode'] == 'GC' else [  # GC headers
            'status', 'time', 'control_voltage', 'voltage', 'current', 'cycle', 'charge', 'power'  # CV headers
        ]

        if options['mode'] == 'GC':
            if 'SpecificCapacity({}/mg)'.format(options['old_units']['capacity']) in df.columns:
                df['SpecificCapacity({}/mg)'.format(options['old_units']['capacity'])] = df['SpecificCapacity({}/mg)'.format(options['old_units']['capacity'])] * unit_tables.capacity()[options['old_units']['capacity']].loc[options['units']['capacity']] / unit_tables.mass()['mg'].loc[options['units']["mass"]]
                columns.append('specific_capacity')
            if 'IonsExtracted' in df.columns:
                columns.append('ions')

        df.columns = columns

    return df
def set_units(options: dict) -> None:
    """Ensure options['units'] is a complete unit dictionary, filling defaults for any missing entry.

    Also derives the 'specific_capacity' unit string from the chosen capacity and mass units
    (e.g. 'mAh g$^{-1}$'). Mutates options in place and returns None.
    """
    default_units = {
        'time': 'h',
        'current': 'mA',
        'voltage': 'V',
        'capacity': 'mAh',
        'mass': 'g',
        'energy': 'mWh',
        'specific_capacity': None}

    # No units supplied at all: start from the defaults wholesale
    if not options['units']:
        options['units'] = default_units

    aux.update_options(options=options['units'], required_options=list(default_units.keys()), default_options=default_units)

    # Specific capacity is always capacity per mass, rendered with a TeX superscript
    options['units']['specific_capacity'] = options['units']['capacity'] + ' ' + options['units']['mass'] + '$^{-1}$'
def get_old_units(df: pd.DataFrame, options: dict) -> dict:
    ''' Reads a DataFrame with cycling data and determines which units have been used and returns these in a dictionary.

    The relevant column-name pattern depends on options['kind']:
    'batsmall' uses e.g. 'U [V]', 'neware' uses e.g. 'Voltage(V)', 'biologic' uses e.g. 'Ewe/V'.
    Units that cannot be found are simply absent from the returned dictionary. '''
    # Initialised up front: the previous implementation collected bare locals in the
    # 'neware' branch and left old_units undefined for unknown kinds, so a missing
    # column (or an unrecognised kind) raised NameError instead of returning partial results.
    old_units = {}

    if options['kind'] == 'batsmall':
        for column in df.columns:
            if 'TT [' in column:
                old_units['time'] = column.split()[-1].strip('[]')
            elif 'U [' in column:
                old_units['voltage'] = column.split()[-1].strip('[]')
            elif 'I [' in column:
                old_units['current'] = column.split()[-1].strip('[]')
            elif 'C [' in column:
                # Capacity columns are per-mass, e.g. 'C [mAh/g]'
                old_units['capacity'], old_units['mass'] = column.split()[-1].strip('[]').split('/')

    if options['kind']=='neware':
        for column in df.columns:
            if 'Voltage' in column or 'Start Volt' in column:
                old_units['voltage'] = column.split('(')[-1].strip(')')
            elif 'Current' in column or 'Starting current' in column:
                old_units['current'] = column.split('(')[-1].strip(')')
            elif 'Capacity' in column:
                old_units['capacity'] = column.split('(')[-1].strip(')')
            elif 'Energy' in column or 'Eng' in column:
                old_units['energy'] = column.split('(')[-1].strip(')')

    if options['kind'] == 'biologic':
        for column in df.columns:
            if 'time' in column:
                old_units['time'] = column.split('/')[-1]
            elif 'Ewe' in column:
                old_units['voltage'] = column.split('/')[-1]
            elif 'Capacity' in column:
                # Bio-Logic writes e.g. 'mA.h'; normalise to 'mAh'
                old_units['capacity'] = column.split('/')[-1].replace('.', '')
            elif 'Energy' in column:
                old_units['energy'] = column.split('/')[-1].replace('.', '')
            elif '<I>' in column:
                old_units['current'] = column.split('/')[-1]

    return old_units
def convert_time_string(time_string, unit='ms'):
    ''' Convert time string from Neware-data with the format hh:mm:ss.xx to any given unit (ms, s, min or h)'''
    hours, minutes, seconds = time_string.split(':')

    # Accumulate everything in milliseconds first, then rescale to the requested unit
    total_ms = float(seconds) * 1000 + int(minutes) * 60000 + int(hours) * 3600000
    scale = {'ms': 1, 's': 1 / 1000, 'min': 1 / 60000, 'h': 1 / 3600000}

    return total_ms * scale[unit]
def convert_datetime_string(datetime_string, reference, ref_time, unit='s'):
    ''' Convert time string from Neware-data with the format yyyy-mm-dd hh:mm:ss to any given unit.

    Input:
    datetime_string: timestamp of the current datapoint, 'yyyy-mm-dd hh:mm:ss'
    reference: timestamp of the first datapoint (same format), used as time zero
    ref_time: offset added to the result (the cycle time of the first datapoint)
    unit: target unit, one of 'ms', 's', 'min', 'h'

    Output:
    Elapsed time since the reference in the requested unit, plus ref_time. '''
    from datetime import datetime

    # Use strptime instead of manually splitting and casting each date/time field
    fmt = '%Y-%m-%d %H:%M:%S'
    current_date = datetime.strptime(datetime_string, fmt)
    reference_date = datetime.strptime(reference, fmt)

    delta = current_date - reference_date
    # Whole seconds between the two timestamps (timedelta normalises days/seconds)
    s = delta.days * 24 * 60 * 60 + delta.seconds

    factors = {'ms': 1000, 's': 1, 'min': 1 / (60), 'h': 1 / (60 * 60)}
    time = s * factors[unit] + ref_time

    return time

View file

@ -0,0 +1,747 @@
from pickle import MARK
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,AutoMinorLocator)
import pandas as pd
import numpy as np
import math
import os
import shutil
from PIL import Image
import ipywidgets as widgets
from IPython.display import display
import nafuma.electrochemistry as ec
import nafuma.plotting as btp
import nafuma.auxillary as aux
def plot_gc(data, options=None):
    """ Plot galvanostatic cycling data.

    Input:
    data: dict; after loading, data['cycles'] holds either a list of
        (charge_df, discharge_df) tuples (per-datapoint data) or a single
        per-cycle summary DataFrame (when options['summary'] is True).
    options: plotting options; any missing entry is filled from default_options below.

    Output:
    (data['cycles'], fig, ax) when a plot is shown, (data['cycles'], None, None) otherwise.
    Returns None early when an interactive widget session is started.
    """
    # Update options
    default_options = {
        'force_reload': False,  # re-read data even if data['cycles'] already exists
        'x_vals': 'capacity', 'y_vals': 'voltage',
        'which_cycles': 'all',
        'limit': None, # Limit line to be drawn
        'exclude_cycles': [],
        'show_plot': True,
        'summary': False,  # plot per-cycle summary values instead of voltage curves
        'charge': True, 'discharge': True,
        'colours': None,
        'markers': None,
        'differentiate_charge_discharge': True,
        'gradient': False,
        'interactive': False,
        'interactive_session_active': False,
        'rc_params': {},
        'format_params': {},
        'save_gif': False,
        'save_path': 'animation.gif',
        'fps': 1,  # frames per second of the saved GIF
        'fig': None, 'ax': None,
        'edgecolor': plt.rcParams['lines.markeredgecolor'],
        'plot_every': 1,  # plot every n-th cycle
    }
    options = aux.update_options(options=options, default_options=default_options)
    # Read data if not already loaded
    if not 'cycles' in data.keys() or options['force_reload']:
        data['cycles'] = ec.io.read_data(data=data, options=options)
    # Update list of cycles to correct (0-based) indices
    update_cycles_list(data=data, options=options)
    if options['interactive']:
        # Hand off to the widget wrapper; flag the session so nested calls don't recurse
        options['interactive'], options['interactive_session_active'] = False, True
        plot_gc_interactive(data=data, options=options)
        return
    colours = generate_colours(options=options)
    markers = generate_markers(options=options)
    if not options['summary']:
        if options['show_plot']:
            # Prepare plot (reuse caller-supplied fig/ax if given)
            if not options['fig'] and not options['ax']:
                fig, ax = btp.prepare_plot(options=options)
            else:
                fig, ax = options['fig'], options['ax']
            # One charge and/or discharge trace per selected cycle
            for i, cycle in enumerate(options['which_cycles']):
                if options['charge']:
                    data['cycles'][cycle][0].plot(x=options['x_vals'], y=options['y_vals'], ax=ax, c=colours[i][0])
                if options['discharge']:
                    data['cycles'][cycle][1].plot(x=options['x_vals'], y=options['y_vals'], ax=ax, c=colours[i][1])
            # In an interactive session the axis selection may change between calls,
            # so force the labels to be recomputed
            if options['interactive_session_active']:
                update_labels(options, force=True)
            else:
                update_labels(options)
        if options['save_gif'] and not options['interactive_session_active']:
            # Render each cycle to a PNG in tmp/, then assemble the frames into a GIF
            if not os.path.isdir('tmp'):
                os.makedirs('tmp')
            # Scale image to make GIF smaller
            options['format_params']['width'] = 7.5
            options['format_params']['height'] = 3
            options['format_params']['dpi'] = 200
            for i, cycle in enumerate(data['cycles']):
                if i in options['which_cycles']:
                    giffig, gifax = btp.prepare_plot(options=options)
                    if options['charge']:
                        cycle[0].plot(x=options['x_vals'], y=options['y_vals'], ax=gifax, c=colours[i][0])
                    if options['discharge']:
                        cycle[1].plot(x=options['x_vals'], y=options['y_vals'], ax=gifax, c=colours[i][1])
                    # Cycle-number annotation; y=3 is a fixed position — presumably a
                    # typical voltage value, TODO confirm
                    gifax.text(x=gifax.get_xlim()[1]*0.8, y=3, s=f'{i+1}')
                    update_labels(options)
                    giffig, gifax = btp.adjust_plot(fig=giffig, ax=gifax, options=options)
                    plt.savefig(os.path.join('tmp', str(i+1).zfill(4)+'.png'))
                    plt.close()
            img_paths = [os.path.join('tmp', path) for path in os.listdir('tmp') if path.endswith('png')]
            frames = []
            for path in img_paths:
                frame = Image.open(path)
                frames.append(frame)
            frames[0].save(options['save_path'], format='GIF', append_images=frames[1:], save_all=True, duration=(1/options['fps'])*1000, loop=0)
            shutil.rmtree('tmp')
    elif options['summary'] and options['show_plot']:
        # Prepare plot
        if not options['fig'] and not options['ax']:
            fig, ax = btp.prepare_plot(options=options)
        else:
            fig, ax = options['fig'], options['ax']
        # Boolean mask selecting the rows (cycles) to plot; which_cycles here is
        # compared against 1-based cycle numbers
        mask = []
        for i in range(data['cycles'].shape[0]):
            if i+1 in options['which_cycles']:
                mask.append(True)
            else:
                mask.append(False)
        # Drop the last row if it is midway through a charge in order to avoid mismatch of length of mask and dataset.
        # NOTE(review): len(mask) always equals shape[0] after the loop above, so this
        # branch appears unreachable — confirm intent.
        if len(mask) > data['cycles'].shape[0]:
            del mask[-1]
            data['cycles'].drop(data['cycles'].tail(1).index, inplace=True)
        # FIXME To begin, the default is that y-values correspond to x-values. This should probably be implemented in more logical and consistent manner in the future.
        if options['x_vals'] in ['coulombic_efficiency', 'energy_efficiency']:
            data['cycles'].loc[mask].plot(x='cycle', y=options['x_vals'], ax=ax, color=colours[0][1], kind='scatter', s=plt.rcParams['lines.markersize']*10, marker=markers[0], edgecolor=plt.rcParams['lines.markeredgecolor'])
            if options['limit']:
                ax.axhline(y=options['limit'], ls='--', c='black')
        else:
            if options['charge']:
                yval = 'charge_' + options['x_vals']
                data['cycles'].loc[mask].plot(x='cycle', y=yval, ax=ax, color=colours[0][0], kind='scatter', s=plt.rcParams['lines.markersize']*10, marker=markers[0], edgecolor=plt.rcParams['lines.markeredgecolor'])
            if options['discharge']:
                yval = 'discharge_' + options['x_vals']
                data['cycles'].loc[mask].plot(x='cycle', y=yval, ax=ax, color=colours[0][1], kind='scatter', s=plt.rcParams['lines.markersize']*10, marker=markers[1], edgecolor=plt.rcParams['lines.markeredgecolor'])
            if options['limit']:
                ax.axhline(y=options['limit'], ls='--', c='black')
        if options['interactive_session_active']:
            update_labels(options, force=True)
        else:
            update_labels(options)
    if options['show_plot']:
        fig, ax = btp.adjust_plot(fig=fig, ax=ax, options=options)
        return data['cycles'], fig, ax
    else:
        return data['cycles'], None, None
def plot_gc_interactive(data, options):
    """ Wrap plot_gc in an ipywidgets UI with toggles for charge/discharge and a
    dropdown for the x-axis quantity, and display it in the notebook.

    The widget is stored in options['widget'] so the caller can access it later.
    Returns None. """
    w = widgets.interactive(btp.ipywidgets_update, func=widgets.fixed(plot_gc), data=widgets.fixed(data), options=widgets.fixed(options),
    charge=widgets.ToggleButton(value=True),
    discharge=widgets.ToggleButton(value=True),
    x_vals=widgets.Dropdown(options=['specific_capacity', 'capacity', 'ions', 'voltage', 'time', 'energy'], value='specific_capacity', description='X-values')
    )
    options['widget'] = w
    display(w)
def plot_cv(data, options):
    """ Plot cyclic voltammetry data.

    Input:
    data: dict; after loading, data['cycles'] is a list of
        (oxidation_df, reduction_df) tuples, one per cycle.
    options: plotting options; any missing entry is filled from default_options below.

    Output:
    (data['cycles'], fig, ax) when a plot is shown, (data['cycles'], None, None) otherwise.
    """
    # Update options
    default_options = {
        'force_reload': False,  # re-read data even if data['cycles'] already exists
        'x_vals': 'voltage', 'y_vals': 'current',
        'which_cycles': 'all',
        'limit': None, # Limit line to be drawn
        'exclude_cycles': [],
        'show_plot': True,
        'charge': True, 'discharge': True,  # here: oxidation / reduction sweeps
        'colours': None,
        'differentiate_charge_discharge': True,
        'gradient': False,
        'interactive': False,
        'interactive_session_active': False,
        'rc_params': {},
        'format_params': {},
        'save_gif': False,
        'save_path': 'animation.gif',
        'fps': 1,  # frames per second of the saved GIF
        'plot_every': 1,  # plot every n-th cycle
        'fig': None,
        'ax': None
    }
    options = aux.update_options(options=options, default_options=default_options)
    # Read data if not already loaded
    if not 'cycles' in data.keys() or options['force_reload']:
        data['cycles'] = ec.io.read_data(data=data, options=options)
    # Update list of cycles to correct (0-based) indices
    update_cycles_list(data=data, options=options)
    colours = generate_colours(options=options)
    if options['show_plot']:
        # Prepare plot (reuse caller-supplied fig/ax if given)
        if not options['fig'] and not options['ax']:
            fig, ax = btp.prepare_plot(options=options)
        else:
            fig, ax = options['fig'], options['ax']
        # One oxidation and/or reduction trace per selected cycle
        for i, cycle in enumerate(options['which_cycles']):
            if options['charge']:
                data['cycles'][cycle][0].plot(x=options['x_vals'], y=options['y_vals'], ax=ax, c=colours[i][0])
            if options['discharge']:
                data['cycles'][cycle][1].plot(x=options['x_vals'], y=options['y_vals'], ax=ax, c=colours[i][1])
        # for i, cycle in enumerate(data['cycles']):
        #     if i in options['which_cycles']:
        #         if options['charge']:
        #             cycle[0].plot(x=options['x_vals'], y=options['y_vals'], ax=ax, c=colours[i][0])
        #         if options['discharge']:
        #             cycle[1].plot(x=options['x_vals'], y=options['y_vals'], ax=ax, c=colours[i][1])
        update_labels(options)
    if options['save_gif'] and not options['interactive_session_active']:
        # Render each cycle to a PNG in tmp/, then assemble the frames into a GIF
        if not os.path.isdir('tmp'):
            os.makedirs('tmp')
        # Scale image to make GIF smaller
        options['format_params']['width'] = 7.5
        options['format_params']['height'] = 3
        options['format_params']['dpi'] = 200
        for i, cycle in enumerate(data['cycles']):
            if i in options['which_cycles']:
                giffig, gifax = btp.prepare_plot(options=options)
                if options['charge']:
                    cycle[0].plot(x=options['x_vals'], y=options['y_vals'], ax=gifax, c=colours[i][0])
                if options['discharge']:
                    cycle[1].plot(x=options['x_vals'], y=options['y_vals'], ax=gifax, c=colours[i][1])
                # Cycle-number annotation; y=3 is a fixed position — TODO confirm it
                # suits CV current ranges
                gifax.text(x=gifax.get_xlim()[1]*0.8, y=3, s=f'{i+1}')
                update_labels(options)
                giffig, gifax = btp.adjust_plot(fig=giffig, ax=gifax, options=options)
                plt.savefig(os.path.join('tmp', str(i+1).zfill(4)+'.png'))
                plt.close()
        img_paths = [os.path.join('tmp', path) for path in os.listdir('tmp') if path.endswith('png')]
        frames = []
        for path in img_paths:
            frame = Image.open(path)
            frames.append(frame)
        frames[0].save(options['save_path'], format='GIF', append_images=frames[1:], save_all=True, duration=(1/options['fps'])*1000, loop=0)
        shutil.rmtree('tmp')
    if options['show_plot']:
        fig, ax = btp.adjust_plot(fig=fig, ax=ax, options=options)
        return data['cycles'], fig, ax
    else:
        return data['cycles'], None, None
def update_labels(options, force=False):
    """Fill in 'xlabel'/'ylabel' and 'xunit'/'yunit' in ``options`` based on
    the quantities selected in 'x_vals'/'y_vals'.

    Keys that are already present are left untouched unless ``force`` is True.
    Mutates ``options`` in place; returns None."""

    def _prettify(column_name):
        # 'specific_capacity' -> 'Specific capacity'
        return column_name.capitalize().replace('_', ' ')

    if force or 'xlabel' not in options:
        options['xlabel'] = _prettify(options['x_vals'])
    if force or 'ylabel' not in options:
        options['ylabel'] = _prettify(options['y_vals'])

    if force or 'xunit' not in options:
        x = options['x_vals']
        if x == 'capacity':
            options['xunit'] = options['units']['capacity']
        elif x == 'specific_capacity':
            cap, mass = options['units']['capacity'], options['units']['mass']
            options['xunit'] = f"{cap} {mass}$^{{-1}}$"
        elif x == 'time':
            options['xunit'] = options['units']['time']
        elif x == 'ions':
            options['xunit'] = None

    # Only voltage currently has a known y-unit; other y quantities are left alone.
    if (force or 'yunit' not in options) and options['y_vals'] == 'voltage':
        options['yunit'] = options['units']['voltage']
def update_cycles_list(data, options: dict) -> None:
    """Normalise options['which_cycles'] into a flat list of 0-based cycle indices.

    Accepted forms: 'all', a list mixing 1-based cycle numbers and (start, end)
    tuples, or a single (start, end) tuple (end < 0 means "up to the last
    cycle"). Cycles in options['exclude_cycles'] are removed and the result is
    thinned by options['plot_every']. Mutates ``options`` in place."""

    which = options['which_cycles']

    if which == 'all':
        cycles = list(range(len(data['cycles'])))

    elif isinstance(which, list):
        cycles = []
        for cycle in which:
            if isinstance(cycle, int):
                cycles.append(cycle - 1)  # 1-based -> 0-based
            elif isinstance(cycle, tuple):
                cycles.extend(i - 1 for i in range(cycle[0], cycle[1] + 1))

    # Tuple is used to define an interval - as elements of tuples can't be
    # assigned, convert it to a list first.
    elif isinstance(which, tuple):
        which = list(which)
        if which[0] <= 0:
            which[0] = 1
        if which[1] < 0:
            # FIX: the old code used len(options['which_cycles']), i.e. the
            # length of the 2-tuple itself (always 2). A negative end should
            # mean "through the last available cycle".
            which[1] = len(data['cycles'])
        cycles = [i - 1 for i in range(which[0], which[1] + 1)]

    else:
        cycles = which

    # FIX: the old code deleted elements while iterating over the same list,
    # which skips the entry immediately after every deletion.
    cycles = [cycle for cycle in cycles if cycle not in options['exclude_cycles']]

    options['which_cycles'] = cycles[::options['plot_every']]
def prettify_gc_plot(fig, ax, options=None):
    """Apply publication-style formatting (labels, ticks, limits, title) to a
    galvanostatic cycling plot.

    fig, ax: matplotlib Figure and Axes to format.
    options: dict of formatting options; any missing entry is filled with a default.
    Returns the formatted (fig, ax)."""

    ##################################################################
    ######################### UPDATE OPTIONS #########################
    ##################################################################

    # Define the required options
    required_options = [
        'columns',
        'xticks', 'yticks',
        'show_major_ticks', 'show_minor_ticks',
        'xlim', 'ylim',
        'hide_x_axis', 'hide_y_axis',
        'positions',
        'x_vals', 'y_vals',
        'xlabel', 'ylabel',
        'units', 'sizes',
        'title'
    ]

    # Define the default options
    default_options = {
        'columns': 1,
        'xticks': None, 'yticks': None,
        'show_major_ticks': [True, True, True, True], 'show_minor_ticks': [True, True, True, True],
        'xlim': None, 'ylim': None,
        'hide_x_axis': False, 'hide_y_axis': False,
        'positions': {'xaxis': 'bottom', 'yaxis': 'left'},
        'x_vals': 'specific_capacity', 'y_vals': 'voltage',
        'xlabel': None, 'ylabel': None,
        'units': {'capacity': 'mAh', 'specific_capacity': r'mAh g$^{-1}$', 'time': 's', 'current': 'mA', 'energy': 'mWh', 'mass': 'g', 'voltage': 'V'},
        'sizes': None,
        'title': None
    }

    aux.update_options(options, required_options, default_options)

    ##################################################################
    ########################## DEFINE SIZES ##########################
    ##################################################################

    # Define the required sizes
    required_sizes = [
        'labels',
        'legend',
        'title',
        'line', 'axes',
        'tick_labels',
        'major_ticks', 'minor_ticks']

    # Define default sizes, scaled by the number of columns the figure spans
    default_sizes = {
        'labels': 30*options['columns'],
        'legend': 30*options['columns'],
        'title': 30*options['columns'],
        'line': 3*options['columns'],
        'axes': 3*options['columns'],
        'tick_labels': 30*options['columns'],
        'major_ticks': 20*options['columns'],
        'minor_ticks': 10*options['columns']
    }

    # Initialise dictionary if it doesn't exist
    if not options['sizes']:
        options['sizes'] = {}

    # Update dictionary with default values where none is supplied
    for size in required_sizes:
        if size not in options['sizes']:
            options['sizes'][size] = default_sizes[size]

    ##################################################################
    ########################## AXIS LABELS ###########################
    ##################################################################

    if not options['xlabel']:
        options['xlabel'] = prettify_labels(options['x_vals']) + ' [{}]'.format(options['units'][options['x_vals']])
    else:
        options['xlabel'] = options['xlabel'] + ' [{}]'.format(options['units'][options['x_vals']])

    if not options['ylabel']:
        options['ylabel'] = prettify_labels(options['y_vals']) + ' [{}]'.format(options['units'][options['y_vals']])
    else:
        options['ylabel'] = options['ylabel'] + ' [{}]'.format(options['units'][options['y_vals']])

    ax.set_xlabel(options['xlabel'], size=options['sizes']['labels'])
    ax.set_ylabel(options['ylabel'], size=options['sizes']['labels'])

    ##################################################################
    ###################### TICK MARKS & LABELS #######################
    ##################################################################

    # show_major_ticks / show_minor_ticks order: [bottom, left, top, right].
    # FIX: 'right' previously reused index [0] (bottom) instead of index [3].
    ax.tick_params(direction='in', which='major', bottom=options['show_major_ticks'][0], left=options['show_major_ticks'][1], top=options['show_major_ticks'][2], right=options['show_major_ticks'][3], length=options['sizes']['major_ticks'], width=options['sizes']['axes'])
    ax.tick_params(direction='in', which='minor', bottom=options['show_minor_ticks'][0], left=options['show_minor_ticks'][1], top=options['show_minor_ticks'][2], right=options['show_minor_ticks'][3], length=options['sizes']['minor_ticks'], width=options['sizes']['axes'])

    # DEFINE AND SET TICK DISTANCES
    from . import unit_tables

    # Define default ticks and scale to desired units
    default_ticks = {
        'specific_capacity': [100 * (unit_tables.capacity()['mAh'].loc[options['units']['capacity']] / unit_tables.mass()['g'].loc[options['units']['mass']]), 50 * (unit_tables.capacity()['mAh'].loc[options['units']['capacity']] / unit_tables.mass()['g'].loc[options['units']['mass']])],
        'capacity': [0.1 * (unit_tables.capacity()['mAh'].loc[options['units']['capacity']]), 0.05 * (unit_tables.capacity()['mAh'].loc[options['units']['capacity']])],
        'voltage': [0.5 * (unit_tables.voltage()['V'].loc[options['units']['voltage']]), 0.25 * (unit_tables.voltage()['V'].loc[options['units']['voltage']])],
        'time': [10 * (unit_tables.time()['h'].loc[options['units']['time']]), 5 * (unit_tables.time()['h'].loc[options['units']['time']])]
    }

    if options['positions']['yaxis'] == 'right':
        ax.yaxis.set_label_position("right")
        ax.yaxis.tick_right()

    # Set default tick distances for x-axis if not specified
    if not options['xticks']:
        major_xtick = default_ticks[options['x_vals']][0]
        minor_xtick = default_ticks[options['x_vals']][1]
    # Otherwise apply user input
    else:
        major_xtick = options['xticks'][0]
        minor_xtick = options['xticks'][1]

    # Set default tick distances for y-axis if not specified
    if not options['yticks']:
        major_ytick = default_ticks[options['y_vals']][0]
        minor_ytick = default_ticks[options['y_vals']][1]
    # Otherwise apply user input
    else:
        major_ytick = options['yticks'][0]
        minor_ytick = options['yticks'][1]

    # Apply values
    ax.xaxis.set_major_locator(MultipleLocator(major_xtick))
    ax.xaxis.set_minor_locator(MultipleLocator(minor_xtick))
    ax.yaxis.set_major_locator(MultipleLocator(major_ytick))
    ax.yaxis.set_minor_locator(MultipleLocator(minor_ytick))

    # SET FONTSIZE OF TICK LABELS
    plt.xticks(fontsize=options['sizes']['tick_labels'])
    plt.yticks(fontsize=options['sizes']['tick_labels'])

    ##################################################################
    ########################## AXES LIMITS ###########################
    ##################################################################

    if options['xlim']:
        plt.xlim(options['xlim'])
    if options['ylim']:
        plt.ylim(options['ylim'])

    ##################################################################
    ############################# TITLE ##############################
    ##################################################################

    if options['title']:
        ax.set_title(options['title'], size=options['sizes']['title'])

    ##################################################################
    ############################# LEGEND #############################
    ##################################################################

    # Remove any auto-generated legend (e.g. from DataFrame.plot)
    if ax.get_legend():
        ax.get_legend().remove()

    return fig, ax
def prettify_labels(label):
    """Return a human-readable axis label for a data column name.

    Known columns get their curated label; any other name falls back to a
    generic prettified form ('some_column' -> 'Some column') instead of
    raising a KeyError."""
    labels_dict = {
        'capacity': 'Capacity',
        'specific_capacity': 'Specific capacity',
        'voltage': 'Voltage',
        'current': 'Current',
        'energy': 'Energy',
        'time': 'Time'
    }
    # FIX: unknown labels used to raise KeyError; fall back gracefully.
    return labels_dict.get(label, label.capitalize().replace('_', ' '))
def generate_colours(options):
    """Build a per-cycle list of [charge_colour, discharge_colour] pairs.

    Reads 'colours', 'differentiate_charge_discharge', 'gradient',
    'gradient_colours' and 'which_cycles' from ``options``. Returns a list with
    one [charge, discharge] entry per plotted cycle.

    NOTE(review): depending on the branch, charge_colour is either a list
    containing one RGB tuple (defaults) or a flat sequence (user input); the
    gradient math below assumes flat RGB components - confirm with callers.
    """
    default_options = {
        'gradient_colours': None,
    }

    aux.update_options(options=options, default_options=default_options)

    # Assign colours from the options dictionary if it is defined, otherwise use standard colours.
    if options['colours']:
        charge_colour = options['colours'][0]
        discharge_colour = options['colours'][1]

        # Tuples are immutable; work with list copies
        if isinstance(charge_colour, tuple):
            charge_colour = list(charge_colour)
        if isinstance(discharge_colour, tuple):
            discharge_colour = list(discharge_colour)

    else:
        charge_colour = [(40/255, 70/255, 75/255)] # Dark Slate Gray #28464B, coolors.co
        discharge_colour = [(239/255, 160/255, 11/255)] # Marigold #EFA00B, coolors.co

    # Use the same colour for charge and discharge when not differentiating
    if not options['differentiate_charge_discharge']:
        discharge_colour = charge_colour

    # If gradient is enabled, find start and end points for each colour
    if options['gradient']:
        if not options['gradient_colours']:
            options['gradient_colours'] = [[None, None], [None, None]]

            # Brighten each component towards white by 75% of its remaining headroom
            add_charge = min([(1-x)*0.75 for x in charge_colour])
            add_discharge = min([(1-x)*0.75 for x in discharge_colour])

            options['gradient_colours'][0][0] = charge_colour
            options['gradient_colours'][0][1] = [x+add_charge for x in charge_colour]
            options['gradient_colours'][1][0] = discharge_colour
            options['gradient_colours'][1][1] = [x+add_discharge for x in discharge_colour]

    # Generate lists of colours
    colours = []

    # If the number of supplied colours does not match the number of cycles,
    # either interpolate a gradient or repeat the single colour pair
    if len(charge_colour) != len(options['which_cycles']):
        if options['gradient']:
            options['number_of_colours'] = len(options['which_cycles'])
            charge_colours = btp.mix_colours(colour1=options['gradient_colours'][0][0], colour2=options['gradient_colours'][0][1], options=options)
            discharge_colours = btp.mix_colours(colour1=options['gradient_colours'][1][0], colour2=options['gradient_colours'][1][1], options=options)

            for chg, dchg in zip(charge_colours, discharge_colours):
                colours.append([chg, dchg])

        else:
            for i in options['which_cycles']:
                colours.append([charge_colour, discharge_colour])

    # Otherwise pair the supplied colours with the cycles one-to-one
    else:
        for chg, dchg in zip(charge_colour, discharge_colour):
            colours.append([chg, dchg])

    return colours
def generate_markers(options):
    """Return [charge_marker, discharge_marker] from options['markers'],
    defaulting to circle ('o') and downward triangle ('v')."""
    user_markers = options['markers']
    if user_markers:
        return [user_markers[0], user_markers[1]]
    return ['o', 'v']
def get_tickmarks(df: pd.DataFrame, ticks: list, value: str, exclude=None):
    """Compute major/minor tick positions and labels for column ``value`` of ``df``.

    ticks: [major_spacing, minor_spacing]. Ticks are laid out from the column
    minimum; major labels are the offsets from the minimum (the last label is
    the full span). ``exclude`` optionally lists major tick labels to drop.
    Returns (major_ticks, major_ticklabels, minor_ticks, minor_ticklabels)."""
    min_val = df[value].min()
    max_val = df[value].max()
    span = max_val - min_val

    # Major ticks every ticks[0] from the minimum, plus one at the maximum
    major_ticks = [np.round((min_val + ticks[0]*i), 2) for i in range(int(np.floor(span/ticks[0]))+1)]
    major_ticks.append(np.round(max_val, 2))
    major_ticks = aux.get_unique(major_ticks)

    # Labels are offsets from the minimum; the last one marks the full span
    major_ticklabels = [i*ticks[0] for i in range(len(major_ticks)-1)]
    major_ticklabels.append(np.round(span, 1))

    if exclude:
        # FIX: the old code deleted from the lists while enumerating them,
        # which skips the element following every removal. Filter instead.
        kept = [(tick, label) for tick, label in zip(major_ticks, major_ticklabels) if label not in exclude]
        major_ticks = [tick for tick, _ in kept]
        major_ticklabels = [label for _, label in kept]

    # Minor ticks every ticks[1], skipping positions already used by major ticks
    minor_ticks = [np.round((min_val + ticks[1]*i), 2) for i in range(int(np.floor(span/ticks[1]))+1) if np.round((min_val + ticks[1]*i), 2) not in major_ticks]
    minor_ticklabels = [np.round(tick - min_val, 2) for tick in minor_ticks]

    return major_ticks, major_ticklabels, minor_ticks, minor_ticklabels
def assign_tickmarks(dfs: list, options, fig, ax, exclude=None):
    """Collect x-tickmarks from every plotted charge/discharge cycle and apply
    them to ``ax``.

    dfs: list of [charge_df, discharge_df] pairs, indexed by the cycle numbers
    in options['which_cycles'].
    exclude: optional per-cycle list of [charge_exclusions, discharge_exclusions]
    passed through to get_tickmarks.
    Returns (fig, ax).

    NOTE(review): the minor tick labels returned by get_tickmarks are collected
    into ``minor_ticklabel`` but never applied - confirm this is intentional.
    """
    major_ticks, major_ticklabels, minor_ticks = [], [], []

    # Default to no exclusions for any cycle
    if not exclude:
        exclude = [[None, None] for i in range(len(options['which_cycles']))]

    for i, cycle in enumerate(options['which_cycles']):
        #Get ticks from charge cycle
        major_tick, major_ticklabel, minor_tick, minor_ticklabel = ec.plot.get_tickmarks(dfs[cycle][0], ticks=options['x_tick_locators'], value=options['x_vals'], exclude=exclude[i][0])
        major_ticks += major_tick
        major_ticklabels += major_ticklabel
        minor_ticks += minor_tick

        # Get ticks from discharge cycle
        major_tick, major_ticklabel, minor_tick, minor_ticklabel = ec.plot.get_tickmarks(dfs[cycle][1], ticks=[1, 0.25], value='ions', exclude=exclude[i][1])
        major_ticks += major_tick
        major_ticklabels += major_ticklabel
        minor_ticks += minor_tick

    # Apply the combined ticks to the Axes
    ax.set_xticks(major_ticks, minor=False)
    ax.set_xticklabels(major_ticklabels)
    ax.set_xticks(minor_ticks, minor=True)

    return fig, ax

View file

@ -0,0 +1,53 @@
import pandas as pd
def time():
# Define matrix for unit conversion for time
time = {'h': [1, 60, 3600, 3600000], 'min': [1/60, 1, 60, 60000], 's': [1/3600, 1/60, 1, 1000], 'ms': [1/3600000, 1/60000, 1/1000, 1]}
time = pd.DataFrame(time)
time.index = ['h', 'min', 's', 'ms']
return time
def current():
    """Unit-conversion table for current.

    ``current()[a].loc[b]`` is the number of ``b`` units in one ``a`` unit,
    e.g. ``current()['A'].loc['mA'] == 1000``."""
    units = ['A', 'mA', 'uA']
    conversions = {
        'A':  [1, 1000, 1000000],
        'mA': [1/1000, 1, 1000],
        'uA': [1/1000000, 1/1000, 1],
    }
    return pd.DataFrame(conversions, index=units)
def voltage():
# Define matrix for unit conversion for voltage
voltage = {'V': [1, 1000, 1000000], 'mV': [1/1000, 1, 1000], 'uV': [1/1000000, 1/1000, 1]}
voltage = pd.DataFrame(voltage)
voltage.index = ['V', 'mV', 'uV']
return voltage
def capacity():
    """Unit-conversion table for (electrochemical) capacity.

    ``capacity()[a].loc[b]`` is the number of ``b`` units in one ``a`` unit,
    e.g. ``capacity()['Ah'].loc['mAh'] == 1000``."""
    units = ['Ah', 'mAh', 'uAh']
    conversions = {
        'Ah':  [1, 1000, 1000000],
        'mAh': [1/1000, 1, 1000],
        'uAh': [1/1000000, 1/1000, 1],
    }
    return pd.DataFrame(conversions, index=units)
def mass():
    """Unit-conversion table for mass.

    ``mass()[a].loc[b]`` is the number of ``b`` units in one ``a`` unit,
    e.g. ``mass()['kg'].loc['g'] == 1000``."""
    units = ['kg', 'g', 'mg', 'ug']
    conversions = {
        'kg': [1, 1000, 1000000, 1000000000],
        'g':  [1/1000, 1, 1000, 1000000],
        'mg': [1/1000000, 1/1000, 1, 1000],
        'ug': [1/1000000000, 1/1000000, 1/1000, 1],
    }
    return pd.DataFrame(conversions, index=units)
def energy():
    """Unit-conversion table for energy.

    ``energy()[a].loc[b]`` is the number of ``b`` units in one ``a`` unit,
    e.g. ``energy()['kWh'].loc['Wh'] == 1000``."""
    energy = {
        'kWh': [1, 1000, 1000000],
        'Wh': [1/1000, 1, 1000],
        # FIX: the kWh-per-mWh factor was 1/100000; one mWh is 1/1000000 kWh,
        # matching the symmetric pattern of the sibling conversion tables.
        'mWh': [1/1000000, 1/1000, 1],
    }
    energy = pd.DataFrame(energy)
    energy.index = ['kWh', 'Wh', 'mWh']
    return energy

0
nafuma/pdf/__init__.py Normal file
View file

520
nafuma/plotting.py Normal file
View file

@ -0,0 +1,520 @@
import nafuma.auxillary as aux
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator)
from mpl_toolkits.axes_grid.inset_locator import (inset_axes, InsetPosition, BboxPatch, BboxConnector)
from matplotlib.transforms import TransformedBbox
from matplotlib.patches import Rectangle
import importlib
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D
import matplotlib.lines as mlines
import matplotlib.markers as mmarkers
import itertools
from PIL import Image
import os
import numpy as np
def prepare_plot(options={}):
    ''' A general function to prepare a plot based on contents of options['rc_params'] and options['format_params'].

    rc_params is a dictionary with keyval-pairs corresponding to rcParams in matplotlib, to give the user full control over this. Please consult the matplotlib-documentation.
    format_params determines the size, aspect ratio, resolution etc. of the figure. Should be modified to conform with any requirements from a journal.

    Returns (fig, ax) for a single Axes, or (fig, axes) for a grid when nrows/ncols exceed 1.'''

    rc_params = options.get('rc_params', {})
    format_params = options.get('format_params', {})

    default_format_params = {
        'single_column_width': 8.3,
        'double_column_width': 17.1,
        'column_type': 'single',
        'width_ratio': '1:1',
        'aspect_ratio': '1:1',
        'width': None,
        'height': None,
        'compress_width': 1,
        'compress_height': 1,
        'upscaling_factor': 1.0,
        'dpi': 600,
        'nrows': 1,
        'ncols': 1,
        'grid_ratio_height': None,
        'grid_ratio_width': None
    }

    format_params = aux.update_options(options=format_params, default_options=default_format_params)

    # Reset run commands
    plt.rcdefaults()

    # Update run commands if any is passed (will pass an empty dictionary if not passed)
    update_rc_params(rc_params)

    # Derive figure dimensions from journal column settings unless given explicitly
    if not format_params['width']:
        format_params['width'] = determine_width(format_params=format_params)
    if not format_params['height']:
        format_params['height'] = determine_height(format_params=format_params, width=format_params['width'])

    format_params['width'], format_params['height'] = scale_figure(format_params=format_params, width=format_params['width'], height=format_params['height'])

    if format_params['nrows'] == 1 and format_params['ncols'] == 1:
        fig, ax = plt.subplots(figsize=(format_params['width'], format_params['height']), dpi=format_params['dpi'])
        return fig, ax

    else:
        if not format_params['grid_ratio_height']:
            format_params['grid_ratio_height'] = [1 for i in range(format_params['nrows'])]
        if not format_params['grid_ratio_width']:
            # FIX: this key was misspelled 'grid-ratio_width', so the default
            # width ratios were never assigned and plt.subplots received None.
            format_params['grid_ratio_width'] = [1 for i in range(format_params['ncols'])]

        fig, axes = plt.subplots(nrows=format_params['nrows'], ncols=format_params['ncols'], figsize=(format_params['width'], format_params['height']),
                                 gridspec_kw={'height_ratios': format_params['grid_ratio_height'], 'width_ratios': format_params['grid_ratio_width']},
                                 facecolor='w', dpi=format_params['dpi'])
        return fig, axes
def adjust_plot(fig, ax, options):
    ''' A general function to adjust plot according to contents of the options-dictionary.

    fig, ax: the matplotlib Figure and Axes to adjust.
    options: dictionary of display options; missing keys are filled with the
    defaults below. Returns the adjusted (fig, ax). '''

    default_options = {
        'plot_kind': None, # defaults to None, but should be utilised when requiring special formatting for a particular plot
        'xlabel': None, 'ylabel': None,
        'xunit': None, 'yunit': None,
        'xlabel_pad': 4.0, 'ylabel_pad': 4.0,
        'hide_x_labels': False, 'hide_y_labels': False, # Whether the main labels on the x- and/or y-axes should be hidden
        'hide_x_ticklabels': False, 'hide_y_ticklabels': False, # Whether ticklabels on the x- and/or y-axes should be hidden
        'hide_x_ticks': False, 'hide_y_ticks': False, # Whether the ticks on the x- and/or y-axes should be hidden
        'x_tick_locators': None, 'y_tick_locators': None, # The major and minor tick locators for the x- and y-axes
        'rotation_x_ticks': 0, 'rotation_y_ticks': 0, # Degrees the x- and/or y-ticklabels should be rotated
        'xticks': None, 'yticks': None, # Custom definition of the xticks and yticks. This is not properly implemented now.
        'xlim': None, 'ylim': None, # Limits to the x- and y-axes
        'xlim_reset': False, 'ylim_reset': False, # For use in setting limits of backgrounds - forcing reset of xlim and ylim, useful when more axes
        'title': None, # Title of the plot
        'backgrounds': [],
        'legend': False, 'legend_position': ['lower center', (0.5, -0.1)], 'legend_ncol': 1, # Toggles on/off legend. Specifices legend position and the number of columns the legend should appear as.
        'subplots_adjust': {'left': None, 'right': None, 'top': None, 'bottom': None, 'wspace': None, 'hspace': None}, # Adjustment of the Axes-object within the Figure-object. Fraction of the Figure-object the left, bottom, right and top edges of the Axes-object will start.
        'marker_edges': None,
        'text': None # Text to show in the plot. Should be a list where the first element is the string, and the second is a tuple with x- and y-coordinates. Could also be a list of lists to show more strings of text.
    }

    options = aux.update_options(options=options, default_options=default_options)

    # Set labels on x- and y-axes; the unit (if any) is appended in brackets
    if not options['hide_y_labels']:
        if not options['yunit']:
            ax.set_ylabel(f'{options["ylabel"]}', labelpad=options['ylabel_pad'])
        else:
            ax.set_ylabel(f'{options["ylabel"]} [{options["yunit"]}]', labelpad=options['ylabel_pad'])
    else:
        ax.set_ylabel('')

    if not options['hide_x_labels']:
        if not options['xunit']:
            ax.set_xlabel(f'{options["xlabel"]}', labelpad=options['xlabel_pad'])
        else:
            ax.set_xlabel(f'{options["xlabel"]} [{options["xunit"]}]', labelpad=options['xlabel_pad'])
    else:
        ax.set_xlabel('')

    # Set multiple locators
    if options['y_tick_locators']:
        ax.yaxis.set_major_locator(MultipleLocator(options['y_tick_locators'][0]))
        ax.yaxis.set_minor_locator(MultipleLocator(options['y_tick_locators'][1]))
    if options['x_tick_locators']:
        ax.xaxis.set_major_locator(MultipleLocator(options['x_tick_locators'][0]))
        ax.xaxis.set_minor_locator(MultipleLocator(options['x_tick_locators'][1]))

    # FIXME THIS NEEDS REWORK FOR IT TO FUNCTION PROPERLY!
    #if options['xticks']:
    #    ax.set_xticks(np.arange(plot_data['start'], plot_data['end']+1))
    #    ax.set_xticklabels(options['xticks'])
    # else:
    #    ax.set_xticks(np.arange(plot_data['start'], plot_data['end']+1))
    #    ax.set_xticklabels([x/2 for x in np.arange(plot_data['start'], plot_data['end']+1)])

    # Hide x- and y- ticklabels
    if options['hide_y_ticklabels']:
        ax.tick_params(axis='y', direction='in', which='both', labelleft=False, labelright=False)
    else:
        # NOTE(review): rotating the x-ticklabels inside the y-branch looks
        # suspicious - confirm this is intended.
        plt.xticks(rotation=options['rotation_x_ticks'])
        #ax.set_xticklabels(ax.get_xticks(), rotation = options['rotation_x_ticks'])

    if options['hide_x_ticklabels']:
        ax.tick_params(axis='x', direction='in', which='both', labelbottom=False, labeltop=False)
    else:
        pass
        #ax.set_yticklabels(ax.get_yticks(), rotation = options['rotation_y_ticks'])

    # Hide x- and y-ticks:
    if options['hide_y_ticks']:
        ax.tick_params(axis='y', direction='in', which='both', left=False, right=False)
    else:
        ax.tick_params(axis='y', direction='in', which='both', left=True, right=True)

    if options['hide_x_ticks']:
        ax.tick_params(axis='x', direction='in', which='both', bottom=False, top=False)
    else:
        ax.tick_params(axis='x', direction='in', which='both', bottom=True, top=True)

    # Set title
    if options['title']:
        ax.set_title(options['title'], fontsize=plt.rcParams['font.size'])

    #### DRAW/REMOVE LEGEND ####
    # Options:
    # 'legend_position': (default ['lower center', (0.5, -0.1)]) - Follows matplotlib's way of specifying legend position
    # 'legend_ncol': (default 1) # Number of columns to write the legend in
    # Also requires options to contain values in colours, markers and labels. (No defaults)
    if ax.get_legend():
        ax.get_legend().remove()

    if options['legend']:
        # Make palette and linestyles from original parameters
        if not options['colours']:
            colours = generate_colours(palettes=options['palettes'])
        else:
            colours = itertools.cycle(options['colours'])

        markers = itertools.cycle(options['markers'])

        # Create legend
        active_markers = []
        active_labels = []

        for label in options['labels']:
            # Discard next linestyle and colour if label is _
            if label == '_':
                _ = next(colours)
                _ = next(markers)
            else:
                marker = next(markers)
                if not marker:
                    # No marker: legend entry is a plain coloured line
                    active_markers.append(mlines.Line2D([], [], color=next(colours)))
                else:
                    # Marker only: line colour is fully transparent white
                    active_markers.append(mlines.Line2D([], [], markerfacecolor=next(colours), markeredgecolor=options['marker_edges'], markersize=10, color=(1,1,1,0), marker=marker))
                active_labels.append(label)

        ax.legend(active_markers, active_labels, frameon=False, loc=options['legend_position'][0], bbox_to_anchor=options['legend_position'][1], ncol=options['legend_ncol'])
        #fig.legend(handles=patches, loc=options['legend_position'][0], bbox_to_anchor=options['legend_position'][1], frameon=False)

    # Adjust where the axes start within the figure. Default value is 10% in from the left and bottom edges. Used to make room for the plot within the figure size (to avoid using bbox_inches='tight' in the savefig-command, as this screws with plot dimensions)
    plt.subplots_adjust(**options['subplots_adjust'])

    # If limits for x- and y-axes is passed, sets these.
    if options['xlim'] is not None:
        ax.set_xlim(options['xlim'])
    if options['ylim'] is not None:
        ax.set_ylim(options['ylim'])

    #### DRAW BACKGROUNDS ####
    # options['backgrounds'] should contain a dictionary or a list of dictionaries. Options to be specified are listed below.
    if options['backgrounds']:
        if not isinstance(options['backgrounds'], list):
            options['backgrounds'] = [options['backgrounds']]

        for background in options['backgrounds']:
            default_background_options = {
                'colour': (0,0,0),
                'alpha': 0.2,
                'xlim': list(ax.get_xlim()),
                'ylim': list(ax.get_ylim()),
                'zorder': 0,
                'edgecolour': None,
                'linewidth': None
            }

            background = aux.update_options(options=background, default_options=default_background_options)

            # Force the background to span the current view when requested
            if options['xlim_reset']:
                background['xlim'] = list(ax.get_xlim())
            if options['ylim_reset']:
                background['ylim'] = list(ax.get_ylim())

            # Fill in missing limits from the current view.
            # NOTE(review): these are falsy checks, so an explicit limit of 0
            # is also replaced - confirm that is acceptable.
            if not background['xlim'][0]:
                background['xlim'][0] = ax.get_xlim()[0]
            if not background['xlim'][1]:
                background['xlim'][1] = ax.get_xlim()[1]
            if not background['ylim'][0]:
                background['ylim'][0] = ax.get_ylim()[0]
            if not background['ylim'][1]:
                background['ylim'][1] = ax.get_ylim()[1]

            ax.add_patch(Rectangle(
                xy=(background['xlim'][0], background['ylim'][0]), # Anchor point
                width=background['xlim'][1]-background['xlim'][0], # Width of background
                height=background['ylim'][1]-background['ylim'][0], # Height of background
                zorder=background['zorder'], # Placement in stack
                facecolor=(background['colour'][0], background['colour'][1], background['colour'][2], background['alpha']), # Colour
                edgecolor=background['edgecolour'], # Edgecolour
                linewidth=background['linewidth']) # Linewidth
            )

    # Add custom text
    if options['text']:
        # If only a single element, put it into a list so the below for-loop works.
        if isinstance(options['text'][0], str):
            options['text'] = [options['text']]

        # Plot all passed texts
        for text in options['text']:
            ax.text(x=text[1][0], y=text[1][1], s=text[0])

    return fig, ax
def ipywidgets_update(func, data, options={}, **kwargs):
    ''' A general ipywidgets update function that can be passed to ipywidgets.interactive. To use this, you can run:

    import ipywidgets as widgets
    import beamtime.plotting as btp

    w = widgets.interactive(btp.ipywidgets_update, func=widgets.fixed(my_func), plot_data=widgets.fixed(plot_data), options=widgets.fixed(options), key1=widget1, key2=widget2, key3=widget3)

    where key1, key2, key3 etc. are the values in the options-dictionary you want widget control of, and widget1, widget2, widget3 etc. are widgets to control these values, e.g. widgets.IntSlider(value=1, min=0, max=10)
    '''
    # Push every widget value into the options-dictionary, then re-plot
    options.update(kwargs)
    func(data=data, options=options)
def determine_width(format_params):
    ''' Compute the figure width in inches from the journal column settings:
    'column_type' ('single'/'double'), the corresponding column width in cm,
    and the "W:H"-style 'width_ratio' string. '''
    CM_PER_INCH = 0.3937008  # cm to inch

    if format_params['column_type'] == 'single':
        base_width = format_params['single_column_width']
    elif format_params['column_type'] == 'double':
        base_width = format_params['double_column_width']

    base_width *= CM_PER_INCH

    numerator, denominator = (float(num) for num in format_params['width_ratio'].split(':'))
    return base_width * numerator / denominator
def determine_height(format_params, width):
    ''' Compute the figure height from its width and the "W:H"-style
    'aspect_ratio' string in format_params. '''
    w_part, h_part = (float(num) for num in format_params['aspect_ratio'].split(':'))
    return width / (w_part / h_part)
def scale_figure(format_params, width, height):
    ''' Apply the global 'upscaling_factor' and the per-axis
    'compress_width'/'compress_height' factors to the figure dimensions.
    Returns the scaled (width, height). '''
    scale = format_params['upscaling_factor']
    return (width * scale * format_params['compress_width'],
            height * scale * format_params['compress_height'])
def update_rc_params(rc_params):
    ''' Push the given matplotlib run-command overrides into plt.rcParams.
    Does nothing when rc_params is empty or None. '''
    if not rc_params:
        return
    for key, value in rc_params.items():
        plt.rcParams.update({key: value})
def generate_colours(palettes, kind=None):
    ''' Build an endless colour iterator.

    kind='single': cycle directly over the given sequence of colours.
    Otherwise each entry of ``palettes`` is a (submodule, palette_name) pair
    from palettable.colorbrewer; all their mpl_colors are concatenated and
    cycled. '''
    if kind == 'single':
        return itertools.cycle(palettes)

    # Collect every colour from every requested palette, then cycle over them.
    collected = []
    for submodule, palette_name in palettes:
        mod = importlib.import_module("palettable.colorbrewer.%s" % submodule)
        collected = collected + getattr(mod, palette_name).mpl_colors

    return itertools.cycle(collected)
def prepare_inset_axes(parent_ax, options):
    """Create an inset Axes inside ``parent_ax`` and optionally draw connector
    lines between the inset and the region it magnifies.

    options keys and defaults are listed in default_options below; positions
    follow matplotlib's InsetPosition/axes conventions.
    Returns the new inset Axes."""
    default_options = {
        'hide_inset_x_labels': False, # Whether x labels should be hidden
        'hide_inset_x_ticklabels': False,
        'hide_inset_x_ticks': False,
        'rotation_inset_x_ticks': 0,
        'hide_inset_y_labels': False, # whether y labels should be hidden
        'hide_inset_y_ticklabels': False,
        'hide_inset_y_ticks': False,
        'rotation_inset_y_ticks': 0,
        'inset_x_tick_locators': [100, 50], # Major and minor tick locators
        'inset_y_tick_locators': [10, 5],
        'inset_position': [0.1,0.1,0.3,0.3],
        'inset_bounding_box': [0,0,0.1, 0.1],
        'inset_marks': [None, None],
        'legend_position': ['upper center', (0.20, 0.90)], # the position of the legend passed as arguments to loc and bbox_to_anchor respectively,
        'connecting_corners': [1,2]
    }

    options = aux.update_options(options=options, required_options=default_options.keys(), default_options=default_options)

    # Create a set of inset Axes: these should fill the bounding box allocated to
    # them.
    inset_ax = plt.axes(options["inset_bounding_box"])

    # Manually set the position and relative size of the inset axes within ax1
    ip = InsetPosition(parent_ax, options['inset_position'])
    inset_ax.set_axes_locator(ip)

    # Two corners: both connector lines share the same corner pair.
    # Four corners: each connector line gets its own pair (drawn dashed).
    if options['connecting_corners'] and len(options["connecting_corners"]) == 2:
        connect_inset(parent_ax, inset_ax, loc1a=options['connecting_corners'][0], loc2a=options['connecting_corners'][1], loc1b=options['connecting_corners'][0], loc2b=options['connecting_corners'][1], fc='none', ec='black')
    elif options['connecting_corners'] and len(options['connecting_corners']) == 4:
        connect_inset(parent_ax, inset_ax, loc1a=options['connecting_corners'][0], loc2a=options['connecting_corners'][1], loc1b=options['connecting_corners'][2], loc2b=options['connecting_corners'][3], fc='none', ec='black', ls='--')

    # Apply major/minor tick spacing to the inset axes
    inset_ax.xaxis.set_major_locator(MultipleLocator(options['inset_x_tick_locators'][0]))
    inset_ax.xaxis.set_minor_locator(MultipleLocator(options['inset_x_tick_locators'][1]))
    inset_ax.yaxis.set_major_locator(MultipleLocator(options['inset_y_tick_locators'][0]))
    inset_ax.yaxis.set_minor_locator(MultipleLocator(options['inset_y_tick_locators'][1]))

    return inset_ax
def connect_inset(parent_axes, inset_axes, loc1a=1, loc1b=1, loc2a=2, loc2b=2, **kwargs):
    ''' Draw a rectangle on ``parent_axes`` around the region shown by
    ``inset_axes`` and connect it to the inset with two corner lines.

    loc1a/loc1b and loc2a/loc2b are the BboxConnector corner codes for the
    two connector lines; extra kwargs are forwarded to the patches.
    Returns (rect_patch, connector1, connector2). '''
    rect = TransformedBbox(inset_axes.viewLim, parent_axes.transData)

    frame = BboxPatch(rect, fill=False, **kwargs)
    parent_axes.add_patch(frame)

    connector1 = BboxConnector(inset_axes.bbox, rect, loc1=loc1a, loc2=loc1b, **kwargs)
    inset_axes.add_patch(connector1)
    connector1.set_clip_on(False)

    connector2 = BboxConnector(inset_axes.bbox, rect, loc1=loc2a, loc2=loc2b, **kwargs)
    inset_axes.add_patch(connector2)
    connector2.set_clip_on(False)

    return frame, connector1, connector2
def make_animation(paths, options={}):
    ''' Assemble the image files at ``paths`` into an animated GIF.

    options: 'save_folder' (default '.'), 'save_filename' (default
    'animation.gif') and 'fps' (default 5, converted to per-frame duration). '''
    default_options = {
        'save_folder': '.',
        'save_filename': 'animation.gif',
        'fps': 5
    }

    options = aux.update_options(options=options, default_options=default_options)

    frames = [Image.open(path) for path in paths]

    destination = os.path.join(options['save_folder'], options['save_filename'])
    frames[0].save(destination, format='GIF', append_images=frames[1:], save_all=True, duration=(1/options['fps'])*1000, loop=0)
def mix_colours(colour1, colour2, options):
    ''' Linearly interpolate between two colours, component-wise.

    Produces options['number_of_colours'] colours (default 10) unless explicit
    interpolation 'weights' are given; weight 0 yields colour1 and weight 1
    yields colour2. Components are rounded to 5 decimals. '''
    default_options = {
        'number_of_colours': 10,
        'weights': None
    }

    options = aux.update_options(options=options, default_options=default_options)

    if not options['weights']:
        n = options['number_of_colours']
        options['weights'] = [i/n for i in range(n)]

    mixed = []
    for weight in options['weights']:
        blended = [np.round((1-weight)*c1 + weight*c2, 5) for c1, c2 in zip(colour1, colour2)]
        mixed.append(blended)

    return mixed

1
nafuma/ppms/__init__.py Normal file
View file

@ -0,0 +1 @@
from . import io, plot

129
nafuma/ppms/io.py Normal file
View file

@ -0,0 +1,129 @@
import pandas as pd
import numpy as np
import nafuma.auxillary as aux
def read_data(path, options=None):
    ''' Read a PPMS DC magnetometry export into a DataFrame.

    path: path to the instrument file; the data table starts after a '[Data]' marker.
    options: optional dict. 'split' (default False) returns a list of DataFrames
    cut at every commented row. If both 'molar_mass' and 'sample_mass' are
    supplied, molar magnetisation, Bohr magneton and inverse susceptibility
    columns are added.
    Returns a DataFrame, or a list of DataFrames when 'split' is enabled. '''

    # FIX: avoid a shared mutable default dict; aux.update_options could leak
    # option values between calls through the default object.
    if options is None:
        options = {}

    default_options = {
        'split': False,
    }

    options = aux.update_options(options=options, default_options=default_options)

    # Skip the metadata header preceding the '[Data]' marker
    index = find_start(path)
    df = pd.read_csv(path, skiprows=index+1)

    df = df[['Comment', 'Time Stamp (sec)', 'Temperature (K)', 'Magnetic Field (Oe)',
             'DC Moment (emu)', 'DC Std. Err. (emu)', 'DC Quad. Moment (emu)',
             'AC=1 DC=2 Locate=3', 'Max. Field (Oe)', 'Pressure (Torr)', 'Temp. Status (code)',
             ]]

    new_columns = ['Comment', 'Time', 'Temperature', 'Magnetic_Field',
                   'DC_Moment', 'DC_Std_Err', 'DC_Quad_Moment',
                   'Status', 'Max_Field', 'Pressure', 'Temperature_Status']

    df.columns = new_columns

    df[['Temperature', 'Magnetic_Field', 'DC_Moment', 'DC_Std_Err', 'DC_Quad_Moment', 'Max_Field', 'Pressure']] = df[['Temperature', 'Magnetic_Field', 'DC_Moment', 'DC_Std_Err', 'DC_Quad_Moment', 'Max_Field', 'Pressure']].astype(float)

    # Discard noisy points
    df = df.loc[df['DC_Std_Err'] < 0.001]

    # Derived quantities require both masses to be given
    if all([option in options.keys() for option in ['molar_mass', 'sample_mass']]):
        df = calculate_emu_per_mol_oe(df, options)
        df = calculate_bohr_magnetons(df, options)
        df = calculate_chi_inverse(df, options)

    if options['split']:
        # Rows carrying a comment mark the start of each measurement segment.
        # NOTE(review): mask.index holds row labels (the index is never reset
        # after the DC_Std_Err filter) but is used positionally with iloc -
        # verify the slicing against a real file.
        mask = df.loc[df['Comment'].notna()]
        dfs = []
        for i in range(1, len(mask.index)):
            dfs.append(df.iloc[mask.index[i-1]:mask.index[i]])
        return dfs

    return df
def read_hysteresis(path):
    ''' Read a hysteresis measurement from a PPMS output file into a DataFrame.

    Keeps a fixed subset of columns, renames them to short identifiers,
    casts the numeric ones to float and discards rows whose DC standard
    error is 0.001 emu or larger. '''
    header_offset = find_start(path)
    df = pd.read_csv(path, skiprows=header_offset + 1)
    keep = ['Comment', 'Time Stamp (sec)', 'Temperature (K)', 'Magnetic Field (Oe)',
            'DC Moment (emu)', 'DC Std. Err. (emu)', 'DC Quad. Moment (emu)',
            'AC=1 DC=2 Locate=3', 'Max. Field (Oe)', 'Pressure (Torr)', 'Temp. Status (code)',
            ]
    df = df[keep]
    df.columns = ['Comment', 'Time', 'Temperature', 'Magnetic_Field',
                  'DC_Moment', 'DC_Std_Err', 'DC_Quad_Moment',
                  'Status', 'Max_Field', 'Pressure', 'Temperature_Status']
    numeric = ['Temperature', 'Magnetic_Field', 'DC_Moment', 'DC_Std_Err',
               'DC_Quad_Moment', 'Max_Field', 'Pressure']
    df[numeric] = df[numeric].astype(float)
    # Filter out points with a large uncertainty
    return df.loc[df['DC_Std_Err'] < 0.001]
def find_start(path):
    ''' Return the zero-based line offset of the '[Data]' marker in a PPMS
    output file. Gives up after reading more than 1000 lines so a file
    without the marker cannot hang the loop (the running count is returned
    in that case). '''
    with open(path, 'r') as f:
        offset = 0
        current = f.readline()
        # First line already read; each further read advances the offset
        while '[Data]' not in current:
            current = f.readline()
            offset += 1
            if offset > 1000:
                break
    return offset
def calculate_emu_per_mol_oe(df, options={}):
    ''' Add molar magnetisation columns to a PPMS DataFrame.

    Requires options['sample_mass'] (in mg) and options['molar_mass'] (per
    mol, same mass unit as grams). Adds 'DC_Moment_emu_per_mol' and the
    field-normalised 'DC_Moment_emu_per_mol_oe'. Returns the same DataFrame. '''
    mass_g = options['sample_mass'] / 1000  # sample mass is given in mg
    moles = mass_g / options['molar_mass']
    df['DC_Moment_emu_per_mol'] = df['DC_Moment'] / moles
    df['DC_Moment_emu_per_mol_oe'] = df['DC_Moment'] / (moles * df['Magnetic_Field'])
    return df
def calculate_bohr_magnetons(df, options={}):
    ''' Convert the molar moment to Bohr magnetons per formula unit.

    Only implemented for options['units'] == 'cgs' (the default); for any
    other unit system the DataFrame is returned unchanged. '''
    default_options = {
        'units': 'cgs',
    }
    options = aux.update_options(options=options, default_options=default_options)
    if options['units'] == 'cgs':
        # 1.07828e20 mu_B per emu, divided by Avogadro's number
        # (as approximated here, 6.023e23)
        mu_b_per_emu = 1.07828E20
        avogadro = 6.023E23
        df['bohr_magnetons'] = df['DC_Moment_emu_per_mol'] * mu_b_per_emu / avogadro
    return df
def calculate_chi_inverse(df, options={}):
    ''' Add an inverse-susceptibility column ('chi_inverse').

    NOTE(review): this inverts the 'DC_Moment_emu_per_mol' column, not the
    field-normalised 'DC_Moment_emu_per_mol_oe' one — confirm that is the
    intended definition of chi. '''
    df['chi_inverse'] = df['DC_Moment_emu_per_mol'].rdiv(1)
    return df

0
nafuma/ppms/plot.py Normal file
View file

0
nafuma/test/__init__.py Normal file
View file

9
nafuma/test/pytest.ini Normal file
View file

@ -0,0 +1,9 @@
# pytest.ini
[pytest]
minversion = 6.0
testpaths =
.
filterwarnings =
ignore::DeprecationWarning

View file

@ -0,0 +1,76 @@
import nafuma.auxillary as aux
import os
def test_swap_values():
    ''' swap_values should exchange the values stored under two keys. '''
    test_dict = {'test1': 1, 'test2': 2}
    key1 = 'test1'
    key2 = 'test2'
    oldval1 = test_dict[key1]
    oldval2 = test_dict[key2]
    # NOTE(review): the returned dict is unused; the assert relies on
    # swap_values mutating test_dict in place — confirm that is the contract.
    new_dict = aux.swap_values(options=test_dict, key1=key1, key2=key2)
    assert (test_dict[key1] == oldval2) and (test_dict[key2] == oldval1)
def test_ceil() -> None:
    ''' ceil rounds up to the nearest multiple of the given step. '''
    assert aux.ceil(1.05, 0.5) == 1.5
    assert aux.ceil(1.05, 1) == 2.0
    assert aux.ceil(1.1, 0.2) == 1.2
def test_floor() -> None:
    ''' floor rounds down to the nearest multiple of the given step. '''
    assert aux.floor(2.02, 1) == 2.0
    assert aux.floor(2.02, 0.01) == 2.02
    assert aux.floor(2.013, 0.01) == 2.01
def test_options() -> None:
    ''' update_options fills keys missing from options with the defaults. '''
    options = {}
    default_options = {
        'test1': 1,
        'test2': 2,
        'test3': 3,
        'test4': 4,
        'test5': 5,
    }
    options = aux.update_options(options=options, default_options=default_options)
    assert options['test1'] == default_options['test1']
def test_save_options() -> None:
    ''' save_options writes the options dict to the given path. '''
    options = {'test1': 1, 'test2': 2}
    path = 'tmp.dat'
    aux.save_options(options, path)
    assert os.path.isfile(path)
    os.remove(path)
def test_load_options() -> None:
    ''' load_options reads back a dict previously written by save_options. '''
    options = {'test1': 1, 'test2': 2}
    path = 'tmp.dat'
    aux.save_options(options, path)
    loaded_options = aux.load_options(path)
    assert (loaded_options['test1'] == 1) and (loaded_options['test2'] == 2)
    os.remove(path)

View file

@ -0,0 +1,181 @@
import nafuma.plotting as btp
from cycler import cycler
import itertools
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
def test_generate_colours() -> None:
    ''' generate_colours returns an endless colour cycle; an 8-colour
    palette should repeat with period 8. '''
    assert type(btp.generate_colours('black', kind='single')) == itertools.cycle
    palettes = [('qualitative', 'Dark2_8')]
    colour_cycle = btp.generate_colours(palettes)
    assert type(colour_cycle) == itertools.cycle
    # Test that it actually loaded 8 colours when given a set of 8 colours to
    # cycle through: the first colour must reappear exactly at index 8.
    # NOTE(review): same_colour is never used — candidate for removal.
    same_colour = None
    for i in range(10):
        colour = next(colour_cycle)
        if i == 0:
            first_colour = colour
        if colour == first_colour:
            repeat_colour_index = i
    assert repeat_colour_index == 8
def test_update_rc_params() -> None:
    ''' update_rc_params pushes the given values into matplotlib's rcParams. '''
    rc_params = {
        'lines.linewidth': 100
    }
    prev_params = plt.rcParams['lines.linewidth']
    # Update run commands if any is passed (will pass an empty dictionary if not passed)
    btp.update_rc_params(rc_params)
    new_params = plt.rcParams['lines.linewidth']
    assert new_params == 100
    assert prev_params != new_params
    # Reset run commands so later tests see matplotlib defaults
    plt.rcdefaults()
def test_scale_figure() -> None:
    ''' scale_figure applies upscaling_factor to both axes, then
    compress_width / compress_height to each axis individually. '''
    width, height = 1, 1
    format_params = {
        'upscaling_factor': 2,
        'compress_width': 1,
        'compress_height': 1
    }
    width1, height1 = btp.scale_figure(format_params=format_params, width=width, height=height)
    assert width1 == 2 and height1 == 2
    format_params = {
        'upscaling_factor': 1,
        'compress_width': 0.5,
        'compress_height': 1
    }
    width2, height2 = btp.scale_figure(format_params=format_params, width=width, height=height)
    assert width2 == 0.5 and height2 == 1
    format_params = {
        'upscaling_factor': 2,
        'compress_width': 0.5,
        'compress_height': 0.2
    }
    width2, height2 = btp.scale_figure(format_params=format_params, width=width, height=height)
    assert width2 == 1 and height2 == 0.4
def test_determine_width() -> None:
    ''' determine_width picks single/double column width (given in cm,
    returned in inches) and scales it by the 'a:b' width_ratio. '''
    conversion_cm_inch = 0.3937008 # cm to inch
    format_params = {
        'column_type': 'single',
        'single_column_width': 5,
        'double_column_width': 10,
        'width_ratio': '1:1'
    }
    assert np.round(btp.determine_width(format_params),6) == np.round(5*conversion_cm_inch,6)
    format_params['column_type'] = 'double'
    assert np.round(btp.determine_width(format_params), 6) == np.round(10*conversion_cm_inch, 6)
    format_params['column_type'] = 'single'
    format_params['width_ratio'] = '1:2'
    assert np.round(btp.determine_width(format_params), 6) == np.round(2.5*conversion_cm_inch, 6)
def test_determine_height() -> None:
    ''' determine_height converts a 'W:H' aspect-ratio string into a height
    for the given width. '''
    width = 1
    format_params = {
        'aspect_ratio': '1:1'
    }
    assert btp.determine_height(format_params=format_params, width=width) == 1
    format_params['aspect_ratio'] = '3:1'
    # BUGFIX: the original compared the *signed* difference, which is true for
    # any value below the target (e.g. 0 would pass); compare the absolute
    # error instead. Also removed a dead trailing `assert True`.
    assert abs(btp.determine_height(format_params=format_params, width=width) - 0.333333333333333) < 10e-7
def test_prepare_plot() -> None:
    ''' prepare_plot returns a 600 dpi Figure with default (0, 1) x-limits. '''
    fig, ax = btp.prepare_plot()
    assert type(fig) == plt.Figure
    assert fig.get_dpi() == 600
    assert ax.get_xlim() == (0.0, 1.0)
def test_adjust_plot() -> None:
    ''' adjust_plot applies options like xlim and title onto the axes. '''
    fig, ax = btp.prepare_plot()
    options = {
        'xlim': (0.0, 2.0),
        'title': 'Test'
    }
    fig, ax = btp.adjust_plot(fig, ax, options)
    assert ax.get_xlim() == (0.0, 2.0)
    assert ax.get_title() == 'Test'
def test_ipywidgets_update() -> None:
    ''' ipywidgets_update forwards extra keyword arguments into options
    before calling func(data, options). '''
    def test_func(data, options):
        test1 = options['test1']
        test2 = options['test2']
        assert type(data) == dict
        assert test1 == 1
        assert test2 == 2
    data = {}
    options = {}
    btp.ipywidgets_update(func=test_func, data=data, options=options, test1=1, test2=2)

View file

View file

1
nafuma/xanes/__init__.py Normal file
View file

@ -0,0 +1 @@
from . import io, calib, plot, edges

1420
nafuma/xanes/calib.py Normal file

File diff suppressed because it is too large Load diff

30
nafuma/xanes/edges.py Normal file
View file

@ -0,0 +1,30 @@
import pandas as pd
import numpy as np
from scipy.constants import c, h
# From 2019 redefinition of SI base units: https://en.wikipedia.org/wiki/2019_redefinition_of_the_SI_base_units
keV_per_J = (1 / 1.602176634e-19) / 1000
# kXu values taken from International Tables for Crystallography Volume C, Kluwer Academic Publishers - Dordrecht / Boston / London (1992)
# K-edge absorption wavelengths indexed by atomic number Z (H through Cd);
# 'kXu' is the tabulated edge wavelength, np.nan where no value is listed.
K = { 'Z': [ 1, 2,
            3, 4, 5, 6, 7, 8, 9, 10,
            11, 12, 13, 14, 15, 16, 17, 18,
            19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
            37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48],
    'Atom': [ 'H', 'He',
            'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne',
            'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar',
            'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr',
            'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd'],
    'kXu': [ np.nan, np.nan,
            226.5, np.nan, np.nan, 43.68, 30.99, 23.32, np.nan, np.nan,
            np.nan, 9.5117, 7.9511, 6.7446, 5.7866, 5.0182, 4.3969, 3.8707,
            3.43645, 3.07016, 2.7573, 2.49730, 2.26902, 2.07012, 1.89636, 1.74334, 1.60811, 1.48802, 1.38043, 1.2833, 1.19567, 1.11652, 1.04497, 0.97978, 0.91995, 0.86547,
            0.81549, 0.76969, 0.72762, 0.68877, 0.65291, 0.61977, 0.5891, 0.56047, 0.53378, 0.50915, 0.48582, 0.46409]}
K = pd.DataFrame(K)
# Edge energy in keV: E = h*c/lambda, with lambda taken as kXu * 1e-10 m.
# NOTE(review): this treats 1 kXu as exactly 1 Angstrom; the kX-to-Angstrom
# conversion (~1.002) is ignored here — confirm that the ~0.2% error is
# acceptable for edge identification.
K['keV'] = np.round(h*c/(K['kXu']*10**-10) * keV_per_J, 3)
# FIXME If needed, add energies for L-edges as well.

523
nafuma/xanes/io.py Normal file
View file

@ -0,0 +1,523 @@
import pandas as pd
import matplotlib.pyplot as plt
import os
import numpy as np
import nafuma.auxillary as aux
from nafuma.xanes.calib import find_element
import datetime
def split_scan_data(data: dict, options={}) -> list:
    ''' Splits a XANES-file from BM31 into different files depending on the edge. Has the option to add intensities of all scans of same edge into the same file.
    As of now only picks out xmap_rois (fluorescence mode) and for Mn, Fe, Co and Ni K-edges.

    data: dict with 'path' — a single filename or a list of filenames.
    Returns a list (one entry per input file) of dicts mapping edge name
    ('Mn'/'Fe'/'Co'/'Ni') to a list of per-scan DataFrames, unless
    options['return'] is False. '''
    required_options = ['log', 'logfile', 'save', 'save_folder', 'replace', 'active_roi', 'add_rois', 'return', 'skip_if_no_roi']
    default_options = {
        'log': False,
        'logfile': f'{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}_split_edges.log',
        'save': False, # whether to save the files or not
        'save_folder': '.', # root folder of where to save the files
        'replace': False, # whether to replace the files if they already exist
        'active_roi': None,
        'add_rois': False, # Whether to add the rois of individual scans of the same edge together
        'return': True,
        'skip_if_no_roi': True
    }
    options = aux.update_options(options=options, required_options=required_options, default_options=default_options)
    if not isinstance(data['path'], list):
        data['path'] = [data['path']]
    all_scans = []
    if options['log']:
        aux.write_log(message='Starting file splitting...', options=options)
    for filename in data['path']:
        if options['log']:
            # NOTE(review): '(unknown)' looks like a lost f-string placeholder
            # (probably {filename}) — confirm against the original source.
            aux.write_log(message=f'Reading (unknown)...', options=options)
        with open(filename, 'r') as f:
            lines = f.readlines()
        # Per-file accumulators: raw rows per scan, one header per scan,
        # and the timestamp line following each 'zapline mono' marker.
        timestamps = []
        scan_datas, scan_data = [], []
        headers, header = [], ''
        read_data = False
        for i, line in enumerate(lines):
            # Header line starts with #L - reads headers, and toggles data read-in on
            if 'zapline mono' in line:
                timestamps.append(lines[i+1].strip('#D'))
            elif line[0:2] == "#L":
                header, read_data = line[2:].split(), True
                if options['log']:
                    aux.write_log(message='... Found scan data. Starting read-in...', options=options)
                continue
            # First line after data started with #C - stops data read-in
            elif line[0:2] == "#C" or line[0:2] == '#S':
                read_data = False
                if scan_data:
                    scan_datas.append(scan_data); scan_data = []
                if header:
                    headers.append(header); header = ''
            # Ignore line if read-in not toggled
            if read_data == False:
                continue
            # Read in data if it is
            else:
                scan_data.append(line.split())
        # Bucket each scan's DataFrame by the K-edge it belongs to
        edges = {'Mn': [], 'Fe': [], 'Co': [], 'Ni': []}
        for i, scan_data in enumerate(scan_datas):
            if 'ZapEnergy' not in headers[i]:
                if options['log']:
                    aux.write_log(message=f'... No valid scan data found... ({i+1}/{len(scan_datas)})', options=options)
                continue
            xanes_df = pd.DataFrame(scan_data).apply(pd.to_numeric)
            xanes_df.columns = headers[i]
            edge = find_element({'xanes_data_original': xanes_df})
            if options['log']:
                aux.write_log(message=f'... Starting data clean-up ({edge}-edge)... ({i+1}/{len(scan_datas)})', options=options)
            if not ('xmap_roi00' in headers[i]) and (not 'xmap_roi01' in headers[i]):
                if options['skip_if_no_roi']:
                    if options['log']:
                        aux.write_log(message='... ... Did not find fluoresence data. Skipping...', options=options)
                    continue
                if options['log']:
                    aux.write_log(message='... ... Did not find fluoresence data, but still proceeding ...', options=options)
            edges[edge].append(xanes_df)
        if options['add_rois']:
            if options['log']:
                aux.write_log(message=f'... Addition of rois enabled. Starting addition...', options=options)
            added_edges = {'Mn': [], 'Fe': [], 'Co': [], 'Ni': []}
            for edge, scans in edges.items():
                if options['log']:
                    aux.write_log(message=f'... ... Adding rois of the {edge}-edge...', options=options)
                if scans:
                    # Sum every scan's ROI intensities into the first scan's frame
                    xanes_df = scans[0]
                    for i, scan in enumerate(scans):
                        if i > 0:
                            if options['log']:
                                aux.write_log(message=f'... ... ... Adding {i+1}/{len(scans)}', options=options)
                            if 'xmap_roi00' in xanes_df.columns:
                                xanes_df['xmap_roi00'] += scan['xmap_roi00']
                            if 'xmap_roi01' in xanes_df.columns:
                                xanes_df['xmap_roi01'] += scan['xmap_roi01']
                    added_edges[edge].append(xanes_df)
            edges = added_edges
        if options['save']:
            #FIXME If there is something wrong with the input file, the file will not be saved but log-file still says it is saved. Goes from "Saving data to ..." to "All done!" no matter if it fails or not.
            if options['log']:
                aux.write_log(message=f'... Saving data to {options["save_folder"]}', options=options)
            if not os.path.isdir(options['save_folder']):
                if options['log']:
                    aux.write_log(message=f'... ... {options["save_folder"]} does not exist. Creating folder.', options=options)
                os.makedirs(options['save_folder'])
            filename = os.path.basename(filename).split('.')[0]
            for edge, scans in edges.items():
                for i, scan in enumerate(scans):
                    # One file per scan, unless rois were summed into one
                    count = '' if options['add_rois'] else '_'+str(i).zfill(4)
                    # NOTE(review): '(unknown)' below also looks like a lost
                    # f-string placeholder (probably {filename}) — confirm.
                    path = os.path.join(options['save_folder'], f'(unknown)_{edge}{count}.dat')
                    if not os.path.isfile(path):
                        with open(path, 'w', newline = '\n') as f:
                            f.write(f'# Time: {timestamps[i]}')
                            scan.to_csv(f)
                        if options['log']:
                            aux.write_log(message=f'... ... Scan saved to {path}', options=options)
                    elif options['replace'] and os.path.isfile(path):
                        with open(path, 'w', newline = '\n') as f:
                            scan.to_csv(f)
                        if options['log']:
                            aux.write_log(message=f'... ... File already exists. Overwriting to {path}', options=options)
                    elif not options['replace'] and os.path.isfile(path):
                        if options['log']:
                            aux.write_log(message=f'... ... File already exists. Skipping...', options=options)
        all_scans.append(edges)
    if options['log']:
        aux.write_log(message=f'All done!', options=options)
    if options['return']:
        return all_scans
    else:
        return
def save_data(data: dict, options={}) -> None:
    ''' Export data['xanes_data'] (a DataFrame) to a tab-separated file,
    optionally prefixed with one header line per scan holding the e0 shift
    from data['e0_diff']. Returns None; exits early (logging if enabled)
    when there is nothing to save or the target exists and overwrite is off. '''
    required_options = ['save_folder', 'overwrite', 'log', 'logfile', 'filename']
    default_options = {
        'log': False,
        'logfile': f'{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}_save_files.log',
        'save_folder': 'saved_scans',
        'overwrite': False,
        'filename': f'{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}_exported_data.dat',
    }
    options = aux.update_options(options=options, required_options=required_options, default_options=default_options)
    # Check if there is any data to be saved
    if not 'xanes_data' in data.keys():
        if options['log']:
            aux.write_log(message=f'There is not saved scan data in data. Exiting without saving...', options=options)
        return None
    if not isinstance(data['xanes_data'], pd.DataFrame):
        if options['log']:
            aux.write_log(message=f'data["xanes_data"] has an invalid format. Exiting without saving...', options=options)
        return None
    # Make folder(s) if it/they do(es)n't exist
    if not os.path.exists(options['save_folder']):
        if options['log']:
            aux.write_log(message=f'Destination folder does not exist. Creating folder...', options=options)
        os.makedirs(options['save_folder'])
    # BUGFIX: the existence check used the string literal 'save_folder'
    # instead of options['save_folder'], so the overwrite guard inspected
    # the wrong path and never triggered for the real target file.
    if os.path.exists(os.path.join(options['save_folder'], options['filename'])):
        if not options['overwrite']:
            if options['log']:
                aux.write_log(message=f'File already exists and overwrite disabled. Exiting without saving...', options=options)
            return None
    with open(os.path.join(options['save_folder'], options['filename']), 'w') as f:
        if 'e0_diff' in data.keys():
            # NOTE(review): header count uses len(data['path']) while the
            # loop writes len(data['e0_diff']) lines — confirm they match.
            f.write(f'# Number of header lines: {len(data["path"])+1} \n')
            for i, (path, e0) in enumerate(data['e0_diff'].items()):
                f.write(f'# Scan_{i} \t {e0} \n')
        else:
            f.write(f'# Number of header lines: {1}')
        data['xanes_data'].to_csv(f, sep='\t', index=False)
def load_data(path: str) -> dict:
    ''' Load a XANES dataset previously exported by save_data.

    The first line holds the number of header lines; any further '#' lines
    each end with an edge-position value (one per scan column).

    Returns a dict with 'xanes_data' (DataFrame), 'path' (the scan column
    names, i.e. every column except 'ZapEnergy') and, when edge positions
    were present, 'e0_diff' mapping scan name to float edge position. '''
    # FIXME Let this function be called by read_data() if some criterium is passed
    edge_positions = []
    with open(path, 'r') as infile:
        first_line = infile.readline()
        header_lines = int(first_line.split()[-1])
        if header_lines > 1:
            current = infile.readline()
            # Collect the trailing value of each remaining '#' header line
            while current[0] == '#':
                edge_positions.append(current.split()[-1])
                current = infile.readline()
    xanes_data = pd.read_csv(path, sep='\t', skiprows=header_lines)
    scan_names = xanes_data.columns.to_list()
    scan_names.remove('ZapEnergy')
    data = {'xanes_data': xanes_data, 'path': scan_names}
    if header_lines > 1:
        data['e0_diff'] = {name: float(e0) for name, e0 in zip(scan_names, edge_positions)}
    return data
def read_data(data: dict, options={}) -> pd.DataFrame:
    ''' Read a set of split XANES scan files into one DataFrame.

    The frame is seeded with the (optionally shifted) 'ZapEnergy' axis of
    the first file; every file then contributes one intensity column named
    after its path. In 'fluoresence' mode the active ROI column is used
    (auto-detected unless options['active_roi'] is set); in 'transmission'
    mode the ratio MonEx/Ion1 is used. '''
    # FIXME Handle the case when dataseries are not the same size
    # FIXME Add possibility to extract TIME (for operando runs) and Blower Temp (for variable temperature runs)
    # FIXME Add possibility to import transmission data
    required_options = ['adjust', 'mode']
    default_options = {
        'adjust': 0,
        'mode': 'fluoresence',
        'active_roi': None
    }
    options = aux.update_options(options=options, required_options=required_options, default_options=default_options)
    if not isinstance(data['path'], list):
        data['path'] = [data['path']]
    # Seed with the energy axis only, shifted by the requested adjustment
    xanes_data = pd.read_csv(data['path'][0], skiprows=1)[['ZapEnergy']]
    xanes_data['ZapEnergy'] += options['adjust']
    columns = ['ZapEnergy']
    for filename in data['path']:
        columns.append(filename)
        scan = pd.read_csv(filename, skiprows=1)
        if options['mode'] == 'fluoresence':
            if not options['active_roi']:
                scan = scan[[determine_active_roi(scan)]]
            else:
                scan = scan[options['active_roi']]
        elif options['mode'] == 'transmission':
            scan = scan['MonEx'] / scan['Ion1']
        xanes_data = pd.concat([xanes_data, scan], axis=1)
    xanes_data.columns = columns
    return xanes_data
def read_metadata(data: dict, options={}) -> dict:
    ''' Extract per-scan metadata (mean blower temperature and timestamp)
    from the files in data['path'], optionally converting timestamps to
    elapsed time relative to options['reference_time']. When data['cycles']
    is present, each timestamp is also correlated to ions extracted and
    specific capacity from the electrochemistry data.

    Returns a dict with keys 'time', 'temperature' and, when cycles were
    given, 'ions' and 'specific_capacity'. '''
    required_options = ['get_temperature', 'get_timestamp', 'adjust_time', 'convert_time', 'time_unit', 'reference_time']
    default_options = {
        'get_temperature': True,
        'get_timestamp': True,
        'adjust_time': False,
        'convert_time': False,
        'reference_time': None,
        'time_unit': 's'
    }
    options = aux.update_options(options=options, required_options=required_options, default_options=default_options)
    temperatures = []
    timestamps = []
    for filename in data['path']:
        scan_data = pd.read_csv(filename, skiprows=1)
        if options['get_temperature']:
            temperatures.append(scan_data['ZBlower2'].mean())
        if options['get_timestamp']:
            with open(filename, 'r') as f:
                #time = f.readline().strip('# Time: ') #<-- Previous code
                time = f.readline().split('# Time: ')[-1] #Hope this does not fuck you up, Rasmus - but I needed another space here
                # Character between minutes and seconds; used to pick the
                # matching strptime format ('.' vs ':').
                split_operator=time[-9] #This should be the operator that splits hours, minutes and seconds
                if split_operator == ".":
                    time = datetime.datetime.strptime(time, "%a %b %d %H.%M.%S %Y ")
                if split_operator == ":":
                    time = datetime.datetime.strptime(time, "%a %b %d %H:%M:%S %Y ")
            if options['adjust_time']:
                # Shift the header timestamp to the middle of the scan
                time_elapsed = scan_data['Htime'].iloc[-1] - scan_data['Htime'].iloc[0]
                time += datetime.timedelta(microseconds=time_elapsed)/2
            timestamps.append(time)
    if options['reference_time'] and options['convert_time']:
        from . import unit_tables
        new_times = []
        if isinstance(options['reference_time'], str):
            options['reference_time'] = datetime.datetime.strptime(options['reference_time'], "%d.%b %y %H.%M.%S")
        # Convert absolute timestamps to elapsed time in the requested unit
        for time in timestamps:
            new_time = (time.timestamp() - options['reference_time'].timestamp()) * unit_tables.time()['s'].loc[options['time_unit']]
            new_times.append(new_time)
        timestamps = new_times
    metadata = {'time': timestamps, 'temperature': temperatures}
    # Match timestamps against electrochemistry-data
    # TODO This could be generalised to match up against any other dataset with timestamps.
    if 'cycles' in data.keys():
        ions, specific_capacity = [], []
        i = 0  # index of the current cycle in data['cycles']
        for timestamp in timestamps:
            if timestamp < 0:
                # Before the electrochemistry run started
                # NOTE(review): only 'ions' gets a value here — the two lists
                # can drift out of sync; confirm intended.
                ions.append(0)
            else:
                # Nearest charge/discharge points bracketing this timestamp
                closest_chg = aux.find_neighbours(value=timestamp, df=data['cycles'][i][0], colname='time')
                closest_dchg = aux.find_neighbours(value=timestamp, df=data['cycles'][i][1], colname='time')
                if not isinstance(closest_chg, list):
                    closest_chg = [closest_chg, closest_chg]
                if not isinstance(closest_dchg, list):
                    closest_dchg = [closest_dchg, closest_dchg]
                # x==x is a NaN-check: both neighbours valid -> interpolate
                if all([x==x for x in closest_chg]):
                    ions.append(np.mean([data['cycles'][i][0]['ions'].loc[data['cycles'][i][0].index == closest_chg[0]], data['cycles'][i][0]['ions'].loc[data['cycles'][i][0].index == closest_chg[1]]]))
                    specific_capacity.append(np.mean([data['cycles'][i][0]['specific_capacity'].loc[data['cycles'][i][0].index == closest_chg[0]], data['cycles'][i][0]['specific_capacity'].loc[data['cycles'][i][0].index == closest_chg[1]]]))
                    continue
                elif all([x==x for x in closest_dchg]):
                    ions.append(np.mean([data['cycles'][i][1]['ions'].loc[data['cycles'][i][1].index == closest_dchg[0]], data['cycles'][i][1]['ions'].loc[data['cycles'][i][1].index == closest_dchg[1]]]))
                    specific_capacity.append(np.mean([data['cycles'][i][1]['specific_capacity'].loc[data['cycles'][i][1].index == closest_dchg[0]], data['cycles'][i][1]['specific_capacity'].loc[data['cycles'][i][1].index == closest_dchg[1]]]))
                    continue
                elif aux.isnan(closest_chg[1]) and aux.isnan(closest_dchg[0]):
                    # Between charge and discharge (a resting step)
                    ions.append(np.nan)
                    specific_capacity.append(np.nan)
                    continue
                else:
                    # Past the current cycle: mark as NaN and advance
                    ions.append(np.nan)
                    specific_capacity.append(np.nan)
                    i += 1
                    if i > len(data['cycles'])-1:
                        break
        for i, (ion, cap) in enumerate(zip(ions, specific_capacity)):
            if aux.isnan(ion): # if a resting step, assign a meaningful value
                if i < len(ions)-1: # if resting step in the middle of the run, take the mean between the last of previous and first of next run
                    ions[i] = np.mean([ions[i-1], ions[i+1]])
                else: # If last element, set to last values plus the delta between the last two previous measurements
                    ions[i] = ions[i-1] + (ions[i-1]-ions[i-2])
            # NOTE(review): the 'and i < len-1' here makes the inner else
            # branch unreachable (the last-element case is never reached) —
            # likely a copy-paste slip; confirm.
            if aux.isnan(cap) and i < len(specific_capacity)-1: # do same thing for specific capacity
                if i < len(specific_capacity)-1:
                    specific_capacity[i] = np.mean([specific_capacity[i-1], specific_capacity[i+1]])
                else:
                    specific_capacity[i] = specific_capacity[i-1] + (specific_capacity[i-1]-specific_capacity[i-2])
        metadata['ions'] = ions
        metadata['specific_capacity'] = specific_capacity
    return metadata
def determine_active_roi(scan_data):
    ''' Pick the fluorescence ROI column ('xmap_roi00' or 'xmap_roi01')
    that most plausibly contains the absorption edge: the one whose signal
    rises from the start to the end of the scan. Returns the column name,
    or None when no rising ROI column can be identified. '''
    # FIXME For Co-edge, this gave a wrong scan
    # BUGFIX: active_roi was previously left unbound (UnboundLocalError)
    # when neither ROI column was present in the DataFrame.
    active_roi = None
    has_roi00 = 'xmap_roi00' in scan_data.columns
    has_roi01 = 'xmap_roi01' in scan_data.columns
    if not has_roi00 or not has_roi01:
        # Only one (or neither) ROI present — no comparison possible
        if has_roi00:
            active_roi = 'xmap_roi00'
        elif has_roi01:
            active_roi = 'xmap_roi01'
    elif (scan_data['xmap_roi00'].iloc[0:100].mean() < scan_data['xmap_roi00'].iloc[-100:].mean()) and (scan_data['xmap_roi01'].iloc[0:100].mean() < scan_data['xmap_roi01'].iloc[-100:].mean()):
        # Both ROIs rise over the scan: pick the one with the larger
        # relative rise within the first half of the scan
        if (scan_data['xmap_roi00'].iloc[:int(scan_data.shape[0]/2)].max() - scan_data['xmap_roi00'].iloc[0])/scan_data['xmap_roi00'].max() > (scan_data['xmap_roi01'].iloc[:int(scan_data.shape[0]/2)].max() - scan_data['xmap_roi01'].iloc[0])/scan_data['xmap_roi01'].max():
            active_roi = 'xmap_roi00'
        else:
            active_roi = 'xmap_roi01'
    elif scan_data['xmap_roi00'].iloc[0:100].mean() < scan_data['xmap_roi00'].iloc[-100:].mean():
        active_roi = 'xmap_roi00'
    elif scan_data['xmap_roi01'].iloc[0:100].mean() < scan_data['xmap_roi01'].iloc[-100:].mean():
        active_roi = 'xmap_roi01'
    return active_roi
def write_data(data: dict, options={}):
    ''' Export the columns of data['xanes_data'] to individual files.

    NOTE(review): unfinished stub — it only builds (and prints) the default
    per-column filenames; nothing is written to disk yet. '''
    default_options = {
        'save_filenames': None,
        'save_dir': '.',
    }
    options = aux.update_options(options=options, default_options=default_options, required_options=default_options.keys())
    if not options['save_filenames']:
        # One export name per scan column, skipping the shared energy axis
        options['save_filenames'] = [os.path.basename(col).split('.')[0]+'_exported.dat' for col in data['xanes_data'].columns if 'ZapEnergy' not in col]
    print(options['save_filenames'])

182
nafuma/xanes/plot.py Normal file
View file

@ -0,0 +1,182 @@
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,AutoMinorLocator)
import pandas as pd
import numpy as np
import math
import datetime
#import ipywidgets as widgets
#from IPython.display import display
import nafuma.xanes as xas
import nafuma.plotting as btp
import nafuma.auxillary as aux
def plot_xanes(data, options={}):
    ''' Plot XANES spectra from data['xanes_data'] (one curve per scan path),
    honouring scan selection ('which_scans'/'exclude_scans'), highlighting
    and colour options. Returns (fig, ax). '''
    # Update options
    default_options = {
        'which_scans': 'all', # Use real numbers, not indices - update_scans_list() will adjust.
        'highlight': [],
        'xlabel': 'Energy', 'ylabel': 'Intensity',
        'xunit': 'keV', 'yunit': 'arb. u.',
        'exclude_scans': [],
        'colours': None,
        'gradient': False,
        'rc_params': {},
        'format_params': {}}
    options = aux.update_options(options=options, default_options=default_options)
    if not 'xanes_data' in data.keys():
        # NOTE(review): xas.io.load_data is defined as load_data(path); this
        # call passes data=/options= keywords — confirm the signatures match.
        data['xanes_data'] = xas.io.load_data(data=data, options=options)
    # Update list of cycles to correct indices
    update_scans_list(data=data, options=options)
    colours = generate_colours(scans=options['which_scans'], options=options)
    # Prepare plot, and read and process data
    fig, ax = btp.prepare_plot(options=options)
    # Add counter to pick out correct colour
    counter = 0
    for i, path in enumerate(data['path']):
        if i in options['which_scans']:
            # Highlighted scans are drawn with a 5x line width
            lw = plt.rcParams['lines.linewidth']*5 if i in options['highlight'] else plt.rcParams['lines.linewidth']
            data['xanes_data'].plot(x='ZapEnergy', y=path, ax=ax, c=colours[counter], lw=lw)
            counter += 1
    fig, ax = btp.adjust_plot(fig=fig, ax=ax, options=options)
    #if options['interactive_session_active']:
    return fig, ax
def pick_out_scans(metadata: dict, timestamp: list):
    ''' Return the indices of scans in metadata['time'] that fall within the
    closed interval [timestamp[0], timestamp[1]].

    Each bound may be a "%d.%b %y H M S" string (with either ':' or '.'
    separating hours/minutes/seconds), a datetime, or None/empty to leave
    that side of the interval open. The parsed bounds are written back into
    the timestamp list, as the original implementation did. '''
    def _parse_bound(bound, fallback):
        # BUGFIX: the original indexed timestamp[0][-3] *before* checking for
        # an open bound (crashing on None), and reused the start bound's
        # separator when parsing the end bound.
        if not bound:
            return fallback
        if isinstance(bound, datetime.datetime):
            return bound
        separator = bound[-3]  # character between minutes and seconds
        fmt = "%d.%b %y %H.%M.%S" if separator == "." else "%d.%b %y %H:%M:%S"
        return datetime.datetime.strptime(bound, fmt)
    # Open bounds default to way back when / way into the future
    start = _parse_bound(timestamp[0], datetime.datetime(1970, 1, 1))
    end = _parse_bound(timestamp[1], datetime.datetime(3000, 1, 1))
    timestamp[0], timestamp[1] = start, end
    scans = []
    for i, time in enumerate(metadata['time']):
        if start <= time <= end:
            scans.append(i)
    return scans
def update_scans_list(data, options: dict) -> None:
    ''' Normalise options['which_scans'] into a list of 0-based indices into
    data['path'] and drop any index present in options['exclude_scans'].

    Accepts 'all', a list of 1-based scan numbers and/or (first, last)
    tuples (inclusive intervals), or a single (first, last) tuple where a
    non-positive first means "from the first scan" and a negative last means
    "through the last scan". Mutates options in place; returns None. '''
    if options['which_scans'] == 'all':
        options['which_scans'] = list(range(len(data['path'])))
    elif isinstance(options['which_scans'], list):
        indices = []
        for scan in options['which_scans']:
            if isinstance(scan, int):
                indices.append(scan - 1)
            elif isinstance(scan, tuple):
                # Tuple entries are inclusive 1-based intervals
                indices.extend(i - 1 for i in range(scan[0], scan[1] + 1))
        options['which_scans'] = indices
    # Tuple is used to define an interval - as elements tuples can't be assigned, I convert it to a list here.
    elif isinstance(options['which_scans'], tuple):
        first, last = options['which_scans']
        # BUGFIX: these were an if/elif pair, so a clamped start suppressed
        # the negative-end handling; check both bounds independently.
        if first <= 0:
            first = 1
        if last < 0:
            # BUGFIX: was len(options['which_scans']) — the length of the
            # 2-tuple itself (always 2); a negative end means "through the
            # last scan", i.e. len(data['path']).
            last = len(data['path'])
        options['which_scans'] = [i - 1 for i in range(first, last + 1)]
    # BUGFIX: was `del` inside an enumerate loop, which skips the element
    # following every deletion; filter with a comprehension instead.
    options['which_scans'] = [scan for scan in options['which_scans'] if scan not in options['exclude_scans']]
def generate_colours(scans, options):
    ''' Build a list with one colour per entry in scans, from
    options['colours'] (or a default) with an optional start-to-end gradient. '''
    # FIXME Make this a generalised function and use this instead of this and in the electrochemistry submodule
    # Assign colours from the options dictionary if it is defined, otherwise use standard colours.
    if options['colours']:
        colour = options['colours']
    else:
        #colour = (214/255, 143/255, 214/255) # Plum Web (#D68FD6), coolors.co
        colour = (90/255, 42/255, 39/255) # Caput Mortuum(#5A2A27), coolors.co
    # If gradient is enabled, find start and end points for each colour
    if options['gradient']:
        if isinstance(colour, list) and len(colour) == 2:
            options['number_of_colours'] = len(scans)
            colours = btp.mix_colours(colour1=colour[0], colour2=colour[1], options=options)
        else:
            # Fade towards a lighter shade of the single base colour
            add = min([(1-x)*0.75 for x in colour])
            colour_start = colour
            colour_end = [x+add for x in colour]
    # Generate lists of colours
    # NOTE(review): if colour is a list and gradient is disabled (or the list
    # is not exactly two colours), `colours` is never assigned and the return
    # below raises UnboundLocalError; likewise `colour_start`/`colour_end`
    # are unbound for a gradient over a list of length != 2 — confirm and fix.
    if not isinstance(colour, list):
        colours = []
        for scan_number in range(0, len(scans)):
            if options['gradient']:
                weight_start = (len(scans) - scan_number)/len(scans)
                weight_end = scan_number/len(scans)
                colour = [weight_start*start_colour + weight_end*end_colour for start_colour, end_colour in zip(colour_start, colour_end)]
            colours.append(colour)
    return colours

View file

@ -0,0 +1,11 @@
import pandas as pd
def time():
    ''' Return the 4x4 time-unit conversion matrix as a DataFrame.

    Rows and columns are labelled 'h', 'min', 's', 'ms'; e.g.
    time()['h'].loc['min'] == 60. '''
    units = ['h', 'min', 's', 'ms']
    factors = {
        'h': [1, 60, 3600, 3600000],
        'min': [1/60, 1, 60, 60000],
        's': [1/3600, 1/60, 1, 1000],
        'ms': [1/3600000, 1/60000, 1/1000, 1],
    }
    return pd.DataFrame(factors, index=units)

1
nafuma/xrd/__init__.py Normal file
View file

@ -0,0 +1 @@
from . import io, plot, refinement

939
nafuma/xrd/io.py Normal file
View file

@ -0,0 +1,939 @@
from sympy import re
import fabio, pyFAI
import pandas as pd
import numpy as np
import os
import shutil
import sys
import datetime
import zipfile
import xml.etree.ElementTree as ET
import nafuma.auxillary as aux
def get_image_array(path):
    ''' Load a 2D detector image as a NumPy array.

    Beamline formats (.edf, .cbf, .mar3450) are read via fabio; '.dat' files
    are read as plain text with one header line and ';'-separated values.

    Raises ValueError for any other extension. '''
    beamline_extension = ['.edf', '.cbf', '.mar3450']
    if path.endswith(tuple(beamline_extension)):
        image = fabio.open(path)
        image_array = image.data
    elif path.endswith('.dat'):
        image_array = np.loadtxt(path, skiprows=1, delimiter=';')
    else:
        # BUGFIX: previously fell through and raised UnboundLocalError on
        # the return for unsupported formats; fail with a clear message.
        raise ValueError(f'Unsupported image format: {path}')
    return image_array
def get_image_headers(path):
    ''' Return the header dictionary of an image file as parsed by fabio. '''
    return fabio.open(path).header
def integrate_scans(data: dict, options={}):
    ''' Integrate a series of detector images to 1D diffractograms.

    data['path'] is either a folder (all files with options['extension']
    inside it are used) or an explicit list of image paths.

    Returns (diffractograms, wavelengths), one entry per image. '''
    default_options = {
        'extension': '.dat',
        'save': True,
        'integration_save_folder': './integrated/',
        'filename_base': 'integrated',
    }
    options = aux.update_options(options=options, required_options=default_options.keys(), default_options=default_options)
    # BUGFIX: imgs was only assigned in the single-folder branch; passing a
    # list of image paths raised UnboundLocalError further down.
    if not isinstance(data['path'], list):
        imgs = aux.get_filenames(data['path'], ext=options['extension'])
    else:
        imgs = data['path']
    diffractograms, wavelengths = [], []
    for i, img in enumerate(imgs):
        data['image'] = get_image_array(img)
        # Sequentially numbered output files: <base>_0000.xy, <base>_0001.xy, ...
        options['integration_save_filename'] = options['filename_base'] + '_' + f'{i}'.zfill(4) + '.xy'
        diff, wl = integrate_1d(data=data, options=options)
        diffractograms.append(diff)
        wavelengths.append(wl)
    return diffractograms, wavelengths
def integrate_1d(data, options={}, index=0):
    ''' Integrates an image file to a 1D diffractogram.
    Required content of data:
    calibrant (str): path to .poni-file
    nbins (int): Number of bins to divide image into
    path (str) (optional, dependent on image): path to image file - either this or image must be specified. If both is passed, image is prioritised
    image (NumPy 2D Array) (optional, dependent on path): image array as extracted from get_image_array
    Output:
    (diffractogram, wavelength): DataFrame containing the 1D diffractogram
    and the wavelength read from the .poni-file
    '''
    required_options = ['unit', 'npt', 'save', 'integration_save_filename', 'save_extension', 'integration_save_folder', 'overwrite', 'extract_folder', 'error_model']
    default_options = {
        'unit': '2th_deg',
        'npt': 5000,
        'extract_folder': 'tmp',
        'error_model': None,
        'save': False,
        'integration_save_filename': None,
        'save_extension': '_integrated.xy',
        'integration_save_folder': '.',
        'overwrite': False}
    options = aux.update_options(options=options, required_options=required_options, default_options=default_options)
    if not isinstance(data['path'], list):
        data['path'] = [data['path']]
    # Get image array from filename if not passed
    if 'image' not in data.keys() or not isinstance(data['image'], np.ndarray):
        data['image'] = get_image_array(data['path'][index])
    # Load mask
    if 'mask' in data.keys():
        mask = get_image_array(data['mask'])
    else:
        mask = None
    # Instanciate the azimuthal integrator from pyFAI from the calibrant (.poni-file)
    ai = pyFAI.load(data['calibrant'])
    # Determine filename
    filename = make_filename(options=options, path=data['path'][index])
    # Make save_folder if this does not exist already
    if not os.path.isdir(options['extract_folder']):
        os.makedirs(options['extract_folder'])
    if not os.path.isdir(options['integration_save_folder']):
        os.makedirs(options['integration_save_folder'])
    # pyFAI writes the integrated data to `filename`; it is then read back
    ai.integrate1d(data['image'], npt=options['npt'], mask=mask, error_model=options['error_model'], unit=options['unit'], filename=filename)
    data['path'][index] = filename
    diffractogram, _ = read_xy(data=data, options=options, index=index)
    wavelength = find_wavelength_from_poni(path=data['calibrant'])
    if not options['save']:
        # Clean up the temporary output
        os.remove(filename)
        # BUGFIX: was shutil.rmtree(f'tmp'), which ignored a user-supplied
        # 'extract_folder' and could delete an unrelated local 'tmp' folder.
        shutil.rmtree(options['extract_folder'])
    return diffractogram, wavelength
def make_filename(options, path=None):
    ''' Determines the output filename for integrated diffractogram data.

    Input:
    options: dict with at least 'save', 'extract_folder', 'integration_save_folder',
             'integration_save_filename', 'save_extension' and 'overwrite' set
    path (str) (optional): path to the raw image file; its basename becomes the
             trunk of the save name when no explicit filename is given

    Output:
    filename: the path the integrated data should be written to '''

    # Define save location for integrated diffractogram data
    if not options['save']:
        # Data is only needed temporarily - park it in the extract folder
        filename = os.path.join(options['extract_folder'], 'tmp_diff.dat')

    else:
        # Case 1: No filename is given.
        if not options['integration_save_filename']:
            # If a path is given instead of an image array, the path is taken as the trunk of the savename
            if path:
                # FIX: use os.path.splitext on the basename instead of str.split('.'),
                # so dots in directory names or multi-dot basenames do not truncate the name
                trunk = os.path.splitext(os.path.basename(path))[0]
                filename = os.path.join(options['integration_save_folder'], trunk + options['save_extension'])
            else:
                # Make filename just "integrated.xy" in the save_folder
                filename = os.path.join(options['integration_save_folder'], 'integrated.xy')

        # Case 2: Filename is given - place it in the save folder
        else:
            filename = os.path.join(options['integration_save_folder'], options['integration_save_filename'])

    if not options['overwrite']:
        # Never clobber an existing file: append a zero-padded counter instead.
        # splitext is dot-in-directory safe, unlike splitting the whole path on '.'
        trunk, extension = os.path.splitext(filename)
        extension = extension.lstrip('.')

        counter = 0
        while os.path.isfile(filename):
            # Rename first file to match naming scheme if it already exists
            if counter == 0:
                os.rename(filename, trunk + '_' + str(counter).zfill(4) + '.' + extension)

            # Increment counter and make new filename
            counter += 1
            filename = trunk + '_' + str(counter).zfill(4) + '.' + extension

    return filename
def generate_image_list(path, options=None):
    ''' Generates a list of paths to pass to the average_images() function'''
    # NOTE(review): this function appears unfinished - it only defines its option
    # defaults and never builds or returns a list; confirm before use.

    required_options = ['scans_per_image']

    default_options = {
        # how many consecutive scans are grouped into one averaged image
        'scans_per_image': 5
    }
def process_2d_scans(data: dict, options={}):
    ''' Averages batches of 2D detector images (optionally subtracting averaged
    darks) and optionally saves each averaged image to disk.

    Input:
    data: dict with 'path' pointing to the folder containing the image files
    options: dict to override the defaults below

    Output:
    img_avgs: list of averaged (dark-subtracted, if enabled) image arrays '''

    default_options = {
        'scans': 15,                # number of scans per image
        'img_filename': 'img_',     # prefix of image files
        'extension': '.edf',        # extension of the raw image files
        'darks': True,              # whether there are darks
        'dark_filename': 'dark_',   # prefix of dark files
        'save': False,
        'save_folder': './average/',
        'save_filename': 'avg_',
        'save_extension': '.dat'
    }

    options = aux.update_options(options=options, required_options=default_options.keys(), default_options=default_options)

    # FIX: os.listdir order is undefined - sort so consecutive scans are grouped together
    all_imgs = sorted(os.path.join(data['path'], img) for img in os.listdir(data['path']) if img.endswith(options['extension']) and img.startswith(options['img_filename']))

    if options['darks']:
        all_darks = sorted(os.path.join(data['path'], img) for img in os.listdir(data['path']) if img.endswith(options['extension']) and img.startswith(options['dark_filename']))

    # The number of images must divide evenly into groups of 'scans' images
    scans = len(all_imgs) // options['scans']
    assert scans * options['scans'] == len(all_imgs)

    # Partition the file lists into consecutive groups of 'scans' paths
    imgs = [all_imgs[i*options['scans']:(i+1)*options['scans']] for i in range(scans)]
    darks = [all_darks[i*options['scans']:(i+1)*options['scans']] for i in range(scans)] if options['darks'] else []

    img_avgs = []
    headers = []

    for i, img in enumerate(imgs):
        img_avg = average_images(img)
        header = get_image_headers(img[0])

        if options['darks']:
            # FIX: the original zipped imgs with darks, so when 'darks' was False the
            # darks list was empty and NO images were processed at all. Darks are now
            # looked up by group index instead.
            dark_avg = average_images(darks[i])
            img_avg = subtract_dark(img_avg, dark_avg)

        img_avgs.append(img_avg)
        headers.append(header)

    if options['save']:
        if not os.path.isdir(options['save_folder']):
            os.makedirs(options['save_folder'])

        for i, img in enumerate(img_avgs):
            if options['save_extension'] == '.dat':
                with open(os.path.join(options['save_folder'], options['save_filename']+f'{i}'.zfill(4)+options['save_extension']), 'w') as f:
                    f.write(f'# Time: {headers[i]["time"]}\n')
                    np.savetxt(f, img, fmt='%.2f', delimiter=";")

    return img_avgs
def average_images(images):
    ''' Takes a list of paths to image files, reads them and averages them
    before returning the average image.'''

    # Stack all frames along a new leading axis, then average over it
    stack = np.array([get_image_array(image_path) for image_path in images])
    return stack.mean(axis=0)
def subtract_dark(image, dark):
    ''' Subtracts the dark frame from the exposure image and returns the result.'''
    corrected = image - dark
    return corrected
def view_integrator(calibrant):
    ''' Prints out information about the azimuthal integrator

    Input:
    calibrant: Path to the azimuthal integrator file (.PONI)

    Output:
    None'''

    integrator = pyFAI.load(calibrant)

    print("pyFAI version:", pyFAI.version)
    print("\nIntegrator: \n", integrator)
def read_brml(data, options={}, index=0):
    ''' Reads a single scan from a Bruker .brml-file by extracting and parsing
    its RawData0.xml.

    Input:
    data: dict with 'path' (list of paths) and 'wavelength' (list; an entry of
          None makes the wavelength be read from the file itself).
          NOTE(review): the 'wavelength' key is assumed to exist - KeyError otherwise
    options: dict to override the defaults below
    index: which entry of data['path'] to read

    Output:
    diffractogram: DataFrame with '2th' and 'I' columns
    wavelength: wavelength of the scan (Å) '''
    # FIXME: Can't read RECX1-data, apparently must be formatted differently from RECX2. Check the RawData-files and compare between the two files.

    required_options = ['extract_folder', 'save_folder']

    default_options = {
        'extract_folder': 'tmp',
        'save_folder': None
    }

    options = aux.update_options(options=options, required_options=required_options, default_options=default_options)

    if not os.path.isdir(options['extract_folder']):
        os.mkdir(options['extract_folder'])

    # Extract the RawData0.xml file from the brml-file
    with zipfile.ZipFile(data['path'][index], 'r') as brml:
        for info in brml.infolist():
            if "RawData" in info.filename:
                brml.extract(info.filename, options['extract_folder'])

    # Parse the RawData0.xml file
    path = os.path.join(options['extract_folder'], 'Experiment0/RawData0.xml')
    tree = ET.parse(path)
    root = tree.getroot()

    # The extracted files are no longer needed once the XML tree is in memory
    shutil.rmtree(options['extract_folder'])

    diffractogram = []

    for chain in root.findall('./DataRoutes/DataRoute'):
        # Get the scan type to be able to handle different data formats
        scantype = chain.findall('ScanInformation')[0].get('VisibleName')

        # Check if the chain is the right one to extract the data from
        if chain.get('Description') == 'Originally measured data.':
            if scantype == 'TwoTheta':
                # Datum is comma-separated: 2th at index 2, intensity at index 3
                for scandata in chain.findall('Datum'):
                    scandata = scandata.text.split(',')
                    twotheta, intensity = float(scandata[2]), float(scandata[3])
                    if twotheta > 0:
                        diffractogram.append({'2th': twotheta, 'I': intensity})

            elif scantype == 'Coupled TwoTheta/Theta':
                # Datum is comma-separated: 2th at index 2, intensity at index 4
                for scandata in chain.findall('Datum'):
                    scandata = scandata.text.split(',')
                    twotheta, intensity = float(scandata[2]), float(scandata[4])
                    if twotheta > 0:
                        diffractogram.append({'2th': twotheta, 'I': intensity})

            elif scantype == 'Still (Eiger2R_500K (1D mode))':
                # 1D detector still: the 2th-axis is reconstructed from the
                # scale-axis start/stop values of the scan
                start = float(chain.findall('ScanInformation/ScaleAxes/ScaleAxisInfo/Start')[0].text)
                stop = float(chain.findall('ScanInformation/ScaleAxes/ScaleAxisInfo/Stop')[0].text)

                for scandata in chain.findall('Datum'):
                    scandata = scandata.text.split(',')
                    raw = [float(i) for i in scandata]

                    # NOTE(review): values <= 601 are discarded - presumably masked or
                    # inactive detector channels; confirm against the instrument docs
                    intensity = []
                    for r in raw:
                        if r > 601:
                            intensity.append(r)

                    intensity = np.array(intensity)
                    twotheta = np.linspace(start, stop, len(intensity))

                    # For this scan type the list built above is replaced wholesale
                    diffractogram = {'2th': twotheta, 'I': intensity}

    #if 'wavelength' not in data.keys():
    # Find wavelength in the instrument information if not explicitly given
    if not data['wavelength'][index]:
        for chain in root.findall('./FixedInformation/Instrument/PrimaryTracks/TrackInfoData/MountedOptics/InfoData/Tube/WaveLengthAlpha1'):
            wavelength = float(chain.attrib['Value'])
    else:
        wavelength = data['wavelength'][index]

    diffractogram = pd.DataFrame(diffractogram)

    if options['save_folder']:
        if not os.path.isdir(options['save_folder']):
            os.makedirs(options['save_folder'])

        # NOTE(review): to_csv is handed the folder path, not a file path inside it -
        # this looks unintended; confirm whether a filename should be appended
        diffractogram.to_csv(options['save_folder'])

    return diffractogram, wavelength
def read_htxrd(data, options={}, index=0):
    ''' Reads all scans from a HTXRD .brml-file, returning one diffractogram
    (with temperature column) and one wavelength per scan.

    Input:
    data: dict with 'path' (path or list of paths); 'wavelength' entries may be
          None, in which case the wavelength is read from the file itself
    options: dict to override the defaults below
    index: which entry of data['path'] to read

    Output:
    diffractograms: list of DataFrames with '2th', 'I' and 'T' columns
    wavelengths: list of wavelengths (Å), one per scan '''

    required_options = ['extract_folder', 'save_folder', 'save_filename', 'adjust_time']

    default_options = {
        'extract_folder': 'tmp',
        'save_folder': None,
        'save_filename': None,
        'adjust_time': True  # timestamp each scan at its midpoint rather than its start
    }

    if not isinstance(data['path'], list):
        data['path'] = [data['path']]

    if 'wavelength' not in data.keys():
        data['wavelength'] = [None for i in range(len(data['path']))]

    options = aux.update_options(options=options, required_options=required_options, default_options=default_options)

    # Extract the RawData .xml files from the brml-file
    with zipfile.ZipFile(data['path'][index], 'r') as brml:
        for info in brml.infolist():
            if "RawData" in info.filename:
                brml.extract(info.filename, options['extract_folder'])

    # Get all filenames
    files = os.listdir(os.path.join(options['extract_folder'], 'Experiment0'))

    # initialise empty lists to store all DataFrames, wavelengths and timestamps
    diffractograms = []
    wavelengths = []
    timestamps = []

    active_scan = False

    # Loop through all RawData-files and extract all data and temperatures
    for i, file in enumerate(files):
        # FIX: the path was previously hardcoded to 'tmp/Experiment0/', which broke
        # whenever a non-default 'extract_folder' was used
        filename = os.path.join(options['extract_folder'], 'Experiment0', f'RawData{i}.xml')

        # Parse the .xml-files
        tree = ET.parse(filename)
        root = tree.getroot()

        # initialise empty list to store data from this particular scan
        diffractogram = []

        for chain in root.findall('./DataRoutes/DataRoute'):
            scantypes = chain.findall('ScanInformation')

            for scantype in scantypes:
                # 'Still (TCU1000N)' chains belong to the temperature control unit
                # and carry no diffraction data
                if scantype.get('VisibleName') == 'Still (TCU1000N)':
                    continue
                else:
                    # NOTE(review): active_scan is never reset between files, so once a
                    # real scan has been seen, timestamps are also collected for later
                    # TCU-only files - confirm this is intended
                    active_scan = True

                    if chain.get('RouteFlag') == 'Final':
                        # Datum is comma-separated: 2th at 2, intensity at 3, temperature at 5
                        for scandata in chain.findall('Datum'):
                            scandata = scandata.text.split(',')
                            twotheta, intensity, temperature = float(scandata[2]), float(scandata[3]), float(scandata[5])
                            diffractogram.append({'2th': twotheta, 'I': intensity, 'T': temperature})

        diffractogram = pd.DataFrame(diffractogram)
        diffractograms.append(diffractogram)

        # Find wavelength in the instrument information if not explicitly given
        if not data['wavelength'][index]:
            for chain in root.findall('./FixedInformation/Instrument/PrimaryTracks/TrackInfoData/MountedOptics/InfoData/Tube/WaveLengthAlpha1'):
                wavelength = float(chain.attrib['Value'])
        else:
            wavelength = data['wavelength'][index]

        wavelengths.append(wavelength)

        if active_scan:
            for chain in root.findall('./TimeStampStarted'):
                time_start = datetime.datetime.strptime(chain.text[:-7], "%Y-%m-%dT%H:%M:%S.%f")
            for chain in root.findall('./TimeStampFinished'):
                time_end = datetime.datetime.strptime(chain.text[:-7], "%Y-%m-%dT%H:%M:%S.%f")

            time_diff = time_end - time_start

            if options['adjust_time']:
                # Timestamp the scan at its midpoint
                timestamps.append(time_start + time_diff/2)
            else:
                # FIX: previously nothing was appended when adjust_time was False,
                # leaving timestamps empty and silently skipping the save loop below
                timestamps.append(time_start)

    if options['save_folder']:
        for i, (diffractogram, wavelength, timestamp) in enumerate(zip(diffractograms, wavelengths, timestamps)):
            if not options['save_filename']:
                filename = os.path.basename(data['path'][index]).split('.')[0] + '_' + str(i).zfill(4) + '.xy'
            else:
                filename = options['save_filename'] + '_' + str(i).zfill(4) + '.xy'

            if not os.path.isdir(options['save_folder']):
                os.makedirs(options['save_folder'])

            save_htxrd_as_xy(diffractogram, wavelength, timestamp, filename, options['save_folder'])

    shutil.rmtree(options['extract_folder'])

    return diffractograms, wavelengths
def save_htxrd_as_xy(diffractogram, wavelength, timestamp, filename, save_path):
    ''' Writes a single HTXRD scan to a .xy-file with metadata headers.

    Input:
    diffractogram: DataFrame with '2th', 'I' and 'T' columns; 'T' is averaged
                   into the header line and dropped from the data table
    wavelength: wavelength (Å) written to the header
    timestamp: timestamp written to the header
    filename: name of the output file
    save_path: folder the file is written into

    Output:
    None '''

    # FIX: dropped the redundant identity list comprehension around the header lines
    headers = '\n'.join([
        f'# Temperature {np.round(diffractogram["T"].mean())}',
        f'# Wavelength {wavelength}',
        f'# Time {timestamp}',
        '# 2th \t I'
    ])

    # Temperature lives in the header; only 2th and I go into the data table
    diffractogram = diffractogram.drop('T', axis=1)

    with open(os.path.join(save_path, filename), 'w', newline='\n') as f:
        # FIX: the original iterated over the header *string*, writing it one
        # character at a time; a single write produces byte-identical output
        f.write(headers)
        f.write('\n')
        diffractogram.to_csv(f, index=False, header=False, sep='\t')
def read_xy(data, options={}, index=0):
    ''' Reads a two- or three-column .xy diffractogram file, skipping any header
    lines that start with "#" or "'". The wavelength is taken from
    data['wavelength'] when given, otherwise parsed from the file headers.'''

    # Normalise the wavelength entry to a list so it can be indexed per scan
    if 'wavelength' in data.keys() and not type(data['wavelength']) == list:
        data['wavelength'] = [data['wavelength']]

    if not 'wavelength' in data.keys() or not data['wavelength'][index]:
        wavelength = read_metadata_from_xy(path=data['path'][index])['wavelength']
    else:
        wavelength = data['wavelength'][index]

    with open(data['path'][index], 'r') as f:
        # Walk past the header block, remembering where the last header line
        # ended so the handle can be rewound to the first data line
        position = 0
        line = f.readline()
        while line[0] in ('#', "'"):
            position = f.tell()
            line = f.readline()
        f.seek(position)

        diffractogram = pd.read_csv(f, header=None, delim_whitespace=True)

    # Name the columns according to how many there are (2: plain, 3: with sigma)
    column_names = {2: ['2th', 'I'], 3: ['2th', 'I', 'sigma']}
    if diffractogram.shape[1] in column_names:
        diffractogram.columns = column_names[diffractogram.shape[1]]

    return diffractogram, wavelength
def read_metadata_from_xy(path):
    ''' Parses metadata (wavelength, temperature, timestamp) from the header
    lines of a .xy-file. Any entry not found is returned as None.'''

    wavelength_dict = {'Cu': 1.54059, 'Mo': 0.71073}
    metadata = {}

    with open(path, 'r') as f:
        for line in f:
            if 'Anode' in line:
                # .xy-files exported from EVA state the anode material
                anode = line.split()[8].strip('"')
                metadata['wavelength'] = wavelength_dict[anode]
            elif 'Wavelength' in line:
                tokens = line.split()
                # .xy-files from pyFAI integration give the wavelength in metres
                if tokens[-1] == 'm':
                    metadata['wavelength'] = float(tokens[2])*10**10
                else:
                    metadata['wavelength'] = float(tokens[-1])

            # Temperature - exists in .xy-files saved from HTXRD-runs in .brml-files
            if 'Temperature' in line:
                metadata['temperature'] = line.split()[-1]

            # Timestamp - exists in .xy-files saved from .brml-files
            if 'Time' in line:
                metadata['time'] = " ".join(line.split()[2:])

    # Fill in None for anything the headers did not provide
    for key in ('wavelength', 'temperature', 'time'):
        metadata.setdefault(key, None)

    return metadata
def find_wavelength_from_poni(path):
    ''' Reads the wavelength from a pyFAI calibration file (.poni).

    Input:
    path: path to the .poni-file

    Output:
    wavelength: wavelength in Å (the .poni-file stores metres), or None when
    no Wavelength line is present '''

    # FIX: initialised to None so a file without a Wavelength entry no longer
    # raises UnboundLocalError; the last matching line wins, as before
    wavelength = None

    with open(path, 'r') as f:
        for line in f:
            if 'Wavelength' in line:
                wavelength = float(line.split()[-1])*10**10

    return wavelength
def strip_headers_from_xy(path: str, filename=None) -> None:
    ''' Strips header lines (starting with "#" or "'") from a .xy-file and
    writes the remaining data lines to a new file.

    Input:
    path: path to the .xy-file
    filename (optional): output path; defaults to the input path with
    '_noheaders' inserted before the extension

    Output:
    None '''

    with open(path, 'r') as f:
        # startswith() is safe on empty strings, unlike indexing line[0]
        xy = [line for line in f if not line.startswith(('#', "'"))]

    if not filename:
        # FIX: os.path.splitext replaces path.split(f'.{ext}'), which broke when
        # the extension string occurred earlier in the path
        trunk, ext = os.path.splitext(path)
        filename = f'{trunk}_noheaders{ext}'

    with open(filename, 'w') as f:
        f.writelines(xy)
def read_data(data, options={}, index=0):
    ''' Dispatches reading of a single scan based on its file extension and
    applies exclusion regions, offsets and wavelength translations.

    Input:
    data: dict with 'path' (list of paths) and optionally 'wavelength'
    options: dict; must contain 'exclude', 'offset' and 'normalise' keys
    index: which entry of data['path'] to read

    Output:
    diffractogram: DataFrame with the scan data and translated-wavelength columns
    wavelength: wavelength of the scan (Å)

    Raises:
    ValueError: if the file extension is not supported '''

    beamline_extensions = ['mar3450', 'edf', 'cbf']
    file_extension = data['path'][index].split('.')[-1]

    if file_extension in beamline_extensions:
        diffractogram, wavelength = integrate_1d(data=data, options=options, index=index)
    elif file_extension == 'brml':
        diffractogram, wavelength = read_brml(data=data, options=options, index=index)
    elif file_extension in ['xy', 'xye']:
        diffractogram, wavelength = read_xy(data=data, options=options, index=index)
    else:
        # FIX: an unsupported extension previously fell through and crashed below
        # with UnboundLocalError; fail fast with a clear message instead
        raise ValueError(f'Unsupported file extension: {file_extension}')

    if options['exclude']:
        if not isinstance(options['exclude'], list):
            options['exclude'] = [options['exclude']]

        for excl in options['exclude']:
            # Zero out intensities inside each excluded (min, max) 2th-region.
            # Combined row/column .loc avoids chained-assignment pitfalls.
            diffractogram.loc[(diffractogram['2th'] > excl[0]) & (diffractogram['2th'] < excl[1]), 'I'] = 0

    if options['offset'] or options['normalise']:
        # Make copy of the original intensities before any changes are made through
        # normalisation or offset, to easily revert back if need to update.
        diffractogram['I_org'] = diffractogram['I']
        diffractogram['2th_org'] = diffractogram['2th']
        diffractogram = adjust_intensities(diffractogram, wavelength, index, options)

    diffractogram = translate_wavelengths(data=diffractogram, wavelength=wavelength)

    return diffractogram, wavelength
def adjust_intensities(diffractogram, wavelength, index, options):
    ''' Applies normalisation, per-scan multiplication, mean-drawdown and x/y
    offsets to a diffractogram; the scan index staggers multiple scans.

    Input:
    diffractogram: DataFrame with 'I', 'I_org', '2th' and '2th_org' columns
    wavelength: wavelength of the scan (Å), used to scale the x-offset
    index: scan index; both offsets are multiplied by it to stack scans
    options: dict; must contain 'offset_y', 'offset_x', 'normalise', 'multiply'
             and 'drawdown' (KeyError otherwise). Mutated in place: tracks the
             applied offsets in 'current_offset_y'/'current_offset_x' and sets
             'offset_change' when the y-offset differs from the last call.

    Output:
    diffractogram: the adjusted DataFrame '''

    # Track the currently applied y-offset so callers can detect when it changed
    if 'current_offset_y' not in options.keys():
        options['current_offset_y'] = options['offset_y']
    else:
        if options['current_offset_y'] != options['offset_y']:
            options['offset_change'] = True
            options['current_offset_y'] = options['offset_y']

    options['current_offset_x'] = options['offset_x']

    # Apply offset along y-axis
    diffractogram['I'] = diffractogram['I_org'] # Reset intensities

    if options['normalise']:
        diffractogram['I'] = diffractogram['I'] / diffractogram['I'].max()

    # Each scan can have its own multiplication factor; a scalar is wrapped in a list.
    # NOTE(review): if the 'multiply' list is shorter than the number of scans this
    # raises IndexError - confirm callers always pass one factor per scan
    if not isinstance(options['multiply'], list):
        options['multiply'] = [options['multiply']]

    diffractogram['I'] = diffractogram['I'] * options['multiply'][index]

    if options['drawdown']:
        # Centre the intensities around zero
        diffractogram['I'] = diffractogram['I'] - diffractogram['I'].mean()

    diffractogram['I'] = diffractogram['I'] + index*options['offset_y']

    # Apply offset along x-axis
    relative_shift = (wavelength / 1.54059)*options['offset_x'] # Adjusts the offset-factor to account for wavelength, so that offset_x given is given in 2th_cuka-units
    diffractogram['2th'] = diffractogram['2th_org']
    diffractogram['2th'] = diffractogram['2th'] + index*relative_shift

    return diffractogram
def revert_offset(diffractogram, which=None):
    ''' Restores the original (pre-offset) values of a diffractogram.

    which: 'x' restores 2th, 'y' restores I, 'both' restores both;
    anything else leaves the DataFrame untouched.'''

    if which in ('both', 'y'):
        diffractogram['I'] = diffractogram['I_org']

    if which in ('both', 'x'):
        diffractogram['2th'] = diffractogram['2th_org']

    return diffractogram
def load_reflection_table(data: dict, reflections_params: dict, options={}):
    ''' Loads a reflection table exported from VESTA and translates its
    2th-values to other wavelength scales.

    Input:
    data: dict; if it contains a 'heatmap' array, a 'heatmap' column with scaled
          x-coordinates is added (the 2th-range of data['diffractogram'][0] is
          mapped linearly onto the heatmap's column axis)
    reflections_params: dict with 'path' to the VESTA reflection table file
    options: dict to override the defaults below

    Output:
    reflections: DataFrame with the reflection table '''

    required_options = ['ref_wavelength', 'to_wavelength']

    default_options = {
        'ref_wavelength': 1.54059,  # wavelength the table's 2th-values refer to (Cu Kalpha)
        'to_wavelength': None
    }

    options = aux.update_options(options=options, required_options=required_options, default_options=default_options)

    # VESTA outputs the file with a header that has a space between the parameter and units - so there is some extra code to rectify the issue
    # that ensues from this formatting
    reflections = pd.read_csv(reflections_params['path'], delim_whitespace=True)

    # Remove the extra column that appears from the headers issue
    reflections.drop(reflections.columns[-1], axis=1, inplace=True)

    with open(reflections_params['path'], 'r') as f:
        line = f.readline()
        headers = line.split()
        # Delete the fifth token, which is the stray '(Å)' unit split off the d-spacing header
        del headers[4]
        # Change name of column to avoid using greek letters
        headers[7] = '2th'

    # Set the new modified headers as the headers of the reflection table
    reflections.columns = headers

    reflections = translate_wavelengths(data=reflections, wavelength=options['ref_wavelength'], to_wavelength=options['to_wavelength'])

    if 'heatmap' in data.keys():
        # Map 2th-values onto heatmap pixel coordinates by linear scaling
        start_2th, stop_2th = data['diffractogram'][0]['2th'].min(), data['diffractogram'][0]['2th'].max()
        len_2th = stop_2th - start_2th
        #print(start_2th, stop_2th, len_2th)

        start_heatmap, stop_heatmap = 0, data['heatmap'].shape[1]
        len_heatmap = stop_heatmap - start_heatmap
        #print(start_heatmap, stop_heatmap, len_heatmap)

        scale = len_heatmap/len_2th
        #print(scale)
        #print(stop_2th * scale)

        reflections['heatmap'] = (reflections['2th']-start_2th) * scale

    return reflections
def translate_wavelengths(data: pd.DataFrame, wavelength: float, to_wavelength=None) -> pd.DataFrame:
    ''' Adds columns to the DataFrame translating its 2th-values (measured at
    the given wavelength) onto other scales: 2th at Cu Kalpha and Mo Kalpha,
    d-spacing, 1/d, q, q^2 and q^4. Optionally rewrites the '2th' column itself
    to another wavelength.

    Input:
    data: DataFrame with a '2th' column (degrees)
    wavelength: wavelength the data was measured at (Å)
    to_wavelength (optional): wavelength (Å) to translate the '2th' column into

    Output:
    data: the same DataFrame with the extra columns added; rows whose angle is
    not reachable at the target wavelength are left as NaN '''

    pd.options.mode.chained_assignment = None

    # Translate to CuKalpha
    cuka = 1.54059  # Å
    if cuka > wavelength:
        # Beyond this angle the arcsin-argument would exceed 1; such rows stay NaN
        max_2th_cuka = 2*np.arcsin(wavelength/cuka) * 180/np.pi
    else:
        max_2th_cuka = data['2th'].max()

    # np.nan: the np.NAN alias was removed in NumPy 2.0
    data['2th_cuka'] = np.nan
    cuka_mask = data['2th'] <= max_2th_cuka
    data.loc[cuka_mask, '2th_cuka'] = 2*np.arcsin(cuka/wavelength * np.sin((data.loc[cuka_mask, '2th']/2) * np.pi/180)) * 180/np.pi

    # Translate to MoKalpha
    moka = 0.71073  # Å
    if moka > wavelength:
        max_2th_moka = 2*np.arcsin(wavelength/moka) * 180/np.pi
    else:
        max_2th_moka = data['2th'].max()

    data['2th_moka'] = np.nan
    moka_mask = data['2th'] <= max_2th_moka
    data.loc[moka_mask, '2th_moka'] = 2*np.arcsin(moka/wavelength * np.sin((data.loc[moka_mask, '2th']/2) * np.pi/180)) * 180/np.pi

    # Convert to other parameters.
    # FIX: Bragg's law gives d = lambda / (2 sin(theta)) with theta = 2th/2 - the
    # previous code evaluated sin(2th) instead of sin(theta), producing wrong d and
    # 1/d columns (inconsistent with the q column below; now q == 2*pi/d holds).
    data['d'] = wavelength / (2*np.sin((data['2th']/2) * np.pi/180))
    data['1/d'] = 1/data['d']
    data['q'] = np.abs((4*np.pi/wavelength)*np.sin(data['2th']/2 * np.pi/180))
    data['q2'] = data['q']**2
    data['q4'] = data['q']**4

    if to_wavelength:
        if to_wavelength >= cuka:
            max_2th = 2*np.arcsin(cuka/to_wavelength) * 180/np.pi
        else:
            max_2th = data['2th_cuka'].max()

        data['2th'] = np.nan
        to_mask = data['2th_cuka'] <= max_2th
        data.loc[to_mask, '2th'] = 2*np.arcsin(to_wavelength/cuka * np.sin((data.loc[to_mask, '2th_cuka']/2) * np.pi/180)) * 180/np.pi

    return data
def trim_xy_region(path, region):
    ''' Trims a two-column .xy-file down to the 2th-range given by region
    (min, max) and writes the result to a "trimmed" subfolder next to the
    original file.'''

    df = pd.read_csv(path, header=None, delim_whitespace=True)
    df.columns = ['2th', 'I']

    # Keep only rows strictly inside the requested 2th-window
    lower, upper = region
    df = df.loc[(df['2th'] > lower) & (df['2th'] < upper)]

    save_folder = os.path.join(os.path.dirname(path), 'trimmed')
    os.makedirs(save_folder, exist_ok=True)

    df.to_csv(os.path.join(save_folder, os.path.basename(path)), sep='\t', header=None, index=None)
def raise_intensities_xy(path, region=None):
    ''' Shifts the intensities of a two-column .xy-file so their minimum is zero
    (optionally restricted to the 2th-range given by region) and writes the
    result to a "raised" subfolder next to the original file.'''

    df = pd.read_csv(path, header=None, delim_whitespace=True)
    df.columns = ['2th', 'I']

    if region:
        # Restrict to rows strictly inside the requested 2th-window first
        lower, upper = region
        df = df.loc[(df['2th'] > lower) & (df['2th'] < upper)]

    # Raise the baseline so the lowest intensity becomes zero
    df['I'] = df['I'] - df['I'].min()

    save_folder = os.path.join(os.path.dirname(path), 'raised')
    os.makedirs(save_folder, exist_ok=True)

    df.to_csv(os.path.join(save_folder, os.path.basename(path)), sep='\t', header=None, index=None)

1114
nafuma/xrd/plot.py Normal file

File diff suppressed because it is too large Load diff

1132
nafuma/xrd/refinement.py Normal file

File diff suppressed because it is too large Load diff

44
nafuma/xrd/snippets.json Normal file
View file

@ -0,0 +1,44 @@
{
"calculation_step": "x_calculation_step = Yobs_dx_at(Xo); convolution_step {}",
"capillary": [
"local !packing_density {} min 0.1 max 1.0 'typically 0.2 to 0.5",
"local !capdia {} 'capillary diameter in mm",
"local !linab = Get(mixture_MAC) Get(mixture_density_g_on_cm3);: 100 'in cm-1",
"local muR = (capdia/20)*linab*packing_density;",
"Cylindrical_I_Correction(muR)"
],
"gauss_fwhm": "gauss_fwhm = Sqrt({} Cos(2 * Th)^4 + {} Cos(2 * Th)^2 + {});",
"lp_factor": "LP_Factor({}) 'change the LP correction or lh value if required",
"synchrotron": "lam ymin_on_ymax 0.0001 la 1.0 lo {} lh 0.1",
"neutron": [
"lam ymin_on_ymax 0.0001 la 1.0 lo {} lh 0.5",
"neutron_data"
],
"MoKa":[
"lam ymin_on_ymax 0.0001",
"la 0.6533 lo 0.7093 lh 0.2695",
"la 0.3467 lo 0.713574 lh 0.2795"
],
"RECX2": [
"Rp 280",
"Rs 280"
],
"zero_error": "Zero_Error(!zero, 0)",
"th2_offset": [
"prm !zero\t\t\t= 0 ;: 0 \t\t\t\tmin = Max(Val - 20 Yobs_dx_at(X1), -100 Yobs_dx_at(X1)); max = Min(Val + 20 Yobs_dx_at(X2), 100 Yobs_dx_at(X2)); del = .01 Yobs_dx_at(X1); val_on_continue 0",
"prm !cos_shift\t\t= 0 ;: 0 \t\t\t\tmin = Val-.8; max = Val+.8; del 0.001",
"prm !sin_shift\t\t= 0 ;: 0 \t\t\t\tmin = Val-.8; max = Val+.8; del 0.001",
"th2_offset = (zero) + (cos_shift) Cos(Th) + (sin_shift) Sin(Th) ;"
],
"fit_peak_width": "DC1( ad, 0, bd, 0, cd, 0)",
"TCHZ_Peak_Type": "TCHZ_Peak_Type(pku_1, 0, pkv_1, 0,pkw_1, 0, !pkx_1, 0.0000,pky_1, 0,!pkz_1, 0.0000)",
"Simple_Axial_Model": "Simple_Axial_Model( axial_1, 0)",
"magnetic_moment_str": "mlx = ml_x_{}_{}_XXXX ; \t mly = ml_y_{}_{}_XXXX ; \t mlz = ml_z_{}_{}_XXXX ; \t MM_CrystalAxis_Display( 0, 0, 0)",
"peak": [
"xo_Is",
"xo @ {}",
"peak_type fp",
"LVol_FWHM_CS_G_L( 1, 0, 0.89, 0,,,@, 2)",
"I @ 35.35632`"
]
}

1
nafuma/xrd/topas.conf Normal file
View file

@ -0,0 +1 @@
C:/TOPAS6/

View file

@ -0,0 +1,15 @@
{
"A_matrix_memory_allowed_in_Mbytes": null,
"approximate_A": false,
"bootstrap_errors": null,
"capdia": 0.5,
"chi2_convergence_criteria": 0.001,
"conserve_memory": false,
"continue_after_convergence": false,
"convolution_step": 1,
"do_errors": false,
"iters": 100000,
"lp_factor": 90,
"num_runs": null,
"packing_density": 0.5
}

11
setup.py Normal file
View file

@ -0,0 +1,11 @@
from setuptools import setup, find_packages
setup(name='nafuma',
version='0.4',
description='Analysis tools for inorganic materials chemistry at the NAFUMA-group at the University of Oslo',
url='https://github.com/rasmusthog/nafuma',
author='Rasmus Vester Thøgersen, Halvor Høen Hval',
author_email='code@rasmusthog.me',
license='MIT',
packages=find_packages(),
zip_safe=False)

0
test.txt Normal file
View file