Compare commits
No commits in common. "8a044e73e18a2d9da31d1772fc03576fe2ab290d" and "ca87c8595ff6229f2df3d26128364c8401528ba4" have entirely different histories.
8a044e73e1
...
ca87c8595f
54 changed files with 2 additions and 13868 deletions
35  .github/workflows/automated-testing.yml  vendored
@@ -1,35 +0,0 @@
name: Automated testing

on: [push]

jobs:
  build-linux:
    runs-on: ubuntu-latest
    strategy:
      max-parallel: 5

    steps:
    - uses: actions/checkout@v3
    - name: Set up Python 3.10
      uses: actions/setup-python@v3
      with:
        python-version: 3.10.4
    - name: Add conda to system path
      run: |
        # $CONDA is an environment variable pointing to the root of the miniconda directory
        echo $CONDA/bin >> $GITHUB_PATH
    - name: Install dependencies
      run: |
        conda env update --file environment.yml --name base
        pip install .
    - name: Lint with flake8
      run: |
        conda install flake8
        # stop the build if there are Python syntax errors or undefined names
        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
    - name: Test with pytest
      run: |
        conda install pytest
        pytest
129  .gitignore  vendored
@@ -1,129 +0,0 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
2  README.md  Normal file
@@ -0,0 +1,2 @@
# nafuma
@@ -1,20 +0,0 @@
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS    ?=
SPHINXBUILD   ?= sphinx-build
SOURCEDIR     = .
BUILDDIR      = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
@@ -1,9 +0,0 @@
# About

This package contains data processing, analysis and viewing tools written in Python for several different activities related to inorganic materials chemistry conducted in the NAFUMA group at the University of Oslo. It is written with the intention of creating a reproducible workflow for documentation purposes, with a focus on interactivity in the data exploration process.

As of now (08-04-22), the intention is to include tools for XRD, XANES and electrochemistry analysis, though other modules might be added as well.
57  docs/conf.py
@@ -1,57 +0,0 @@
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))


# -- Project information -----------------------------------------------------

project = 'NAFUMA'
copyright = '2022, Rasmus Vester Thøgersen & Halvor Høen Hval'
author = 'Rasmus Vester Thøgersen & Halvor Høen Hval'

# The full version, including alpha/beta/rc tags
release = '0.2'


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['myst_parser']
source_suffix = ['.rst', '.md']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

html_sidebars = {'**': ['globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html']}
@@ -1,22 +0,0 @@
.. NAFUMA documentation master file, created by
   sphinx-quickstart on Fri Apr 8 15:32:14 2022.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Welcome to NAFUMA's documentation!
==================================

.. toctree::
   :maxdepth: 2
   :caption: Contents:

   about
   installation
   modules/modules

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
@@ -1,25 +0,0 @@
# Installation

This package is not available on any package repositories, but can be installed by cloning the repository from GitHub and running `pip install` from the root folder:

```
$ git clone git@github.com:rasmusthog/nafuma.git
$ cd nafuma
$ pip install .
```

If you are planning on making changes to the code base, consider installing it in develop mode by including the `-e` flag, so that changes take effect without reinstalling:

```
pip install -e .
```

As of now (v0.2, 08-04-22), the installer will not install any dependencies. It is recommended that you use `conda` to create an environment from `environment.yml` in the root folder:

```
$ conda env create --name <your_environment_name_here> --file environment.yml
$ conda activate <your_environment_name_here>
```

(Remember to drop the `<>` when substituting your environment name.)

This should get you up and running!
@@ -1,35 +0,0 @@
@ECHO OFF

pushd %~dp0

REM Command file for Sphinx documentation

if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build

if "%1" == "" goto help

%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.https://www.sphinx-doc.org/
	exit /b 1
)

%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%

:end
popd
@@ -1,3 +0,0 @@
# Electrochemistry

This is a placeholder.
@@ -1,12 +0,0 @@
Modules
==================================

.. toctree::
   :maxdepth: 1
   :caption: Contents

   xrd.md
   xanes.md
   electrochemistry.md
@@ -1 +0,0 @@
# XANES
@@ -1,130 +0,0 @@
# XRD

This module contains functions to view diffractogram data from several different sources. Some features include:

- Plotting the data in wavelength-independent parameters (d, 1/d, q, q{math}`^2`, q{math}`^4`), or translated to CuK{math}`\alpha` or MoK{math}`\alpha`, allowing comparison between diffractograms obtained with different wavelengths (a conversion sketch follows this list)
- Plotting in interactive mode within Jupyter Notebook using the `ipywidgets` package, allowing real-time changes to (certain) parameters
- Plotting reflection ticks and/or reflection indices from multiple simulated reflection tables (generated by VESTA) for comparison
- Plotting series of diffractograms in stacked mode (including the ability to rotate the view for a 3D view) or as a heatmap
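For reference, a minimal sketch of the wavelength-independent conversion, which follows directly from Bragg's law (this helper is not part of the package API; it is shown only for illustration):

```py
import numpy as np

def to_d_and_q(twotheta_deg, wavelength):
    """Convert 2theta (degrees) measured at a given wavelength (Å) into d (Å) and q (Å^-1)."""
    theta = np.radians(twotheta_deg) / 2
    d = wavelength / (2 * np.sin(theta))        # Bragg's law: lambda = 2 d sin(theta)
    q = 4 * np.pi * np.sin(theta) / wavelength  # q = 2 pi / d
    return d, q
```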
## 1 Compatible file formats

The module is partially built as a wrapper around [pyFAI](https://github.com/silx-kit/pyFAI) (Fast Azimuthal Integration), developed at the ESRF for integrating the 2D diffractograms produced by their detectors. Given a suitable calibration file (`.poni`), the XRD module will automatically integrate any file pyFAI can integrate. In interactive mode the integration is only done once per call, but since the integration runs again each time the function is called, it is advised to integrate large numbers of diffractograms in a separate processing step and save the results as `.xy` files.
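If you want to perform that pre-integration step yourself, here is a minimal sketch using pyFAI and fabio directly (file names are placeholders, and this bypasses the XRD module entirely):

```py
import fabio
import pyFAI

# Load the detector calibration and integrate each 2D frame to a two-column .xy file
ai = pyFAI.load('calibrant.poni')

for path in ['frame_0001.edf', 'frame_0002.edf']:
    img = fabio.open(path).data
    ai.integrate1d(img, 3000, unit='2th_deg', filename=path.replace('.edf', '.xy'))
```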
In addition to this, it can also read the `.brml` files produced by the Bruker instruments in the RECX lab at the University of Oslo.

## 2 Basic usage

Plotting diffractograms is done by calling the `xrd.plot.plot_diffractogram()` function, which takes two dictionaries as arguments: `data`, containing all data-specific information, and `options`, which allows customisation of a range of different parameters. The `options` argument is optional; the function provides default values to make as good a plot as possible to begin with.

**Example #1: Single diffractogram**

```py
import nafuma.xrd as xrd

data = {
    'path': 'path/to/data/diffractogram.brml'
}

options = {
    'reflections_data': [
        {'path': 'reflections_phase_1.txt', 'min_alpha': 0.1, 'reflection_indices': 4, 'label': 'Phase 1', 'text_colour': 'black'},
        {'path': 'reflections_phase_2.txt', 'min_alpha': 0.1, 'reflection_indices': 4, 'label': 'Phase 2', 'text_colour': 'red'}
    ],
    'hide_y_ticklabels': True,
    'hide_y_ticks': True
}

diff, fig, ax = xrd.plot.plot_diffractogram(data=data, options=options)
```

The return value `diff` is a list containing one `pandas.DataFrame` per diffractogram passed (in the above example only one). `fig` and `ax` are `matplotlib.figure.Figure` and `matplotlib.axes.Axes` objects, respectively.

**Example #2: 2D diffractogram from the ESRF requiring integration**

```py
import nafuma.xrd as xrd

data = {
    'path': 'path/to/data/2d_diffractogram.edf',
    'calibrant': 'path/to/calibrant/calibrant.poni',
    'nbins': 3000
}

diff, _, _ = xrd.plot.plot_diffractogram(data=data)
```

In this case we did not specify any options, so only default values are used, and we discarded both `fig` and `ax` by assigning them to throwaway variables, as we do not intend to use them.

**Example #3: Plotting in interactive mode**

This can be done within a Jupyter Notebook, and allows the user to tweak certain parameters in real time instead of having to call the function again every time.

```py
import nafuma.xrd as xrd

data = {
    'path': 'path/to/data/diffractogram.brml'
}

options = {
    'interactive': True
}

diff, _, _ = xrd.plot.plot_diffractogram(data=data, options=options)
```

**Example #4: Plotting multiple diffractograms as stacked plots**

Instead of passing just a string, you can pass a list of filenames. These will be plotted sequentially, with offsets if desired (`offset_x` and `offset_y`). The default value of `offset_y` is 1 if fewer than 10 diffractograms are passed, and 0.1 if more than 10 are passed. When plotting series data (e.g. from *in situ* or *operando* measurements), a smaller offset is suitable. Keep in mind that these values only make sense when the diffractograms are normalised (`'normalise': True`); if not, the default offsets will be far too small to be noticeable.

```py
import nafuma.xrd as xrd

data = {
    'path': ['path/to/data/diffractogram_1.brml', 'path/to/data/diffractogram_2.brml']
}

options = {
    'offset_y': 0.1,
    'offset_x': 0.05,
}

diff, _, _ = xrd.plot.plot_diffractogram(data=data, options=options)
```

**Example #5: Plotting series data as a heatmap**

This differs very little from the above, except that heatmaps rarely make sense for anything other than series data, and that you don't want offsets in heatmaps.

```py
import nafuma.xrd as xrd

list_of_data = ['data_1.brml', 'data_2.brml', ..., 'data_n.brml']

data = {
    'path': list_of_data
}

options = {
    'heatmap': True
}

diff, _, _ = xrd.plot.plot_diffractogram(data=data, options=options)
```
@@ -1,16 +0,0 @@
name: nafuma
channels:
  - diffpy
  - defaults
  - conda-forge
dependencies:
  - ipywidgets
  - seaborn
  - sympy
  - matplotlib
  - pytest
  - numpy
  - pandas
  - palettable
  - pyfai
prefix: C:\Users\rasmusvt\Anaconda3\envs\nafuma

@@ -1 +0,0 @@
@@ -1,188 +0,0 @@
import json
import numpy as np
import os
import shutil

import time
from datetime import datetime


def update_options(options, default_options, required_options=None):
    '''Takes a dictionary of options along with a dictionary of default options, and sets every key-value pair of options that is not already defined to its default value.'''
    # FIXME This has been updated so that required_options is no longer needed, but lots of scripts still pass it.
    # For now it is accepted with a default value and remains unused; all scripts must stop passing it before it can be removed.

    for option in default_options.keys():
        if option not in options.keys():
            options[option] = default_options[option]

    return options
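
# A minimal usage sketch (hypothetical keys and values): user-supplied options
# win, missing keys fall back to the defaults.
#
#     opts = update_options({'dpi': 300}, {'dpi': 600, 'legend': True})
#     # -> {'dpi': 300, 'legend': True}
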
def save_options(options, path, ignore=None):
    '''Saves any options dictionary to a JSON file in the specified path.'''

    options_copy = options.copy()

    if ignore:
        if not isinstance(ignore, list):
            ignore = [ignore]

        for i in ignore:
            options_copy[i] = 'Removed'

    if not os.path.isdir(os.path.dirname(path)):
        if os.path.dirname(path):
            os.makedirs(os.path.dirname(path))

    with open(path, 'w') as f:
        json.dump(options_copy, f, skipkeys=True, indent=4)


def load_options(path):
    '''Loads a JSON file into a dictionary.'''

    with open(path, 'r') as f:
        options = json.load(f)

    return options


def ceil(a, roundto=1):
    # Round up to the nearest multiple of roundto
    fac = 1 / roundto
    a = np.ceil(a * fac) / fac

    return a


def floor(a, roundto=1):
    # Round down to the nearest multiple of roundto
    fac = 1 / roundto
    a = np.floor(a * fac) / fac

    return a


def write_log(message, options={}):

    required_options = ['logfile']
    default_options = {
        'logfile': f'{datetime.now().strftime("%Y-%m-%d-%H-%M-%S.log")}'
    }

    options = update_options(options=options, required_options=required_options, default_options=default_options)

    # Only create a directory if the logfile path actually contains one
    if os.path.dirname(options['logfile']) and not os.path.isdir(os.path.dirname(options['logfile'])):
        os.makedirs(os.path.dirname(options['logfile']))

    now = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
    message = f'[{now}] {message} \n'

    with open(options['logfile'], 'a') as f:
        f.write(message)


# "Collects" all the files in a folder, e.g. only accepting .dat-files from XANES measurements
def get_filenames(path, ext, filter=''):
    '''Collects all filenames from the specified path with a specified extension.

    Input:
    path: path to find all filenames (relative or absolute)
    ext: extension (including ".")'''

    filenames = [os.path.join(path, filename) for filename in os.listdir(path) if os.path.isfile(os.path.join(path, filename)) and filename.endswith(ext) and filter in filename]

    return filenames


def move_list_element_last(filenames, string):
    # Move filenames containing string to the end of the list
    for i, file in enumerate(filenames):
        if string in file:
            del filenames[i]
            filenames.append(file)
    return filenames


def backup_file(filename, backup_dir):
    # Create the backup folder if it does not exist
    if not os.path.isdir(backup_dir):
        os.makedirs(backup_dir)

    # Get a list of all previous backup files with the same basename, as well as the modification time of the file
    prev_backup_files = [file for file in os.listdir(backup_dir) if os.path.basename(filename.split('.')[0]) in file]
    creation_time = datetime.strptime(time.ctime(os.path.getmtime(filename)), '%a %b %d %H:%M:%S %Y').strftime("%Y-%m-%d_%H-%M-%S")
    ext = '.' + filename.split('.')[-1]

    dst_basename = creation_time + '_' + filename.split('.')[0] + '_' + f'{len(prev_backup_files)}'.zfill(4) + ext
    dst = os.path.join(backup_dir, dst_basename)

    shutil.copy(filename, dst)


def get_unique(full_list):
    # Return the unique entries of full_list, preserving order
    unique_list = []

    for entry in full_list:
        if entry not in unique_list:
            unique_list.append(entry)

    return unique_list


def swap_values(options: dict, key1, key2):
    # Swap the values stored under key1 and key2 (each may be a single key or a list of keys)
    if not isinstance(key1, list):
        key1 = [key1]
    if not isinstance(key2, list):
        key2 = [key2]

    assert len(key1) == len(key2)

    for k1, k2 in zip(key1, key2):
        options[k1], options[k2] = options[k2], options[k1]

    return options


def find_neighbours(value, df, colname, start=0, end=-1):
    '''Finds the closest match to a given value in colname of df. If there is an exact match, returns the index of this value. Otherwise, returns the nearest neighbours (upper and lower).'''

    df = df.iloc[start:end]

    exactmatch = df[df[colname] == value]
    if not exactmatch.empty:
        return exactmatch.index.values[0]
    else:
        lower_df = df[df[colname] < value][colname]
        upper_df = df[df[colname] > value][colname]

        if not lower_df.empty:
            lowerneighbour_ind = lower_df.idxmax()
        else:
            lowerneighbour_ind = np.nan

        if not upper_df.empty:
            upperneighbour_ind = upper_df.idxmin()
        else:
            upperneighbour_ind = np.nan

        return [lowerneighbour_ind, upperneighbour_ind]
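
# A minimal usage sketch (hypothetical DataFrame and column name): look up the
# rows bracketing a 2theta value in a diffractogram DataFrame.
#
#     idx = find_neighbours(25.03, diffractogram, '2th')
#     # -> a single index on an exact match, else [lower_index, upper_index]
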
def isnan(value):
    # NaN is the only value that does not compare equal to itself
    return value != value

@@ -1 +0,0 @@
from . import electrons, io, structure, phonons
File diff suppressed because it is too large

1021  nafuma/dft/io.py
File diff suppressed because it is too large

File diff suppressed because it is too large
@@ -1,935 +0,0 @@
import math
import re
import pandas as pd
import numpy as np
from scipy.optimize import curve_fit
import warnings

import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator)
import importlib
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
from mpl_toolkits.axisartist.axislines import Subplot
from cycler import cycler
import itertools

from ase import Atoms
from ase.io.trajectory import Trajectory
from ase import io
from ase.units import kJ
from ase.eos import EquationOfState
import os
import os.path

import nafuma.auxillary as aux
import nafuma.plotting as btp


def read_eos_data(path, options):
    '''Reads volume and energy data from an energy-volume run and fits the data to an equation of state. Outputs a list with one pandas DataFrame containing the data points from the DFT calculations,
    one DataFrame containing the fitted curve data points, and one dictionary with the equilibrium volume, equilibrium energy and bulk modulus in GPa.

    path: Path to the folder containing the energ.dat and POSCAR files. energ.dat must have three whitespace-separated columns: configuration, volume and energy.
    atoms_per_fu: Number of atoms per formula unit. Used to scale the values to be comparable with other calculations that may have a different sized unit cell.
    eos: Type of equation of state to fit to. Same keywords as the ones used in ASE, as it simply calls ASE to fit the equation of state.
    '''

    required_options = ['atoms_per_fu', 'reference', 'eos']

    default_options = {
        'atoms_per_fu': -1,  # scaling factor to output energy per f.u.
        'reference': 0,  # whether the energy should be relative to some reference energy (typically the lowest energy)
        'eos': 'birchmurnaghan',  # what type of EoS curve to fit the data to. Options: murnaghan, birch, birchmurnaghan, vinet, pouriertarantola
    }

    options = update_options(options=options, required_options=required_options, default_options=default_options)

    # Make paths for the energ.dat and POSCAR files.
    energ_path = os.path.join(path, 'energ.dat')
    poscar_path = os.path.join(path, 'POSCAR')

    # Read POSCAR and calculate the scale factor to give values per formula unit
    at = io.read(poscar_path)

    if options['atoms_per_fu'] == -1:
        scale_factor = 1
    else:
        scale_factor = options['atoms_per_fu'] / len(at)

    # Get the label
    label = os.path.basename(path)

    # Read the energ.dat file and structure the data into a pandas DataFrame, then scale the values according to the scale factor.
    dft_df = pd.read_csv(energ_path, delim_whitespace=True, header=None)
    dft_df.columns = ['Configuration', 'Volume', 'Energy']
    dft_df['Energy'] = dft_df['Energy'] * scale_factor
    dft_df['Volume'] = dft_df['Volume'] * scale_factor

    dft_df["Energy"] = dft_df["Energy"] - options['reference']  # subtracts a reference energy if provided. This value defaults to 0, so it does nothing if not provided.

    # Fit the data to an equation of state using ASE's EquationOfState object. Make a DataFrame out of the data points of the fitted curve and a dictionary of the equilibrium constants,
    # then package everything in a list which is returned by the function.
    eos = EquationOfState(dft_df['Volume'].values, dft_df['Energy'].values, eos=options['eos'])

    try:
        v0, e0, B = eos.fit()
        eos_df = pd.DataFrame(data={'Volume': eos.getplotdata()[4], 'Energy': eos.getplotdata()[5]})

        equilibrium_constants = {'v0': v0, 'e0': e0, 'B': B / kJ * 1.0e24}

        data = [dft_df, eos_df, equilibrium_constants, label]

        return data

    except Exception:
        warnings.warn(f'WARNING: Unable to fit EoS curve for {label}')

        return [None, None, None, label]
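
# A minimal usage sketch (hypothetical folder layout and values): a directory
# containing a POSCAR and an energ.dat of the form
#
#     0  75.20  -42.1031
#     1  76.85  -42.1987
#     2  78.50  -42.2214
#
# read and fitted per formula unit:
#
#     dft_df, eos_df, eq, label = read_eos_data('calcs/rocksalt', {'atoms_per_fu': 2})
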
def read_eos_datas(path, options):

    required_options = ['subset', 'sort_by']

    default_options = {
        'subset': None,  # list with directory names of what you want to include
        'sort_by': 'e0',  # whether the data should be sorted or not - relevant for bar plots, but also for the order of the entries in the legend in the EoS curve plot
    }

    options = update_options(options=options, required_options=required_options, default_options=default_options)

    # If a subset of directories is not specified, create a list of all directories in the given path.
    if not options['subset']:
        dirs = [dir for dir in os.listdir(path) if os.path.isdir(os.path.join(path, dir)) and dir[0] != '.']
    else:
        dirs = options['subset']

    datas = []

    # Loop through all subdirectories and read the data from these. The name of each directory is appended to the list returned from read_eos_data().
    for dir in dirs:
        subdir = os.path.join(path, dir)
        data = read_eos_data(subdir, options)

        if isinstance(data[0], pd.DataFrame):
            datas.append(data)

    # Sort the data if sorting is enabled.
    if options['sort_by']:
        datas = sort_data(datas, options['sort_by'])

    return datas


def get_summarised_data(path, options):

    datas = read_eos_datas(path=path, options=options)

    summary = []
    for data in datas:
        summary.append([data[3], data[2]['e0'], data[2]['v0'], data[2]['B']])

    df = pd.DataFrame(summary)
    df.columns = ['Label', 'E0', 'V0', 'B']

    emin = df["E0"].min()

    df["dE0"] = df["E0"] - emin

    # Rearrange the columns
    df = df[['Label', 'E0', 'dE0', 'V0', 'B']]

    return df


def plot_eos_data(path, options):
    '''Plots the data from the energy-volume curve runs. Allows plotting of just the energy-volume curves, a bar plot showing the equilibrium energies, or both.

    path: path to where the data is located. It should point to a directory with subdirectories for each structure to be plotted. Inside each of these subdirectories there should be an energ.dat and a POSCAR file.
    atoms_per_fu: Number of atoms per formula unit. Used to scale the values to be comparable with other calculations that may have a different sized unit cell.
    dirs: List of directory names if only a subset of all available datasets is to be plotted. Defaults to None, which gets data from all subdirectories.
    eos: Type of equation of state to fit to. Same keywords as the ones used in ASE, as it simply calls ASE to fit the equation of state.
    width: Width of the total figure. Defaults to None, which falls back to width=20.
    height: Height of the total figure. Defaults to None, which falls back to height = width / phi, where phi is the golden ratio.
    dpi: Dots per inch of the figure. Defaults to pyplot's default.
    colour_cycles: List of tuples with sets of colours from the palettable colour collection. Defaults to two sets of in total 20 colours. Used to give different colours to the energy-volume curves.
    energyunit: The energy unit. Defaults to eV per formula unit. Only used on the axis labels.
    volumeunit: The volume unit. Defaults to Å^3. Only used on the axis labels.
    xlim: Limits of the x-axes. List of min and max. If mode='both' is used, has to contain two lists, one for each of the plots. As x-limits are meaningless for a bar plot, that entry should just be a list with a NoneType.
    ylim: Limits of the y-axes. List of min and max. If mode='both' is used, has to contain two lists, one for each of the plots.
    sort: Whether or not to sort the data from lowest to highest equilibrium energy. Defaults to True.
    sort_by: What to sort by if sort is enabled. Defaults to 'e0'. Other options: 'v0' = equilibrium volumes, 'B' = bulk moduli. Alphabetical sorting is not implemented.
    mode: Determines what to plot. Defaults to energy-volume curves ('curves'). Other options: 'bars', a bar plot of equilibrium energies; 'both', energy-volume curves and bar plots side by side.
    highlight: Takes a list, either of booleans to highlight certain bars (must be the same length as the number of data sets), or containing only the names of the datasets to highlight. Defaults to None.'''

    # FIXME A lot of refactoring required to tidy this up

    required_options = ['plot_kind', 'highlight',
                        'reference',
                        'eos', 'sort_by',
                        'colours',
                        'xlabel', 'ylabel',
                        'xunit', 'yunit',
                        'palettes',
                        'markers',
                        'ylim',
                        'legend_map',
                        'rc_params',
                        'legend']

    default_options = {
        'plot_kind': 'EoScurve',  # EoScurve or EoSbars
        'highlight': None,  # list with directory names (or Boolean array) of which bars to highlight. Only relevant to EoSbars
        'reference': 0,  # whether the energy should be relative to some reference energy (typically the lowest energy)
        'eos': 'birchmurnaghan',  # what type of EoS curve to fit the data to. Options: murnaghan, birch, birchmurnaghan, vinet, pouriertarantola
        'sort_by': 'e0',  # whether the data should be sorted or not - relevant for bar plots, but also for the order of the entries in the legend in the EoS curve plot
        'colours': None,
        'xlabel': 'Volume', 'ylabel': 'Energy',
        'xunit': 'Å$^3$', 'yunit': 'eV',
        'palettes': [('qualitative', 'Dark2_8'), ('qualitative', 'Paired_12')],  # a set of two colour cycles from the palettable package. Requires many colours for the EoScurve plot
        'markers': ('o', '*', '^', 'v', 'd', 'H', '8', '>', 'P', 'X'),  # marker styles for the EoScurve plot
        'ylim': None,  # y-limits (list)
        'legend': True,
        'legend_map': None,  # a dictionary with mappings between the folder names and what should appear in the legend
        'rc_params': None  # dictionary of run commands to update the plot style
    }

    options = update_options(options=options, required_options=required_options, default_options=default_options)

    # Read the data
    datas = read_eos_datas(path=path, options=options)

    ### PLOT THE ENERGY-VOLUME CURVES
    if options['plot_kind'] == 'EoScurve':

        # Fetch figure and axes objects from the prepare_plot() function
        fig, ax = btp.prepare_plot(options=options)

        # Make a cyclic iterable of markers to be used for the calculated data points.
        marker_cycle = itertools.cycle(options['markers'])

        # Create a list of all the colours passed in the palettes option, then make a cyclic iterable of these.
        colour_collection = []
        for cycle in options['palettes']:
            mod = importlib.import_module("palettable.colorbrewer.%s" % cycle[0])
            colour = getattr(mod, cycle[1]).mpl_colors
            colour_collection = colour_collection + colour

        colour_cycle = itertools.cycle(colour_collection)

        labels = []
        colours = []
        markers = []

        # For each of the data sets, extract the data and plot it.
        for data in datas:
            dft_df, eos_df, label = data[0], data[1], data[3]

            # If ylim is passed, only plot those that have a minimum energy below the upper ylim value
            if options['ylim']:
                plot = True if dft_df["Energy"].min() < options['ylim'][1] else False
            else:
                plot = True

            if plot:
                # NOTE: the original read options['label_map'], which is never defined; 'legend_map' is the documented option
                if options['legend_map']:
                    labels.append(options['legend_map'][label])
                else:
                    labels.append(label)

                colours.append(next(colour_cycle))
                markers.append(next(marker_cycle))

                dft_df.plot.scatter(x=1, y=2, ax=ax, marker=markers[-1], color=colours[-1], s=20)
                eos_df.plot(x=0, y=1, ax=ax, color=colours[-1], label='_', ls='--')

        options['labels'] = labels

        if options['legend']:
            options['legend_content'] = [labels, colours, markers]

    ### PLOT THE BAR PLOTS
    elif options['plot_kind'] == 'EoSbars':

        # Fetch figure and axes objects from the prepare_plot() function
        fig, ax = btp.prepare_plot(options=options)

        e0 = []
        labels = []
        colours = []

        # Pick out colours for highlighting (NB! These colours are not passed as arguments, but could be in the future)
        bar_colours = []
        for cycle in options['palettes']:
            mod = importlib.import_module("palettable.colorbrewer.%s" % cycle[0])
            bar_colours.append(getattr(mod, cycle[1]).mpl_colors[0])

        # Loop through the datasets, pick out equilibrium energies and labels, and set colours according to whether the highlight option is used or not.
        for data in datas:

            if options['ylim']:
                plot = True if data[2]['e0'] < options['ylim'][1] else False
            else:
                plot = True

            if plot:
                # Add 100 if plotting in relative mode, as the bases of the bars are sunk by 100 during plotting.
                # NOTE: the original read `else 100`, which made both branches identical.
                adjustment = 100 if options['reference'] != 0 else 0

                e0.append(data[2]['e0'] + adjustment)
                labels.append(options['legend_map'][data[3]] if options['legend_map'] else data[3])

                # NOTE: the original branch pair was unreachable and used `type(x == str)`, which is always truthy;
                # condensed here to match by dataset name or mapped label.
                if options['highlight'] is not None and isinstance(options['highlight'][0], str):
                    if data[3] in options['highlight'] or labels[-1] in options['highlight']:
                        colours.append(bar_colours[0])
                    else:
                        colours.append(bar_colours[1])
                else:
                    colours.append(bar_colours[0])

        # Make the bar plot.
        bottom = -100 if options['reference'] != 0 else 0
        plt.bar(range(len(e0)), e0, color=colours, bottom=bottom)
        plt.xticks(range(len(e0)), labels, rotation=90)

    fig, ax = btp.adjust_plot(fig=fig, ax=ax, options=options)

    return datas, fig, ax
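
# A minimal usage sketch (hypothetical paths): plot fitted EoS curves for every
# subfolder of 'calcs/', each holding an energ.dat and a POSCAR:
#
#     datas, fig, ax = plot_eos_data('calcs/', {'plot_kind': 'EoScurve', 'atoms_per_fu': 2})
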
def sort_data(datas, sort_by='e0'):
|
|
||||||
''' Bubble sort algorithm to sort the data sets'''
|
|
||||||
|
|
||||||
l = len(datas)
|
|
||||||
|
|
||||||
for i in range(0, l):
|
|
||||||
for j in range(0, l-i-1):
|
|
||||||
if datas[j][2]['{}'.format(sort_by)] > datas[j+1][2]['{}'.format(sort_by)]:
|
|
||||||
temp = datas[j]
|
|
||||||
datas[j] = datas[j+1]
|
|
||||||
datas[j+1] = temp
|
|
||||||
|
|
||||||
return datas
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def prepare_plot(options={}):
|
|
||||||
|
|
||||||
# Reset run commands
|
|
||||||
plt.rcdefaults()
|
|
||||||
|
|
||||||
# Update run commands if any is passed
|
|
||||||
if 'rc_params' in options.keys():
|
|
||||||
update_rc_params(options['rc_params'])
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
required_options = ['single_column_width', 'double_column_width', 'column_type', 'width_ratio', 'aspect_ratio', 'compress_width', 'compress_height', 'upscaling_factor', 'dpi']
|
|
||||||
default_options = {
|
|
||||||
'single_column_width': 8.3,
|
|
||||||
'double_column_width': 17.1,
|
|
||||||
'column_type': 'single',
|
|
||||||
'width_ratio': '1:1',
|
|
||||||
'aspect_ratio': '1:1',
|
|
||||||
'compress_width': 1,
|
|
||||||
'compress_height': 1,
|
|
||||||
'upscaling_factor': 1.0,
|
|
||||||
'dpi': 600}
|
|
||||||
|
|
||||||
options = update_options(options, required_options, default_options)
|
|
||||||
|
|
||||||
width = determine_width(options)
|
|
||||||
height = determine_height(options, width)
|
|
||||||
width, height = scale_figure(options=options, width=width, height=height)
|
|
||||||
|
|
||||||
fig, ax = plt.subplots(figsize=(width, height), dpi=options['dpi'])
|
|
||||||
|
|
||||||
return fig, ax
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def update_rc_params(rc_params):
|
|
||||||
''' Update all passed run commands in matplotlib'''
|
|
||||||
|
|
||||||
if rc_params:
|
|
||||||
for key in rc_params.keys():
|
|
||||||
plt.rcParams.update({key: rc_params[key]})
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def update_options(options, required_options, default_options):
|
|
||||||
''' Update all passed options'''
|
|
||||||
|
|
||||||
|
|
||||||
for option in required_options:
|
|
||||||
if option not in options.keys():
|
|
||||||
options[option] = default_options[option]
|
|
||||||
|
|
||||||
|
|
||||||
return options
|
|
||||||
|
|
||||||
|
|
||||||
def determine_width(options):
|
|
||||||
|
|
||||||
conversion_cm_inch = 0.3937008 # cm to inch
|
|
||||||
|
|
||||||
if options['column_type'] == 'single':
|
|
||||||
column_width = options['single_column_width']
|
|
||||||
elif options['column_type'] == 'double':
|
|
||||||
column_width = options['double_column_width']
|
|
||||||
|
|
||||||
column_width *= conversion_cm_inch
|
|
||||||
|
|
||||||
|
|
||||||
width_ratio = [float(num) for num in options['width_ratio'].split(':')]
|
|
||||||
|
|
||||||
|
|
||||||
width = column_width * width_ratio[0]/width_ratio[1]
|
|
||||||
|
|
||||||
|
|
||||||
return width
|
|
||||||
|
|
||||||
|
|
||||||
def determine_height(options, width):
|
|
||||||
|
|
||||||
aspect_ratio = [float(num) for num in options['aspect_ratio'].split(':')]
|
|
||||||
|
|
||||||
height = width/(aspect_ratio[0] / aspect_ratio[1])
|
|
||||||
|
|
||||||
return height
|
|
||||||
|
|
||||||
def scale_figure(options, width, height):
|
|
||||||
width = width * options['upscaling_factor'] * options['compress_width']
|
|
||||||
height = height * options['upscaling_factor'] * options['compress_height']
|
|
||||||
|
|
||||||
return width, height
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def prepare_plot_old(width=None, height=None, dpi=None, energyunit='eV', volumeunit=r'Å$^3$', mode='curves', width_ratio=[1, 1], square=True, pad_bottom=None, scale=1, format_params=None):
|
|
||||||
'''Prepares pyplot figure and axes objects.'''
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
linewidth = 3*scale
|
|
||||||
axeswidth = 3*scale
|
|
||||||
|
|
||||||
plt.rc('lines', linewidth=linewidth)
|
|
||||||
plt.rc('axes', linewidth=axeswidth)
|
|
||||||
|
|
||||||
|
|
||||||
if square:
|
|
||||||
if not width:
|
|
||||||
width = 20
|
|
||||||
|
|
||||||
height = width
|
|
||||||
|
|
||||||
|
|
||||||
else:
|
|
||||||
if not width:
|
|
||||||
width = 20
|
|
||||||
|
|
||||||
|
|
||||||
if not height:
|
|
||||||
golden_ratio = (math.sqrt(5) - 1) / 2
|
|
||||||
height = width*golden_ratio
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
if mode == 'curves':
|
|
||||||
|
|
||||||
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(width, height), facecolor='w', dpi=dpi)
|
|
||||||
|
|
||||||
|
|
||||||
if mode == 'bars':
|
|
||||||
|
|
||||||
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(width, height), facecolor='w', dpi=dpi)
|
|
||||||
|
|
||||||
|
|
||||||
if mode == 'both':
|
|
||||||
|
|
||||||
fig, ax = plt.subplots(1, 2, figsize=(width, height), gridspec_kw={'width_ratios': width_ratio})
|
|
||||||
|
|
||||||
|
|
||||||
return fig, ax
|
|
||||||
|
|
||||||
|
|
||||||
def prettify_plot(fig, ax, options):
|
|
||||||
'''Prepares pyplot figure and axes objects.'''
|
|
||||||
|
|
||||||
required_options = ['plot_kind', 'hide_x_labels', 'hide_y_labels', 'xunit', 'yunit', 'legend_content', 'legend_position', 'x_tick_locators', 'y_tick_locators', 'tick_directions', 'subplots_adjust', 'xlim', 'ylim']
|
|
||||||
|
|
||||||
default_options = {
|
|
||||||
'plot_kind': 'EoScurve', # EoScurve or EoSbars
|
|
||||||
'hide_x_labels': False, # Whether x labels should be hidden
|
|
||||||
'hide_y_labels': False, # whether y labels should be hidden
|
|
||||||
'xunit': r'Å$^3$', # The unit of the x-values in the curve plot
|
|
||||||
'yunit': r'eV f.u.$^{-1}$', # The unit of the y-values in the curve and bar plots
|
|
||||||
'xlim': None,
|
|
||||||
'ylim': None,
|
|
||||||
'legend_content': None,
|
|
||||||
'legend_position': ['upper center', (1.10, 0.90)], # the position of the legend passed as arguments to loc and bbox_to_anchor respectively
|
|
||||||
'x_tick_locators': [10, 5], # Major and minor tick locators
|
|
||||||
'y_tick_locators': [.1, .05], # Major and minor tick locators
|
|
||||||
'tick_directions': 'in', # in or out
|
|
||||||
'subplots_adjust': [0.1, 0.1, 0.9, 0.9]
|
|
||||||
}
|
|
||||||
|
|
||||||
options = update_options(options=options, required_options=required_options, default_options=default_options)
|
|
||||||
|
|
||||||
|
|
||||||
if options['plot_kind'] == 'EoScurve':
|
|
||||||
|
|
||||||
# Set labels on x- and y-axes
|
|
||||||
ax.set_xlabel('Volume [{}]'.format(options['xunit']))
|
|
||||||
|
|
||||||
if not options['hide_y_labels']:
|
|
||||||
ax.set_ylabel('Energy [{}]'.format(options['yunit']))
|
|
||||||
else:
|
|
||||||
ax.set_ylabel('')
|
|
||||||
ax.tick_params(labelleft=False)
|
|
||||||
|
|
||||||
|
|
||||||
ax.xaxis.set_major_locator(MultipleLocator(options['x_tick_locators'][0]))
|
|
||||||
ax.xaxis.set_minor_locator(MultipleLocator(options['x_tick_locators'][1]))
|
|
||||||
|
|
||||||
ax.yaxis.set_major_locator(MultipleLocator(options['y_tick_locators'][0]))
|
|
||||||
ax.yaxis.set_minor_locator(MultipleLocator(options['y_tick_locators'][1]))
|
|
||||||
|
|
||||||
if ax.get_legend():
|
|
||||||
ax.get_legend().remove()
|
|
||||||
|
|
||||||
|
|
||||||
if options['legend']:
|
|
||||||
labels = options['legend_content'][0]
|
|
||||||
colours = options['legend_content'][1]
|
|
||||||
markers = options['legend_content'][2]
|
|
||||||
|
|
||||||
entries = []
|
|
||||||
|
|
||||||
for i in range(len(options['legend_content'][0])):
|
|
||||||
entries.append(mlines.Line2D([], [], label=labels[i], color=colours[i], marker=markers[i], linestyle='None'))
|
|
||||||
|
|
||||||
|
|
||||||
fig.legend(handles=entries, loc=options['legend_position'][0], bbox_to_anchor=options['legend_position'][1], frameon=False)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
if options['plot_kind'] == 'EoSbars':
|
|
||||||
|
|
||||||
if not options['hide_y_labels']:
|
|
||||||
ax.set_ylabel('Energy [{}]'.format(options['yunit']))
|
|
||||||
|
|
||||||
ax.yaxis.set_major_locator(MultipleLocator(options['y_tick_locators'][0]))
|
|
||||||
ax.yaxis.set_minor_locator(MultipleLocator(options['y_tick_locators'][1]))
|
|
||||||
|
|
||||||
ax.tick_params(axis='x', which='minor', bottom=False, top=False)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Adjust where the axes start within the figure. Default value is 10% in from the left and bottom edges. Used to make room for the plot within the figure size (to avoid using bbox_inches='tight' in the savefig-command, as this screws with plot dimensions)
|
|
||||||
plt.subplots_adjust(left=options['subplots_adjust'][0], bottom=options['subplots_adjust'][1], right=options['subplots_adjust'][2], top=options['subplots_adjust'][3])
|
|
||||||
|
|
||||||
|
|
||||||
# If limits for x- and y-axes is passed, sets these.
|
|
||||||
if options['xlim'] is not None:
|
|
||||||
ax.set_xlim(options['xlim'])
|
|
||||||
|
|
||||||
if options['ylim'] is not None:
|
|
||||||
ax.set_ylim(options['ylim'])
|
|
||||||
|
|
||||||
|
|
||||||
return fig, ax
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def prettify_plot_old(fig, ax, energyunit='eV', volumeunit=r'Å$^3$', mode='curves', legend_content=None, pad_bottom=None, scale=1, hide_ylabels=False, xpad=None, ypad=None):
|
|
||||||
'''Prepares pyplot figure and axes objects.'''
|
|
||||||
|
|
||||||
# Set sizes of ticks, labes etc.
|
|
||||||
ticksize = 30*scale
|
|
||||||
labelsize = 30*scale
|
|
||||||
legendsize = 15*scale
|
|
||||||
titlesize = 30*scale
|
|
||||||
|
|
||||||
linewidth = 3*scale
|
|
||||||
axeswidth = 3*scale
|
|
||||||
markersize = 15*scale
|
|
||||||
majorticklength = 20*scale
|
|
||||||
minorticklength = 10*scale
|
|
||||||
|
|
||||||
xpad = 4 if not xpad else xpad
|
|
||||||
ypad = 4 if not ypad else ypad
|
|
||||||
|
|
||||||
|
|
||||||
if mode == 'curves':
|
|
||||||
|
|
||||||
# Set labels on x- and y-axes
|
|
||||||
ax.set_xlabel('Volume [{}]'.format(volumeunit), size=labelsize, labelpad=xpad)
|
|
||||||
|
|
||||||
if not hide_ylabels:
|
|
||||||
ax.set_ylabel('Energy [{}]'.format(energyunit), size=labelsize, labelpad=ypad)
|
|
||||||
else:
|
|
||||||
ax.set_ylabel('')
|
|
||||||
|
|
||||||
# Set tick parameters
|
|
||||||
ax.tick_params(axis='both', direction='in', which='major', length=majorticklength, width=axeswidth, right=True, top=True, labelsize=ticksize)
|
|
||||||
ax.tick_params(axis='both', direction='in', which='minor', length=minorticklength, width=axeswidth, right=True, top=True, labelsize=ticksize)
|
|
||||||
|
|
||||||
ax.tick_params(axis='x', pad=xpad)
|
|
||||||
ax.tick_params(axis='y', pad=ypad)
|
|
||||||
|
|
||||||
if hide_ylabels:
|
|
||||||
ax.tick_params(labelleft=False)
|
|
||||||
|
|
||||||
plt.xticks(fontsize=ticksize)
|
|
||||||
plt.yticks(fontsize=ticksize)
|
|
||||||
|
|
||||||
|
|
||||||
ax.xaxis.set_major_locator(MultipleLocator(10))
|
|
||||||
ax.xaxis.set_minor_locator(MultipleLocator(5))
|
|
||||||
|
|
||||||
ax.yaxis.set_major_locator(MultipleLocator(.1))
|
|
||||||
ax.yaxis.set_minor_locator(MultipleLocator(.05))
|
|
||||||
|
|
||||||
|
|
||||||
ax.get_legend().remove()
|
|
||||||
if legend_content:
|
|
||||||
patches = []
|
|
||||||
labels = legend_content[0]
|
|
||||||
colours = legend_content[1]
|
|
||||||
markers = legend_content[2]
|
|
||||||
|
|
||||||
entries = []
|
|
||||||
|
|
||||||
for ind, label in enumerate(legend_content[0]):
|
|
||||||
entries.append(mlines.Line2D([], [], color=colours[ind], marker=markers[ind], linestyle='None',
|
|
||||||
markersize=markersize, label=labels[ind]))
|
|
||||||
|
|
||||||
#patches.append(mpatches.Patch(color=colours[ind], label=labels[ind]))
|
|
||||||
|
|
||||||
|
|
||||||
fig.legend(handles=entries, loc='upper center', bbox_to_anchor=(1.10, 0.90), fontsize=legendsize, frameon=False)
|
|
||||||
|
|
||||||
if pad_bottom is not None:
|
|
||||||
bigax = fig.add_subplot(111)
|
|
||||||
bigax.set_facecolor([1,1,1,0])
|
|
||||||
bigax.spines['top'].set_visible(False)
|
|
||||||
bigax.spines['bottom'].set_visible(True)
|
|
||||||
bigax.spines['left'].set_visible(False)
|
|
||||||
bigax.spines['right'].set_visible(False)
|
|
||||||
bigax.tick_params(labelcolor='w', color='w', direction='in', top=False, bottom=True, left=False, right=False, labelleft=False, pad=pad_bottom)
|
|
||||||
|
|
||||||
if mode == 'bars':
|
|
||||||
|
|
||||||
|
|
||||||
ax.tick_params(axis='both', direction='in', which='major', length=majorticklength, width=axeswidth, right=True, top=True)
|
|
||||||
ax.tick_params(axis='both', direction='in', which='minor', length=minorticklength, width=axeswidth, right=True, top=True)
|
|
||||||
|
|
||||||
if not hide_ylabels:
|
|
||||||
ax.set_ylabel('Energy [{}]'.format(energyunit), size=labelsize, labelpad=ypad)
|
|
||||||
|
|
||||||
ax.yaxis.set_major_locator(MultipleLocator(.1))
|
|
||||||
ax.yaxis.set_minor_locator(MultipleLocator(.05))
|
|
||||||
|
|
||||||
ax.tick_params(axis='x', pad=xpad)
|
|
||||||
ax.tick_params(axis='y', pad=ypad)
|
|
||||||
|
|
||||||
plt.xticks(fontsize=ticksize)
|
|
||||||
plt.yticks(fontsize=ticksize)
|
|
||||||
|
|
||||||
if pad_bottom is not None:
|
|
||||||
bigax = fig.add_subplot(111)
|
|
||||||
bigax.set_facecolor([1,1,1,0])
|
|
||||||
bigax.spines['top'].set_visible(False)
|
|
||||||
bigax.spines['bottom'].set_visible(True)
|
|
||||||
bigax.spines['left'].set_visible(False)
|
|
||||||
bigax.spines['right'].set_visible(False)
|
|
||||||
bigax.tick_params(labelcolor='w', color='w', direction='in', top=False, bottom=True, left=False, right=False, labelleft=False, pad=pad_bottom)
|
|
||||||
|
|
||||||
if mode == 'both':

    # Set labels on x- and y-axes
    ax[0].set_xlabel('Volume [{}]'.format(volumeunit), size=labelsize, labelpad=xpad)
    ax[0].set_ylabel('Energy [{}]'.format(energyunit), size=labelsize, labelpad=ypad)

    # Set tick parameters
    ax[0].tick_params(axis='both', direction='in', which='major', length=majorticklength, width=axeswidth, right=True, left=True, top=True, labelsize=ticksize)
    ax[0].tick_params(axis='both', direction='in', which='minor', length=minorticklength, width=axeswidth, right=True, left=True, top=True, labelsize=ticksize)

    ax[0].tick_params(axis='x', pad=xpad)
    ax[0].tick_params(axis='y', pad=ypad)

    ax[0].xaxis.set_major_locator(MultipleLocator(10))
    ax[0].xaxis.set_minor_locator(MultipleLocator(5))

    ax[0].yaxis.set_major_locator(MultipleLocator(.1))
    ax[0].yaxis.set_minor_locator(MultipleLocator(.05))

    plt.xticks(fontsize=ticksize)
    plt.yticks(fontsize=ticksize)

    ax[1].yaxis.set_major_locator(MultipleLocator(.2))
    ax[1].yaxis.set_minor_locator(MultipleLocator(.1))
    ax[1].yaxis.set_label_position('right')
    ax[1].yaxis.tick_right()
    ax[1].set_ylabel('Energy [{}]'.format(energyunit), size=labelsize, labelpad=ypad)
    ax[1].tick_params(axis='both', direction='in', which='major', length=majorticklength, width=axeswidth, left=True, right=True, top=True)
    ax[1].tick_params(axis='both', direction='in', which='minor', length=minorticklength, width=axeswidth, left=True, right=True, top=True)

    ax[1].tick_params(axis='x', pad=xpad)
    ax[1].tick_params(axis='y', pad=ypad)

    plt.xticks(fontsize=ticksize)
    plt.yticks(fontsize=ticksize)

return fig, ax


def parabola(V, a, b, c):
    """Parabola (2nd order polynomial) function.

    This function is used to fit the data to get good initial guesses for
    the equation of state fits.

    A 4th order polynomial fit to get the initial guesses was not a good
    idea, because for noisy data the fit is too wiggly. 2nd order seems to
    be sufficient, and guarantees a single minimum."""

    E = (a * V**2) + (b * V) + c

    return E


def murnaghan(V, E0, V0, B0, BP):
    'From PRB 28, 5480 (1983)'

    E = E0 + ((B0 * V) / BP) * (((V0 / V)**BP) / (BP - 1) + 1) - ((V0 * B0) / (BP - 1))
    return E


def birch(V, E0, V0, B0, BP):
    """
    From Intermetallic compounds: Principles and Practice, Vol. I: Principles,
    Chapter 9, pages 195-210, by M. Mehl, B. Klein and D. Papaconstantopoulos.
    Paper downloaded from the web.

    Case where n=0.
    """

    E = (E0 +
         9 / 8 * B0 * V0 * ((V0 / V)**(2 / 3) - 1)**2 +
         9 / 16 * B0 * V0 * (BP - 4) * ((V0 / V)**(2 / 3) - 1)**3)
    return E


def birchmurnaghan(V, E0, V0, B0, BP):
    """
    Birch-Murnaghan equation from PRB 70, 224107,
    Eq. (3) in the paper. Note that there is a typo in the paper: it uses
    the inverse expression for eta.
    """

    eta = (V0 / V)**(1 / 3)

    E = E0 + 9 * B0 * V0 / 16 * (eta**2 - 1)**2 * (6 + BP * (eta**2 - 1) - 4 * eta**2)

    return E


def vinet(V, E0, V0, B0, BP):
    'Vinet equation from PRB 70, 224107'

    eta = (V / V0)**(1 / 3)

    E = (E0 + 2 * B0 * V0 / (BP - 1)**2 *
         (2 - (5 + 3 * BP * (eta - 1) - 3 * eta) *
          np.exp(-3 * (BP - 1) * (eta - 1) / 2)))

    return E


def pouriertarantola(V, E0, V0, B0, BP):
    'Pourier-Tarantola equation from PRB 70, 224107'

    eta = (V / V0)**(1 / 3)
    squiggle = -3 * np.log(eta)

    E = E0 + B0 * V0 * squiggle**2 / 6 * (3 + squiggle * (BP - 2))
    return E

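
# Illustrative sketch (not part of the original module): near V0 the EOS forms
# above should agree closely. The parameter values below are made up purely
# for demonstration.
def _compare_eos_forms():
    V = np.linspace(15, 25, 50)
    # E0, V0, B0, BP are illustrative values, not fitted results
    return {f.__name__: f(V, -10.0, 20.0, 0.5, 4.0)
            for f in (murnaghan, birch, birchmurnaghan, vinet, pouriertarantola)}
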
def get_initial_guesses(volume, energy):

    p = np.polyfit(volume, energy, deg=2)

    a, b, c = p[0], p[1], p[2]

    # Estimated from dE/dV = 2aV0 + b = 0 => V0 = -b / 2a
    v0 = -b / (2*a)

    # Estimated by evaluating a parabola with a, b and c values at V = V0
    e0 = parabola(v0, a, b, c)

    # Estimated from B0 ~ V0 * d^2E/dV^2, where d^2E/dV^2 = 2a
    b0 = 2 * a * v0

    # Just a reasonable starting value
    bp = 4

    return [e0, v0, b0, bp]

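
# Hypothetical sanity check (not in the original code): for an exactly
# parabolic E(V), get_initial_guesses() should recover the minimum.
def _check_initial_guesses():
    volume = np.linspace(15, 25, 11)
    energy = parabola(volume, 0.05, -2.0, 10.0)  # minimum at V0 = -b/(2a) = 20
    e0, v0, b0, bp = get_initial_guesses(volume, energy)
    assert abs(v0 - 20.0) < 1e-6
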
def fit_eos_curve(volume, energy, p0, eos):

    eos_dict = {'murnaghan': murnaghan, 'birch': birch, 'birchmurnaghan': birchmurnaghan, 'vinet': vinet, 'pouriertarantola': pouriertarantola}

    func = eos_dict[eos]

    popt, pcov = curve_fit(func, volume, energy, p0)

    E0, V0, B0, BP = popt[0], popt[1], popt[2], popt[3]

    return [E0, V0, B0, BP]

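
# Typical call chain (sketch; 'volume' and 'energy' are assumed NumPy arrays
# of an E(V) curve, e.g. as loaded in get_equilibrium_data() below):
#
#     p0 = get_initial_guesses(volume, energy)
#     E0, V0, B0, BP = fit_eos_curve(volume, energy, p0, eos='murnaghan')
#     E_fit, V_fit = get_plotdata(volume, energy, [E0, V0, B0, BP], eos='murnaghan')
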
def get_plotdata(volume, energy, equilibrium_values, eos):

    eos_dict = {'murnaghan': murnaghan, 'birch': birch, 'birchmurnaghan': birchmurnaghan, 'vinet': vinet, 'pouriertarantola': pouriertarantola}

    V = np.linspace(volume.min(), volume.max(), 100)

    E0, V0, B0, BP = equilibrium_values[0], equilibrium_values[1], equilibrium_values[2], equilibrium_values[3]

    func = eos_dict[eos]

    E = func(V, E0, V0, B0, BP)

    return E, V


def get_atoms(poscar):

    with open(poscar, 'r') as f:
        lines = f.readlines()

    atoms = lines[5].split()
    atom_num = lines[6].split()

    atom_num = [int(num) for num in atom_num]

    atoms_dict = {}

    for ind, atom in enumerate(atoms):
        atoms_dict[atom] = atom_num[ind]

    return atoms, atom_num, atoms_dict

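
# Example of the POSCAR lines being parsed above (VASP 5 format, illustrative
# composition): lines[5] holds the species and lines[6] the counts, so
#
#     Li Fe P O
#     4  4  4  16
#
# yields atoms = ['Li', 'Fe', 'P', 'O'], atom_num = [4, 4, 4, 16] and
# atoms_dict = {'Li': 4, 'Fe': 4, 'P': 4, 'O': 16}.
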
def get_equilibrium_data(path, atoms_per_formula_unit, eos=None):

    if not eos:
        eos = 'murnaghan'

    dirs = [os.path.join(path, dir) for dir in os.listdir(path)]

    data = []

    for dir in dirs:
        atoms, atom_num, atoms_dict = get_atoms(os.path.join(dir, 'POSCAR'))
        scaling_factor = sum(atom_num) / atoms_per_formula_unit

        label = os.path.basename(dir)

        dft_df = pd.read_csv(os.path.join(dir, 'energ.dat'), header=None, delim_whitespace=True, index_col=0)
        dft_df.reset_index(drop=True, inplace=True)
        dft_df.columns = ['Volume', 'Energy']

        volume = dft_df["Volume"].to_numpy() / scaling_factor
        energy = dft_df["Energy"].to_numpy() / scaling_factor

        p0 = get_initial_guesses(volume, energy)

        try:
            equilibrium_constants = fit_eos_curve(volume, energy, p0, eos)

            e0, v0, b0, bp = equilibrium_constants[0], equilibrium_constants[1], equilibrium_constants[2], equilibrium_constants[3]

            # b0/kJ*1e24 converts B0 to GPa (assuming eV and Angstrom^3 units; kJ presumably from ase.units)
            data.append([label, e0, v0, b0/kJ*1e24, bp])

        except Exception:
            data.append([label, None, None, None, None])

    df = pd.DataFrame(data)
    df.columns = ['Label', 'E0', 'V0', 'B0', 'Bp']
    df.sort_values(by='E0', ascending=True, inplace=True)
    df.reset_index(inplace=True)

    E_min = df['E0'].min()

    df['dE'] = df['E0'] - E_min

    df = df[['Label', 'E0', 'dE', 'V0', 'B0', 'Bp']]

    return df

@@ -1 +0,0 @@
from . import io, plot
152
nafuma/eds/io.py
@@ -1,152 +0,0 @@
from PIL import Image
import numpy as np
import cv2
import pandas as pd


def read_image(path, weight=None, colour=None, crop=None, resize=None, brightness=None):

    img = np.array(Image.open(path))

    if colour is not None:
        img = change_colour(img, colour)

    if brightness is not None:
        img = increase_brightness(img, brightness=brightness)

    if crop is not None:
        img = crop_image(img, crop)

    if resize is not None:
        img = resize_image(img, resize)

    if weight is not None:
        img = scale_image(img, weight)

    return img

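
# Usage sketch (hypothetical file name): load an elemental map, recolour and
# dim it before combining it with other maps further below.
#
#     img = read_image('Fe_map.png', weight=0.5, colour=[255, 0, 0], resize=0.5)
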
def scale_image(image, factor):

    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            image[i][j][0] = image[i][j][0]*factor
            image[i][j][1] = image[i][j][1]*factor
            image[i][j][2] = image[i][j][2]*factor

    return image


def crop_image(image, factor):

    y, x = image.shape[0:2]

    new_y, new_x = int(y*factor), int(x*factor)

    image = image[:new_y, :new_x]

    res = cv2.resize(image, dsize=(x, y), interpolation=cv2.INTER_CUBIC)

    return res


def resize_image(image, factor):

    y, x = image.shape[0:2]

    new_y, new_x = int(y*factor), int(x*factor)

    res = cv2.resize(image, dsize=(new_x, new_y), interpolation=cv2.INTER_CUBIC)

    return res


def increase_brightness(image, brightness):

    # Note: for uint8 image arrays this addition can wrap around at 255;
    # clipping (e.g. with np.clip) may be desirable depending on the input dtype.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            image[i][j][0] = image[i][j][0]+brightness
            image[i][j][1] = image[i][j][1]+brightness
            image[i][j][2] = image[i][j][2]+brightness

    return image


def add_images(image1, image2):

    assert image1.shape == image2.shape

    compound_image = np.zeros((image1.shape[0], image1.shape[1], image1.shape[2]))
    for i in range(image1.shape[0]):
        for j in range(image1.shape[1]):
            compound_image[i][j] = [0, 0, 0]

            compound_image[i][j][0] = int(int(image1[i][j][0]) + int(image2[i][j][0]))
            compound_image[i][j][1] = int(int(image1[i][j][1]) + int(image2[i][j][1]))
            compound_image[i][j][2] = int(int(image1[i][j][2]) + int(image2[i][j][2]))

    return compound_image


def get_colour(image):

    colour = [0, 0, 0]
    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            if image[i][j][0] > colour[0]:
                colour[0] = image[i][j][0]

            if image[i][j][1] > colour[1]:
                colour[1] = image[i][j][1]

            if image[i][j][2] > colour[2]:
                colour[2] = image[i][j][2]

    colour = np.array(colour)

    return colour


def change_colour(image, new_colour):

    new_colour = np.array(new_colour)

    old_colour = get_colour(image)

    for i in range(image.shape[0]):
        for j in range(image.shape[1]):
            factor = max(image[i][j]) / max(old_colour)
            image[i][j] = new_colour.astype(float) * factor

    return image


def read_spectrum(path):

    headers = find_start(path)

    spectrum = pd.read_csv(path, skiprows=headers, delim_whitespace=True)

    return spectrum


def find_start(path):

    with open(path, 'r') as f:
        line = f.readline()
        i = 0
        while not line.startswith('Energy'):
            line = f.readline()
            i += 1

    return i

@@ -1,135 +0,0 @@
import nafuma.auxillary as aux
import nafuma.plotting as btp
import nafuma.eds.io as io

import numpy as np

def show_image(data, options={}):

    default_options = {
        'hide_x_labels': True,
        'hide_y_labels': True,
        'hide_x_ticklabels': True,
        'hide_y_ticklabels': True,
        'hide_x_ticks': True,
        'hide_y_ticks': True,
        'colours': None,
        'brightness': None,
        'show_image': True,
        'resize': None,
        'crop': None,
        'ax': None,
        'fig': None,
    }

    options = aux.update_options(options=options, required_options=default_options.keys(), default_options=default_options)

    if not isinstance(data['path'], list):
        data['path'] = [data['path']]

    if not 'image' in data.keys():
        data['image'] = [None for _ in range(len(data['path']))]

    if not 'weights' in data.keys():
        data['weights'] = [1.0 for _ in range(len(data['path']))]

    if not options['colours']:
        options['colours'] = [None for _ in range(len(data['path']))]

    for i, (path, weight, colour) in enumerate(zip(data['path'], data['weights'], options['colours'])):
        data['image'][i] = io.read_image(path=path, weight=weight, colour=colour, resize=options['resize'], crop=options['crop'])

    images = []
    for i, image in enumerate(data['image']):
        images.append(image)

    final_image = np.mean(images, axis=0) / 255
    if options['brightness']:
        final_image = io.increase_brightness(final_image, brightness=options['brightness'])

    if len(data['path']) > 1:
        data['image'].append(final_image)

    if options['show_image']:
        if not options['fig'] and not options['ax']:
            fig, ax = btp.prepare_plot(options)
        else:
            fig, ax = options['fig'], options['ax']

        ax.imshow(final_image)
        btp.adjust_plot(fig=fig, ax=ax, options=options)

        return data['image'], fig, ax

    else:
        return data['image'], None, None

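
# Usage sketch (hypothetical paths): overlay two elemental maps with custom
# colours; the averaged composite is appended to data['image'].
#
#     data = {'path': ['Mn_map.png', 'O_map.png'], 'weights': [1.0, 0.8]}
#     images, fig, ax = show_image(data, options={'colours': [[255, 0, 255], [0, 255, 0]]})
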
def plot_spectrum(data: dict, options={}):

    default_options = {
        'deconvolutions': None,
        'lines': None,
        'colours': None,
        'xlabel': 'Energy', 'xunit': 'keV', 'xlim': None,
        'ylabel': 'Counts', 'yunit': 'arb. u.', 'ylim': None, 'hide_y_ticklabels': True, 'hide_y_ticks': True,
    }

    options = aux.update_options(options=options, default_options=default_options)

    fig, ax = btp.prepare_plot(options=options)

    spectrum = io.read_spectrum(data['path'])

    if options['deconvolutions']:

        deconvolutions = []
        if not isinstance(options['deconvolutions'], list):
            options['deconvolutions'] = [options['deconvolutions']]

        if options['colours'] and (len(options['colours']) != len(options['deconvolutions'])):
            options['colours'] = None

        for deconv in options['deconvolutions']:
            df = io.read_spectrum(deconv)
            deconvolutions.append(df)

    spectrum.plot(x='Energy', y='Counts', ax=ax, color='black')

    if options['deconvolutions']:
        if options['colours']:
            for deconv, colour in zip(deconvolutions, options['colours']):
                ax.fill_between(x=deconv['Energy'], y1=deconv['Counts'], y2=0, color=colour, alpha=0.4)
        else:
            for deconv in deconvolutions:
                ax.fill_between(x=deconv['Energy'], y1=deconv['Counts'], y2=0, alpha=0.4)

    if not options['xlim']:
        options['xlim'] = [spectrum['Energy'].min(), spectrum['Energy'].max()]

    if not options['ylim']:
        options['ylim'] = [0, 1.1*spectrum['Counts'].max()]

    if options['lines']:
        for i, (line, energy) in enumerate(options['lines'].items()):
            ax.axvline(x=energy, ls='--', lw=0.5, c='black')
            ax.text(s=line, x=energy, y=(0.9-0.1*i)*options['ylim'][1], fontsize=8)

    fig, ax = btp.adjust_plot(fig=fig, ax=ax, options=options)

    return spectrum, fig, ax
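
# Usage sketch (hypothetical paths; the line energies are approximate K-alpha
# values in keV): plot a spectrum with two deconvoluted components and
# labelled marker lines.
#
#     data = {'path': 'spectrum.txt'}
#     options = {'deconvolutions': ['Mn_Ka.txt', 'Fe_Ka.txt'],
#                'lines': {'Mn Ka': 5.90, 'Fe Ka': 6.40}}
#     spectrum, fig, ax = plot_spectrum(data, options)
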
@@ -1 +0,0 @@
from . import io, plot, unit_tables
@@ -1,871 +0,0 @@
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os

import nafuma.auxillary as aux


# FIXME This is not good practice, but a temporary fix as I don't have time to understand what causes the SettingWithCopyWarning.
# Read this: https://www.dataquest.io/blog/settingwithcopywarning/
pd.set_option('mode.chained_assignment', None)

def read_data(data, options={}):

    if data['kind'] == 'neware':
        df = read_neware(data['path'], options=options)
        cycles = process_neware_data(df=df, options=options)

    elif data['kind'] == 'batsmall':
        df = read_batsmall(data['path'])
        cycles = process_batsmall_data(df=df, options=options)

    elif data['kind'] == 'biologic':
        df = read_biologic(data['path'])
        cycles = process_biologic_data(df=df, options=options)

    return cycles

def read_neware(path, options={}):
    ''' Reads electrochemistry data, currently only from the Neware battery cycler. Will convert to .csv if the file type is .xlsx,
    which is the file format Neware provides for the backup data. In this case it matters whether summary is False or not. If the file
    type is .csv, it will just open the datafile and it does not matter whether summary is False or not.'''
    from xlsx2csv import Xlsx2csv

    # FIXME Do a check if a .csv-file already exists even if the .xlsx is passed

    # Convert from .xlsx to .csv to make read time faster
    if path.endswith('xlsx'):
        csv_details = ''.join(path[:-5]) + '_details.csv'
        csv_summary = os.path.abspath(''.join(path[:-5]) + '_summary.csv')

        if not os.path.isfile(csv_summary):
            Xlsx2csv(path, outputencoding="utf-8").convert(os.path.abspath(csv_summary), sheetid=3)

        if not os.path.isfile(csv_details):
            Xlsx2csv(path, outputencoding="utf-8").convert(csv_details, sheetid=4)

        if options['summary']:
            df = pd.read_csv(csv_summary)
        else:
            df = pd.read_csv(csv_details)

    elif path.endswith('csv'):
        df = pd.read_csv(path)

    return df

def read_batsmall(path):
    ''' Reads BATSMALL data into a DataFrame.

    Input:
    path (required): string with path to datafile

    Output:
    df: pandas DataFrame containing the data as-is, but without additional NaN-columns.'''

    # Determine if the decimal point is . or , by inspecting a data line
    decimal_point = '.'
    with open(path, 'r') as f:
        for i, line in enumerate(f):
            if i == 10:
                values = line.split()
                if len(values[1].split('.')) == 2:
                    decimal_point = '.'
                elif len(values[1].split(',')) == 2:
                    decimal_point = ','

    df = pd.read_csv(path, skiprows=2, sep='\t', decimal=decimal_point)
    df = df.loc[:, ~df.columns.str.contains('^Unnamed')]

    return df

def read_biologic(path):
    ''' Reads Bio-Logic data into a DataFrame.

    Input:
    path (required): string with path to datafile

    Output:
    df: pandas DataFrame containing the data as-is, but without additional NaN-columns.'''

    with open(path, 'rb') as f:
        lines = f.readlines()

    header_lines = int(lines[1].split()[-1]) - 1

    df = pd.read_csv(path, sep='\t', skiprows=header_lines, encoding='cp1252')
    df.dropna(inplace=True, axis=1)

    return df

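
# Usage sketch (hypothetical file name): the second line of an EC-Lab .mpt
# export states the header length (e.g. "Nb header lines : 57"; exact wording
# may vary by EC-Lab version), which is why read_biologic() reads lines[1]
# and skips header_lines - 1 rows so the column-name line becomes the header.
#
#     df = read_biologic('GC_cell01.mpt')
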
def process_batsmall_data(df, options=None):
    ''' Takes BATSMALL data in the form of a DataFrame, cleans the data up and converts units into the desired units.
    Splits up into individual charge and discharge DataFrames per cycle, and outputs a list where each element is a tuple with the Chg and DChg data. E.g. cycles[10][0] gives the charge data for the 11th cycle.

    For this to work, the cycling program must be set to use the counter.

    Input:
    df (required): A pandas DataFrame containing BATSMALL data, as obtained from read_batsmall().
    t (optional): Unit for time data. Defaults to ms.
    C (optional): Unit for specific capacity. Defaults to mAh/g.
    I (optional): Unit for current. Defaults to mA.
    U (optional): Unit for voltage. Defaults to V.

    Output:
    cycles: A list where each element is a tuple of (charge, discharge) DataFrames for one cycle.
    '''

    default_options = {
        'splice_cycles': False,
        'append': False,  # Add max of ions and specific_capacity of previous run #TODO Generalise
        'append_gap': 0,  # Add a gap between cycles - only used if append == True.
        'molecular_weight': None,
        'reverse_discharge': False,
        'units': None,
    }

    aux.update_options(options=options, default_options=default_options)
    options['kind'] = 'batsmall'

    # Complete set of new units and get the units used in the dataset, and convert values in the DataFrame from old to new.
    set_units(options)
    options['old_units'] = get_old_units(df, options)

    df = add_columns(df=df, options=options)  # adds columns to the DataFrame if active material weight and/or molecular weight has been passed in options

    df = unit_conversion(df=df, options=options)

    if options['splice_cycles']:
        df = splice_cycles(df=df, options=options)

    # Replace NaN with empty string in the comment column and then remove all steps where the program changes - this is due to inconsistent values for current
    df[["comment"]] = df[["comment"]].fillna(value={'comment': ''})
    df = df[df["comment"].str.contains("program") == False]

    # Create masks for charge and discharge curves
    chg_mask = df['current'] >= 0
    dchg_mask = df['current'] < 0

    # Initiate cycles list
    cycles = []

    # Loop through all the cycling steps and flip the sign of the current and capacities in the discharge steps
    for i in range(df["count"].max()):

        sub_df = df.loc[df['count'] == i+1].copy()

        sub_df.loc[dchg_mask, 'current'] *= -1
        sub_df.loc[dchg_mask, 'specific_capacity'] *= -1
        sub_df.loc[dchg_mask, 'ions'] *= -1

        chg_df = sub_df.loc[chg_mask]
        dchg_df = sub_df.loc[dchg_mask]

        # Continue to next iteration if the charge and discharge DataFrames are empty (i.e. no current)
        if chg_df.empty and dchg_df.empty:
            continue

        if options['append']:
            if cycles:
                chg_df.loc[chg_mask, 'ions'] += cycles[-1][1]['ions'].max() + options['append_gap']

            dchg_df.loc[dchg_mask, 'ions'] += chg_df['ions'].max() + options['append_gap']

        if options['reverse_discharge']:
            max_capacity = dchg_df['capacity'].max()
            dchg_df['capacity'] = np.abs(dchg_df['capacity'] - max_capacity)

            if 'specific_capacity' in df.columns:
                max_capacity = dchg_df['specific_capacity'].max()
                dchg_df['specific_capacity'] = np.abs(dchg_df['specific_capacity'] - max_capacity)

            if 'ions' in df.columns:
                max_capacity = dchg_df['ions'].max()
                dchg_df['ions'] = np.abs(dchg_df['ions'] - max_capacity)

        cycles.append((chg_df, dchg_df))

    return cycles

def splice_cycles(df, options: dict) -> pd.DataFrame:
    ''' Splices two cycles together - e.g. if one charge cycle is split into several cycles due to a change in parameters.

    Incomplete - only accommodates BatSmall so far, and only for charge.'''

    if options['kind'] == 'batsmall':

        # Create masks for charge and discharge curves
        chg_mask = df['current'] >= 0

        # Loop through all the cycling steps
        for i in range(df["count"].max()):
            sub_df = df.loc[df['count'] == i+1]
            sub_df_chg = sub_df.loc[chg_mask]

            # Get indices where the program changed
            chg_indices = sub_df_chg[sub_df_chg["comment"].str.contains("program") == True].index.to_list()

            # Delete first item if first cycle after rest (this will just be the start of the cycling)
            if i+1 == 1:
                del chg_indices[0]

            if chg_indices:
                last_chg = chg_indices.pop()

            if chg_indices:
                for i in chg_indices:
                    add = df['specific_capacity'].iloc[i-1]
                    df['specific_capacity'].iloc[i:last_chg] = df['specific_capacity'].iloc[i:last_chg] + add

    if options['kind'] == 'neware':

        if options['summary']:
            for i in range(df['cycle'].max()):
                sub_df = df.loc[df['cycle'] == i+1].copy()

                if sub_df['status'].loc[sub_df['status'] == 'CC Chg'].count() > 1:
                    indices = sub_df.index[sub_df['status'] == 'CC Chg']

                    add_columns = ['capacity', 'specific_capacity', 'ions', 'energy', 'cycle_time']

                    for column in add_columns:
                        if column in df.columns:
                            df[column].iloc[indices[-1]] = df[column].iloc[indices[-1]] + df[column].iloc[indices[0]]

                    df.drop(index=indices[0], inplace=True)
                    df.reset_index(inplace=True, drop=True)

        else:
            for i in range(df['cycle'].max()):
                sub_df = df.loc[df['cycle'] == i+1].copy()
                sub_chg_df = sub_df.loc[sub_df['status'] == 'CC Chg'].copy()

                steps_indices = sub_chg_df['steps'].unique()

                if len(steps_indices) > 1:

                    add_columns = ['capacity', 'specific_capacity', 'ions', 'energy', 'cycle_time']

                    for column in add_columns:
                        if column in df.columns:
                            # Extract the maximum value from the first of the two cycles by accessing the column value of the highest index of the first cycle
                            add = df[column].iloc[df.loc[df['steps'] == steps_indices[0]].index.max()]

                            df[column].loc[df['steps'] == steps_indices[1]] += add

    return df

def process_neware_data(df, options={}):

    """ Takes data from NEWARE in a DataFrame as read by read_neware() and converts units, adds columns and splits into cycles.

    Input:
    df: pandas DataFrame containing NEWARE data as read by read_neware()
    units: dictionary containing the desired units. Keywords: capacity, current, voltage, mass, energy, time
    splice_cycles: tuple containing indices of cycles that should be spliced. Specifically designed to add two charge steps during the formation cycle with two different max voltages
    active_material_weight: weight of the active material (in mg) used in the cell.
    molecular_weight: the molar mass (in g mol^-1) of the active material, to calculate the number of ions extracted. Assumes one electron per Li+/Na+ ion. """

    required_options = ['units', 'active_material_weight', 'molecular_weight', 'reverse_discharge', 'splice_cycles', 'increment_cycles_from', 'delete_datapoints']

    default_options = {
        'units': None,
        'active_material_weight': None,
        'molecular_weight': None,
        'reverse_discharge': False,
        'splice_cycles': None,
        'increment_cycles_from': None,  # index
        'delete_datapoints': None,  # list of indices
    }

    aux.update_options(options=options, required_options=required_options, default_options=default_options)
    options['kind'] = 'neware'

    if not options['summary']:
        # Complete set of new units and get the units used in the dataset, and convert values in the DataFrame from old to new.
        set_units(options=options)  # sets options['units']
        options['old_units'] = get_old_units(df=df, options=options)

        df = add_columns(df=df, options=options)  # adds columns to the DataFrame if active material weight and/or molecular weight has been passed in options

        df = unit_conversion(df=df, options=options)  # converts all units from the old units to the desired units

        if options['increment_cycles_from']:
            df['cycle'].iloc[options['increment_cycles_from']:] += 1

        if options['delete_datapoints']:
            for datapoint in options['delete_datapoints']:
                df.drop(index=datapoint, inplace=True)

        if options['splice_cycles']:
            df = splice_cycles(df=df, options=options)

        # Create masks for charge and discharge curves
        chg_mask = df['status'] == 'CC Chg'
        dchg_mask = df['status'] == 'CC DChg'

        # Initiate cycles list
        cycles = []

        # Loop through all the cycles and split into charge and discharge data
        for i in range(df["cycle"].max()):

            sub_df = df.loc[df['cycle'] == i+1].copy()

            chg_df = sub_df.loc[chg_mask]
            dchg_df = sub_df.loc[dchg_mask]

            # Continue to next iteration if the charge and discharge DataFrames are empty (i.e. no current)
            if chg_df.empty and dchg_df.empty:
                continue

            # Reverse the discharge curve if specified
            if options['reverse_discharge']:
                max_capacity = dchg_df['capacity'].max()
                dchg_df['capacity'] = np.abs(dchg_df['capacity'] - max_capacity)

                if 'specific_capacity' in df.columns:
                    max_capacity = dchg_df['specific_capacity'].max()
                    dchg_df['specific_capacity'] = np.abs(dchg_df['specific_capacity'] - max_capacity)

                if 'ions' in df.columns:
                    max_capacity = dchg_df['ions'].max()
                    dchg_df['ions'] = np.abs(dchg_df['ions'] - max_capacity)

            if not chg_df.empty:
                chg_df.reset_index(inplace=True)
            if not dchg_df.empty:
                dchg_df.reset_index(inplace=True)

            cycles.append((chg_df, dchg_df))

        return cycles

    elif options['summary']:
        set_units(options=options)
        options['old_units'] = get_old_units(df=df, options=options)

        df = add_columns(df=df, options=options)
        df = unit_conversion(df=df, options=options)

        if options['splice_cycles']:
            df = splice_cycles(df=df, options=options)

        chg_df = df.loc[df['status'] == 'CC Chg']
        chg_df.reset_index(inplace=True)
        dchg_df = df.loc[df['status'] == 'CC DChg']
        dchg_df.reset_index(inplace=True)

        # Construct new DataFrame
        new_df = pd.DataFrame(chg_df["cycle"])
        new_df.insert(1, 'charge_capacity', chg_df['capacity'])
        new_df.insert(1, 'charge_specific_capacity', chg_df['specific_capacity'])
        new_df.insert(1, 'discharge_capacity', dchg_df['capacity'])
        new_df.insert(1, 'discharge_specific_capacity', dchg_df['specific_capacity'])
        new_df.insert(1, 'charge_energy', chg_df['energy'])
        new_df.insert(1, 'charge_specific_energy', chg_df['specific_energy'])
        new_df.insert(1, 'discharge_energy', dchg_df['energy'])
        new_df.insert(1, 'discharge_specific_energy', dchg_df['specific_energy'])

        new_df = calculate_efficiency(df=new_df, options=options)

        return new_df

def process_biologic_data(df, options=None):

    required_options = ['units', 'active_material_weight', 'molecular_weight', 'reverse_discharge', 'splice_cycles']

    default_options = {
        'units': None,
        'active_material_weight': None,
        'molecular_weight': None,
        'reverse_discharge': False,
        'splice_cycles': None}

    # Check if the DataFrame contains GC or CV data.
    # FIXME This might not be a very rigorous method of checking. E.g. Rest has mode == 3, so if loading a short GC with many Rest datapoints, the mean will be 2 and it will be treated as CV. For now manual override is sufficient
    if not 'mode' in options.keys():
        options['mode'] = 'GC' if int(df['mode'].mean()) == 1 else 'CV'

    aux.update_options(options=options, required_options=required_options, default_options=default_options)
    options['kind'] = 'biologic'

    # Pick out the necessary columns
    headers = [
        'Ns changes', 'Ns', 'time/s', 'Ewe/V', 'Energy charge/W.h', 'Energy discharge/W.h', '<I>/mA', 'Capacity/mA.h', 'cycle number'] if options['mode'] == 'GC' else [
        'ox/red', 'time/s', 'control/V', 'Ewe/V', '<I>/mA', 'cycle number', '(Q-Qo)/C', 'P/W'
    ]

    df = df[headers].copy()

    # Complete set of new units and get the units used in the dataset, and convert values in the DataFrame from old to new.
    set_units(options)
    options['old_units'] = get_old_units(df=df, options=options)

    df = add_columns(df=df, options=options)

    df = unit_conversion(df=df, options=options)

    # Create masks for charge and discharge curves
    if options['mode'] == 'GC':
        chg_mask = (df['status'] == 1) & (df['status_change'] != 1)
        dchg_mask = (df['status'] == 2) & (df['status_change'] != 1)

    elif options['mode'] == 'CV':
        chg_mask = (df['status'] == 1)  # oxidation
        dchg_mask = (df['status'] == 0)  # reduction

    # Initiate cycles list
    cycles = []

    if df['cycle'].max() == 0:
        no_cycles = 1
    else:
        no_cycles = int(df['cycle'].max())

    # Loop through all the cycles and split into charge and discharge data
    for i in range(no_cycles):

        sub_df = df.loc[df['cycle'] == i].copy()

        # sub_df.loc[dchg_mask, 'current'] *= -1
        # sub_df.loc[dchg_mask, 'capacity'] *= -1

        chg_df = sub_df.loc[chg_mask]
        dchg_df = sub_df.loc[dchg_mask]

        # Continue to next iteration if the charge and discharge DataFrames are empty (i.e. no current)
        if chg_df.empty and dchg_df.empty:
            continue

        if options['mode'] == 'GC' and options['reverse_discharge']:
            max_capacity = dchg_df['capacity'].max()
            dchg_df['capacity'] = np.abs(dchg_df['capacity'] - max_capacity)

            if 'specific_capacity' in df.columns:
                max_capacity = dchg_df['specific_capacity'].max()
                dchg_df['specific_capacity'] = np.abs(dchg_df['specific_capacity'] - max_capacity)

            if 'ions' in df.columns:
                max_capacity = dchg_df['ions'].max()
                dchg_df['ions'] = np.abs(dchg_df['ions'] - max_capacity)

        if options['mode'] == 'CV':
            chg_df = chg_df.sort_values(by='voltage').reset_index(drop=True)
            dchg_df = dchg_df.sort_values(by='voltage', ascending=False).reset_index(drop=True)

        cycles.append((chg_df, dchg_df))

    return cycles

def add_columns(df, options):
    from . import unit_tables

    if options['kind'] == 'neware':

        if options['summary']:
            df[f'Energy({options["old_units"]["energy"]})'] = np.abs(df[f'Net discharge energy({options["old_units"]["energy"]})'])

        if options['active_material_weight']:
            df[f"SpecificCapacity({options['old_units']['capacity']}/mg)"] = df["Capacity({})".format(options['old_units']['capacity'])] / (options['active_material_weight'])
            df[f"SpecificEnergy({options['old_units']['energy']}/mg)"] = df["Energy({})".format(options['old_units']['energy'])] / (options['active_material_weight'])

        if options['molecular_weight']:
            faradays_constant = 96485.3365  # [F] = C mol^-1 = As mol^-1
            seconds_per_hour = 3600  # s h^-1
            f = faradays_constant / seconds_per_hour * 1000.0  # [f] = mAh mol^-1

            df["IonsExtracted"] = (df["SpecificCapacity({}/mg)".format(options['old_units']['capacity'])]*options['molecular_weight'])*1000/f

    if options['kind'] == 'biologic':
        if options['active_material_weight']:

            capacity = options['old_units']['capacity'].split('h')[0] + '.h'

            df["SpecificCapacity({}/mg)".format(options['old_units']["capacity"])] = df["Capacity/{}".format(capacity)] / (options['active_material_weight'])

        if options['molecular_weight']:
            faradays_constant = 96485.3365  # [F] = C mol^-1 = As mol^-1
            seconds_per_hour = 3600  # s h^-1
            f = faradays_constant / seconds_per_hour * 1000.0  # [f] = mAh mol^-1

            df["IonsExtracted"] = (df["SpecificCapacity({}/mg)".format(options['old_units']['capacity'])]*options['molecular_weight'])*1000/f

    if options['kind'] == 'batsmall':
        if options['active_material_weight']:

            active_material_weight = options['active_material_weight'] * unit_tables.mass()['mg'].loc[options['units']['mass']]
            capacity = options['old_units']['capacity']

            df[f'Capacity [{options["old_units"]["capacity"]}]'] = df[f'C [{options["old_units"]["capacity"]}/{options["old_units"]["mass"]}]'] * active_material_weight

        if options['molecular_weight']:
            faradays_constant = 96485.3365  # [F] = C mol^-1 = As mol^-1
            seconds_per_hour = 3600  # s h^-1
            f = faradays_constant / seconds_per_hour * 1000.0  # [f] = mAh mol^-1

            molecular_weight = options['molecular_weight'] * unit_tables.mass()['g'].loc[options['old_units']['mass']]

            df["IonsExtracted"] = (df[f'C [{options["old_units"]["capacity"]}/{options["old_units"]["mass"]}]'] * molecular_weight)/f

    # df['reaction_coordinate'] = (df[f'TT [{options["old_units"]["time"]}]'] * unit_tables.time()[options['old_units']["time"]].loc["h"]) / np.abs(df[f'I [{options["old_units"]["current"]}]'] * unit_tables.current()[options['old_units']["current"]].loc['A'])

    return df

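
# Worked example of the IonsExtracted conversion above (illustrative numbers):
# f = 96485.3365 / 3600 * 1000 ~ 26801.5 mAh mol^-1, so a specific capacity of
# 0.1 mAh/mg with a molecular weight of 100 g mol^-1 gives
# 0.1 * 100 * 1000 / 26801.5 ~ 0.37 ions (electrons) per formula unit.
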
def calculate_efficiency(df: pd.DataFrame, options: dict) -> pd.DataFrame:

    default_options = {
        'reference_index': 0
    }

    options = aux.update_options(options=options, required_options=default_options.keys(), default_options=default_options)

    df['charge_capacity_fade'] = (df['charge_capacity'] / df['charge_capacity'].iloc[options['reference_index']])*100
    df['discharge_capacity_fade'] = (df['discharge_capacity'] / df['discharge_capacity'].iloc[options['reference_index']])*100

    df['coulombic_efficiency'] = (df['discharge_capacity'] / df['charge_capacity'])*100
    df['energy_efficiency'] = (df['discharge_energy'] / df['charge_energy'])*100

    return df

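
# Minimal sketch of what calculate_efficiency() computes (toy numbers, and
# assuming aux.update_options() fills in the defaults for an empty options dict):
#
#     toy = pd.DataFrame({'charge_capacity': [1.00, 0.98],
#                         'discharge_capacity': [0.95, 0.94],
#                         'charge_energy': [3.9, 3.8],
#                         'discharge_energy': [3.5, 3.4]})
#     toy = calculate_efficiency(df=toy, options={})
#     # toy['coulombic_efficiency'] -> [95.0, ~95.9]; the fade columns are
#     # relative to the row at reference_index (default 0)
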
def unit_conversion(df, options):
    from . import unit_tables

    if options['kind'] == 'batsmall':

        df["TT [{}]".format(options['old_units']["time"])] = df["TT [{}]".format(options['old_units']["time"])] * unit_tables.time()[options['old_units']["time"]].loc[options['units']['time']]
        df["U [{}]".format(options['old_units']["voltage"])] = df["U [{}]".format(options['old_units']["voltage"])] * unit_tables.voltage()[options['old_units']["voltage"]].loc[options['units']['voltage']]
        df["I [{}]".format(options['old_units']["current"])] = df["I [{}]".format(options['old_units']["current"])] * unit_tables.current()[options['old_units']["current"]].loc[options['units']['current']]
        df["C [{}/{}]".format(options['old_units']["capacity"], options['old_units']["mass"])] = df["C [{}/{}]".format(options['old_units']["capacity"], options['old_units']["mass"])] * (unit_tables.capacity()[options['old_units']["capacity"]].loc[options['units']["capacity"]] / unit_tables.mass()[options['old_units']["mass"]].loc[options['units']["mass"]])

        columns = ['time', 'voltage', 'current', 'count', 'specific_capacity', 'comment']

        # Add column labels for capacity and ions if they exist
        if f'Capacity [{options["old_units"]["capacity"]}]' in df.columns:
            df[f'Capacity [{options["old_units"]["capacity"]}]'] = df[f'Capacity [{options["old_units"]["capacity"]}]'] * unit_tables.capacity()[options['old_units']['capacity']].loc[options['units']['capacity']]
            columns.append('capacity')

        if 'IonsExtracted' in df.columns:
            columns.append('ions')

        # columns.append('reaction_coordinate')

        df.columns = columns

    if options['kind'] == 'neware':

        record_number = 'Data serial number' if 'Data serial number' in df.columns else 'Record number'
        relative_time = 'Relative Time(h:min:s.ms)' if 'Relative Time(h:min:s.ms)' in df.columns else 'Relative Time'
        continuous_time = 'Continuous Time(h:min:s.ms)' if 'Continuous Time(h:min:s.ms)' in df.columns else 'Continuous Time'
        real_time = 'Real Time(h:min:s.ms)' if 'Real Time(h:min:s.ms)' in df.columns else 'Real Time'

        if options['summary']:
            df[f'Energy({options["old_units"]["energy"]})'] = df[f'Energy({options["old_units"]["energy"]})'] * unit_tables.energy()[options['old_units']['energy']].loc[options['units']['energy']]
            df[f'Starting current({options["old_units"]["current"]})'] = df[f'Starting current({options["old_units"]["current"]})'] * unit_tables.current()[options['old_units']['current']].loc[options['units']['current']]
            df[f'Start Volt({options["old_units"]["voltage"]})'] = df[f'Start Volt({options["old_units"]["voltage"]})'] * unit_tables.voltage()[options['old_units']['voltage']].loc[options['units']['voltage']]
            df[f'Capacity({options["old_units"]["capacity"]})'] = df[f'Capacity({options["old_units"]["capacity"]})'] * unit_tables.capacity()[options['old_units']['capacity']].loc[options['units']['capacity']]
            df[f'Energy({options["old_units"]["energy"]})'] = df[f'Energy({options["old_units"]["energy"]})'] * unit_tables.energy()[options['old_units']['energy']].loc[options['units']['energy']]
            df[f'CycleTime({options["units"]["time"]})'] = df.apply(lambda row: convert_time_string(row[relative_time], unit=options['units']['time']), axis=1)
            df[f'RunTime({options["units"]["time"]})'] = df.apply(lambda row: convert_datetime_string(row[real_time], reference=df[real_time].iloc[0], ref_time=df[f'CycleTime({options["units"]["time"]})'].iloc[0], unit=options['units']['time']), axis=1)

            droplist = [
                'Chnl',
                'Original step',
                f'End Volt({options["old_units"]["voltage"]})',
                f'Termination current({options["old_units"]["current"]})',
                relative_time,
                real_time,
                continuous_time,
                f'Net discharge capacity({options["old_units"]["capacity"]})',
                f'Chg Cap({options["old_units"]["capacity"]})',
                f'DChg Cap({options["old_units"]["capacity"]})',
                f'Net discharge energy({options["old_units"]["energy"]})',
                f'Chg Eng({options["old_units"]["energy"]})',
                f'DChg Eng({options["old_units"]["energy"]})'
            ]

            # Drop all undesirable columns
            for drop in droplist:
                if drop in df.columns:
                    df.drop(drop, axis=1, inplace=True)

            columns = ['cycle', 'steps', 'status', 'voltage', 'current', 'capacity', 'energy']

            # Add column labels for specific capacity and ions if they exist
            if 'SpecificCapacity({}/mg)'.format(options['old_units']['capacity']) in df.columns:
                df['SpecificCapacity({}/mg)'.format(options['old_units']['capacity'])] = df['SpecificCapacity({}/mg)'.format(options['old_units']['capacity'])] * unit_tables.capacity()[options['old_units']['capacity']].loc[options['units']['capacity']] / unit_tables.mass()['mg'].loc[options['units']["mass"]]
                columns.append('specific_capacity')

            if f'SpecificEnergy({options["old_units"]["energy"]}/mg)' in df.columns:
                df[f'SpecificEnergy({options["old_units"]["energy"]}/mg)'] = df[f'SpecificEnergy({options["old_units"]["energy"]}/mg)'] * unit_tables.energy()[options['old_units']['energy']].loc[options['units']['energy']] / unit_tables.mass()['mg'].loc[options['units']["mass"]]
                columns.append('specific_energy')

            if 'IonsExtracted' in df.columns:
                columns.append('ions')

            # Append the time column labels here as they were the last columns to be generated
            columns.append('cycle_time')
            columns.append('runtime')

            # Apply new column labels
            df.columns = columns

        else:
            df['Current({})'.format(options['old_units']['current'])] = df['Current({})'.format(options['old_units']['current'])] * unit_tables.current()[options['old_units']['current']].loc[options['units']['current']]
            df['Voltage({})'.format(options['old_units']['voltage'])] = df['Voltage({})'.format(options['old_units']['voltage'])] * unit_tables.voltage()[options['old_units']['voltage']].loc[options['units']['voltage']]
            df['Capacity({})'.format(options['old_units']['capacity'])] = df['Capacity({})'.format(options['old_units']['capacity'])] * unit_tables.capacity()[options['old_units']['capacity']].loc[options['units']['capacity']]
            df['Energy({})'.format(options['old_units']['energy'])] = df['Energy({})'.format(options['old_units']['energy'])] * unit_tables.energy()[options['old_units']['energy']].loc[options['units']['energy']]
            df['CycleTime({})'.format(options['units']['time'])] = df.apply(lambda row: convert_time_string(row[relative_time], unit=options['units']['time']), axis=1)
            df['RunTime({})'.format(options['units']['time'])] = df.apply(lambda row: convert_datetime_string(row[real_time], reference=df[real_time].iloc[0], ref_time=df[f'CycleTime({options["units"]["time"]})'].iloc[0], unit=options['units']['time']), axis=1)
            columns = ['status', 'jump', 'cycle', 'steps', 'current', 'voltage', 'capacity', 'energy']

            if 'SpecificCapacity({}/mg)'.format(options['old_units']['capacity']) in df.columns:
                df['SpecificCapacity({}/mg)'.format(options['old_units']['capacity'])] = df['SpecificCapacity({}/mg)'.format(options['old_units']['capacity'])] * unit_tables.capacity()[options['old_units']['capacity']].loc[options['units']['capacity']] / unit_tables.mass()['mg'].loc[options['units']["mass"]]
                columns.append('specific_capacity')

            if f'SpecificEnergy({options["old_units"]["energy"]}/mg)' in df.columns:
                df[f'SpecificEnergy({options["old_units"]["energy"]}/mg)'] = df[f'SpecificEnergy({options["old_units"]["energy"]}/mg)'] * unit_tables.energy()[options['old_units']['energy']].loc[options['units']['energy']] / unit_tables.mass()['mg'].loc[options['units']["mass"]]
                columns.append('specific_energy')

            if 'IonsExtracted' in df.columns:
                columns.append('ions')

            columns.append('cycle_time')
            columns.append('time')

            droplist = [record_number, relative_time, real_time]

            for drop in droplist:
                if drop in df.columns:
                    df.drop(drop, axis=1, inplace=True)

            df.columns = columns

    if options['kind'] == 'biologic':
        for column in df.columns:
            if 'time' in column:
                df['time/{}'.format(options['old_units']['time'])] = df["time/{}".format(options['old_units']["time"])] * unit_tables.time()[options['old_units']["time"]].loc[options['units']['time']]

            if 'Ewe' in column:
                df["Ewe/{}".format(options['old_units']["voltage"])] = df["Ewe/{}".format(options['old_units']["voltage"])] * unit_tables.voltage()[options['old_units']["voltage"]].loc[options['units']['voltage']]

            if '<I>' in column:
                df["<I>/{}".format(options['old_units']["current"])] = df["<I>/{}".format(options['old_units']["current"])] * unit_tables.current()[options['old_units']["current"]].loc[options['units']['current']]

            if 'Capacity' in column:
                capacity = options['old_units']['capacity'].split('h')[0] + '.h'
                df["Capacity/{}".format(capacity)] = df["Capacity/{}".format(capacity)] * (unit_tables.capacity()[options['old_units']["capacity"]].loc[options['units']["capacity"]])

        columns = [
            'status_change', 'status', 'time', 'voltage', 'energy_charge', 'energy_discharge', 'current', 'capacity', 'cycle'] if options['mode'] == 'GC' else [  # GC headers
            'status', 'time', 'control_voltage', 'voltage', 'current', 'cycle', 'charge', 'power'  # CV headers
        ]

        if options['mode'] == 'GC':
            if 'SpecificCapacity({}/mg)'.format(options['old_units']['capacity']) in df.columns:
                df['SpecificCapacity({}/mg)'.format(options['old_units']['capacity'])] = df['SpecificCapacity({}/mg)'.format(options['old_units']['capacity'])] * unit_tables.capacity()[options['old_units']['capacity']].loc[options['units']['capacity']] / unit_tables.mass()['mg'].loc[options['units']["mass"]]
                columns.append('specific_capacity')

            if 'IonsExtracted' in df.columns:
                columns.append('ions')

        df.columns = columns

    return df

def set_units(options: dict) -> None:

    # Complete the list of units - if not all are passed, then default values will be used
    required_units = ['time', 'current', 'voltage', 'capacity', 'mass', 'energy', 'specific_capacity']

    default_units = {
        'time': 'h',
        'current': 'mA',
        'voltage': 'V',
        'capacity': 'mAh',
        'mass': 'g',
        'energy': 'mWh',
        'specific_capacity': None}

    if not options['units']:
        options['units'] = default_units

    aux.update_options(options=options['units'], required_options=required_units, default_options=default_units)

    options['units']['specific_capacity'] = r'{} {}'.format(options['units']['capacity'], options['units']['mass']) + '$^{-1}$'

def get_old_units(df: pd.DataFrame, options: dict) -> dict:
    ''' Reads a DataFrame with cycling data, determines which units have been used, and returns these in a dictionary '''

    if options['kind'] == 'batsmall':

        old_units = {}

        for column in df.columns:
            if 'TT [' in column:
                old_units['time'] = column.split()[-1].strip('[]')
            elif 'U [' in column:
                old_units['voltage'] = column.split()[-1].strip('[]')
            elif 'I [' in column:
                old_units['current'] = column.split()[-1].strip('[]')
            elif 'C [' in column:
                old_units['capacity'], old_units['mass'] = column.split()[-1].strip('[]').split('/')

        # time = df.columns[0].split()[-1].strip('[]')
        # voltage = df.columns[1].split()[-1].strip('[]')
        # current = df.columns[2].split()[-1].strip('[]')
        # capacity, mass = df.columns[4].split()[-1].strip('[]').split('/')
        # old_units = {'time': time, 'current': current, 'voltage': voltage, 'capacity': capacity, 'mass': mass}

    if options['kind'] == 'neware':

        # Build the dictionary incrementally (as for the other formats) so a
        # missing column does not leave a name undefined
        old_units = {}

        for column in df.columns:
            if 'Voltage' in column or 'Start Volt' in column:
                old_units['voltage'] = column.split('(')[-1].strip(')')
            elif 'Current' in column or 'Starting current' in column:
                old_units['current'] = column.split('(')[-1].strip(')')
            elif 'Capacity' in column:
                old_units['capacity'] = column.split('(')[-1].strip(')')
            elif 'Energy' in column or 'Eng' in column:
                old_units['energy'] = column.split('(')[-1].strip(')')

    if options['kind'] == 'biologic':

        old_units = {}
        for column in df.columns:
            if 'time' in column:
                old_units['time'] = column.split('/')[-1]
            elif 'Ewe' in column:
                old_units['voltage'] = column.split('/')[-1]
            elif 'Capacity' in column:
                old_units['capacity'] = column.split('/')[-1].replace('.', '')
            elif 'Energy' in column:
                old_units['energy'] = column.split('/')[-1].replace('.', '')
            elif '<I>' in column:
                old_units['current'] = column.split('/')[-1]

    return old_units

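# Example: a BATSMALL column named 'TT [h]' yields old_units['time'] = 'h',
# and 'C [mAh/mg]' yields old_units['capacity'] = 'mAh', old_units['mass'] = 'mg'.
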
def convert_time_string(time_string, unit='ms'):
    ''' Convert a time string from Neware data with the format hh:mm:ss.xx to any given unit '''

    h, m, s = time_string.split(':')
    ms = float(s)*1000 + int(m)*1000*60 + int(h)*1000*60*60

    factors = {'ms': 1, 's': 1/1000, 'min': 1/(1000*60), 'h': 1/(1000*60*60)}

    t = ms*factors[unit]

    return t

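# Worked example: convert_time_string('01:30:15.5', unit='min')
# -> ms = 15500 + 1800000 + 3600000 = 5415500, so t = 5415500/60000 ≈ 90.26 min
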
def convert_datetime_string(datetime_string, reference, ref_time, unit='s'):
    ''' Convert a datetime string from Neware data with the format yyyy-mm-dd hh:mm:ss to any given unit, relative to a reference datetime '''

    from datetime import datetime

    # Parse the current datetime string
    current_date, current_time = datetime_string.split()
    current_year, current_month, current_day = current_date.split('-')
    current_hour, current_minute, current_second = current_time.split(':')
    current_date = datetime(int(current_year), int(current_month), int(current_day), int(current_hour), int(current_minute), int(current_second))

    # Parse the reference datetime string
    reference_date, reference_time = reference.split()
    reference_year, reference_month, reference_day = reference_date.split('-')
    reference_hour, reference_minute, reference_second = reference_time.split(':')
    reference_date = datetime(int(reference_year), int(reference_month), int(reference_day), int(reference_hour), int(reference_minute), int(reference_second))

    days = current_date - reference_date

    s = days.days*24*60*60 + days.seconds

    factors = {'ms': 1000, 's': 1, 'min': 1/(60), 'h': 1/(60*60)}

    time = s * factors[unit] + ref_time

    return time

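# Worked example: with reference='2022-01-01 00:00:00' and ref_time=0,
# convert_datetime_string('2022-01-02 06:00:00', reference, 0, unit='h')
# gives s = 1*86400 + 21600 = 108000 and returns 108000/3600 = 30.0 hours.
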
@ -1,747 +0,0 @@
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator)

import pandas as pd
import numpy as np
import math
import os
import shutil
from PIL import Image

import ipywidgets as widgets
from IPython.display import display

import nafuma.electrochemistry as ec
import nafuma.plotting as btp
import nafuma.auxillary as aux

def plot_gc(data, options=None):

    # Update options
    default_options = {
        'force_reload': False,
        'x_vals': 'capacity', 'y_vals': 'voltage',
        'which_cycles': 'all',
        'limit': None,  # Limit line to be drawn
        'exclude_cycles': [],
        'show_plot': True,
        'summary': False,
        'charge': True, 'discharge': True,
        'colours': None,
        'markers': None,
        'differentiate_charge_discharge': True,
        'gradient': False,
        'interactive': False,
        'interactive_session_active': False,
        'rc_params': {},
        'format_params': {},
        'save_gif': False,
        'save_path': 'animation.gif',
        'fps': 1,
        'fig': None, 'ax': None,
        'edgecolor': plt.rcParams['lines.markeredgecolor'],
        'plot_every': 1,
    }

    options = aux.update_options(options=options, default_options=default_options)

    # Read data if not already loaded
    if 'cycles' not in data.keys() or options['force_reload']:
        data['cycles'] = ec.io.read_data(data=data, options=options)

    # Update list of cycles to correct indices
    update_cycles_list(data=data, options=options)

    if options['interactive']:
        options['interactive'], options['interactive_session_active'] = False, True
        plot_gc_interactive(data=data, options=options)
        return

    colours = generate_colours(options=options)
    markers = generate_markers(options=options)

    if not options['summary']:

        if options['show_plot']:
            # Prepare plot
            if not options['fig'] and not options['ax']:
                fig, ax = btp.prepare_plot(options=options)
            else:
                fig, ax = options['fig'], options['ax']

            for i, cycle in enumerate(options['which_cycles']):
                if options['charge']:
                    data['cycles'][cycle][0].plot(x=options['x_vals'], y=options['y_vals'], ax=ax, c=colours[i][0])

                if options['discharge']:
                    data['cycles'][cycle][1].plot(x=options['x_vals'], y=options['y_vals'], ax=ax, c=colours[i][1])

            if options['interactive_session_active']:
                update_labels(options, force=True)
            else:
                update_labels(options)

        if options['save_gif'] and not options['interactive_session_active']:
            if not os.path.isdir('tmp'):
                os.makedirs('tmp')

            # Scale image to make GIF smaller
            options['format_params']['width'] = 7.5
            options['format_params']['height'] = 3
            options['format_params']['dpi'] = 200

            for i, cycle in enumerate(data['cycles']):
                if i in options['which_cycles']:

                    giffig, gifax = btp.prepare_plot(options=options)

                    if options['charge']:
                        cycle[0].plot(x=options['x_vals'], y=options['y_vals'], ax=gifax, c=colours[i][0])
                    if options['discharge']:
                        cycle[1].plot(x=options['x_vals'], y=options['y_vals'], ax=gifax, c=colours[i][1])

                    gifax.text(x=gifax.get_xlim()[1]*0.8, y=3, s=f'{i+1}')
                    update_labels(options)

                    giffig, gifax = btp.adjust_plot(fig=giffig, ax=gifax, options=options)

                    plt.savefig(os.path.join('tmp', str(i+1).zfill(4)+'.png'))
                    plt.close()

            img_paths = [os.path.join('tmp', path) for path in os.listdir('tmp') if path.endswith('png')]
            frames = []
            for path in img_paths:
                frame = Image.open(path)
                frames.append(frame)

            frames[0].save(options['save_path'], format='GIF', append_images=frames[1:], save_all=True, duration=(1/options['fps'])*1000, loop=0)

            shutil.rmtree('tmp')

    elif options['summary'] and options['show_plot']:
        # Prepare plot
        if not options['fig'] and not options['ax']:
            fig, ax = btp.prepare_plot(options=options)
        else:
            fig, ax = options['fig'], options['ax']

        mask = []
        for i in range(data['cycles'].shape[0]):
            if i+1 in options['which_cycles']:
                mask.append(True)
            else:
                mask.append(False)

        # Drop the last row if it is midway through a charge, in order to avoid a mismatch between the lengths of the mask and the dataset.
        if len(mask) > data['cycles'].shape[0]:
            del mask[-1]
            data['cycles'].drop(data['cycles'].tail(1).index, inplace=True)

        # FIXME To begin, the default is that y-values correspond to x-values. This should probably be implemented in a more logical and consistent manner in the future.
        if options['x_vals'] in ['coulombic_efficiency', 'energy_efficiency']:
            data['cycles'].loc[mask].plot(x='cycle', y=options['x_vals'], ax=ax, color=colours[0][1], kind='scatter', s=plt.rcParams['lines.markersize']*10, marker=markers[0], edgecolor=plt.rcParams['lines.markeredgecolor'])
            if options['limit']:
                ax.axhline(y=options['limit'], ls='--', c='black')

        else:
            if options['charge']:
                yval = 'charge_' + options['x_vals']
                data['cycles'].loc[mask].plot(x='cycle', y=yval, ax=ax, color=colours[0][0], kind='scatter', s=plt.rcParams['lines.markersize']*10, marker=markers[0], edgecolor=plt.rcParams['lines.markeredgecolor'])

            if options['discharge']:
                yval = 'discharge_' + options['x_vals']
                data['cycles'].loc[mask].plot(x='cycle', y=yval, ax=ax, color=colours[0][1], kind='scatter', s=plt.rcParams['lines.markersize']*10, marker=markers[1], edgecolor=plt.rcParams['lines.markeredgecolor'])

            if options['limit']:
                ax.axhline(y=options['limit'], ls='--', c='black')

        if options['interactive_session_active']:
            update_labels(options, force=True)
        else:
            update_labels(options)

    if options['show_plot']:
        fig, ax = btp.adjust_plot(fig=fig, ax=ax, options=options)
        return data['cycles'], fig, ax
    else:
        return data['cycles'], None, None

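# A minimal usage sketch (file name and data keys hypothetical - see
# ec.io.read_data for the expected structure):
#
#   data = {'path': 'cell_01.csv', 'kind': 'neware'}
#   cycles, fig, ax = plot_gc(data, options={'which_cycles': [1, (3, 5)],
#                                            'x_vals': 'specific_capacity'})
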
def plot_gc_interactive(data, options):

    w = widgets.interactive(btp.ipywidgets_update, func=widgets.fixed(plot_gc), data=widgets.fixed(data), options=widgets.fixed(options),
                            charge=widgets.ToggleButton(value=True),
                            discharge=widgets.ToggleButton(value=True),
                            x_vals=widgets.Dropdown(options=['specific_capacity', 'capacity', 'ions', 'voltage', 'time', 'energy'], value='specific_capacity', description='X-values')
                            )

    options['widget'] = w

    display(w)

def plot_cv(data, options):

    # Update options
    default_options = {
        'force_reload': False,
        'x_vals': 'voltage', 'y_vals': 'current',
        'which_cycles': 'all',
        'limit': None,  # Limit line to be drawn
        'exclude_cycles': [],
        'show_plot': True,
        'charge': True, 'discharge': True,
        'colours': None,
        'differentiate_charge_discharge': True,
        'gradient': False,
        'interactive': False,
        'interactive_session_active': False,
        'rc_params': {},
        'format_params': {},
        'save_gif': False,
        'save_path': 'animation.gif',
        'fps': 1,
        'plot_every': 1,
        'fig': None,
        'ax': None
    }

    options = aux.update_options(options=options, default_options=default_options)

    # Read data if not already loaded
    if 'cycles' not in data.keys() or options['force_reload']:
        data['cycles'] = ec.io.read_data(data=data, options=options)

    # Update list of cycles to correct indices
    update_cycles_list(data=data, options=options)

    colours = generate_colours(options=options)

    if options['show_plot']:
        # Prepare plot
        if not options['fig'] and not options['ax']:
            fig, ax = btp.prepare_plot(options=options)
        else:
            fig, ax = options['fig'], options['ax']

        for i, cycle in enumerate(options['which_cycles']):
            if options['charge']:
                data['cycles'][cycle][0].plot(x=options['x_vals'], y=options['y_vals'], ax=ax, c=colours[i][0])

            if options['discharge']:
                data['cycles'][cycle][1].plot(x=options['x_vals'], y=options['y_vals'], ax=ax, c=colours[i][1])

        # for i, cycle in enumerate(data['cycles']):
        #     if i in options['which_cycles']:
        #         if options['charge']:
        #             cycle[0].plot(x=options['x_vals'], y=options['y_vals'], ax=ax, c=colours[i][0])

        #         if options['discharge']:
        #             cycle[1].plot(x=options['x_vals'], y=options['y_vals'], ax=ax, c=colours[i][1])

        update_labels(options)

    if options['save_gif'] and not options['interactive_session_active']:
        if not os.path.isdir('tmp'):
            os.makedirs('tmp')

        # Scale image to make GIF smaller
        options['format_params']['width'] = 7.5
        options['format_params']['height'] = 3
        options['format_params']['dpi'] = 200

        for i, cycle in enumerate(data['cycles']):
            if i in options['which_cycles']:

                giffig, gifax = btp.prepare_plot(options=options)

                if options['charge']:
                    cycle[0].plot(x=options['x_vals'], y=options['y_vals'], ax=gifax, c=colours[i][0])
                if options['discharge']:
                    cycle[1].plot(x=options['x_vals'], y=options['y_vals'], ax=gifax, c=colours[i][1])

                gifax.text(x=gifax.get_xlim()[1]*0.8, y=3, s=f'{i+1}')
                update_labels(options)

                giffig, gifax = btp.adjust_plot(fig=giffig, ax=gifax, options=options)

                plt.savefig(os.path.join('tmp', str(i+1).zfill(4)+'.png'))
                plt.close()

        img_paths = [os.path.join('tmp', path) for path in os.listdir('tmp') if path.endswith('png')]
        frames = []
        for path in img_paths:
            frame = Image.open(path)
            frames.append(frame)

        frames[0].save(options['save_path'], format='GIF', append_images=frames[1:], save_all=True, duration=(1/options['fps'])*1000, loop=0)

        shutil.rmtree('tmp')

    if options['show_plot']:
        fig, ax = btp.adjust_plot(fig=fig, ax=ax, options=options)
        return data['cycles'], fig, ax
    else:
        return data['cycles'], None, None

def update_labels(options, force=False):

    if 'xlabel' not in options.keys() or force:
        options['xlabel'] = options['x_vals'].capitalize().replace('_', ' ')

    if 'ylabel' not in options.keys() or force:
        options['ylabel'] = options['y_vals'].capitalize().replace('_', ' ')

    if 'xunit' not in options.keys() or force:
        if options['x_vals'] == 'capacity':
            options['xunit'] = options['units']['capacity']
        elif options['x_vals'] == 'specific_capacity':
            options['xunit'] = f"{options['units']['capacity']} {options['units']['mass']}$^{{-1}}$"
        elif options['x_vals'] == 'time':
            options['xunit'] = options['units']['time']
        elif options['x_vals'] == 'ions':
            options['xunit'] = None

    if 'yunit' not in options.keys() or force:
        if options['y_vals'] == 'voltage':
            options['yunit'] = options['units']['voltage']

def update_cycles_list(data, options: dict) -> None:

    if options['which_cycles'] == 'all':
        options['which_cycles'] = [i for i in range(len(data['cycles']))]

    elif isinstance(options['which_cycles'], list):

        cycles = []

        for cycle in options['which_cycles']:
            if isinstance(cycle, int):
                cycles.append(cycle-1)

            elif isinstance(cycle, tuple):
                interval = [i-1 for i in range(cycle[0], cycle[1]+1)]
                cycles.extend(interval)

        options['which_cycles'] = cycles

    # A tuple is used to define an interval - since elements of tuples can't be assigned, it is converted to a list here.
    elif isinstance(options['which_cycles'], tuple):
        which_cycles = list(options['which_cycles'])

        if which_cycles[0] <= 0:
            which_cycles[0] = 1

        if which_cycles[1] < 0:
            which_cycles[1] = len(data['cycles'])

        options['which_cycles'] = [i-1 for i in range(which_cycles[0], which_cycles[1]+1)]

    # Rebuild the list instead of deleting during iteration, which would skip elements
    options['which_cycles'] = [cycle for cycle in options['which_cycles'] if cycle not in options['exclude_cycles']]

    options['which_cycles'] = options['which_cycles'][::options['plot_every']]

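# Worked example: which_cycles=[1, (3, 5)] expands to the zero-based indices
# [0, 2, 3, 4]; with plot_every=2 this list is then thinned to [0, 3].
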
def prettify_gc_plot(fig, ax, options=None):

    ##################################################################
    ######################### UPDATE OPTIONS #########################
    ##################################################################

    # Define the required options
    required_options = [
        'columns',
        'xticks', 'yticks',
        'show_major_ticks', 'show_minor_ticks',
        'xlim', 'ylim',
        'hide_x_axis', 'hide_y_axis',
        'positions',
        'x_vals', 'y_vals',
        'xlabel', 'ylabel',
        'units', 'sizes',
        'title'
    ]

    # Define the default options
    default_options = {
        'columns': 1,
        'xticks': None, 'yticks': None,
        'show_major_ticks': [True, True, True, True], 'show_minor_ticks': [True, True, True, True],
        'xlim': None, 'ylim': None,
        'hide_x_axis': False, 'hide_y_axis': False,
        'positions': {'xaxis': 'bottom', 'yaxis': 'left'},
        'x_vals': 'specific_capacity', 'y_vals': 'voltage',
        'xlabel': None, 'ylabel': None,
        'units': {'capacity': 'mAh', 'specific_capacity': r'mAh g$^{-1}$', 'time': 's', 'current': 'mA', 'energy': 'mWh', 'mass': 'g', 'voltage': 'V'},
        'sizes': None,
        'title': None
    }

    aux.update_options(options, required_options, default_options)

    ##################################################################
    ########################## DEFINE SIZES ##########################
    ##################################################################

    # Define the required sizes
    required_sizes = [
        'labels',
        'legend',
        'title',
        'line', 'axes',
        'tick_labels',
        'major_ticks', 'minor_ticks']

    # Define default sizes
    default_sizes = {
        'labels': 30*options['columns'],
        'legend': 30*options['columns'],
        'title': 30*options['columns'],
        'line': 3*options['columns'],
        'axes': 3*options['columns'],
        'tick_labels': 30*options['columns'],
        'major_ticks': 20*options['columns'],
        'minor_ticks': 10*options['columns']
    }

    # Initialise dictionary if it doesn't exist
    if not options['sizes']:
        options['sizes'] = {}

    # Update dictionary with default values where none is supplied
    for size in required_sizes:
        if size not in options['sizes']:
            options['sizes'][size] = default_sizes[size]

    ##################################################################
    ########################## AXIS LABELS ###########################
    ##################################################################

    if not options['xlabel']:
        options['xlabel'] = prettify_labels(options['x_vals']) + ' [{}]'.format(options['units'][options['x_vals']])
    else:
        options['xlabel'] = options['xlabel'] + ' [{}]'.format(options['units'][options['x_vals']])

    if not options['ylabel']:
        options['ylabel'] = prettify_labels(options['y_vals']) + ' [{}]'.format(options['units'][options['y_vals']])
    else:
        options['ylabel'] = options['ylabel'] + ' [{}]'.format(options['units'][options['y_vals']])

    ax.set_xlabel(options['xlabel'], size=options['sizes']['labels'])
    ax.set_ylabel(options['ylabel'], size=options['sizes']['labels'])

    ##################################################################
    ###################### TICK MARKS & LABELS #######################
    ##################################################################

    ax.tick_params(direction='in', which='major', bottom=options['show_major_ticks'][0], left=options['show_major_ticks'][1], top=options['show_major_ticks'][2], right=options['show_major_ticks'][3], length=options['sizes']['major_ticks'], width=options['sizes']['axes'])
    ax.tick_params(direction='in', which='minor', bottom=options['show_minor_ticks'][0], left=options['show_minor_ticks'][1], top=options['show_minor_ticks'][2], right=options['show_minor_ticks'][3], length=options['sizes']['minor_ticks'], width=options['sizes']['axes'])

    # DEFINE AND SET TICK DISTANCES

    from . import unit_tables

    # Define default ticks and scale to desired units
    default_ticks = {
        'specific_capacity': [100 * (unit_tables.capacity()['mAh'].loc[options['units']['capacity']] / unit_tables.mass()['g'].loc[options['units']['mass']]), 50 * (unit_tables.capacity()['mAh'].loc[options['units']['capacity']] / unit_tables.mass()['g'].loc[options['units']['mass']])],
        'capacity': [0.1 * (unit_tables.capacity()['mAh'].loc[options['units']['capacity']]), 0.05 * (unit_tables.capacity()['mAh'].loc[options['units']['capacity']])],
        'voltage': [0.5 * (unit_tables.voltage()['V'].loc[options['units']['voltage']]), 0.25 * (unit_tables.voltage()['V'].loc[options['units']['voltage']])],
        'time': [10 * (unit_tables.time()['h'].loc[options['units']['time']]), 5 * (unit_tables.time()['h'].loc[options['units']['time']])]
    }

    if options['positions']['yaxis'] == 'right':
        ax.yaxis.set_label_position("right")
        ax.yaxis.tick_right()

    # Set default tick distances for the x-axis if not specified
    if not options['xticks']:
        major_xtick = default_ticks[options['x_vals']][0]
        minor_xtick = default_ticks[options['x_vals']][1]

    # Otherwise apply user input
    else:
        major_xtick = options['xticks'][0]
        minor_xtick = options['xticks'][1]

    # Set default tick distances for the y-axis if not specified
    if not options['yticks']:
        major_ytick = default_ticks[options['y_vals']][0]
        minor_ytick = default_ticks[options['y_vals']][1]

    # Otherwise apply user input
    else:
        major_ytick = options['yticks'][0]
        minor_ytick = options['yticks'][1]

    # Apply values
    ax.xaxis.set_major_locator(MultipleLocator(major_xtick))
    ax.xaxis.set_minor_locator(MultipleLocator(minor_xtick))

    ax.yaxis.set_major_locator(MultipleLocator(major_ytick))
    ax.yaxis.set_minor_locator(MultipleLocator(minor_ytick))

    # SET FONTSIZE OF TICK LABELS

    plt.xticks(fontsize=options['sizes']['tick_labels'])
    plt.yticks(fontsize=options['sizes']['tick_labels'])

    ##################################################################
    ########################## AXES LIMITS ###########################
    ##################################################################

    if options['xlim']:
        plt.xlim(options['xlim'])

    if options['ylim']:
        plt.ylim(options['ylim'])

    ##################################################################
    ############################# TITLE ##############################
    ##################################################################

    if options['title']:
        ax.set_title(options['title'], size=options['sizes']['title'])

    ##################################################################
    ############################# LEGEND #############################
    ##################################################################

    if ax.get_legend():
        ax.get_legend().remove()

    return fig, ax

def prettify_labels(label):

    labels_dict = {
        'capacity': 'Capacity',
        'specific_capacity': 'Specific capacity',
        'voltage': 'Voltage',
        'current': 'Current',
        'energy': 'Energy',
        'time': 'Time'
    }

    return labels_dict[label]

def generate_colours(options):

    default_options = {
        'gradient_colours': None,
    }

    aux.update_options(options=options, default_options=default_options)

    # Assign colours from the options dictionary if it is defined, otherwise use standard colours.
    if options['colours']:
        charge_colour = options['colours'][0]
        discharge_colour = options['colours'][1]

        if isinstance(charge_colour, tuple):
            charge_colour = list(charge_colour)
        if isinstance(discharge_colour, tuple):
            discharge_colour = list(discharge_colour)

    else:
        # Flat RGB lists, so the gradient arithmetic below operates on the channels
        charge_colour = [40/255, 70/255, 75/255]  # Dark Slate Gray #28464B, coolors.co
        discharge_colour = [239/255, 160/255, 11/255]  # Marigold #EFA00B, coolors.co

    if not options['differentiate_charge_discharge']:
        discharge_colour = charge_colour

    # If gradient is enabled, find start and end points for each colour
    if options['gradient']:

        if not options['gradient_colours']:

            options['gradient_colours'] = [[None, None], [None, None]]

            add_charge = min([(1-x)*0.75 for x in charge_colour])
            add_discharge = min([(1-x)*0.75 for x in discharge_colour])

            options['gradient_colours'][0][0] = charge_colour
            options['gradient_colours'][0][1] = [x+add_charge for x in charge_colour]

            options['gradient_colours'][1][0] = discharge_colour
            options['gradient_colours'][1][1] = [x+add_discharge for x in discharge_colour]

    # Generate lists of colours
    colours = []

    if len(charge_colour) != len(options['which_cycles']):
        if options['gradient']:
            options['number_of_colours'] = len(options['which_cycles'])

            charge_colours = btp.mix_colours(colour1=options['gradient_colours'][0][0], colour2=options['gradient_colours'][0][1], options=options)
            discharge_colours = btp.mix_colours(colour1=options['gradient_colours'][1][0], colour2=options['gradient_colours'][1][1], options=options)

            for chg, dchg in zip(charge_colours, discharge_colours):
                colours.append([chg, dchg])

        else:
            for i in options['which_cycles']:
                colours.append([charge_colour, discharge_colour])

    else:
        for chg, dchg in zip(charge_colour, discharge_colour):
            colours.append([chg, dchg])

    return colours

def generate_markers(options):

    if not options['markers']:
        markers = ['o', 'v']
    else:
        markers = [options['markers'][0], options['markers'][1]]

    return markers

def get_tickmarks(df: pd.DataFrame, ticks: list, value: str, exclude=None):

    min_val = df[value].min()
    max_val = df[value].max()

    # Get major ticks
    major_ticks = [np.round((min_val + ticks[0]*i), 2) for i in range(int(np.floor((max_val-min_val)/ticks[0]))+1)]
    major_ticks.append(np.round(max_val, 2))

    major_ticks = aux.get_unique(major_ticks)

    major_ticklabels = [i*ticks[0] for i in range(len(major_ticks)-1)]
    major_ticklabels.append(np.round((max_val-min_val), 1))

    if exclude:
        # Rebuild the lists instead of deleting during iteration, which would skip elements
        keep = [i for i, tick in enumerate(major_ticklabels) if tick not in exclude]
        major_ticks = [major_ticks[i] for i in keep]
        major_ticklabels = [major_ticklabels[i] for i in keep]

    # Get minor ticks
    minor_ticks = [np.round((min_val + ticks[1]*i), 2) for i in range(int(np.floor((max_val-min_val)/ticks[1]))+1) if np.round((min_val + ticks[1]*i), 2) not in major_ticks]
    minor_ticklabels = [np.round(tick - min_val, 2) for tick in minor_ticks]

    return major_ticks, major_ticklabels, minor_ticks, minor_ticklabels

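# Worked example: for values spanning 0.0-1.0 with ticks=[0.5, 0.25], the major
# ticks land at [0.0, 0.5, 1.0] (labelled 0, 0.5, 1.0) and the minor ticks at
# [0.25, 0.75].
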
def assign_tickmarks(dfs: list, options, fig, ax, exclude=None):

    major_ticks, major_ticklabels, minor_ticks = [], [], []

    if not exclude:
        exclude = [[None, None] for i in range(len(options['which_cycles']))]

    for i, cycle in enumerate(options['which_cycles']):
        # Get ticks from charge cycle
        major_tick, major_ticklabel, minor_tick, minor_ticklabel = ec.plot.get_tickmarks(dfs[cycle][0], ticks=options['x_tick_locators'], value=options['x_vals'], exclude=exclude[i][0])
        major_ticks += major_tick
        major_ticklabels += major_ticklabel
        minor_ticks += minor_tick

        # Get ticks from discharge cycle
        major_tick, major_ticklabel, minor_tick, minor_ticklabel = ec.plot.get_tickmarks(dfs[cycle][1], ticks=[1, 0.25], value='ions', exclude=exclude[i][1])
        major_ticks += major_tick
        major_ticklabels += major_ticklabel
        minor_ticks += minor_tick

    ax.set_xticks(major_ticks, minor=False)
    ax.set_xticklabels(major_ticklabels)
    ax.set_xticks(minor_ticks, minor=True)

    return fig, ax

@ -1,53 +0,0 @@
import pandas as pd


def time():
    # Define matrix for unit conversion for time
    time = {'h': [1, 60, 3600, 3600000], 'min': [1/60, 1, 60, 60000], 's': [1/3600, 1/60, 1, 1000], 'ms': [1/3600000, 1/60000, 1/1000, 1]}
    time = pd.DataFrame(time)
    time.index = ['h', 'min', 's', 'ms']

    return time

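# The tables read "column = old unit, row = new unit": a value in hours is
# converted to minutes by multiplying with time()['h'].loc['min'] == 60.
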

def current():
    # Define matrix for unit conversion for current
    current = {'A': [1, 1000, 1000000], 'mA': [1/1000, 1, 1000], 'uA': [1/1000000, 1/1000, 1]}
    current = pd.DataFrame(current)
    current.index = ['A', 'mA', 'uA']

    return current


def voltage():
    # Define matrix for unit conversion for voltage
    voltage = {'V': [1, 1000, 1000000], 'mV': [1/1000, 1, 1000], 'uV': [1/1000000, 1/1000, 1]}
    voltage = pd.DataFrame(voltage)
    voltage.index = ['V', 'mV', 'uV']

    return voltage


def capacity():
    # Define matrix for unit conversion for capacity
    capacity = {'Ah': [1, 1000, 1000000], 'mAh': [1/1000, 1, 1000], 'uAh': [1/1000000, 1/1000, 1]}
    capacity = pd.DataFrame(capacity)
    capacity.index = ['Ah', 'mAh', 'uAh']

    return capacity


def mass():
    # Define matrix for unit conversion for mass
    mass = {'kg': [1, 1000, 1000000, 1000000000], 'g': [1/1000, 1, 1000, 1000000], 'mg': [1/1000000, 1/1000, 1, 1000], 'ug': [1/1000000000, 1/1000000, 1/1000, 1]}
    mass = pd.DataFrame(mass)
    mass.index = ['kg', 'g', 'mg', 'ug']

    return mass


def energy():
    # Define matrix for unit conversion for energy
    energy = {'kWh': [1, 1000, 1000000], 'Wh': [1/1000, 1, 1000], 'mWh': [1/1000000, 1/1000, 1]}
    energy = pd.DataFrame(energy)
    energy.index = ['kWh', 'Wh', 'mWh']

    return energy

@ -1,520 +0,0 @@
import nafuma.auxillary as aux

import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator)
from mpl_toolkits.axes_grid1.inset_locator import (inset_axes, InsetPosition, BboxPatch, BboxConnector)

from matplotlib.transforms import TransformedBbox
from matplotlib.patches import Rectangle

import importlib
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D
import matplotlib.lines as mlines
import matplotlib.markers as mmarkers
import itertools

from PIL import Image
import os

import numpy as np

def prepare_plot(options={}):
    ''' A general function to prepare a plot based on the contents of options['rc_params'] and options['format_params'].

    rc_params is a dictionary with key-value pairs corresponding to rcParams in matplotlib, to give the user full control over these. Please consult the matplotlib documentation.

    format_params determines the size, aspect ratio, resolution etc. of the figure. Should be modified to conform with any requirements from a journal.'''

    if 'rc_params' in options.keys():
        rc_params = options['rc_params']
    else:
        rc_params = {}

    if 'format_params' in options.keys():
        format_params = options['format_params']
    else:
        format_params = {}

    default_format_params = {
        'single_column_width': 8.3,
        'double_column_width': 17.1,
        'column_type': 'single',
        'width_ratio': '1:1',
        'aspect_ratio': '1:1',
        'width': None,
        'height': None,
        'compress_width': 1,
        'compress_height': 1,
        'upscaling_factor': 1.0,
        'dpi': 600,
        'nrows': 1,
        'ncols': 1,
        'grid_ratio_height': None,
        'grid_ratio_width': None
    }

    format_params = aux.update_options(options=format_params, default_options=default_format_params)

    # Reset run commands
    plt.rcdefaults()

    # Update run commands if any are passed (an empty dictionary is passed if not)
    update_rc_params(rc_params)

    if not format_params['width']:
        format_params['width'] = determine_width(format_params=format_params)

    if not format_params['height']:
        format_params['height'] = determine_height(format_params=format_params, width=format_params['width'])

    format_params['width'], format_params['height'] = scale_figure(format_params=format_params, width=format_params['width'], height=format_params['height'])

    if format_params['nrows'] == 1 and format_params['ncols'] == 1:
        fig, ax = plt.subplots(figsize=(format_params['width'], format_params['height']), dpi=format_params['dpi'])

        return fig, ax

    else:
        if not format_params['grid_ratio_height']:
            format_params['grid_ratio_height'] = [1 for i in range(format_params['nrows'])]

        if not format_params['grid_ratio_width']:
            format_params['grid_ratio_width'] = [1 for i in range(format_params['ncols'])]

        fig, axes = plt.subplots(nrows=format_params['nrows'], ncols=format_params['ncols'], figsize=(format_params['width'], format_params['height']),
                                 gridspec_kw={'height_ratios': format_params['grid_ratio_height'], 'width_ratios': format_params['grid_ratio_width']},
                                 facecolor='w', dpi=format_params['dpi'])

        return fig, axes

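# A minimal sketch: request a double-column figure with a 2:1 aspect ratio at
# 300 dpi (all other format_params keep their defaults).
#
#   fig, ax = prepare_plot({'format_params': {'column_type': 'double',
#                                             'aspect_ratio': '2:1',
#                                             'dpi': 300}})
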
def adjust_plot(fig, ax, options):
    ''' A general function to adjust a plot according to the contents of the options-dictionary '''

    default_options = {
        'plot_kind': None,  # defaults to None, but should be utilised when requiring special formatting for a particular plot
        'xlabel': None, 'ylabel': None,
        'xunit': None, 'yunit': None,
        'xlabel_pad': 4.0, 'ylabel_pad': 4.0,
        'hide_x_labels': False, 'hide_y_labels': False,  # Whether the main labels on the x- and/or y-axes should be hidden
        'hide_x_ticklabels': False, 'hide_y_ticklabels': False,  # Whether ticklabels on the x- and/or y-axes should be hidden
        'hide_x_ticks': False, 'hide_y_ticks': False,  # Whether the ticks on the x- and/or y-axes should be hidden
        'x_tick_locators': None, 'y_tick_locators': None,  # The major and minor tick locators for the x- and y-axes
        'rotation_x_ticks': 0, 'rotation_y_ticks': 0,  # Degrees the x- and/or y-ticklabels should be rotated
        'xticks': None, 'yticks': None,  # Custom definition of the xticks and yticks. This is not properly implemented now.
        'xlim': None, 'ylim': None,  # Limits to the x- and y-axes
        'xlim_reset': False, 'ylim_reset': False,  # For use in setting limits of backgrounds - forces a reset of xlim and ylim, useful when there are more axes
        'title': None,  # Title of the plot
        'backgrounds': [],
        'legend': False, 'legend_position': ['lower center', (0.5, -0.1)], 'legend_ncol': 1,  # Toggles the legend on/off. Specifies the legend position and the number of columns the legend should appear as.
        'subplots_adjust': {'left': None, 'right': None, 'top': None, 'bottom': None, 'wspace': None, 'hspace': None},  # Adjustment of the Axes-object within the Figure-object. Fraction of the Figure-object at which the left, bottom, right and top edges of the Axes-object will start.
        'marker_edges': None,
        'text': None  # Text to show in the plot. Should be a list where the first element is the string, and the second is a tuple with x- and y-coordinates. Can also be a list of such lists to show more strings of text.
    }

    options = aux.update_options(options=options, default_options=default_options)

    # Set labels on x- and y-axes
    if not options['hide_y_labels']:
        if not options['yunit']:
            ax.set_ylabel(f'{options["ylabel"]}', labelpad=options['ylabel_pad'])
        else:
            ax.set_ylabel(f'{options["ylabel"]} [{options["yunit"]}]', labelpad=options['ylabel_pad'])

    else:
        ax.set_ylabel('')

    if not options['hide_x_labels']:
        if not options['xunit']:
            ax.set_xlabel(f'{options["xlabel"]}', labelpad=options['xlabel_pad'])
        else:
            ax.set_xlabel(f'{options["xlabel"]} [{options["xunit"]}]', labelpad=options['xlabel_pad'])
    else:
        ax.set_xlabel('')

    # Set multiple locators
    if options['y_tick_locators']:
        ax.yaxis.set_major_locator(MultipleLocator(options['y_tick_locators'][0]))
        ax.yaxis.set_minor_locator(MultipleLocator(options['y_tick_locators'][1]))

    if options['x_tick_locators']:
        ax.xaxis.set_major_locator(MultipleLocator(options['x_tick_locators'][0]))
        ax.xaxis.set_minor_locator(MultipleLocator(options['x_tick_locators'][1]))

    # FIXME THIS NEEDS REWORK FOR IT TO FUNCTION PROPERLY!
    # if options['xticks']:
    #     ax.set_xticks(np.arange(plot_data['start'], plot_data['end']+1))
    #     ax.set_xticklabels(options['xticks'])
    # else:
    #     ax.set_xticks(np.arange(plot_data['start'], plot_data['end']+1))
    #     ax.set_xticklabels([x/2 for x in np.arange(plot_data['start'], plot_data['end']+1)])

    # Hide x- and y-ticklabels
    if options['hide_y_ticklabels']:
        ax.tick_params(axis='y', direction='in', which='both', labelleft=False, labelright=False)
    else:
        plt.xticks(rotation=options['rotation_x_ticks'])
        # ax.set_xticklabels(ax.get_xticks(), rotation=options['rotation_x_ticks'])

    if options['hide_x_ticklabels']:
        ax.tick_params(axis='x', direction='in', which='both', labelbottom=False, labeltop=False)
    else:
        pass
        # ax.set_yticklabels(ax.get_yticks(), rotation=options['rotation_y_ticks'])

    # Hide x- and y-ticks:
    if options['hide_y_ticks']:
        ax.tick_params(axis='y', direction='in', which='both', left=False, right=False)
    else:
        ax.tick_params(axis='y', direction='in', which='both', left=True, right=True)

    if options['hide_x_ticks']:
        ax.tick_params(axis='x', direction='in', which='both', bottom=False, top=False)
    else:
        ax.tick_params(axis='x', direction='in', which='both', bottom=True, top=True)

    # Set title
    if options['title']:
        ax.set_title(options['title'], fontsize=plt.rcParams['font.size'])

    #### DRAW/REMOVE LEGEND ####
    # Options:
    # 'legend_position': (default ['lower center', (0.5, -0.1)]) - Follows matplotlib's way of specifying legend position
    # 'legend_ncol': (default 1) - Number of columns to write the legend in
    # Also requires options to contain values in colours, markers and labels. (No defaults)

    if ax.get_legend():
        ax.get_legend().remove()

    if options['legend']:
        # Make palette and linestyles from original parameters
        if not options['colours']:
            colours = generate_colours(palettes=options['palettes'])
        else:
            colours = itertools.cycle(options['colours'])

        markers = itertools.cycle(options['markers'])

        # Create legend
        active_markers = []
        active_labels = []

        for label in options['labels']:

            # Discard next linestyle and colour if the label is _
            if label == '_':
                _ = next(colours)
                _ = next(markers)

            else:
                marker = next(markers)
                if not marker:
                    active_markers.append(mlines.Line2D([], [], color=next(colours)))
                else:
                    active_markers.append(mlines.Line2D([], [], markerfacecolor=next(colours), markeredgecolor=options['marker_edges'], markersize=10, color=(1, 1, 1, 0), marker=marker))

                active_labels.append(label)

        ax.legend(active_markers, active_labels, frameon=False, loc=options['legend_position'][0], bbox_to_anchor=options['legend_position'][1], ncol=options['legend_ncol'])
        # fig.legend(handles=patches, loc=options['legend_position'][0], bbox_to_anchor=options['legend_position'][1], frameon=False)

    # Adjust where the axes start within the figure. Default value is 10% in from the left and bottom edges. Used to make room for the plot within the figure size (to avoid using bbox_inches='tight' in the savefig-command, as this screws with plot dimensions)
    plt.subplots_adjust(**options['subplots_adjust'])

    # If limits for the x- and y-axes are passed, set them.
    if options['xlim'] is not None:
        ax.set_xlim(options['xlim'])

    if options['ylim'] is not None:
        ax.set_ylim(options['ylim'])

    #### DRAW BACKGROUNDS ####
    # options['backgrounds'] should contain a dictionary or a list of dictionaries. Options to be specified are listed below.

    if options['backgrounds']:

        if not isinstance(options['backgrounds'], list):
            options['backgrounds'] = [options['backgrounds']]

        for background in options['backgrounds']:
            default_background_options = {
                'colour': (0, 0, 0),
                'alpha': 0.2,
                'xlim': list(ax.get_xlim()),
                'ylim': list(ax.get_ylim()),
                'zorder': 0,
                'edgecolour': None,
                'linewidth': None
            }

            background = aux.update_options(options=background, default_options=default_background_options)

            if options['xlim_reset']:
                background['xlim'] = list(ax.get_xlim())
            if options['ylim_reset']:
                background['ylim'] = list(ax.get_ylim())

            if not background['xlim'][0]:
                background['xlim'][0] = ax.get_xlim()[0]
            if not background['xlim'][1]:
                background['xlim'][1] = ax.get_xlim()[1]
            if not background['ylim'][0]:
                background['ylim'][0] = ax.get_ylim()[0]
            if not background['ylim'][1]:
                background['ylim'][1] = ax.get_ylim()[1]

            ax.add_patch(Rectangle(
                xy=(background['xlim'][0], background['ylim'][0]),  # Anchor point
                width=background['xlim'][1]-background['xlim'][0],  # Width of background
                height=background['ylim'][1]-background['ylim'][0],  # Height of background
                zorder=background['zorder'],  # Placement in stack
                facecolor=(background['colour'][0], background['colour'][1], background['colour'][2], background['alpha']),  # Colour
                edgecolor=background['edgecolour'],  # Edgecolour
                linewidth=background['linewidth'])  # Linewidth
            )

    # Add custom text
    if options['text']:

        # If only a single element, put it into a list so the below for-loop works.
        if isinstance(options['text'][0], str):
            options['text'] = [options['text']]

        # Plot all passed texts
        for text in options['text']:
            ax.text(x=text[1][0], y=text[1][1], s=text[0])

    return fig, ax

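# A minimal sketch: label both axes and shade the region x in [0, 50] behind
# the data (keys as defined in default_options/default_background_options above).
#
#   fig, ax = adjust_plot(fig, ax, {'xlabel': 'Capacity', 'xunit': 'mAh',
#                                   'ylabel': 'Voltage', 'yunit': 'V',
#                                   'backgrounds': {'xlim': [0, 50], 'alpha': 0.1}})
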
def ipywidgets_update(func, data, options={}, **kwargs):
    ''' A general ipywidgets update function that can be passed to ipywidgets.interactive. To use this, you can run:

    import ipywidgets as widgets
    import nafuma.plotting as btp

    w = widgets.interactive(btp.ipywidgets_update, func=widgets.fixed(my_func), data=widgets.fixed(data), options=widgets.fixed(options), key1=widget1, key2=widget2, key3=widget3)

    where key1, key2, key3 etc. are the values in the options-dictionary you want widget control of, and widget1, widget2, widget3 etc. are widgets to control these values, e.g. widgets.IntSlider(value=1, min=0, max=10)
    '''

    # Update the options-dictionary with the values from the widgets
    for key in kwargs:
        options[key] = kwargs[key]

    # Call the function with the data- and options-dictionaries
    func(data=data, options=options)

def determine_width(format_params):
    ''' Determine the figure width in inches from the column type and width ratio '''

    conversion_cm_inch = 0.3937008  # cm to inch

    if format_params['column_type'] == 'single':
        column_width = format_params['single_column_width']
    elif format_params['column_type'] == 'double':
        column_width = format_params['double_column_width']

    column_width *= conversion_cm_inch

    width_ratio = [float(num) for num in format_params['width_ratio'].split(':')]

    width = column_width * width_ratio[0]/width_ratio[1]

    return width


def determine_height(format_params, width):

    aspect_ratio = [float(num) for num in format_params['aspect_ratio'].split(':')]

    height = width/(aspect_ratio[0] / aspect_ratio[1])

    return height


def scale_figure(format_params, width, height):
    width = width * format_params['upscaling_factor'] * format_params['compress_width']
    height = height * format_params['upscaling_factor'] * format_params['compress_height']

    return width, height

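# Worked example: with the defaults (single column of 8.3 cm, width_ratio and
# aspect_ratio both '1:1'), determine_width() gives 8.3 * 0.3937 ≈ 3.27 inches
# and determine_height() returns the same, giving a square figure.
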
def update_rc_params(rc_params):
    ''' Update all passed run commands in matplotlib '''

    if rc_params:
        for key in rc_params.keys():
            plt.rcParams.update({key: rc_params[key]})


def generate_colours(palettes, kind=None):

    if kind == 'single':
        colour_cycle = itertools.cycle(palettes)

    else:
        # Creates a list of all the colours that are passed in the palettes argument, and then makes a cyclic iterable of these.
        colour_collection = []
        for palette in palettes:
            mod = importlib.import_module("palettable.colorbrewer.%s" % palette[0])
            colour = getattr(mod, palette[1]).mpl_colors
            colour_collection = colour_collection + colour

        colour_cycle = itertools.cycle(colour_collection)

    return colour_cycle

def prepare_inset_axes(parent_ax, options):

    default_options = {
        'hide_inset_x_labels': False,  # Whether x labels should be hidden
        'hide_inset_x_ticklabels': False,
        'hide_inset_x_ticks': False,
        'rotation_inset_x_ticks': 0,
        'hide_inset_y_labels': False,  # Whether y labels should be hidden
        'hide_inset_y_ticklabels': False,
        'hide_inset_y_ticks': False,
        'rotation_inset_y_ticks': 0,
        'inset_x_tick_locators': [100, 50],  # Major and minor tick locators
        'inset_y_tick_locators': [10, 5],
        'inset_position': [0.1, 0.1, 0.3, 0.3],
        'inset_bounding_box': [0, 0, 0.1, 0.1],
        'inset_marks': [None, None],
        'legend_position': ['upper center', (0.20, 0.90)],  # The position of the legend, passed as arguments to loc and bbox_to_anchor respectively
        'connecting_corners': [1, 2]
    }

    options = aux.update_options(options=options, required_options=default_options.keys(), default_options=default_options)

    # Create a set of inset Axes: these should fill the bounding box allocated to them.
    inset_ax = plt.axes(options["inset_bounding_box"])
    # Manually set the position and relative size of the inset axes within parent_ax
    ip = InsetPosition(parent_ax, options['inset_position'])
    inset_ax.set_axes_locator(ip)

    if options['connecting_corners'] and len(options["connecting_corners"]) == 2:
        connect_inset(parent_ax, inset_ax, loc1a=options['connecting_corners'][0], loc2a=options['connecting_corners'][1], loc1b=options['connecting_corners'][0], loc2b=options['connecting_corners'][1], fc='none', ec='black')
    elif options['connecting_corners'] and len(options['connecting_corners']) == 4:
        connect_inset(parent_ax, inset_ax, loc1a=options['connecting_corners'][0], loc2a=options['connecting_corners'][1], loc1b=options['connecting_corners'][2], loc2b=options['connecting_corners'][3], fc='none', ec='black', ls='--')

    inset_ax.xaxis.set_major_locator(MultipleLocator(options['inset_x_tick_locators'][0]))
    inset_ax.xaxis.set_minor_locator(MultipleLocator(options['inset_x_tick_locators'][1]))

    inset_ax.yaxis.set_major_locator(MultipleLocator(options['inset_y_tick_locators'][0]))
    inset_ax.yaxis.set_minor_locator(MultipleLocator(options['inset_y_tick_locators'][1]))

    return inset_ax

def connect_inset(parent_axes, inset_axes, loc1a=1, loc1b=1, loc2a=2, loc2b=2, **kwargs):
    rect = TransformedBbox(inset_axes.viewLim, parent_axes.transData)

    pp = BboxPatch(rect, fill=False, **kwargs)
    parent_axes.add_patch(pp)

    p1 = BboxConnector(inset_axes.bbox, rect, loc1=loc1a, loc2=loc1b, **kwargs)
    inset_axes.add_patch(p1)
    p1.set_clip_on(False)
    p2 = BboxConnector(inset_axes.bbox, rect, loc1=loc2a, loc2=loc2b, **kwargs)
    inset_axes.add_patch(p2)
    p2.set_clip_on(False)

    return pp, p1, p2

def make_animation(paths, options={}):

    default_options = {
        'save_folder': '.',
        'save_filename': 'animation.gif',
        'fps': 5
    }

    options = aux.update_options(options=options, default_options=default_options)

    frames = []
    for path in paths:
        frame = Image.open(path)
        frames.append(frame)

    frames[0].save(os.path.join(options['save_folder'], options['save_filename']), format='GIF', append_images=frames[1:], save_all=True, duration=(1/options['fps'])*1000, loop=0)

def mix_colours(colour1, colour2, options):

    default_options = {
        'number_of_colours': 10,
        'weights': None
    }

    options = aux.update_options(options=options, default_options=default_options)

    if not options['weights']:
        options['weights'] = [x/options['number_of_colours'] for x in range(options['number_of_colours'])]

    colours = []
    for weight in options['weights']:
        colour = []

        for c1, c2 in zip(colour1, colour2):
            colour.append(np.round(((1-weight)*c1 + weight*c2), 5))

        colours.append(colour)

    return colours

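# Worked example: mix_colours([0, 0, 0], [1, 1, 1], {'number_of_colours': 4})
# returns [[0.0, 0.0, 0.0], [0.25, 0.25, 0.25], [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]].
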
@ -1 +0,0 @@
from . import io, plot
@ -1,129 +0,0 @@
import pandas as pd
import numpy as np

import nafuma.auxillary as aux

def read_data(path, options={}):
|
|
||||||
|
|
||||||
default_options = {
|
|
||||||
'split': False,
|
|
||||||
}
|
|
||||||
|
|
||||||
options = aux.update_options(options=options, default_options=default_options)
|
|
||||||
|
|
||||||
index = find_start(path)
|
|
||||||
|
|
||||||
df = pd.read_csv(path, skiprows=index+1)
|
|
||||||
|
|
||||||
df = df[['Comment', 'Time Stamp (sec)', 'Temperature (K)', 'Magnetic Field (Oe)',
|
|
||||||
'DC Moment (emu)', 'DC Std. Err. (emu)', 'DC Quad. Moment (emu)',
|
|
||||||
'AC=1 DC=2 Locate=3', 'Max. Field (Oe)', 'Pressure (Torr)', 'Temp. Status (code)',
|
|
||||||
]]
|
|
||||||
|
|
||||||
new_columns = ['Comment', 'Time', 'Temperature', 'Magnetic_Field',
|
|
||||||
'DC_Moment', 'DC_Std_Err', 'DC_Quad_Moment',
|
|
||||||
'Status', 'Max_Field', 'Pressure', 'Temperature_Status']
|
|
||||||
|
|
||||||
df.columns = new_columns
|
|
||||||
|
|
||||||
|
|
||||||
df[['Temperature', 'Magnetic_Field', 'DC_Moment', 'DC_Std_Err', 'DC_Quad_Moment', 'Max_Field', 'Pressure']] = df[['Temperature', 'Magnetic_Field', 'DC_Moment', 'DC_Std_Err', 'DC_Quad_Moment', 'Max_Field', 'Pressure']].astype(float)
|
|
||||||
|
|
||||||
df = df.loc[df['DC_Std_Err'] < 0.001]
|
|
||||||
|
|
||||||
if all([option in options.keys() for option in ['molar_mass', 'sample_mass']]):
|
|
||||||
df = calculate_emu_per_mol_oe(df, options)
|
|
||||||
df = calculate_bohr_magnetons(df, options)
|
|
||||||
df = calculate_chi_inverse(df, options)
|
|
||||||
|
|
||||||
if options['split']:
|
|
||||||
mask = df.loc[df['Comment'].notna()]
|
|
||||||
dfs = []
|
|
||||||
for i in range(1,len(mask.index)):
|
|
||||||
dfs.append(df.iloc[mask.index[i-1]:mask.index[i]])
|
|
||||||
|
|
||||||
return dfs
|
|
||||||
|
|
||||||
return df
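
# A minimal usage sketch, assuming a QD MPMS-style .dat file with a '[Data]'
# header (the filename and masses are hypothetical). Passing molar_mass (g/mol)
# and sample_mass (mg) triggers the derived columns computed below:
#
#     options = {'molar_mass': 180.9, 'sample_mass': 12.3}
#     df = read_data('mvst.dat', options)
#     df[['Temperature', 'DC_Moment_emu_per_mol_oe', 'chi_inverse']].head()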


def read_hysteresis(path):

    index = find_start(path)

    df = pd.read_csv(path, skiprows=index+1)

    df = df[['Comment', 'Time Stamp (sec)', 'Temperature (K)', 'Magnetic Field (Oe)',
             'DC Moment (emu)', 'DC Std. Err. (emu)', 'DC Quad. Moment (emu)',
             'AC=1 DC=2 Locate=3', 'Max. Field (Oe)', 'Pressure (Torr)', 'Temp. Status (code)',
             ]]

    new_columns = ['Comment', 'Time', 'Temperature', 'Magnetic_Field',
                   'DC_Moment', 'DC_Std_Err', 'DC_Quad_Moment',
                   'Status', 'Max_Field', 'Pressure', 'Temperature_Status']

    df.columns = new_columns

    df[['Temperature', 'Magnetic_Field', 'DC_Moment', 'DC_Std_Err', 'DC_Quad_Moment', 'Max_Field', 'Pressure']] = df[['Temperature', 'Magnetic_Field', 'DC_Moment', 'DC_Std_Err', 'DC_Quad_Moment', 'Max_Field', 'Pressure']].astype(float)

    df = df.loc[df['DC_Std_Err'] < 0.001]

    return df


def find_start(path):

    with open(path, 'r') as f:

        i = 0
        line = f.readline()

        while '[Data]' not in line:
            line = f.readline()
            i += 1

            if i > 1000:
                break

    return i


def calculate_emu_per_mol_oe(df, options={}):

    m = options['sample_mass'] / 1000 # convert from mg to g
    n = m / options['molar_mass']

    df['DC_Moment_emu_per_mol'] = df['DC_Moment'] / n
    df['DC_Moment_emu_per_mol_oe'] = df['DC_Moment'] / (n * df['Magnetic_Field'])

    return df


def calculate_bohr_magnetons(df, options={}):

    default_options = {
        'units': 'cgs',
    }

    options = aux.update_options(options=options, default_options=default_options)

    if options['units'] == 'cgs':
        df['bohr_magnetons'] = df['DC_Moment_emu_per_mol'] * 1.07828E20 / 6.023E23 # 1 emu = 1.07828e20 mu_B; dividing by Avogadro's number gives mu_B per formula unit

    return df


def calculate_chi_inverse(df, options={}):

    df['chi_inverse'] = 1 / df['DC_Moment_emu_per_mol']

    return df


@@ -1,9 +0,0 @@
# pytest.ini

[pytest]
minversion = 6.0
testpaths =
    .

filterwarnings =
    ignore::DeprecationWarning


@@ -1,76 +0,0 @@
import nafuma.auxillary as aux
import os

def test_swap_values():

    test_dict = {'test1': 1, 'test2': 2}
    key1 = 'test1'
    key2 = 'test2'

    oldval1 = test_dict[key1]
    oldval2 = test_dict[key2]

    new_dict = aux.swap_values(options=test_dict, key1=key1, key2=key2)

    assert (test_dict[key1] == oldval2) and (test_dict[key2] == oldval1)


def test_ceil() -> None:

    assert aux.ceil(1.05, 0.5) == 1.5
    assert aux.ceil(1.05, 1) == 2.0
    assert aux.ceil(1.1, 0.2) == 1.2


def test_floor() -> None:

    assert aux.floor(2.02, 1) == 2.0
    assert aux.floor(2.02, 0.01) == 2.02
    assert aux.floor(2.013, 0.01) == 2.01


def test_options() -> None:

    options = {}

    default_options = {
        'test1': 1,
        'test2': 2,
        'test3': 3,
        'test4': 4,
        'test5': 5,
    }

    options = aux.update_options(options=options, default_options=default_options)

    assert options['test1'] == default_options['test1']


def test_save_options() -> None:

    options = {'test1': 1, 'test2': 2}
    path = 'tmp.dat'

    aux.save_options(options, path)

    assert os.path.isfile(path)

    os.remove(path)


def test_load_options() -> None:

    options = {'test1': 1, 'test2': 2}
    path = 'tmp.dat'

    aux.save_options(options, path)

    loaded_options = aux.load_options(path)

    assert (loaded_options['test1'] == 1) and (loaded_options['test2'] == 2)

    os.remove(path)


@@ -1,181 +0,0 @@
import nafuma.plotting as btp
from cycler import cycler
import itertools
import numpy as np

import matplotlib.pyplot as plt
import matplotlib as mpl


def test_generate_colours() -> None:

    assert type(btp.generate_colours('black', kind='single')) == itertools.cycle

    palettes = [('qualitative', 'Dark2_8')]
    colour_cycle = btp.generate_colours(palettes)

    assert type(colour_cycle) == itertools.cycle

    # Test that it actually loaded 8 colours when given a set of 8 colours to cycle through
    for i in range(10):
        colour = next(colour_cycle)
        if i == 0:
            first_colour = colour

        if colour == first_colour:
            repeat_colour_index = i

    assert repeat_colour_index == 8


def test_update_rc_params() -> None:

    rc_params = {
        'lines.linewidth': 100
    }

    prev_params = plt.rcParams['lines.linewidth']

    # Update run commands if any is passed (will pass an empty dictionary if not passed)
    btp.update_rc_params(rc_params)

    new_params = plt.rcParams['lines.linewidth']

    assert new_params == 100
    assert prev_params != new_params

    # Reset run commands
    plt.rcdefaults()


def test_scale_figure() -> None:

    width, height = 1, 1

    format_params = {
        'upscaling_factor': 2,
        'compress_width': 1,
        'compress_height': 1
    }

    width1, height1 = btp.scale_figure(format_params=format_params, width=width, height=height)

    assert width1 == 2 and height1 == 2

    format_params = {
        'upscaling_factor': 1,
        'compress_width': 0.5,
        'compress_height': 1
    }

    width2, height2 = btp.scale_figure(format_params=format_params, width=width, height=height)

    assert width2 == 0.5 and height2 == 1

    format_params = {
        'upscaling_factor': 2,
        'compress_width': 0.5,
        'compress_height': 0.2
    }

    width2, height2 = btp.scale_figure(format_params=format_params, width=width, height=height)

    assert width2 == 1 and height2 == 0.4


def test_determine_width() -> None:

    conversion_cm_inch = 0.3937008 # cm to inch

    format_params = {
        'column_type': 'single',
        'single_column_width': 5,
        'double_column_width': 10,
        'width_ratio': '1:1'
    }

    assert np.round(btp.determine_width(format_params), 6) == np.round(5*conversion_cm_inch, 6)

    format_params['column_type'] = 'double'

    assert np.round(btp.determine_width(format_params), 6) == np.round(10*conversion_cm_inch, 6)

    format_params['column_type'] = 'single'
    format_params['width_ratio'] = '1:2'

    assert np.round(btp.determine_width(format_params), 6) == np.round(2.5*conversion_cm_inch, 6)


def test_determine_height() -> None:

    width = 1

    format_params = {
        'aspect_ratio': '1:1'
    }

    assert btp.determine_height(format_params=format_params, width=width) == 1

    format_params['aspect_ratio'] = '3:1'

    assert abs(btp.determine_height(format_params=format_params, width=width) - 0.333333333333333) < 1e-6


def test_prepare_plot() -> None:

    fig, ax = btp.prepare_plot()

    assert type(fig) == plt.Figure
    assert fig.get_dpi() == 600
    assert ax.get_xlim() == (0.0, 1.0)


def test_adjust_plot() -> None:

    fig, ax = btp.prepare_plot()

    options = {
        'xlim': (0.0, 2.0),
        'title': 'Test'
    }

    fig, ax = btp.adjust_plot(fig, ax, options)

    assert ax.get_xlim() == (0.0, 2.0)
    assert ax.get_title() == 'Test'


def test_ipywidgets_update() -> None:

    def test_func(data, options):
        test1 = options['test1']
        test2 = options['test2']

        assert type(data) == dict
        assert test1 == 1
        assert test2 == 2

    data = {}
    options = {}

    btp.ipywidgets_update(func=test_func, data=data, options=options, test1=1, test2=2)


@@ -1 +0,0 @@
from . import io, calib, plot, edges
File diff suppressed because it is too large


@@ -1,30 +0,0 @@
import pandas as pd
import numpy as np
from scipy.constants import c, h

# From 2019 redefinition of SI base units: https://en.wikipedia.org/wiki/2019_redefinition_of_the_SI_base_units
keV_per_J = (1 / 1.602176634e-19) / 1000

# kXu values taken from International Tables for Crystallography Volume , Kluwer Academic Publishers - Dordrecht / Boston / London (1992)
K = { 'Z': [ 1, 2,
             3, 4, 5, 6, 7, 8, 9, 10,
             11, 12, 13, 14, 15, 16, 17, 18,
             19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
             37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48],
      'Atom': [ 'H', 'He',
                'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne',
                'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar',
                'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr',
                'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd'],
      'kXu': [ np.nan, np.nan,
               226.5, np.nan, np.nan, 43.68, 30.99, 23.32, np.nan, np.nan,
               np.nan, 9.5117, 7.9511, 6.7446, 5.7866, 5.0182, 4.3969, 3.8707,
               3.43645, 3.07016, 2.7573, 2.49730, 2.26902, 2.07012, 1.89636, 1.74334, 1.60811, 1.48802, 1.38043, 1.2833, 1.19567, 1.11652, 1.04497, 0.97978, 0.91995, 0.86547,
               0.81549, 0.76969, 0.72762, 0.68877, 0.65291, 0.61977, 0.5891, 0.56047, 0.53378, 0.50915, 0.48582, 0.46409]}

K = pd.DataFrame(K)
K['keV'] = np.round(h*c/(K['kXu']*10**-10) * keV_per_J, 3)

# FIXME If needed, add energies for L-edges as well.
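
# Sanity check of the conversion above, E = hc/lambda: a hypothetical wavelength of
# 1.5406 Angstrom (Cu K-alpha_1) should give ~8.05 keV, matching E[keV] = 12.398/lambda[Angstrom].
# Note that the table holds kXu, which the conversion treats as Angstrom (1 kXu is
# approximately 1.002 Angstrom, so the resulting energies are approximate):
#
#     wl = 1.5406e-10                # m
#     E_keV = h*c/wl * keV_per_J     # ~8.048 keV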


@@ -1,523 +0,0 @@
import pandas as pd
import matplotlib.pyplot as plt
import os
import numpy as np
import nafuma.auxillary as aux
from nafuma.xanes.calib import find_element
import datetime

def split_scan_data(data: dict, options={}) -> list:
    ''' Splits a XANES-file from BM31 into different files depending on the edge. Has the option to add intensities of all scans of the same edge into the same file.
    As of now only picks out xmap_rois (fluorescence mode) and the Mn, Fe, Co and Ni K-edges.'''

    required_options = ['log', 'logfile', 'save', 'save_folder', 'replace', 'active_roi', 'add_rois', 'return', 'skip_if_no_roi']

    default_options = {
        'log': False,
        'logfile': f'{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}_split_edges.log',
        'save': False, # whether to save the files or not
        'save_folder': '.', # root folder of where to save the files
        'replace': False, # whether to replace the files if they already exist
        'active_roi': None,
        'add_rois': False, # whether to add the rois of individual scans of the same edge together
        'return': True,
        'skip_if_no_roi': True
    }

    options = aux.update_options(options=options, required_options=required_options, default_options=default_options)

    if not isinstance(data['path'], list):
        data['path'] = [data['path']]

    all_scans = []

    if options['log']:
        aux.write_log(message='Starting file splitting...', options=options)

    for filename in data['path']:

        if options['log']:
            aux.write_log(message=f'Reading {filename}...', options=options)

        with open(filename, 'r') as f:
            lines = f.readlines()

        timestamps = []
        scan_datas, scan_data = [], []
        headers, header = [], ''
        read_data = False

        for i, line in enumerate(lines):
            if 'zapline mono' in line:
                timestamps.append(lines[i+1].strip('#D'))

            # Header line starts with #L - reads headers, and toggles data read-in on
            elif line[0:2] == "#L":
                header, read_data = line[2:].split(), True

                if options['log']:
                    aux.write_log(message='... Found scan data. Starting read-in...', options=options)
                continue

            # First line after data starts with #C - stops data read-in
            elif line[0:2] == "#C" or line[0:2] == '#S':
                read_data = False

                if scan_data:
                    scan_datas.append(scan_data); scan_data = []

                if header:
                    headers.append(header); header = ''

            # Ignore line if read-in not toggled
            if read_data == False:
                continue

            # Read in data if it is
            else:
                scan_data.append(line.split())

        edges = {'Mn': [], 'Fe': [], 'Co': [], 'Ni': []}

        for i, scan_data in enumerate(scan_datas):

            if 'ZapEnergy' not in headers[i]:
                if options['log']:
                    aux.write_log(message=f'... No valid scan data found... ({i+1}/{len(scan_datas)})', options=options)
                continue

            xanes_df = pd.DataFrame(scan_data).apply(pd.to_numeric)
            xanes_df.columns = headers[i]

            edge = find_element({'xanes_data_original': xanes_df})

            if options['log']:
                aux.write_log(message=f'... Starting data clean-up ({edge}-edge)... ({i+1}/{len(scan_datas)})', options=options)

            if not ('xmap_roi00' in headers[i]) and (not 'xmap_roi01' in headers[i]):
                if options['skip_if_no_roi']:
                    if options['log']:
                        aux.write_log(message='... ... Did not find fluorescence data. Skipping...', options=options)
                    continue
                if options['log']:
                    aux.write_log(message='... ... Did not find fluorescence data, but still proceeding ...', options=options)

            edges[edge].append(xanes_df)

        if options['add_rois']:

            if options['log']:
                aux.write_log(message='... Addition of rois enabled. Starting addition...', options=options)

            added_edges = {'Mn': [], 'Fe': [], 'Co': [], 'Ni': []}
            for edge, scans in edges.items():

                if options['log']:
                    aux.write_log(message=f'... ... Adding rois of the {edge}-edge...', options=options)

                if scans:
                    xanes_df = scans[0]

                    for i, scan in enumerate(scans):
                        if i > 0:

                            if options['log']:
                                aux.write_log(message=f'... ... ... Adding {i+1}/{len(scans)}', options=options)

                            if 'xmap_roi00' in xanes_df.columns:
                                xanes_df['xmap_roi00'] += scan['xmap_roi00']
                            if 'xmap_roi01' in xanes_df.columns:
                                xanes_df['xmap_roi01'] += scan['xmap_roi01']

                    added_edges[edge].append(xanes_df)

            edges = added_edges

        if options['save']:
            # FIXME If there is something wrong with the input file, the file will not be saved, but the log-file still says it was. It goes from "Saving data to ..." to "All done!" whether the save fails or not.
            if options['log']:
                aux.write_log(message=f'... Saving data to {options["save_folder"]}', options=options)

            if not os.path.isdir(options['save_folder']):
                if options['log']:
                    aux.write_log(message=f'... ... {options["save_folder"]} does not exist. Creating folder.', options=options)

                os.makedirs(options['save_folder'])

            filename = os.path.basename(filename).split('.')[0]

            for edge, scans in edges.items():
                for i, scan in enumerate(scans):
                    count = '' if options['add_rois'] else '_'+str(i).zfill(4)
                    path = os.path.join(options['save_folder'], f'{filename}_{edge}{count}.dat')

                    if not os.path.isfile(path):

                        with open(path, 'w', newline='\n') as f:

                            f.write(f'# Time: {timestamps[i]}')
                            scan.to_csv(f)

                        if options['log']:
                            aux.write_log(message=f'... ... Scan saved to {path}', options=options)

                    elif options['replace'] and os.path.isfile(path):
                        with open(path, 'w', newline='\n') as f:
                            scan.to_csv(f)

                        if options['log']:
                            aux.write_log(message=f'... ... File already exists. Overwriting to {path}', options=options)

                    elif not options['replace'] and os.path.isfile(path):
                        if options['log']:
                            aux.write_log(message='... ... File already exists. Skipping...', options=options)

        all_scans.append(edges)

    if options['log']:
        aux.write_log(message='All done!', options=options)

    if options['return']:
        return all_scans
    else:
        return
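
# A minimal usage sketch (the file path is hypothetical):
#
#     data = {'path': ['bm31_scans.dat']}
#     options = {'save': True, 'save_folder': './split', 'add_rois': True, 'log': True}
#     all_scans = split_scan_data(data, options)
#     # all_scans[0] maps edge labels to lists of DataFrames, e.g. all_scans[0]['Fe']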


def save_data(data: dict, options={}) -> None:

    required_options = ['save_folder', 'overwrite', 'log', 'logfile', 'filename']

    default_options = {
        'log': False,
        'logfile': f'{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}_save_files.log',
        'save_folder': 'saved_scans',
        'overwrite': False,
        'filename': f'{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}_exported_data.dat',
    }

    options = aux.update_options(options=options, required_options=required_options, default_options=default_options)

    # Check if there is any data to be saved
    if not 'xanes_data' in data.keys():
        if options['log']:
            aux.write_log(message='There is no scan data in data. Exiting without saving...', options=options)

        return None

    if not isinstance(data['xanes_data'], pd.DataFrame):
        if options['log']:
            aux.write_log(message='data["xanes_data"] has an invalid format. Exiting without saving...', options=options)

        return None

    # Make the destination folder if it does not exist
    if not os.path.exists(options['save_folder']):
        if options['log']:
            aux.write_log(message='Destination folder does not exist. Creating folder...', options=options)

        os.makedirs(options['save_folder'])

    if os.path.exists(os.path.join(options['save_folder'], options['filename'])): # was os.path.join('save_folder', ...), which tested the wrong path
        if not options['overwrite']:
            if options['log']:
                aux.write_log(message='File already exists and overwrite disabled. Exiting without saving...', options=options)
            return None

    with open(os.path.join(options['save_folder'], options['filename']), 'w') as f:

        if 'e0_diff' in data.keys():
            f.write(f'# Number of header lines: {len(data["path"])+1} \n')

            for i, (path, e0) in enumerate(data['e0_diff'].items()):
                f.write(f'# Scan_{i} \t {e0} \n')

        else:
            f.write(f'# Number of header lines: {1}')

        data['xanes_data'].to_csv(f, sep='\t', index=False)


def load_data(path: str) -> dict:
    # FIXME Let this function be called by read_data() if some criterion is passed

    data = {}

    with open(path, 'r') as f:
        line = f.readline()
        header_lines = int(line.split()[-1])

        if header_lines > 1:
            edge_positions = []
            line = f.readline()
            while line[0] == '#':
                edge_positions.append(line.split()[-1])
                line = f.readline()

    data['xanes_data'] = pd.read_csv(path, sep='\t', skiprows=header_lines)
    data['path'] = data['xanes_data'].columns.to_list()
    data['path'].remove('ZapEnergy')

    if header_lines > 1:
        data['e0_diff'] = {}

        for path, edge_position in zip(data['path'], edge_positions):
            data['e0_diff'][path] = float(edge_position)

    return data
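
# A minimal usage sketch, assuming a file previously written by save_data()
# (the filename is hypothetical):
#
#     data = load_data('exported_data.dat')
#     data['xanes_data'].head()    # ZapEnergy plus one column per scan
#     data.get('e0_diff')          # per-scan edge positions, if the header carried them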


def read_data(data: dict, options={}) -> pd.DataFrame:

    # FIXME Handle the case when dataseries are not the same size
    # FIXME Add possibility to extract TIME (for operando runs) and Blower Temp (for variable temperature runs)
    # FIXME Add possibility to import transmission data
    required_options = ['adjust', 'mode']
    default_options = {
        'adjust': 0,
        'mode': 'fluoresence',
        'active_roi': None
    }

    options = aux.update_options(options=options, required_options=required_options, default_options=default_options)

    columns = ['ZapEnergy']

    if not isinstance(data['path'], list):
        data['path'] = [data['path']]

    # Initialise DataFrame with only the ZapEnergy-column
    xanes_data = pd.read_csv(data['path'][0], skiprows=1)[['ZapEnergy']]
    xanes_data['ZapEnergy'] += options['adjust']

    for filename in data['path']:
        columns.append(filename)

        scan_data = pd.read_csv(filename, skiprows=1)

        if options['mode'] == 'fluoresence': # (sic - this spelling is what the option expects)
            if not options['active_roi']:
                scan_data = scan_data[[determine_active_roi(scan_data)]]
            else:
                scan_data = scan_data[options['active_roi']]

        elif options['mode'] == 'transmission':
            scan_data = scan_data['MonEx'] / scan_data['Ion1']

        xanes_data = pd.concat([xanes_data, scan_data], axis=1)

    xanes_data.columns = columns

    return xanes_data


def read_metadata(data: dict, options={}) -> dict:

    required_options = ['get_temperature', 'get_timestamp', 'adjust_time', 'convert_time', 'time_unit', 'reference_time']

    default_options = {
        'get_temperature': True,
        'get_timestamp': True,
        'adjust_time': False,
        'convert_time': False,
        'reference_time': None,
        'time_unit': 's'
    }

    options = aux.update_options(options=options, required_options=required_options, default_options=default_options)

    temperatures = []
    timestamps = []

    for filename in data['path']:
        scan_data = pd.read_csv(filename, skiprows=1)

        if options['get_temperature']:
            temperatures.append(scan_data['ZBlower2'].mean())

        if options['get_timestamp']:

            with open(filename, 'r') as f:
                # str.strip('# Time: ') strips any of those characters from both ends, so split on the label instead
                time = f.readline().split('# Time: ')[-1]
                split_operator = time[-9] # the character separating hours, minutes and seconds ("." or ":")
                if split_operator == ".":
                    time = datetime.datetime.strptime(time, "%a %b %d %H.%M.%S %Y ")
                if split_operator == ":":
                    time = datetime.datetime.strptime(time, "%a %b %d %H:%M:%S %Y ")

            if options['adjust_time']:
                time_elapsed = scan_data['Htime'].iloc[-1] - scan_data['Htime'].iloc[0]

                time += datetime.timedelta(microseconds=time_elapsed)/2

            timestamps.append(time)

    if options['reference_time'] and options['convert_time']:
        from . import unit_tables
        new_times = []

        if isinstance(options['reference_time'], str):
            options['reference_time'] = datetime.datetime.strptime(options['reference_time'], "%d.%b %y %H.%M.%S")

        for time in timestamps:
            new_time = (time.timestamp() - options['reference_time'].timestamp()) * unit_tables.time()['s'].loc[options['time_unit']]

            new_times.append(new_time)

        timestamps = new_times

    metadata = {'time': timestamps, 'temperature': temperatures}

    # Match timestamps against electrochemistry-data
    # TODO This could be generalised to match up against any other dataset with timestamps.
    if 'cycles' in data.keys():
        ions, specific_capacity = [], []
        i = 0
        for timestamp in timestamps:
            if timestamp < 0:
                ions.append(0)

            else:
                closest_chg = aux.find_neighbours(value=timestamp, df=data['cycles'][i][0], colname='time')
                closest_dchg = aux.find_neighbours(value=timestamp, df=data['cycles'][i][1], colname='time')

                if not isinstance(closest_chg, list):
                    closest_chg = [closest_chg, closest_chg]
                if not isinstance(closest_dchg, list):
                    closest_dchg = [closest_dchg, closest_dchg]

                if all([x == x for x in closest_chg]):
                    ions.append(np.mean([data['cycles'][i][0]['ions'].loc[data['cycles'][i][0].index == closest_chg[0]], data['cycles'][i][0]['ions'].loc[data['cycles'][i][0].index == closest_chg[1]]]))
                    specific_capacity.append(np.mean([data['cycles'][i][0]['specific_capacity'].loc[data['cycles'][i][0].index == closest_chg[0]], data['cycles'][i][0]['specific_capacity'].loc[data['cycles'][i][0].index == closest_chg[1]]]))
                    continue

                elif all([x == x for x in closest_dchg]):
                    ions.append(np.mean([data['cycles'][i][1]['ions'].loc[data['cycles'][i][1].index == closest_dchg[0]], data['cycles'][i][1]['ions'].loc[data['cycles'][i][1].index == closest_dchg[1]]]))
                    specific_capacity.append(np.mean([data['cycles'][i][1]['specific_capacity'].loc[data['cycles'][i][1].index == closest_dchg[0]], data['cycles'][i][1]['specific_capacity'].loc[data['cycles'][i][1].index == closest_dchg[1]]]))
                    continue

                elif aux.isnan(closest_chg[1]) and aux.isnan(closest_dchg[0]):
                    ions.append(np.nan)
                    specific_capacity.append(np.nan)
                    continue
                else:
                    ions.append(np.nan)
                    specific_capacity.append(np.nan)
                    i += 1

                    if i > len(data['cycles'])-1:
                        break

        for i, (ion, cap) in enumerate(zip(ions, specific_capacity)):
            if aux.isnan(ion): # if a resting step, assign a meaningful value
                if i < len(ions)-1: # if a resting step in the middle of the run, take the mean of the neighbouring values
                    ions[i] = np.mean([ions[i-1], ions[i+1]])

                else: # if the last element, extrapolate from the delta between the two previous values
                    ions[i] = ions[i-1] + (ions[i-1]-ions[i-2])

            if aux.isnan(cap): # do the same for specific capacity (the original also required i < len-1 here, which made the else-branch unreachable)
                if i < len(specific_capacity)-1:
                    specific_capacity[i] = np.mean([specific_capacity[i-1], specific_capacity[i+1]])

                else:
                    specific_capacity[i] = specific_capacity[i-1] + (specific_capacity[i-1]-specific_capacity[i-2])

        metadata['ions'] = ions
        metadata['specific_capacity'] = specific_capacity

    return metadata
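
# A minimal usage sketch (filenames and reference time are hypothetical). With
# convert_time enabled the timestamps come back as offsets from reference_time in
# time_unit; note that reference_time strings must use the "%d.%b %y %H.%M.%S" format:
#
#     data = {'path': ['scan_Fe_0000.dat', 'scan_Fe_0001.dat']}
#     options = {'convert_time': True, 'reference_time': '01.Jan 22 00.00.00', 'time_unit': 'h'}
#     metadata = read_metadata(data, options)
#     # metadata['time'] -> hours since reference; metadata['temperature'] -> mean blower temperature per scan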


def determine_active_roi(scan_data):

    # FIXME For the Co-edge, this gave a wrong scan

    # An earlier attempt picked the roi with the largest difference between maximum and minimum intensity --> biggest edge shift:
    # if max(scan_data["xmap_roi00"])-min(scan_data["xmap_roi00"])>max(scan_data["xmap_roi01"])-min(scan_data["xmap_roi01"]):
    #     active_roi = 'xmap_roi00'
    # else:
    #     active_roi = 'xmap_roi01'

    active_roi = None # default, so the function never returns an unbound name when neither roi column is present

    if not ('xmap_roi00' in scan_data.columns) or not ('xmap_roi01' in scan_data.columns):
        if 'xmap_roi00' in scan_data.columns:
            active_roi = 'xmap_roi00'
        elif 'xmap_roi01' in scan_data.columns:
            active_roi = 'xmap_roi01'

    elif (scan_data['xmap_roi00'].iloc[0:100].mean() < scan_data['xmap_roi00'].iloc[-100:].mean()) and (scan_data['xmap_roi01'].iloc[0:100].mean() < scan_data['xmap_roi01'].iloc[-100:].mean()):
        if (scan_data['xmap_roi00'].iloc[:int(scan_data.shape[0]/2)].max() - scan_data['xmap_roi00'].iloc[0])/scan_data['xmap_roi00'].max() > (scan_data['xmap_roi01'].iloc[:int(scan_data.shape[0]/2)].max() - scan_data['xmap_roi01'].iloc[0])/scan_data['xmap_roi01'].max():
            active_roi = 'xmap_roi00'
        else:
            active_roi = 'xmap_roi01'

    elif scan_data['xmap_roi00'].iloc[0:100].mean() < scan_data['xmap_roi00'].iloc[-100:].mean():
        active_roi = 'xmap_roi00'

    elif scan_data['xmap_roi01'].iloc[0:100].mean() < scan_data['xmap_roi01'].iloc[-100:].mean():
        active_roi = 'xmap_roi01'

    return active_roi


def write_data(data: dict, options={}):

    default_options = {
        'save_filenames': None,
        'save_dir': '.',
    }

    options = aux.update_options(options=options, default_options=default_options, required_options=default_options.keys())

    if not options['save_filenames']:
        options['save_filenames'] = [os.path.basename(col).split('.')[0]+'_exported.dat' for col in data['xanes_data'].columns if 'ZapEnergy' not in col]

    print(options['save_filenames'])


@@ -1,182 +0,0 @@
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator)

import pandas as pd
import numpy as np
import math
import datetime

#import ipywidgets as widgets
#from IPython.display import display

import nafuma.xanes as xas
import nafuma.plotting as btp
import nafuma.auxillary as aux


def plot_xanes(data, options={}):

    # Update options
    default_options = {
        'which_scans': 'all', # Use real numbers, not indices - update_scans_list() will adjust.
        'highlight': [],
        'xlabel': 'Energy', 'ylabel': 'Intensity',
        'xunit': 'keV', 'yunit': 'arb. u.',
        'exclude_scans': [],
        'colours': None,
        'gradient': False,
        'rc_params': {},
        'format_params': {}}

    options = aux.update_options(options=options, default_options=default_options)

    if not 'xanes_data' in data.keys():
        data['xanes_data'] = xas.io.load_data(data=data, options=options)

    # Update list of scans to correct indices
    update_scans_list(data=data, options=options)

    colours = generate_colours(scans=options['which_scans'], options=options)

    # Prepare plot, and read and process data
    fig, ax = btp.prepare_plot(options=options)

    # Counter to pick out the correct colour
    counter = 0
    for i, path in enumerate(data['path']):
        if i in options['which_scans']:
            lw = plt.rcParams['lines.linewidth']*5 if i in options['highlight'] else plt.rcParams['lines.linewidth']

            data['xanes_data'].plot(x='ZapEnergy', y=path, ax=ax, c=colours[counter], lw=lw)
            counter += 1

    fig, ax = btp.adjust_plot(fig=fig, ax=ax, options=options)

    return fig, ax


def pick_out_scans(metadata: dict, timestamp: list):

    # If either start or end is None, set it to the distant past or the distant future.
    # The separator between hours, minutes and seconds can be either "." or ":", so read it from the string itself.
    if not timestamp[0]:
        timestamp[0] = datetime.datetime.strptime('1970 01 01 00:00:00', '%Y %m %d %H:%M:%S')
    else:
        split_operator = timestamp[0][-3] # the original read this before the None-check above, which fails when timestamp[0] is None
        if split_operator == ".":
            timestamp[0] = datetime.datetime.strptime(timestamp[0], "%d.%b %y %H.%M.%S")
        if split_operator == ":":
            timestamp[0] = datetime.datetime.strptime(timestamp[0], "%d.%b %y %H:%M:%S")

    if not timestamp[1]:
        timestamp[1] = datetime.datetime.strptime('3000 01 01 00:00:00', '%Y %m %d %H:%M:%S')
    else:
        split_operator = timestamp[1][-3]
        if split_operator == ".":
            timestamp[1] = datetime.datetime.strptime(timestamp[1], "%d.%b %y %H.%M.%S")
        if split_operator == ":":
            timestamp[1] = datetime.datetime.strptime(timestamp[1], "%d.%b %y %H:%M:%S")

    scans = []
    for i, time in enumerate(metadata['time']):
        if time >= timestamp[0] and time <= timestamp[1]:
            scans.append(i)

    return scans
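
# A minimal usage sketch, assuming metadata['time'] still holds datetime objects
# (i.e. read_metadata was run without convert_time):
#
#     window = ['01.Jan 22 10:00:00', '01.Jan 22 12:00:00']
#     scans = pick_out_scans(metadata, window)   # indices of scans inside the window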


def update_scans_list(data, options: dict) -> None:

    if options['which_scans'] == 'all':
        options['which_scans'] = [i for i in range(len(data['path']))]

    elif isinstance(options['which_scans'], list):

        scans = []

        for scan in options['which_scans']:
            if isinstance(scan, int):
                scans.append(scan-1)

            elif isinstance(scan, tuple):
                interval = [i-1 for i in range(scan[0], scan[1]+1)]
                scans.extend(interval)

        options['which_scans'] = scans

    # A tuple is used to define an interval - as tuple elements can't be assigned, it is converted to a list here.
    elif isinstance(options['which_scans'], tuple):
        which_scans = list(options['which_scans'])

        if which_scans[0] <= 0:
            which_scans[0] = 1

        elif which_scans[1] < 0:
            which_scans[1] = len(data['path']) # was len(options['which_scans']), i.e. the length of the 2-tuple, which looks unintended - the number of scans seems to be meant

        options['which_scans'] = [i-1 for i in range(which_scans[0], which_scans[1]+1)]

    # Filter out excluded scans (the original deleted from the list while iterating over it, which skips elements)
    options['which_scans'] = [scan for scan in options['which_scans'] if scan not in options['exclude_scans']]


def generate_colours(scans, options):
    # FIXME Make this a generalised function and use it both here and in the electrochemistry submodule

    # Assign colours from the options dictionary if defined, otherwise use standard colours.
    if options['colours']:
        colour = options['colours']

    else:
        #colour = (214/255, 143/255, 214/255) # Plum Web (#D68FD6), coolors.co
        colour = (90/255, 42/255, 39/255) # Caput Mortuum (#5A2A27), coolors.co

    # If gradient is enabled, find start and end points for each colour
    if options['gradient']:

        if isinstance(colour, list) and len(colour) == 2:
            options['number_of_colours'] = len(scans)
            colours = btp.mix_colours(colour1=colour[0], colour2=colour[1], options=options)

        else:
            add = min([(1-x)*0.75 for x in colour])

            colour_start = colour
            colour_end = [x+add for x in colour]

    # Generate lists of colours
    if not isinstance(colour, list):
        colours = []
        for scan_number in range(0, len(scans)):

            if options['gradient']:
                weight_start = (len(scans) - scan_number)/len(scans)
                weight_end = scan_number/len(scans)

                colour = [weight_start*start_colour + weight_end*end_colour for start_colour, end_colour in zip(colour_start, colour_end)]

            colours.append(colour)

    return colours


@@ -1,11 +0,0 @@
import pandas as pd

def time():
    # Conversion matrix for time units: time()[from_unit].loc[to_unit] is the factor to multiply by
    time = {'h': [1, 60, 3600, 3600000], 'min': [1/60, 1, 60, 60000], 's': [1/3600, 1/60, 1, 1000], 'ms': [1/3600000, 1/60000, 1/1000, 1]}
    time = pd.DataFrame(time)
    time.index = ['h', 'min', 's', 'ms']

    return time
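
# Usage sketch: time()[from_unit].loc[to_unit] gives the multiplicative
# conversion factor, e.g.:
#
#     factors = time()
#     90 * factors['min'].loc['h']    # 1.5 (90 minutes in hours)
#     2 * factors['h'].loc['s']       # 7200.0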


@@ -1 +0,0 @@
from . import io, plot, refinement


939
nafuma/xrd/io.py

@@ -1,939 +0,0 @@
from sympy import re # NOTE: looks like an accidental auto-import; `re` is not used below
import fabio, pyFAI
import pandas as pd
import numpy as np
import os
import shutil
import sys
import datetime

import zipfile
import xml.etree.ElementTree as ET


import nafuma.auxillary as aux


def get_image_array(path):

    beamline_extension = ['.edf', '.cbf', '.mar3450']

    if path.endswith(tuple(beamline_extension)):
        image = fabio.open(path)
        image_array = image.data

    elif path.endswith('.dat'):
        image_array = np.loadtxt(path, skiprows=1, delimiter=';')

    return image_array


def get_image_headers(path):

    image = fabio.open(path)

    return image.header


def integrate_scans(data: dict, options={}):

    default_options = {
        'extension': '.dat',
        'save': True,
        'integration_save_folder': './integrated/',
        'filename_base': 'integrated',
    }

    options = aux.update_options(options=options, required_options=default_options.keys(), default_options=default_options)

    if not isinstance(data['path'], list):
        imgs = aux.get_filenames(data['path'], ext=options['extension'])
    else:
        imgs = data['path'] # the original left imgs undefined when a list was passed

    diffractograms, wavelengths = [], []
    for i, img in enumerate(imgs):
        data['image'] = get_image_array(img)

        options['integration_save_filename'] = options['filename_base'] + '_' + f'{i}'.zfill(4) + '.xy'

        diff, wl = integrate_1d(data=data, options=options)

        diffractograms.append(diff)
        wavelengths.append(wl)

    return diffractograms, wavelengths


def integrate_1d(data, options={}, index=0):
    ''' Integrates an image file to a 1D diffractogram.

    Required content of data:
        calibrant (str): path to .poni-file
        nbins (int): number of bins to divide image into
        path (str) (optional, dependent on image): path to image file - either this or image must be specified. If both are passed, image is prioritised
        image (NumPy 2D Array) (optional, dependent on path): image array as extracted from get_image_array

    Output:
        df: DataFrame containing 1D diffractogram if option 'return' is True
    '''

    required_options = ['unit', 'npt', 'save', 'integration_save_filename', 'save_extension', 'integration_save_folder', 'overwrite', 'extract_folder', 'error_model']

    default_options = {
        'unit': '2th_deg',
        'npt': 5000,
        'extract_folder': 'tmp',
        'error_model': None,
        'save': False,
        'integration_save_filename': None,
        'save_extension': '_integrated.xy',
        'integration_save_folder': '.',
        'overwrite': False}

    options = aux.update_options(options=options, required_options=required_options, default_options=default_options)

    if not isinstance(data['path'], list):
        data['path'] = [data['path']]

    # Get image array from filename if not passed
    if 'image' not in data.keys() or not isinstance(data['image'], np.ndarray):
        data['image'] = get_image_array(data['path'][index])

    # Load mask
    if 'mask' in data.keys():
        mask = get_image_array(data['mask'])
    else:
        mask = None

    # Instantiate the azimuthal integrator from pyFAI from the calibrant (.poni-file)
    ai = pyFAI.load(data['calibrant'])

    # Determine filename
    filename = make_filename(options=options, path=data['path'][index])

    # Make the save folders if they do not exist already
    if not os.path.isdir(options['extract_folder']):
        os.makedirs(options['extract_folder'])

    if not os.path.isdir(options['integration_save_folder']):
        os.makedirs(options['integration_save_folder'])

    res = ai.integrate1d(data['image'], npt=options['npt'], mask=mask, error_model=options['error_model'], unit=options['unit'], filename=filename)

    data['path'][index] = filename
    diffractogram, _ = read_xy(data=data, options=options, index=index)
    wavelength = find_wavelength_from_poni(path=data['calibrant'])

    if not options['save']:
        os.remove(filename)
        shutil.rmtree(options['extract_folder']) # was shutil.rmtree(f'tmp'), which ignored a custom extract_folder

    return diffractogram, wavelength
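
# A minimal usage sketch (all paths are hypothetical; the mask entry is optional):
#
#     data = {'path': 'img_0000.edf', 'calibrant': 'LaB6.poni', 'mask': 'mask.edf'}
#     options = {'npt': 3000, 'save': True, 'integration_save_folder': './integrated'}
#     diffractogram, wavelength = integrate_1d(data, options)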


def make_filename(options, path=None):

    # Define save location for integrated diffractogram data
    if not options['save']:
        filename = os.path.join(options['extract_folder'], 'tmp_diff.dat')

    elif options['save']:

        # Case 1: No filename is given.
        if not options['integration_save_filename']:
            # If a path is given instead of an image array, the path is taken as the trunk of the savename
            if path:
                # Make filename by joining the save_folder, the filename (with extension deleted) and adding the save_extension
                filename = os.path.join(options['integration_save_folder'], os.path.split(path)[-1].split('.')[0] + options['save_extension'])
            else:
                # Make filename just "integrated.xy" in the save_folder
                filename = os.path.join(options['integration_save_folder'], 'integrated.xy')

        else:
            filename = os.path.join(options['integration_save_folder'], options['integration_save_filename'])

        if not options['overwrite']:
            trunk = filename.split('.')[0]
            extension = filename.split('.')[-1]
            counter = 0

            while os.path.isfile(filename):

                # Rename the first file to match the naming scheme if it already exists
                if counter == 0:
                    os.rename(filename, trunk + '_' + str(counter).zfill(4) + '.' + extension)

                # Increment counter and make new filename
                counter += 1
                counter_string = str(counter)
                filename = trunk + '_' + counter_string.zfill(4) + '.' + extension

    return filename


def generate_image_list(path, options=None):
    ''' Generates a list of paths to pass to the average_images() function'''

    required_options = ['scans_per_image']
    default_options = {
        'scans_per_image': 5
    }


def process_2d_scans(data: dict, options={}):

    default_options = {
        'scans': 15, # number of scans per image
        'img_filename': 'img_',
        'extension': '.edf',
        'darks': True, # whether there are darks
        'dark_filename': 'dark_',
        'save': False,
        'save_folder': './average/',
        'save_filename': 'avg_',
        'save_extension': '.dat'
    }

    options = aux.update_options(options=options, required_options=default_options.keys(), default_options=default_options)

    all_imgs = [os.path.join(data['path'], img) for img in os.listdir(data['path']) if img.endswith(options['extension']) and img.startswith(options['img_filename'])]

    if options['darks']:
        all_darks = [os.path.join(data['path'], img) for img in os.listdir(data['path']) if img.endswith(options['extension']) and img.startswith(options['dark_filename'])]

    scans = int(len(all_imgs) / options['scans'])

    assert scans - (len(all_imgs) / options['scans']) == 0

    imgs = []
    darks = []

    for i in range(scans):
        img = []
        dark = []

        for j in range(options['scans']):
            img.append(all_imgs.pop(0))

            if options['darks']:
                dark.append(all_darks.pop(0))

        imgs.append(img)

        if options['darks']:
            darks.append(dark)

    img_avgs = []
    headers = []

    for i, img in enumerate(imgs): # the original zipped imgs with darks, which yielded nothing when darks were disabled
        img_avg = average_images(img)
        header = get_image_headers(img[0])

        if options['darks']:
            dark_avg = average_images(darks[i])
            img_avg = subtract_dark(img_avg, dark_avg)

        img_avgs.append(img_avg)
        headers.append(header)

    if options['save']:
        if not os.path.isdir(options['save_folder']):
            os.makedirs(options['save_folder'])

        for i, img in enumerate(img_avgs):
            if options['save_extension'] == '.dat':
                with open(os.path.join(options['save_folder'], options['save_filename']+f'{i}'.zfill(4)+options['save_extension']), 'w') as f:
                    f.write(f'# Time: {headers[i]["time"]}\n')
                    np.savetxt(f, img, fmt='%.2f', delimiter=";")

    return img_avgs


def average_images(images):
    ''' Takes a list of paths to image files, reads them and averages them before returning the average image'''

    image_arrays = []

    for image in images:
        image_array = get_image_array(image)
        image_arrays.append(image_array)

    image_arrays = np.array(image_arrays)

    image_average = image_arrays.mean(axis=0)

    return image_average


def subtract_dark(image, dark):

    return image - dark


def view_integrator(calibrant):
    ''' Prints out information about the azimuthal integrator

    Input:
    calibrant: Path to the azimuthal integrator file (.PONI)

    Output:
    None'''

    ai = pyFAI.load(calibrant)

    print("pyFAI version:", pyFAI.version)
    print("\nIntegrator: \n", ai)
def read_brml(data, options={}, index=0):

    # FIXME: Can't read RECX1-data, which is apparently formatted differently from RECX2. Check the RawData-files and compare between the two.

    required_options = ['extract_folder', 'save_folder']
    default_options = {
        'extract_folder': 'tmp',
        'save_folder': None
    }

    options = aux.update_options(options=options, required_options=required_options, default_options=default_options)

    if not os.path.isdir(options['extract_folder']):
        os.mkdir(options['extract_folder'])

    # Extract the RawData0.xml file from the .brml-file
    with zipfile.ZipFile(data['path'][index], 'r') as brml:
        for info in brml.infolist():
            if "RawData" in info.filename:
                brml.extract(info.filename, options['extract_folder'])

    # Parse the RawData0.xml file
    path = os.path.join(options['extract_folder'], 'Experiment0/RawData0.xml')

    tree = ET.parse(path)
    root = tree.getroot()

    shutil.rmtree(options['extract_folder'])

    diffractogram = []

    for chain in root.findall('./DataRoutes/DataRoute'):

        # Get the scan type to be able to handle different data formats
        scantype = chain.findall('ScanInformation')[0].get('VisibleName')

        # Check if the chain is the right one to extract the data from
        if chain.get('Description') == 'Originally measured data.':

            if scantype == 'TwoTheta':
                for scandata in chain.findall('Datum'):
                    scandata = scandata.text.split(',')
                    twotheta, intensity = float(scandata[2]), float(scandata[3])

                    if twotheta > 0:
                        diffractogram.append({'2th': twotheta, 'I': intensity})

            elif scantype == 'Coupled TwoTheta/Theta':
                for scandata in chain.findall('Datum'):
                    scandata = scandata.text.split(',')
                    twotheta, intensity = float(scandata[2]), float(scandata[4])

                    if twotheta > 0:
                        diffractogram.append({'2th': twotheta, 'I': intensity})

            elif scantype == 'Still (Eiger2R_500K (1D mode))':

                start = float(chain.findall('ScanInformation/ScaleAxes/ScaleAxisInfo/Start')[0].text)
                stop = float(chain.findall('ScanInformation/ScaleAxes/ScaleAxisInfo/Stop')[0].text)

                for scandata in chain.findall('Datum'):
                    scandata = scandata.text.split(',')
                    raw = [float(i) for i in scandata]

                    intensity = []
                    for r in raw:
                        if r > 601:
                            intensity.append(r)

                    intensity = np.array(intensity)

                    twotheta = np.linspace(start, stop, len(intensity))

                    diffractogram = {'2th': twotheta, 'I': intensity}

    # Find the wavelength if it was not passed
    if not data['wavelength'][index]:
        for chain in root.findall('./FixedInformation/Instrument/PrimaryTracks/TrackInfoData/MountedOptics/InfoData/Tube/WaveLengthAlpha1'):
            wavelength = float(chain.attrib['Value'])
    else:
        wavelength = data['wavelength'][index]

    diffractogram = pd.DataFrame(diffractogram)

    if options['save_folder']:
        if not os.path.isdir(options['save_folder']):
            os.makedirs(options['save_folder'])

        diffractogram.to_csv(options['save_folder']) # FIXME: this writes to the folder path itself; a filename should be joined in

    return diffractogram, wavelength
|
|
||||||
|
|
||||||
|
|
||||||
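# Example (added, not part of the original module): a minimal usage sketch for the reader above,
# which appears to be read_brml() judging from the read_data() dispatch further down. The filename
# is hypothetical; 'wavelength': [None] makes the wavelength be read from the instrument metadata.
#
#   data = {'path': ['scan.brml'], 'wavelength': [None]}
#   diffractogram, wavelength = read_brml(data, options={}, index=0)
#   print(diffractogram[['2th', 'I']].head(), wavelength)
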
def read_htxrd(data, options={}, index=0):

    required_options = ['extract_folder', 'save_folder', 'save_filename', 'adjust_time']

    default_options = {
        'extract_folder': 'tmp',
        'save_folder': None,
        'save_filename': None,
        'adjust_time': True
    }

    if not isinstance(data['path'], list):
        data['path'] = [data['path']]

    if 'wavelength' not in data.keys():
        data['wavelength'] = [None for i in range(len(data['path']))]

    options = aux.update_options(options=options, required_options=required_options, default_options=default_options)

    # Extract the RawData-files from the .brml-file
    with zipfile.ZipFile(data['path'][index], 'r') as brml:
        for info in brml.infolist():
            if "RawData" in info.filename:
                brml.extract(info.filename, options['extract_folder'])

    # Get all filenames
    files = os.listdir(os.path.join(options['extract_folder'], 'Experiment0'))

    # Initialise empty lists to store all DataFrames and wavelengths
    diffractograms = []
    wavelengths = []

    timestamps = []

    # Loop through all RawData-files and extract all data and temperatures
    for i, file in enumerate(files):

        # Reset per file (moved into the loop) so the timestamps stay aligned with the scans that produced them
        active_scan = False

        # Create all filenames as strings (fixed: use the extract_folder option instead of a hardcoded 'tmp')
        filename = os.path.join(options['extract_folder'], 'Experiment0', f'RawData{i}.xml')

        # Parse the .xml-files
        tree = ET.parse(filename)
        root = tree.getroot()

        # Initialise empty list to store data from this particular scan
        diffractogram = []

        for chain in root.findall('./DataRoutes/DataRoute'):

            scantypes = chain.findall('ScanInformation')

            for scantype in scantypes:
                # Skip the temperature-controller stills, which contain no diffraction data
                if scantype.get('VisibleName') == 'Still (TCU1000N)':
                    continue

                active_scan = True

                if chain.get('RouteFlag') == 'Final':
                    for scandata in chain.findall('Datum'):
                        scandata = scandata.text.split(',')
                        twotheta, intensity, temperature = float(scandata[2]), float(scandata[3]), float(scandata[5])

                        diffractogram.append({'2th': twotheta, 'I': intensity, 'T': temperature})

                    diffractogram = pd.DataFrame(diffractogram)
                    diffractograms.append(diffractogram)

        # Find the wavelength from the instrument metadata unless it is given explicitly
        wavelength = None
        if not data['wavelength'][index]:
            for chain in root.findall('./FixedInformation/Instrument/PrimaryTracks/TrackInfoData/MountedOptics/InfoData/Tube/WaveLengthAlpha1'):
                wavelength = float(chain.attrib['Value'])
        else:
            wavelength = data['wavelength'][index]

        wavelengths.append(wavelength)

        if active_scan:
            for chain in root.findall('./TimeStampStarted'):
                time_start = datetime.datetime.strptime(chain.text[:-7], "%Y-%m-%dT%H:%M:%S.%f")
            for chain in root.findall('./TimeStampFinished'):
                time_end = datetime.datetime.strptime(chain.text[:-7], "%Y-%m-%dT%H:%M:%S.%f")

            time_diff = time_end - time_start

            if options['adjust_time']:
                # Use the midpoint of the scan as its timestamp
                timestamps.append(time_start + time_diff/2)
            else:
                # Added: fall back to the start time, so timestamps stays aligned with diffractograms
                timestamps.append(time_start)

    if options['save_folder']:
        for i, (diffractogram, wavelength, timestamp) in enumerate(zip(diffractograms, wavelengths, timestamps)):
            if not options['save_filename']:
                filename = os.path.basename(data['path'][index]).split('.')[0] + '_' + str(i).zfill(4) + '.xy'
            else:
                filename = options['save_filename'] + '_' + str(i).zfill(4) + '.xy'

            if not os.path.isdir(options['save_folder']):
                os.makedirs(options['save_folder'])

            save_htxrd_as_xy(diffractogram, wavelength, timestamp, filename, options['save_folder'])

    shutil.rmtree(options['extract_folder'])

    return diffractograms, wavelengths

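# Example (added): a sketch of a typical read_htxrd() call. The path is hypothetical; with
# 'save_folder' set, each scan is also written to its own numbered .xy-file via save_htxrd_as_xy().
#
#   data = {'path': 'htxrd_run.brml'}
#   diffractograms, wavelengths = read_htxrd(data, options={'save_folder': 'xy_out'})
#   print(len(diffractograms), 'scans; first scan temperature:', diffractograms[0]['T'].mean())
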
def save_htxrd_as_xy(diffractogram, wavelength, timestamp, filename, save_path):

    headers = '\n'.join(
        [f'# Temperature {np.round(diffractogram["T"].mean())}',
         f'# Wavelength {wavelength}',
         f'# Time {timestamp}',
         '# 2th \t I'
         ]
    )

    # The temperature column is only used for the header - drop it before writing the data
    diffractogram = diffractogram.drop('T', axis=1)

    with open(os.path.join(save_path, filename), 'w', newline='\n') as f:
        f.write(headers)
        f.write('\n')

        diffractogram.to_csv(f, index=False, header=False, sep='\t')

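# Example (added): the layout of a file written by save_htxrd_as_xy(), reconstructed from the
# code above (the values are illustrative only):
#
#   # Temperature 300.0
#   # Wavelength 1.54059
#   # Time 2022-01-01 12:00:00
#   # 2th 	 I
#   10.00	154.0
#   10.02	160.0
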
def read_xy(data, options={}, index=0):

    if 'wavelength' in data.keys() and not isinstance(data['wavelength'], list):
        data['wavelength'] = [data['wavelength']]

    if 'wavelength' not in data.keys() or not data['wavelength'][index]:
        wavelength = read_metadata_from_xy(path=data['path'][index])['wavelength']
    else:
        wavelength = data['wavelength'][index]

    with open(data['path'][index], 'r') as f:
        position = 0

        current_line = f.readline()

        # Skip past any header lines (starting with '#' or a single quote) and rewind to the first data line
        while current_line[0] == '#' or current_line[0] == '\'':
            position = f.tell()
            current_line = f.readline()

        f.seek(position)

        diffractogram = pd.read_csv(f, header=None, sep=r'\s+')  # sep=r'\s+' replaces the deprecated delim_whitespace=True

    if diffractogram.shape[1] == 2:
        diffractogram.columns = ['2th', 'I']
    elif diffractogram.shape[1] == 3:
        diffractogram.columns = ['2th', 'I', 'sigma']

    return diffractogram, wavelength

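# Example (added): reading a plain two-column .xy-file. If 'wavelength' is missing from `data`,
# read_metadata_from_xy() recovers it from the file headers. The path is hypothetical.
#
#   data = {'path': ['sample_0001.xy']}
#   diffractogram, wavelength = read_xy(data)
#   print(diffractogram.columns.tolist())  # ['2th', 'I'], or ['2th', 'I', 'sigma'] for three columns
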
def read_metadata_from_xy(path):

    metadata = {}
    wavelength_dict = {'Cu': 1.54059, 'Mo': 0.71073}

    with open(path, 'r') as f:
        lines = f.readlines()

    for line in lines:
        # For .xy-files output from EVA: look up the wavelength from the anode material
        if 'Anode' in line:
            anode = line.split()[8].strip('"')
            metadata['wavelength'] = wavelength_dict[anode]

        elif 'Wavelength' in line:
            # For .xy-files output from pyFAI integration, where the wavelength is given in metres
            if line.split()[-1] == 'm':
                metadata['wavelength'] = float(line.split()[2])*10**10  # m -> Å
            else:
                metadata['wavelength'] = float(line.split()[-1])

        # Get temperature - exists in .xy-files saved from HTXRD-runs in .brml-files
        if 'Temperature' in line:
            metadata['temperature'] = line.split()[-1]

        # Get timestamp - exists in .xy-files saved from .brml-files
        if 'Time' in line:
            metadata['time'] = " ".join(line.split()[2:])

    # Default any missing fields to None
    metadata.setdefault('wavelength', None)
    metadata.setdefault('temperature', None)
    metadata.setdefault('time', None)

    return metadata

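# Example (added): header lines that read_metadata_from_xy() understands, based on the parsing
# above (the EVA 'Anode' layout is an assumption; the anode symbol is taken from the ninth
# whitespace-separated token of that line):
#
#   # Wavelength 1.54059          ->  metadata['wavelength'] = 1.54059
#   # Wavelength 7.1073e-11 m     ->  metadata['wavelength'] = 0.71073   (converted from m to Å)
#   # Temperature 300.0           ->  metadata['temperature'] = '300.0'
#   # Time 2022-01-01 12:00:00    ->  metadata['time'] = '2022-01-01 12:00:00'
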
def find_wavelength_from_poni(path):

    wavelength = None  # added default, so a .poni-file without a Wavelength-line returns None instead of raising NameError

    with open(path, 'r') as f:
        lines = f.readlines()

    for line in lines:
        if 'Wavelength' in line:
            wavelength = float(line.split()[-1])*10**10  # .poni-files store the wavelength in metres - convert to Å

    return wavelength

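# Example (added): a pyFAI .poni-file stores the wavelength in metres, e.g. a line such as
#
#   Wavelength: 1.54059e-10
#
# for which find_wavelength_from_poni() returns 1.54059 (in Å).
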
def strip_headers_from_xy(path: str, filename=None) -> None:
    ''' Strips headers from a .xy-file '''

    xy = []
    with open(path, 'r') as f:
        lines = f.readlines()

    headerlines = 0
    for line in lines:
        if line[0] == '#' or line[0] == "\'":
            headerlines += 1
        else:
            xy.append(line)

    if not filename:
        ext = path.split('.')[-1]
        filename = path.split(f'.{ext}')[0] + f'_noheaders.{ext}'

    with open(filename, 'w') as f:
        for line in xy:
            f.write(line)

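# Example (added): stripping headers from a hypothetical 'scan.xy'. With no filename given,
# the output is written next to the input as 'scan_noheaders.xy'.
#
#   strip_headers_from_xy('scan.xy')
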
def read_data(data, options={}, index=0):

    beamline_extensions = ['mar3450', 'edf', 'cbf']
    file_extension = data['path'][index].split('.')[-1]

    if file_extension in beamline_extensions:
        diffractogram, wavelength = integrate_1d(data=data, options=options, index=index)

    elif file_extension == 'brml':
        diffractogram, wavelength = read_brml(data=data, options=options, index=index)

    elif file_extension in ['xy', 'xye']:
        diffractogram, wavelength = read_xy(data=data, options=options, index=index)

    # NOTE: the 'exclude', 'offset' and 'normalise' keys are assumed to be present in options (filled in by the caller)
    if options['exclude']:

        if not isinstance(options['exclude'], list):
            options['exclude'] = [options['exclude']]

        # Zero out the intensities in every excluded 2th-region
        for excl in options['exclude']:
            diffractogram.loc[(diffractogram['2th'] > excl[0]) & (diffractogram['2th'] < excl[1]), 'I'] = 0

    if options['offset'] or options['normalise']:
        # Make a copy of the original intensities before normalisation or offset changes them, to easily revert if an update is needed
        diffractogram['I_org'] = diffractogram['I']
        diffractogram['2th_org'] = diffractogram['2th']

        diffractogram = adjust_intensities(diffractogram, wavelength, index, options)

    diffractogram = translate_wavelengths(data=diffractogram, wavelength=wavelength)

    return diffractogram, wavelength

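# Example (added): dispatching on file extension with read_data(). The function expects the
# 'exclude', 'offset' and 'normalise' keys to be present in options; the values below are
# illustrative only.
#
#   options = {'exclude': [(10, 12)], 'offset': False, 'normalise': False}
#   diffractogram, wavelength = read_data({'path': ['sample_0001.xy']}, options=options)
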
def adjust_intensities(diffractogram, wavelength, index, options):

    # Keep track of whether the y-offset has changed since the previous call
    if 'current_offset_y' not in options.keys():
        options['current_offset_y'] = options['offset_y']
    else:
        if options['current_offset_y'] != options['offset_y']:
            options['offset_change'] = True

        options['current_offset_y'] = options['offset_y']

    options['current_offset_x'] = options['offset_x']

    # Apply offset along y-axis
    diffractogram['I'] = diffractogram['I_org']  # Reset intensities

    if options['normalise']:
        diffractogram['I'] = diffractogram['I'] / diffractogram['I'].max()

    if not isinstance(options['multiply'], list):
        options['multiply'] = [options['multiply']]

    diffractogram['I'] = diffractogram['I'] * options['multiply'][index]

    if options['drawdown']:
        diffractogram['I'] = diffractogram['I'] - diffractogram['I'].mean()

    diffractogram['I'] = diffractogram['I'] + index*options['offset_y']

    # Apply offset along x-axis
    relative_shift = (wavelength / 1.54059)*options['offset_x']  # Adjusts the offset-factor to account for wavelength, so that offset_x is given in 2th_cuka-units
    diffractogram['2th'] = diffractogram['2th_org']
    diffractogram['2th'] = diffractogram['2th'] + index*relative_shift

    return diffractogram

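# Example (added): the x-offset is specified in 2th_cuka-units and rescaled to the actual
# wavelength. For Mo Kalpha data (wavelength = 0.71073 Å) with offset_x = 1.0, the shift per
# pattern is (0.71073/1.54059)*1.0 ≈ 0.461 degrees, so pattern number `index` is shifted by
# index*0.461 degrees along the x-axis.
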
def revert_offset(diffractogram, which=None):

    if which == 'both':
        diffractogram['2th'] = diffractogram['2th_org']
        diffractogram['I'] = diffractogram['I_org']

    if which == 'y':
        diffractogram['I'] = diffractogram['I_org']

    if which == 'x':
        diffractogram['2th'] = diffractogram['2th_org']

    return diffractogram

def load_reflection_table(data: dict, reflections_params: dict, options={}):

    required_options = ['ref_wavelength', 'to_wavelength']

    default_options = {
        'ref_wavelength': 1.54059,
        'to_wavelength': None
    }

    options = aux.update_options(options=options, required_options=required_options, default_options=default_options)

    # VESTA outputs the file with a header that has a space between the parameter and its unit, so some extra code is required
    # to rectify the column misalignment that ensues from this formatting
    reflections = pd.read_csv(reflections_params['path'], sep=r'\s+')

    # Remove the extra column that appears from the headers issue
    reflections.drop(reflections.columns[-1], axis=1, inplace=True)

    with open(reflections_params['path'], 'r') as f:
        line = f.readline()

    headers = line.split()

    # Delete the fifth element (index 4), which is the stray unit token '(Å)'
    del headers[4]

    # Change the name of the 2theta-column to avoid using Greek letters
    headers[7] = '2th'

    # Set the modified headers as the headers of the DataFrame
    reflections.columns = headers

    reflections = translate_wavelengths(data=reflections, wavelength=options['ref_wavelength'], to_wavelength=options['to_wavelength'])

    if 'heatmap' in data.keys():

        # Map the 2th-values of the reflections onto heatmap pixel coordinates
        start_2th, stop_2th = data['diffractogram'][0]['2th'].min(), data['diffractogram'][0]['2th'].max()
        len_2th = stop_2th - start_2th

        start_heatmap, stop_heatmap = 0, data['heatmap'].shape[1]
        len_heatmap = stop_heatmap - start_heatmap

        scale = len_heatmap/len_2th

        reflections['heatmap'] = (reflections['2th']-start_2th) * scale

    return reflections

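# Example (added): the first line of a VESTA reflection table looks roughly like this
# (the exact layout is an assumption):
#
#   h    k    l    d (Å)    F(real)    F(imag)    |F|    2θ    I    M
#
# Splitting on whitespace turns 'd (Å)' into two tokens, which is why the code above deletes
# the stray '(Å)' token at index 4 and then renames the 2θ-column (index 7) to '2th'.
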
def translate_wavelengths(data: pd.DataFrame, wavelength: float, to_wavelength=None) -> pd.DataFrame:
    # FIXME Somewhere here there is an invalid arcsin-argument. Not sure where. The np.clip-calls
    # below (added) guard against floating-point overshoot at the upper 2th-limits, which is one likely source.

    pd.options.mode.chained_assignment = None

    # Translate to CuKalpha
    cuka = 1.54059  # Å

    if cuka > wavelength:
        max_2th_cuka = 2*np.arcsin(wavelength/cuka) * 180/np.pi
    else:
        max_2th_cuka = data['2th'].max()

    data['2th_cuka'] = np.nan

    mask = data['2th'] <= max_2th_cuka
    data.loc[mask, '2th_cuka'] = 2*np.arcsin(np.clip(cuka/wavelength * np.sin((data.loc[mask, '2th']/2) * np.pi/180), -1, 1)) * 180/np.pi

    # Translate to MoKalpha
    moka = 0.71073  # Å

    if moka > wavelength:
        max_2th_moka = 2*np.arcsin(wavelength/moka) * 180/np.pi
    else:
        max_2th_moka = data['2th'].max()

    data['2th_moka'] = np.nan

    mask = data['2th'] <= max_2th_moka
    data.loc[mask, '2th_moka'] = 2*np.arcsin(np.clip(moka/wavelength * np.sin((data.loc[mask, '2th']/2) * np.pi/180), -1, 1)) * 180/np.pi

    # Convert to other parameters. Bragg's law: d = wavelength / (2 sin(theta)), with theta = 2th/2
    # (fixed: the previous expression effectively took the sine of the full 2th-angle)
    data['d'] = wavelength / (2*np.sin((data['2th']/2) * np.pi/180))
    data['1/d'] = 1/data['d']
    data['q'] = np.abs((4*np.pi/wavelength)*np.sin(data['2th']/2 * np.pi/180))
    data['q2'] = data['q']**2
    data['q4'] = data['q']**4

    if to_wavelength:
        if to_wavelength >= cuka:
            max_2th = 2*np.arcsin(cuka/to_wavelength) * 180/np.pi
        else:
            max_2th = data['2th_cuka'].max()

        data['2th'] = np.nan
        mask = data['2th_cuka'] <= max_2th
        data.loc[mask, '2th'] = 2*np.arcsin(np.clip(to_wavelength/cuka * np.sin((data.loc[mask, '2th_cuka']/2) * np.pi/180), -1, 1)) * 180/np.pi

    return data

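# Example (added): the translation above follows from Bragg's law. Equating
# d = lam1/(2 sin(th1)) = lam2/(2 sin(th2)) gives 2th2 = 2 arcsin((lam2/lam1) sin(2th1/2)).
# For instance, a reflection at 2th = 10.0 deg measured with Mo Kalpha (0.71073 Å) lands at
# 2*arcsin((1.54059/0.71073)*sin(5 deg))*180/pi ≈ 21.8 deg on the 2th_cuka-scale.
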
def trim_xy_region(path, region):

    df = pd.read_csv(path, header=None, sep=r'\s+')
    df.columns = ['2th', 'I']

    # Keep only the data points inside the given 2th-region
    df = df.loc[(df['2th'] > region[0]) & (df['2th'] < region[1])]

    folder = os.path.dirname(path)
    save_folder = os.path.join(folder, 'trimmed')

    if not os.path.exists(save_folder):
        os.makedirs(save_folder)

    df.to_csv(os.path.join(save_folder, os.path.basename(path)), sep='\t', header=None, index=None)

def raise_intensities_xy(path, region=None):

    df = pd.read_csv(path, header=None, sep=r'\s+')
    df.columns = ['2th', 'I']

    if region:
        df = df.loc[(df['2th'] > region[0]) & (df['2th'] < region[1])]

    # Shift the intensities so the minimum becomes zero
    df['I'] = df['I'] - df['I'].min()

    folder = os.path.dirname(path)
    save_folder = os.path.join(folder, 'raised')

    if not os.path.exists(save_folder):
        os.makedirs(save_folder)

    df.to_csv(os.path.join(save_folder, os.path.basename(path)), sep='\t', header=None, index=None)

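# Example (added): both helpers write a modified copy of the input file into a subfolder next
# to it ('trimmed' and 'raised', respectively). The path is hypothetical.
#
#   trim_xy_region('data/sample_0001.xy', region=(10, 60))   # -> data/trimmed/sample_0001.xy
#   raise_intensities_xy('data/sample_0001.xy')              # -> data/raised/sample_0001.xy
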
1114
nafuma/xrd/plot.py
File diff suppressed because it is too large

@@ -1,44 +0,0 @@
{
    "calculation_step": "x_calculation_step = Yobs_dx_at(Xo); convolution_step {}",
    "capillary": [
        "local !packing_density {} min 0.1 max 1.0 'typically 0.2 to 0.5",
        "local !capdia {} 'capillary diameter in mm",
        "local !linab = Get(mixture_MAC) Get(mixture_density_g_on_cm3);: 100 'in cm-1",
        "local muR = (capdia/20)*linab*packing_density;",
        "Cylindrical_I_Correction(muR)"
    ],
    "gauss_fwhm": "gauss_fwhm = Sqrt({} Cos(2 * Th)^4 + {} Cos(2 * Th)^2 + {});",
    "lp_factor": "LP_Factor({}) 'change the LP correction or lh value if required",
    "synchrotron": "lam ymin_on_ymax 0.0001 la 1.0 lo {} lh 0.1",
    "neutron": [
        "lam ymin_on_ymax 0.0001 la 1.0 lo {} lh 0.5",
        "neutron_data"
    ],
    "MoKa": [
        "lam ymin_on_ymax 0.0001",
        "la 0.6533 lo 0.7093 lh 0.2695",
        "la 0.3467 lo 0.713574 lh 0.2795"
    ],
    "RECX2": [
        "Rp 280",
        "Rs 280"
    ],
    "zero_error": "Zero_Error(!zero, 0)",
    "th2_offset": [
        "prm !zero\t\t\t= 0 ;: 0 \t\t\t\tmin = Max(Val - 20 Yobs_dx_at(X1), -100 Yobs_dx_at(X1)); max = Min(Val + 20 Yobs_dx_at(X2), 100 Yobs_dx_at(X2)); del = .01 Yobs_dx_at(X1); val_on_continue 0",
        "prm !cos_shift\t\t= 0 ;: 0 \t\t\t\tmin = Val-.8; max = Val+.8; del 0.001",
        "prm !sin_shift\t\t= 0 ;: 0 \t\t\t\tmin = Val-.8; max = Val+.8; del 0.001",
        "th2_offset = (zero) + (cos_shift) Cos(Th) + (sin_shift) Sin(Th) ;"
    ],
    "fit_peak_width": "DC1( ad, 0, bd, 0, cd, 0)",
    "TCHZ_Peak_Type": "TCHZ_Peak_Type(pku_1, 0, pkv_1, 0,pkw_1, 0, !pkx_1, 0.0000,pky_1, 0,!pkz_1, 0.0000)",
    "Simple_Axial_Model": "Simple_Axial_Model( axial_1, 0)",
    "magnetic_moment_str": "mlx = ml_x_{}_{}_XXXX ; \t mly = ml_y_{}_{}_XXXX ; \t mlz = ml_z_{}_{}_XXXX ; \t MM_CrystalAxis_Display( 0, 0, 0)",
    "peak": [
        "xo_Is",
        "xo @ {}",
        "peak_type fp",
        "LVol_FWHM_CS_G_L( 1, 0, 0.89, 0,,,@, 2)",
        "I @ 35.35632`"
    ]
}
@@ -1 +0,0 @@
C:/TOPAS6/

@@ -1,15 +0,0 @@
{
    "A_matrix_memory_allowed_in_Mbytes": null,
    "approximate_A": false,
    "bootstrap_errors": null,
    "capdia": 0.5,
    "chi2_convergence_criteria": 0.001,
    "conserve_memory": false,
    "continue_after_convergence": false,
    "convolution_step": 1,
    "do_errors": false,
    "iters": 100000,
    "lp_factor": 90,
    "num_runs": null,
    "packing_density": 0.5
}
11
setup.py

@@ -1,11 +0,0 @@
from setuptools import setup, find_packages

setup(name='nafuma',
      version='0.4',
      description='Analysis tools for inorganic materials chemistry at the NAFUMA-group at the University of Oslo',
      url='https://github.com/rasmusthog/nafuma',
      author='Rasmus Vester Thøgersen, Halvor Høen Hval',
      author_email='code@rasmusthog.me',
      license='MIT',
      packages=find_packages(),
      zip_safe=False)
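# Example (added): with this setup.py, the package would typically be installed from the
# repository root with `pip install .`, or `pip install -e .` for a development install.
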
0
test.txt