{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "94771e0c", "metadata": {}, "outputs": [], "source": [ "# here we are going to explore the effect of energy miscalibration in XAFS\n", "#\n", "# the nature of the problem is that XAFS measurements need a well-calibrated energy coming from\n", "# a double-cystal monochromator. That calibration is typically done by scanning across a known \n", "# (or at least reproducible) energy and changing either the d-spacing or the 0 value of angle \n", "# until the energy is correct. \n", "# \n", "# Most commonly, the XANES of some metal foil is measured and the maximumn of the first derivative \n", "# is set to the tabulated edge energy for the metal. This method will give a bit of a variation as \n", "# the energy resolution changes. These days, most good XAFS beamlines have high enough energy \n", "# resolution that this is not much of a problem, but it can be a problem for older data. It should \n", "# also be said that those tabulated values are not always accurate to better than 1 or 2 eV. In fact, \n", "# rather than use the tabulated values such as at \n", "# https://xraydb.xrayabsorption.org/\n", "# the values from Kraft et al Review of Scientific Instruments 67, p681 (1996):\n", "# https://doi.org/10.1063/1.1146657\n", "# should be used, as they were carefully measured with a single, very high-resolution and \n", "# consistently calibrated monochromator.\n", "#\n", "# Ideally, a well-calibrated monochromator would have a single d-spacing and angular offset that \n", "# stays calibrated across the edges of many edges. That is, if the energy needs to be recalibrated\n", "# at every edge, the energy will be drifting between those edges, and the reason for any need to\n", "# recalibrate should be investigated. If the beamline is well collimated and the angle of the beam \n", "# incident on the monochomator is stable, it is certainly possible to have a monochromator set up t\n", "# that stays calibrated for weeks and over an energy range of 10 keV or more. \n", "####\n", "# \n", "# For any XAS analysis, the energy scale is critical. Because of the variations above in calibrating \n", "# energy, small energy shifts between beamlines or even runs at the same beamline but different months \n", "# are not uncommon. If the shift is relatively small, say 1 to 5 eV at 10 keV, simply adding a constant \n", "# energy offset is satisfactory.\n", "# \n", "# You may hear people say that poor calibration leads to large errors in XAFS results or even leads \n", "# \"the wrong results\". You should know that when people say things are Wrong is describing scientific\n", "# measurements and analysis, they are almost certainly wrong. \n", "#\n", "\n", "\n" ] }, { "cell_type": "code", "execution_count": 25, "id": "c734246b", "metadata": {}, "outputs": [], "source": [ "# we start with some imports of python functions we'll need to read, modify, and fit xafs data:\n", "\n", "import numpy as np\n", "import scipy.constants as consts\n", "\n", "from larch.io import read_ascii\n", "from larch import Group\n", "from larch.fitting import param_group, param, guess\n", "from larch.xafs import (autobk,\n", " feffpath, feffit_transform,\n", " feffit_dataset, feffit, feffit_report)\n", "\n", "from larch.plot.plotly_xafsplots import PlotlyFigure, plot_chifit\n", "\n", "PLANCK_HC = 1.e10 * consts.Planck * consts.c / consts.e\n", "\n", "RAD2DEG = 180.0/np.pi\n", "DEG2RAD = np.pi/180.0" ] }, { "cell_type": "code", "execution_count": 2, "id": "29cd4798", "metadata": {}, "outputs": [], "source": [ "# next we'll define a function to shift X-ray energy either by changing the angular offset or the \n", "# d-spacing of the monochromator\n", "\n", "def shift_energy(energy, dspace, theta_off=0, dspace_off=0.0):\n", " \"\"\" shift energy: first convert to theta (in degrees), then apply an angular offset and/or dspace offset\"\"\" \n", " theta = RAD2DEG * np.arcsin(PLANCK_HC / (2.0*dspace*energy))\n", " return PLANCK_HC/(2*(dspace-dspace_off)*np.sin(DEG2RAD*(theta-theta_off)))\n" ] }, { "cell_type": "code", "execution_count": 18, "id": "d46c8f68", "metadata": {}, "outputs": [], "source": [ "# then we'll write a function to take an XAFS group from Larch, apply an energy shift, and fit the data\n", "\n", "def fit_shifted_data(dgroup, theta_off=0, dspace_off=0, with_report=False):\n", " enew = shift_energy(dgroup.energy, dspace, theta_off=theta_off, dspace_off=dspace_off)\n", "\n", "\n", " # make a new group using that shifted energy, as if the mono was poorly set\n", " dat = Group(energy=enew, mu=dgroup.mu*1.0)\n", "\n", " # extract the EXAFS chi(k)\n", " autobk(dat.energy, dat.mu, group=dat, rbkg=1.0, kw=2)\n", "\n", " # define fitting parameter group\n", " pars = param_group(amp = param(1.0, vary=True),\n", " del_e0 = param(0.0, vary=True),\n", " sig2 = param(0.0, vary=True),\n", " del_r = guess(0.0, vary=True) )\n", "\n", " # define a Feff Path, give expressions for Path Parameters\n", " path1 = feffpath('../feffit/feffcu01.dat',\n", " s02 = 'amp',\n", " e0 = 'del_e0',\n", " sigma2 = 'sig2',\n", " deltar = 'del_r')\n", "\n", " # do the fit\n", " trans = feffit_transform(kmin=2.5, kmax=15, kw=2, dk=4, window='kaiser', rmin=1.4, rmax=3.0)\n", " dset = feffit_dataset(data=dat, pathlist=[path1], transform=trans)\n", " out = feffit(pars, dset)\n", " \n", " # if asked write out the full fit repport\n", " if with_report:\n", " print(feffit_report(out))\n", " return dat, out\n", " # vals = [f\"{theta_off:10.4f}\"\n", " # f\"{dspace_off:10.4f}\",\n", " # f\"{dat.e0:10.3f}\",\n", " # f\"{dat.e0-dgroup.e0:+10.3f}\",\n", " # f\"{out.params['del_r'].value:+.4f}\",\n", " # f\"{out.params['del_r'].stderr:.4f}\"]\n", " # print(' '.join(vals))\n", " # return(out, dat.e0-dgroup.e0, out.params['del_r'].value, out.params['del_r'].stderr)\n" ] }, { "cell_type": "code", "execution_count": 26, "id": "626d3939", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "=================== FEFFIT RESULTS ====================\n", "[[Statistics]]\n", " nvarys, npts = 4, 104\n", " n_independent = 13.732\n", " chi_square = 9370.60716\n", " reduced chi_square = 962.826388\n", " r-factor = 0.90571999\n", " Akaike info crit = 97.6117833\n", " Bayesian info crit = 100.090814\n", " \n", "[[Data]]\n", " fit space = 'r'\n", " r-range = 1.400, 3.000\n", " k-range = 2.500, 15.000\n", " k window, dk = 'kaiser', 4.000\n", " paths used in fit = ['../feffit/feffcu01.dat']\n", " k-weight = 2\n", " epsilon_k = Array(mean=0.00654573, std=0.02933566)\n", " epsilon_r = 0.16089890\n", " n_independent = 13.732\n", " \n", "[[Variables]]\n", " amp = -0.53104167 +/- 1.49115874 (init= 1.00000000)\n", " del_e0 = 49.7358163 +/- 26.0775690 (init= 0.00000000)\n", " del_r = 0.21044797 +/- 0.13186677 (init= 0.00000000)\n", " sig2 = 0.00276363 +/- 0.01776436 (init= 0.00000000)\n", " \n", "[[Correlations]] (unreported correlations are < 0.100)\n", " amp, sig2 = -0.933\n", " del_e0, del_r = 0.880\n", " amp, del_r = -0.263\n", " del_r, sig2 = 0.218\n", " amp, del_e0 = -0.103\n", " \n", "[[Paths]]\n", " = Path 'p3tf6mewgr' = Cu K Edge\n", " feffdat file = ../feffit/feffcu01.dat, from feff run 'feffit'\n", " geometry atom x y z ipot\n", " Cu 0.0000, 0.0000, 0.0000 0 (absorber)\n", " Cu 0.0000, -1.8016, 1.8016 1\n", " reff = 2.54780000\n", " degen = 12.0000000\n", " n*s02 = -0.53104167 +/- 1.49115874 := 'amp'\n", " e0 = 49.7358163 +/- 26.0775690 := 'del_e0'\n", " r = 2.75824797 +/- 0.13186677 := 'reff + del_r'\n", " deltar = 0.21044797 +/- 0.13186677 := 'del_r'\n", " sigma2 = 0.00276363 +/- 0.01776436 := 'sig2'\n", "\n", "=======================================================\n" ] } ], "source": [ "# next, let's read in data and do the fit without any shift as a baseline\n", "\n", "cudat = read_ascii('../xafsdata/cu_10k.xmu', labels='energy, mu')\n", "autobk(cudat.energy, cudat.mu, group=cudat, rbkg=1.0, kw=2)\n", "\n", "# this data set says that hc_2d = 1977.260\n", "hc_2d = 1977.260\n", "\n", "# we'll use the current value of HC to get the dspace they would have used\n", "dspace = PLANCK_HC / (2 * hc_2d)\n", "\n", "grp, result = fit_shifted_data(cudat, dspace, with_report=True)\n" ] }, { "cell_type": "code", "execution_count": 32, "id": "1afbd106", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "2\n" ] }, { "ename": "NameError", "evalue": "name 'ndarray' is not defined", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[32], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m dset \u001b[38;5;241m=\u001b[39m result\u001b[38;5;241m.\u001b[39mdatasets[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28mprint\u001b[39m(dset\u001b[38;5;241m.\u001b[39mtransform\u001b[38;5;241m.\u001b[39mkweight)\n\u001b[0;32m----> 3\u001b[0m \u001b[43mplot_chifit\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdset\u001b[49m\u001b[43m)\u001b[49m\n", "File \u001b[0;32m~/Codes/xraylarch/larch/plot/plotly_xafsplots.py:761\u001b[0m, in \u001b[0;36mplot_chifit\u001b[0;34m(dataset, kmin, kmax, kweight, rmax, show_mag, show_real, show_imag, title, new, delay_draw, offset, win, _larch)\u001b[0m\n\u001b[1;32m 759\u001b[0m kweight \u001b[38;5;241m=\u001b[39m dataset\u001b[38;5;241m.\u001b[39mtransform\u001b[38;5;241m.\u001b[39mkweight\n\u001b[1;32m 760\u001b[0m \u001b[38;5;66;03m#endif\u001b[39;00m\n\u001b[0;32m--> 761\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(kweight, (\u001b[38;5;28mlist\u001b[39m, \u001b[38;5;28mtuple\u001b[39m, \u001b[43mndarray\u001b[49m)): kweight\u001b[38;5;241m=\u001b[39mkweight[\u001b[38;5;241m0\u001b[39m]\n\u001b[1;32m 763\u001b[0m data_chik \u001b[38;5;241m=\u001b[39m dataset\u001b[38;5;241m.\u001b[39mdata\u001b[38;5;241m.\u001b[39mchi \u001b[38;5;241m*\u001b[39m dataset\u001b[38;5;241m.\u001b[39mdata\u001b[38;5;241m.\u001b[39mk\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkweight\n\u001b[1;32m 764\u001b[0m model_chik \u001b[38;5;241m=\u001b[39m dataset\u001b[38;5;241m.\u001b[39mmodel\u001b[38;5;241m.\u001b[39mchi \u001b[38;5;241m*\u001b[39m dataset\u001b[38;5;241m.\u001b[39mmodel\u001b[38;5;241m.\u001b[39mk\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkweight\n", "\u001b[0;31mNameError\u001b[0m: name 'ndarray' is not defined" ] } ], "source": [ "dset = result.datasets[0]\n", "print(dset.transform.kweight)\n", "plot_chifit(dset)" ] }, { "cell_type": "code", "execution_count": 33, "id": "862f31a7", "metadata": {}, "outputs": [ { "ename": "SyntaxError", "evalue": "incomplete input (2481089151.py, line 1)", "output_type": "error", "traceback": [ "\u001b[0;36m Cell \u001b[0;32mIn[33], line 1\u001b[0;36m\u001b[0m\n\u001b[0;31m dir(result.dat\u001b[0m\n\u001b[0m ^\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m incomplete input\n" ] } ], "source": [ "dir(result.dat" ] }, { "cell_type": "code", "execution_count": null, "id": "d704ed37", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.10" } }, "nbformat": 4, "nbformat_minor": 5 }