{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "* TODO: lire directement les fichiers NeXus pour la calibration.\n", "* TODO: faire une calibration en utilisant toutes les images d'une série.\n", "* Le bras détecteur bouge et il faut le prendre en compte.\n", "\n", "voir ce qu'a fait marco voltolini" ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# list of imports\n", "import os\n", "import sys\n", "import time\n", "import fabio\n", "import pyFAI\n", "import numpy\n", "import numpy.ma\n", "\n", "from typing import Iterator, List, NamedTuple, Optional, Text, Tuple, Union\n", "\n", "from collections import namedtuple\n", "from functools import partial\n", "from h5py import Dataset, File\n", "from numpy import ndarray\n", "from matplotlib import pyplot\n", "from scipy.ndimage import generic_filter\n", "\n", "from pyFAI.detectors import detector_factory\n", "\n", "# A local import with Soleil common methodes.\n", "from soleil import *" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "collapsed": true }, "outputs": [], "source": [ "# define a bunch of constants\n", "LAMBDA = 0.6887\n", "CALIBRANT = \"CeO2\"\n", "CEO2 = \"XRD18keV_27.nxs\"\n", "\n", "ROOT = os.path.join(\"/nfs\", \"ruche-diffabs\", \"diffabs-soleil\", \"com-diffabs\", \"2016\", \"Run2\")\n", "PUBLISHED_DATA = os.path.join(\"/nfs\", \"ruche-diffabs\", \"diffabs-users\", \"99160066\", \"published-data\")\n", "\n", "# temporary until the ruch is ON\n", "ROOT = os.path.join(\"/home\", \"experiences\", \"instrumentation\", \"picca\", \"data\", \"99160066\", \"2016\", \"Run2\")\n", "PUBLISHED_DATA = os.path.join(\"/home\", \"experiences\", \"instrumentation\", \"picca\", \"data\", \"99160066\", \"published-data\")\n" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# define a bunch of generic hdf5 structures.\n", "\n", "DatasetPathContains = NamedTuple(\"DatasetPathContains\", [(\"path\", Text)])\n", "\n", "DatasetPathWithAttribute = NamedTuple(\"DatasetPathWithAttribute\", [('attribute', Text),\n", " ('value', bytes)])\n", "\n", "DatasetPath = Union[DatasetPathContains,\n", " DatasetPathWithAttribute]\n", "\n", "def _v_attrs(attribute: Text, value: Text, _name: Text, obj) -> Dataset:\n", " \"\"\"extract all the images and accumulate them in the acc variable\"\"\"\n", " if isinstance(obj, Dataset):\n", " if attribute in obj.attrs and obj.attrs[attribute] == value:\n", " return obj\n", "\n", "\n", "def _v_item(key: Text, name: Text, obj: Dataset) -> Dataset:\n", " if key in name:\n", " return obj\n", "\n", "def get_dataset(h5file: File, path: DatasetPath) -> Dataset:\n", " res = None\n", " if isinstance(path, DatasetPathContains):\n", " res = h5file.visititems(partial(_v_item, path.path))\n", " elif isinstance(path, DatasetPathWithAttribute):\n", " res = h5file.visititems(partial(_v_attrs,\n", " path.attribute, path.value))\n", " return res" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# Compute the mask and the dark\n", "# 3.5 could be simplify with 3.6\n", "\n", "DarkMaskSources = NamedTuple('DarkMaskSources', [('filenames', List[Text]), # name of the files\n", " ('threshold', int), # mask all pixel above this treshold\n", " ('detector', Optional[Text]),\n", " ('images_path', DatasetPath)])\n", "\n", "\n", "def create_dark_and_mask(params: DarkMaskSources) -> Tuple[ndarray, ndarray]:\n", " \"\"\"\n", 
" génère un dark moyen ainsi qu'un mask avec les dark mesurés. Sur\n", " un XPAD, le dark doit valoir zero (si le détecteur est bien\n", " calibré). Nous utilisons tous les pixels dont la valeur est\n", " supérieur à 2 pour définir un masque (mask ;). Pour la prochaine\n", " expérience penser à faire non pas un seul dark mais des séries de\n", " dark.\n", " \"\"\"\n", " for i, filename in enumerate(params.filenames):\n", " with File(filename, mode='r') as f:\n", " img = get_dataset(f, params.images_path)[0] # read the first image\n", " _mask = numpy.where(img > params.threshold, True, False)\n", " if i == 0:\n", " dark = img.copy()\n", " mask = _mask.copy()\n", " else:\n", " dark += img\n", " mask = numpy.logical_or(mask, _mask)\n", "\n", " if params.detector is not None:\n", " det = detector_factory(params.detector)\n", " mask = numpy.logical_or(mask, det.calc_mask())\n", " # on a repere des discidents\n", " mask[480:482, 480:482] = True\n", "\n", " dark = dark.astype(float)\n", " dark /= len(params.filenames)\n", " dark = dark.astype('uint32')\n", "\n", " return dark, mask\n", "\n", "\n", "dark, mask = create_dark_and_mask(DarkMaskSources([os.path.join(ROOT, \"2016-03-26\", \"dark_%d.nxs\" % n) for n in range(7, 12)],\n", " 1, 'Xpad_flat',\n", " DatasetPathWithAttribute(\"interpretation\", b\"image\")))" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# compute the flat\n", "\n", "FlatParams = NamedTuple('FlatParams', [('filename', Text), # name of the file\n", " ('threshold', float), # mask all pixel above this treshold\n", " ('dark', Optional[ndarray]),\n", " ('images_path', DatasetPath)])\n", "\n", "def get_flat(params: FlatParams) -> ndarray:\n", " \"\"\"\n", " :param filename: name of the files\n", " :type filename: list(str)\n", "\n", " génère un flat corrigé du dark si dark is not None\n", " \"\"\"\n", " with File(params.filename, mode='r') as f:\n", " images = get_dataset(f, params.images_path)[:]\n", " flat = images.mean(axis=0)\n", " if dark is not None:\n", " flat -= dark\n", "\n", " flat = numpy.where(numpy.abs(flat) <= params.threshold, params.threshold, flat)\n", "\n", " return flat\n", "\n", "flat = get_flat(FlatParams(os.path.join(ROOT, \"2016-03-26\", \"flat_12.nxs\"),\n", " 1, dark,\n", " DatasetPathWithAttribute(\"interpretation\", b\"image\")))" ] }, { "cell_type": "code", "execution_count": 25, "metadata": { "collapsed": false }, "outputs": [ { "ename": "TypeError", "evalue": "require_dataset() missing 2 required positional arguments: 'shape' and 'dtype'", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 118\u001b[0m \u001b[0mparams\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mUnfold\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mTOMOSOURCES\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mPONI\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdark\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mflat\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m1000\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m360\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m save_unfolded(os.path.join(PUBLISHED_DATA, \"P14_13_57_unfold.h5\"),\n\u001b[0;32m--> 120\u001b[0;31m params)\n\u001b[0m\u001b[1;32m 121\u001b[0m 
\u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"unfold time: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtime\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m-\u001b[0m \u001b[0mt0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m\u001b[0m in \u001b[0;36msave_unfolded\u001b[0;34m(filename, params)\u001b[0m\n\u001b[1;32m 96\u001b[0m dtype='float32') \n\u001b[1;32m 97\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mdark\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 98\u001b[0;31m \u001b[0mgroup\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrequire_dataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"dark\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdark\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 99\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mflat\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 100\u001b[0m \u001b[0mgroup\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrequire_dataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"flat\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mflat\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mTypeError\u001b[0m: require_dataset() missing 2 required positional arguments: 'shape' and 'dtype'" ] } ], "source": [ "# Unfold the images\n", "\n", "# defines the namedtuple used for the tomogaphie.\n", "# 3.5 could be simplify with 3.6\n", "\n", "TomoSources = NamedTuple('TomoSources', [('filename', Text),\n", " ('images_path', DatasetPath),\n", " ('rotations_path', DatasetPath),\n", " ('translations_path', DatasetPath)])\n", "\n", "TomoFrame = NamedTuple('TomoFrame', [(\"image\", ndarray),\n", " (\"shape\", Tuple[int, int]),\n", " (\"index\", Tuple[int, int]),\n", " (\"rotation\", float),\n", " (\"translation\", float)]\n", " )\n", "\n", "UnfoldFrame = NamedTuple('UnfoldFrame', [(\"tomoframe\", TomoFrame),\n", " (\"unfolded\", ndarray)])\n", "\n", "Unfold = NamedTuple('Unfold', [('sources', TomoSources), # sources of the data's\n", " ('poni', Text), # the poni file use to do the unfold\n", " ('mask', Optional[ndarray]), # name of the poni file used for the integration.\n", " ('dark', Optional[ndarray]), # mask used for the computation.\n", " ('flat', Optional[ndarray]), # number of bins used for the powder spectrum.\n", " ('npt_rad', int), # he number of bins used for the powder spectrum.\n", " ('npt_azim', int), # he number of bins used for the powder spectrum.\n", " ]\n", " )\n", "\n", "TomoSave = NamedTuple('TomoSave', [('volume', ndarray),\n", " ('rotation', ndarray),\n", " ('translation', ndarray)])\n", "\n", "\n", "def read_multi(params: TomoSources) -> Iterator[TomoFrame]:\n", " \"\"\"\n", " :param sources: list des fichiers à traiter.\n", " :type sources: list(str)\n", "\n", " :return: yield frames of data contain in all NxEntries located at the data_path location.\n", " :rtype: Frame\n", " \"\"\"\n", " with File(params.filename, mode='r') as f:\n", " images = get_dataset(f, params.images_path)\n", " rotations = get_dataset(f, params.rotations_path)\n", " translations = get_dataset(f, params.translations_path)\n", " shape = 
images.shape[0], images.shape[1]\n", " for rotation_idx, rotation in enumerate(rotations):\n", " for translation_idx, translation in enumerate(translations):\n", " yield TomoFrame(images[rotation_idx, translation_idx,:],\n", " shape,\n", " (rotation_idx, translation_idx),\n", " rotation, translation)\n", "\n", "\n", "def unfold(params: Unfold) -> Iterator[UnfoldFrame]:\n", " \"\"\"\n", " :return: the tomography datas\n", " :rtype: numpy.ndarray\n", "\n", " the return data is a 5 dimensions numpy array (rotation,\n", " translation, 2, Nr, Na). the third dimension contain the x and y\n", " coordinates of the powder diffraction (0 for X and 1 for Y)\n", " \"\"\"\n", " # load the Azimuthal Integration parameters\n", " ai = pyFAI.load(params.poni)\n", "\n", " for tomoframe in read_multi(params.sources):\n", " data = tomoframe.image\n", "\n", " \"\"\" mask all the non counting pixels \"\"\"\n", " # TODO comment optimiser ce calcul du mask dynamic qui prend vraiment du temps\n", " # we will use dummy value (0) in order to define the dynamic mask\n", " data[data>500] = 0\n", "\n", " unfolded, r, a = ai.integrate2d(data, params.npt_rad, params.npt_azim,\n", " filename=None, dark=dark, flat=flat, dummy=0)\n", "\n", " yield UnfoldFrame(tomoframe, unfolded)\n", "\n", "\n", "def save_unfolded(filename: Text,\n", " params: Unfold) -> None:\n", " with File(filename, mode=\"r+\") as f:\n", " # now fill with the values.\n", " for i, frame in enumerate(unfold(params)):\n", " if i == 0:\n", " # first create output volums at the right place\n", " group = f.require_group(\"tomography_unfolded\")\n", " unfolded = group.require_dataset(\"unfolded\", shape=frame.tomoframe.shape + (params.npt_azim, params.npt_rad),\n", " dtype=frame.unfolded.dtype)\n", " rotation = group.require_dataset(\"rotation\", shape=(frame.tomoframe.shape[0],),\n", " dtype='float32')\n", " translation = group.require_dataset(\"translation\", shape=(frame.tomoframe.shape[1],),\n", " dtype='float32') \n", " if dark is not None:\n", " dark = group.require_dataset(\"dark\", data=params.dark, dtype='uint32')\n", " dark = params.dark\n", " if flat is not None:\n", " group.require_dataset(\"flat\", data=params.flat, dtype='uint32')\n", " if mask is not None:\n", " group.require_dataset(\"mask\", data=params.mask.astype('uint32'), dtype='uint32')\n", "\n", " unfolded[frame.tomoframe.index[0], frame.tomoframe.index[1], :] = frame.unfolded\n", " rotation[frame.tomoframe.index[0]] = frame.tomoframe.rotation\n", " translation[frame.tomoframe.index[1]] = frame.tomoframe.translation\n", "\n", "\n", "PONI = os.path.join(PUBLISHED_DATA, 'calibration', 'XRD18keV_27_0.poni')\n", "PONI = os.path.join(PUBLISHED_DATA, 'calibration', 'XRD18keV_26.nxs_03.poni')\n", "\n", "TOMOSOURCES = TomoSources(os.path.join(ROOT, \"2016-03-27\", 'P14_13_57.nxs'),\n", " DatasetPathWithAttribute(\"interpretation\", b\"image\"),\n", " DatasetPathContains(\"scan_data/trajectory_2_1\"),\n", " DatasetPathContains(\"scan_data/trajectory_1_1\"))\n", "\n", "t0 = time.time()\n", "params = Unfold(TOMOSOURCES, PONI, mask, dark, flat, 1000, 360)\n", "save_unfolded(os.path.join(PUBLISHED_DATA, \"P14_13_57_unfold.h5\"),\n", " params)\n", "print(\"unfold time: \", time.time() - t0)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# ici tout un tas de variables utilisées pour trouver les fichiers sur\n", "# le file system SOLEIL. 
Il y a de grand chance que cela ne\n", "# fonctionne pas pour vous :p donc veiller à adapter ces chemins.\n", "\n", "\n", "\n", "\n", "def convert(filename, numbers=None, data_path='scan_data/data_15'):\n", " \"\"\"\n", " :param filename: name of the nexus file\n", " :type filename: str\n", " :param numbers: index of the image to extract if None extract all images\n", " :type numbers: list(int)\n", "\n", " conversion d'image nxs -> edf. L'image edf est ensuite utilisée\n", " avec pyFAI-calib pour générer le fichier .poni\n", " \"\"\"\n", " imgs = read(filename, numbers, data_path)\n", " for index, img in enumerate(imgs):\n", " edf = fabio.edfimage.edfimage(img)\n", " name = os.path.splitext(os.path.basename(filename))[0]\n", " name += '_%d' % index + '.edf'\n", " saved_name = os.path.join(TREATMENT_PATH, name)\n", " edf.write(saved_name)\n", "\n", "\n", "\n", "\n", "def integrate2(sources, poni, mask=None, dark=None, flat=None, N=600,\n", " data_path=None,\n", " rotation_path=None,\n", " translation_path=None,\n", " monitor_path=None):\n", " \"\"\"\n", " :param sources: list of nexus files to treat\n", " :type sources: list(str)\n", " :param poni: the name of the poni file used for the integration.\n", " :type poni: str\n", " :param mask: the mask used for the computation.\n", " :type mask: None or numpy.ndarray\n", " :param N: the number of bins used for the powder spectrum.\n", " :type N: int\n", " :param data_path: location of the images in the nexus file\n", " :type data_path: str\n", " :param rotation_path: location of the rotation coordinated in the nexus file\n", " :type rotation_path: str\n", " :param translation_path: location of the translation coordinates in the nexus file\n", " :type translation_path: str\n", "\n", " :return: the tomography datas\n", " :rtype: numpy.ndarray\n", "\n", " the return data is a 4 dimensions numpy array (rotation,\n", " translation, 2, N). 
the third dimension contain the x and y\n", " coordinates of the powder diffraction (0 for X and 1 for Y)\n", " \"\"\"\n", " # load the Azimuthal Integration parameters\n", " ai = pyFAI.load(poni)\n", "\n", " volume = numpy.array([])\n", " rotation = numpy.array([])\n", " translation = numpy.array([])\n", " for i, frame in enumerate(read_multi(sources,\n", " data_path,\n", " rotation_path,\n", " translation_path,\n", " monitor_path)):\n", " if i == 0:\n", " volume = numpy.empty(frame.shape + (2, N))\n", " rotation = numpy.empty((frame.shape[0],))\n", " translation = numpy.empty((frame.shape[1],))\n", "\n", " data = frame.img\n", "\n", " \"\"\" mask all the non counting pixels \"\"\"\n", " # TODO comment optimiser ce calcul du mask dynamic qui prend vraiment du temps\n", "\n", " # we will use dummy value (0) in order to define the dynamic mask\n", " data[data>500] = 0\n", "\n", " #mask_data = numpy.where(data == 0, True, False)\n", " #mask_data = numpy.logical_or(mask_data, numpy.where(data > 500, True, False))\n", " # mask_data = numpy.logical_or(mask_data,\n", " # numpy.where(data > 500, True, False))\n", " #if mask is not None:\n", " # mask_data = numpy.logical_or(mask_data, mask)\n", "\n", " #filename = os.path.splitext(f)[0] + \".txt\"\n", " t0 = time.time()\n", " spectrum_x_y = ai.integrate1d(data, N, filename=None, dark=dark, flat=flat, dummy=0, method=\"csr_ocl_0,0\") #dark=dark, flat=flat) #, method=\"lut\") #, method=\"csr_ocl_0,1\")\n", " #spectrum_x_y = ai.integrate1d(data, N, mask=mask_data, method=\"csr_ocl_0,0\") #, method=\"csr_ocl_0,1\")\n", " print(i, frame.rotation, frame.translation, time.time() - t0)\n", " if frame.monitor:\n", " volume[frame.rotation, frame.translation] = spectrum_x_y[0], spectrum_x_y[1] / frame.monitor\n", " else:\n", " volume[frame.rotation, frame.translation] = spectrum_x_y\n", "\n", " rotation[frame.rotation] = frame.rotation_value\n", " translation[frame.translation] = frame.translation_value\n", "\n", " return volume, rotation, translation\n", "\n", "\n", "def from_hdf5(filename):\n", " \"\"\"\n", " :param filename: name of the file where the tomographie was stored\n", " :type filename: str\n", " :return: volume, rotation, translation\n", " :rtype: truple(numpy.ndarray, numpy.ndarray, numpy.ndarray)\n", "\n", " read the datas from the filename\n", " \"\"\"\n", " with tables.openFile(filename, mode=\"r\") as h5file:\n", " volume = h5file.getNode(\"/tomography/diffraction\")[:]\n", " rotation = h5file.getNode(\"/tomography/rotation\")[:]\n", " translation = h5file.getNode(\"/tomography/translation\")[:]\n", "\n", " return volume, rotation, translation\n", "\n", "\n", "def load_volume(filename, basedir):\n", " return from_hdf5(os.path.join(basedir, filename + \".h5\"))\n", "\n", "\n", "\n", "# ugly but for now it is ok\n", "SIZE = 3\n", "\n", "\n", "def reject_outliers(data, m=5.):\n", " \"\"\"\n", " :param data: the data to filter\n", " :type data: numpy.ndarray\n", " :param m: the filter rejection parameter\n", " :type m: float\n", " \"\"\"\n", " center = SIZE / 2\n", "\n", " median = numpy.median(data)\n", " d = numpy.abs(data - median)\n", " mdev = numpy.median(d)\n", " return data[center] if (d[center] / mdev) < m else median\n", "\n", "\n", "def filter_sinogramme(data):\n", " \"\"\"\n", " :param volume: the volume containing all the sinogram\n", " :param type: numpy.ndarray\n", " \"\"\"\n", " return generic_filter(data, reject_outliers, size=SIZE)\n", "\n", "\n", "def all_conversions():\n", " convert(SI_0_100, 
    "\n",
    "\n",
    "def all_conversions():\n",
    "    convert(SI_0_100, data_path='scan_data/data_03')\n",
    "    convert(SI_14, data_path='scan_data/data_15')\n",
    "\n",
    "\n",
    "def padd_sino(sino, offset=0):\n",
    "    # pad the sinogram to 256 rows, repeating the first and last rows\n",
    "    n = (256 - sino.shape[0]) // 2  # integer division, n is used as a tile count\n",
    "    n1 = n + offset\n",
    "    n2 = 256 - n1 - sino.shape[0]\n",
    "    padd1 = numpy.tile(sino[0], (n1, 1))\n",
    "    padd2 = numpy.tile(sino[-1], (n2, 1))\n",
    "    tmp = numpy.vstack([padd1, sino, padd2])\n",
    "    print(tmp.shape)\n",
    "    return tmp\n",
    "\n",
    "\n",
    "#############\n",
    "# P14_13_57 #\n",
    "#############\n",
    "\n",
    "def p14_13_57():\n",
    "\n",
    "    def create_unfold():\n",
    "        t0 = time.time()\n",
    "        params = Unfold(TOMOSOURCES, PONI, mask, dark, flat, 1000, 360)\n",
    "        save = unfold(params)\n",
    "        print(\"unfold time: \", time.time() - t0)\n",
    "        # TODO: save the transpose, so that the data is laid out correctly\n",
    "        # for the reconstructions and the filtering\n",
    "        to_hdf5(os.path.join(PUBLISHED_DATA, \"P14_13_57_unfold.h5\"),\n",
    "                save, params)\n",
    "\n",
    "    def create_volume():\n",
    "        t0 = time.time()\n",
    "        dark, mask = create_mask_and_dark(MASKS_15S, detector='Xpad_flat', data_path=DATAPATH)\n",
    "        #with h5py.File(P7_19_12, mode='r') as f:\n",
    "        #    dark += f[\"scan_13/scan_data/data_58\"][35, 65]\n",
    "        flat = get_flat(FLAT, dark=dark, data_path=DATAPATH, threshold=1)  # TODO: average the flats (12, 13)\n",
    "        volume, rotation, translation = \\\n",
    "            integrate2(P14_13_57, PONI, mask=mask,\n",
    "                       dark=dark, flat=flat,\n",
    "                       data_path=DATAPATH,\n",
    "                       rotation_path=\"scan_data/trajectory_2_1\",\n",
    "                       translation_path=\"scan_data/trajectory_1_1\")\n",
    "        # monitor_path=\"scan_data/data_01\")\n",
    "\n",
    "        print(\"integration time: \", time.time() - t0)\n",
    "        # TODO: save the transpose, so that the data is laid out correctly\n",
    "        # for the reconstructions and the filtering\n",
    "        save_volume(\"P14_13_57\", volume[:, :, 1, :], rotation, translation, dark, flat, mask, basedir=PUBLISHED_DATA)\n",
    "\n",
    "    def filter_volume():\n",
    "        volume, rotation, translation = load_volume(\"P14_13_57\", basedir=PUBLISHED_DATA)\n",
    "        new_volume = volume.T.copy()\n",
    "        for i, img in enumerate(new_volume):\n",
    "            print(\"treating sinogram %i\" % i)\n",
    "            new_volume[i] = filter_sinogramme(img)\n",
    "            # new_volume[i] = medfilt2d(img)\n",
    "\n",
    "        save_volume(\"P14_13_57_filtered\", new_volume, rotation, translation, basedir=PUBLISHED_DATA)\n",
    "\n",
    "    def _reconstruction(filename, transpose=False):\n",
    "        CHANNEL = 366\n",
    "\n",
    "        from skimage.transform import iradon\n",
    "        volume, rotation, translation = load_volume(filename, basedir=PUBLISHED_DATA)\n",
    "        if transpose:\n",
    "            volume = volume.T.copy()\n",
    "        print(rotation.shape)\n",
    "        # sino = volume.mean(axis=0)\n",
    "        # sino = volume[192:201].mean(axis=0)[:, :-1]  # quartz\n",
    "        sino = volume[CHANNEL][:, :-1]  # iron\n",
    "        # sino = volume[52:63].mean(axis=0)[:, :-1]\n",
    "        sino = padd_sino(sino, 3)\n",
    "        pyplot.subplot(121)\n",
    "        pyplot.title(filename + str(CHANNEL) + \" channel\")\n",
    "        pyplot.imshow(sino, interpolation='nearest')\n",
    "        pyplot.colorbar()\n",
    "        rec = iradon(sino, theta=rotation[:-1], filter='shepp-logan')\n",
    "        pyplot.subplot(122)\n",
    "        pyplot.title(filename + str(CHANNEL) + \" channel\")\n",
    "        pyplot.imshow(rec, interpolation='nearest')\n",
    "\n",
    "    def reconstruction():\n",
    "        _reconstruction(\"P14_13_57_filtered\")\n",
    "        pyplot.figure()\n",
    "        _reconstruction(\"P14_13_57\", transpose=True)\n",
    "        pyplot.show()\n",
    "\n",
    "    create_unfold()\n",
    "    #create_volume()\n",
    "    #filter_volume()\n",
    "    #reconstruction()\n",
    "\n",
    "#p14_13_57()"
   ]
  }
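,
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A short check of `padd_sino` on a synthetic sinogram (purely illustrative): a 100-row sinogram is padded to the 256 rows expected by the reconstruction, the extra rows being copies of the first and last rows."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# synthetic sinogram: 100 translation steps x 180 rotation angles\n",
    "sino = numpy.random.poisson(100, size=(100, 180)).astype(float)\n",
    "padded = padd_sino(sino, offset=3)  # prints (256, 180)\n",
    "assert padded.shape == (256, 180)\n",
    "assert numpy.all(padded[0] == sino[0]) and numpy.all(padded[-1] == sino[-1])"
   ]
  }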
"code", "execution_count": 2, "metadata": { "collapsed": false }, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.5.3" } }, "nbformat": 4, "nbformat_minor": 2 }