diff --git a/bin/Notebooks/PyRadiomics Example.ipynb b/bin/Notebooks/PyRadiomics Example.ipynb index 1b164bb1..46d1b66d 100644 --- a/bin/Notebooks/PyRadiomics Example.ipynb +++ b/bin/Notebooks/PyRadiomics Example.ipynb @@ -23,8 +23,8 @@ }, "outputs": [], "source": [ + "from __future__ import print_function, unicode_literals, division, absolute_import\n", "import os # needed navigate the system to get the input data\n", - "\n", "from radiomics import featureextractor # This module is used for interaction with pyradiomics" ] }, @@ -55,9 +55,9 @@ "name": "stdout", "output_type": "stream", "text": [ - "dataDir, relative path: R:\\GitRepos\\pyradiomics\\bin\\Notebooks\\..\\..\\data\n", - "dataDir, absolute path: R:\\GitRepos\\pyradiomics\\data\n", - "Parameter file, absolute path: R:\\GitRepos\\pyradiomics\\bin\\Params.yaml\n" + "dataDir, relative path: E:\\Git-Repos\\4Quant\\pyradiomics\\bin\\Notebooks\\..\\..\\data\n", + "dataDir, absolute path: E:\\Git-Repos\\4Quant\\pyradiomics\\data\n", + "Parameter file, absolute path: E:\\Git-Repos\\4Quant\\pyradiomics\\bin\\Params.yaml\n" ] } ], @@ -70,8 +70,8 @@ "# \"..\" points to the parent directory: \\pyradiomics\\bin\\Notebooks\\..\\ is equal to \\pyradiomics\\bin\\\n", "# Move up 2 directories (i.e. 
go to \\pyradiomics\\) and then move into \\pyradiomics\\data\n", "dataDir = os.path.join(os.getcwd(), \"..\", \"..\", \"data\")\n", - "print \"dataDir, relative path:\", dataDir\n", - "print \"dataDir, absolute path:\", os.path.abspath(dataDir)\n", + "print(\"dataDir, relative path:\", dataDir)\n", + "print(\"dataDir, absolute path:\", os.path.abspath(dataDir))\n", "\n", "# Store the file paths of our testing image and label map into two variables\n", "imagePath = os.path.join(dataDir, testCase + \"_image.nrrd\")\n", @@ -79,7 +79,7 @@ "\n", "# Additonally, store the location of the example parameter file, stored in \\pyradiomics\\bin\n", "paramPath = os.path.join(os.getcwd(), \"..\", \"Params.yaml\")\n", - "print \"Parameter file, absolute path:\", os.path.abspath(paramPath)" + "print(\"Parameter file, absolute path:\", os.path.abspath(paramPath))" ] }, { @@ -122,11 +122,11 @@ "output_type": "stream", "text": [ "Extraction parameters:\n", - "\t{'resampledPixelSpacing': None, 'interpolator': 3, 'verbose': False, 'padDistance': 5, 'label': 1}\n", + "\t {'label': 1, 'interpolator': 3, 'padDistance': 5, 'verbose': False, 'resampledPixelSpacing': None}\n", "Enabled filters:\n", - "\t{'Original': {}}\n", + "\t {'Original': {}}\n", "Enabled features:\n", - "\t{'firstorder': [], 'glcm': [], 'shape': [], 'glrlm': [], 'glszm': []}\n" + "\t {'shape': [], 'firstorder': [], 'glrlm': [], 'glszm': [], 'glcm': []}\n" ] } ], @@ -134,9 +134,9 @@ "# Instantiate the extractor\n", "extractor = featureextractor.RadiomicsFeaturesExtractor()\n", "\n", - "print \"Extraction parameters:\\n\\t\", extractor.kwargs\n", - "print \"Enabled filters:\\n\\t\", extractor.inputImages\n", - "print \"Enabled features:\\n\\t\", extractor.enabledFeatures" + "print(\"Extraction parameters:\\n\\t\", extractor.kwargs)\n", + "print(\"Enabled filters:\\n\\t\", extractor.inputImages)\n", + "print(\"Enabled features:\\n\\t\", extractor.enabledFeatures)" ] }, { @@ -158,11 +158,11 @@ "output_type": "stream", 
"text": [ "Extraction parameters:\n", - "\t{'verbose': True, 'binWidth': 20, 'label': 1, 'interpolator': 3, 'resampledPixelSpacing': None, 'sigma': [1, 2, 3], 'padDistance': 5}\n", + "\t {'binWidth': 20, 'verbose': True, 'sigma': [1, 2, 3], 'label': 1, 'interpolator': 3, 'padDistance': 5, 'resampledPixelSpacing': None}\n", "Enabled filters:\n", - "\t{'Original': {}}\n", + "\t {'Original': {}}\n", "Enabled features:\n", - "\t{'firstorder': [], 'glcm': [], 'shape': [], 'glrlm': [], 'glszm': []}\n" + "\t {'shape': [], 'firstorder': [], 'glrlm': [], 'glszm': [], 'glcm': []}\n" ] } ], @@ -176,9 +176,9 @@ "# Instantiate the extractor\n", "extractor = featureextractor.RadiomicsFeaturesExtractor(**params) # ** 'unpacks' the dictionary in the function call\n", "\n", - "print \"Extraction parameters:\\n\\t\", extractor.kwargs\n", - "print \"Enabled filters:\\n\\t\", extractor.inputImages # Still the default settings\n", - "print \"Enabled features:\\n\\t\", extractor.enabledFeatures # Still the default settings" + "print(\"Extraction parameters:\\n\\t\", extractor.kwargs)\n", + "print(\"Enabled filters:\\n\\t\", extractor.inputImages) # Still the default settings\n", + "print(\"Enabled features:\\n\\t\", extractor.enabledFeatures) # Still the default settings" ] }, { @@ -193,11 +193,11 @@ "output_type": "stream", "text": [ "Extraction parameters:\n", - "\t{'verbose': True, 'binWidth': 20, 'label': 1, 'interpolator': 3, 'resampledPixelSpacing': None, 'sigma': [1, 2, 3], 'padDistance': 5}\n", + "\t {'binWidth': 20, 'verbose': True, 'sigma': [1, 2, 3], 'label': 1, 'interpolator': 3, 'padDistance': 5, 'resampledPixelSpacing': None}\n", "Enabled filters:\n", - "\t{'Original': {}}\n", + "\t {'Original': {}}\n", "Enabled features:\n", - "\t{'firstorder': [], 'glcm': [], 'shape': [], 'glrlm': [], 'glszm': []}\n" + "\t {'shape': [], 'firstorder': [], 'glrlm': [], 'glszm': [], 'glcm': []}\n" ] } ], @@ -205,9 +205,9 @@ "# This cell is equivalent to the previous cell\n", "extractor = 
featureextractor.RadiomicsFeaturesExtractor(binWidth=20, sigma=[1, 2, 3], verbose=True) # Equivalent of code above\n", "\n", - "print \"Extraction parameters:\\n\\t\", extractor.kwargs\n", - "print \"Enabled filters:\\n\\t\", extractor.inputImages # Still the default settings\n", - "print \"Enabled features:\\n\\t\", extractor.enabledFeatures # Still the default settings" + "print(\"Extraction parameters:\\n\\t\", extractor.kwargs)\n", + "print(\"Enabled filters:\\n\\t\", extractor.inputImages) # Still the default settings\n", + "print(\"Enabled features:\\n\\t\", extractor.enabledFeatures) # Still the default settings" ] }, { @@ -223,32 +223,32 @@ "text": [ "\n", "Enabled filters:\n", - "\t{'Original': {}, 'LoG': {}}\n", + "\t {'LoG': {}, 'Original': {}}\n", "\n", "Enabled features:\n", - "\t{'firstorder': []}\n", + "\t {'firstorder': []}\n", "\n", "Enabled features:\n", - "\t{'firstorder': [], 'glcm': ['Autocorrelation', 'Homogeneity1', 'SumSquares']}\n" + "\t {'firstorder': [], 'glcm': ['Autocorrelation', 'Homogeneity1', 'SumSquares']}\n" ] } ], "source": [ "# Enable a filter (in addition to the 'Original' filter already enabled)\n", "extractor.enableInputImageByName('LoG')\n", - "print \"\"\n", - "print \"Enabled filters:\\n\\t\", extractor.inputImages\n", + "print(\"\")\n", + "print(\"Enabled filters:\\n\\t\", extractor.inputImages)\n", "\n", "# Disable all feature classes, save firstorder\n", "extractor.disableAllFeatures()\n", "extractor.enableFeatureClassByName('firstorder')\n", - "print \"\"\n", - "print \"Enabled features:\\n\\t\", extractor.enabledFeatures\n", + "print(\"\")\n", + "print(\"Enabled features:\\n\\t\", extractor.enabledFeatures)\n", "\n", "# Specify some additional features in the GLCM feature class\n", "extractor.enableFeaturesByName(glcm=['Autocorrelation', 'Homogeneity1', 'SumSquares'])\n", - "print \"\"\n", - "print \"Enabled features:\\n\\t\", extractor.enabledFeatures" + "print(\"\")\n", + "print(\"Enabled features:\\n\\t\", 
extractor.enabledFeatures)" ] }, { @@ -270,11 +270,11 @@ "output_type": "stream", "text": [ "Extraction parameters:\n", - "\t{'verbose': True, 'binWidth': 25, 'label': 1, 'interpolator': 'sitkBSpline', 'resampledPixelSpacing': None, 'weightingNorm': None, 'padDistance': 5}\n", + "\t {'binWidth': 25, 'verbose': True, 'weightingNorm': None, 'label': 1, 'interpolator': 'sitkBSpline', 'padDistance': 5, 'resampledPixelSpacing': None}\n", "Enabled filters:\n", - "\t{'Original': {}}\n", + "\t {'Original': {}}\n", "Enabled features:\n", - "\t{'firstorder': [], 'glcm': None, 'shape': None, 'glrlm': None, 'glszm': None}\n" + "\t {'shape': None, 'firstorder': [], 'glrlm': None, 'glszm': None, 'glcm': None}\n" ] } ], @@ -282,9 +282,9 @@ "# Instantiate the extractor\n", "extractor = featureextractor.RadiomicsFeaturesExtractor(paramPath)\n", "\n", - "print \"Extraction parameters:\\n\\t\", extractor.kwargs\n", - "print \"Enabled filters:\\n\\t\", extractor.inputImages\n", - "print \"Enabled features:\\n\\t\", extractor.enabledFeatures" + "print(\"Extraction parameters:\\n\\t\", extractor.kwargs)\n", + "print(\"Enabled filters:\\n\\t\", extractor.inputImages)\n", + "print(\"Enabled features:\\n\\t\", extractor.enabledFeatures)" ] }, { @@ -315,41 +315,41 @@ "output_type": "stream", "text": [ "\t\tComputing shape\n", - "\t\tComputing firstorder\n" + "\t\tComputing firstorder\n", + "\t\tComputing glrlm\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "calculate GLCM: 100%|██████████████████████████████████████████████████████████████████| 33/33 [00:00<00:00, 38.73it/s]\n" + "calculate GLSZM: 100%|█████████████████████████████████████████████████████████████████| 33/33 [00:00<00:00, 81.68it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "\t\tComputing glcm\n", - "\t\tComputing glrlm\n" + "\t\tComputing glszm\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "calculate GLSZM: 
100%|█████████████████████████████████████████████████████████████████| 33/33 [00:00<00:00, 56.90it/s]\n" + "calculate GLCM: 100%|██████████████████████████████████████████████████████████████████| 33/33 [00:00<00:00, 51.00it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "\t\tComputing glszm\n" + "\t\tComputing glcm\n" ] } ], "source": [ - "result = extractor.execute(imagePath, labelPath)" + "result = extractor.execute(os.path.abspath(imagePath), os.path.abspath(labelPath))" ] }, { @@ -367,139 +367,140 @@ "Result type: \n", "\n", "Calculated features\n", - "\tgeneral_info_BoundingBox : (162; 84; 11; 47; 70; 7)\n", - "\tgeneral_info_GeneralSettings : {'verbose': True; 'binWidth': 25; 'label': 1; 'interpolator': 'sitkBSpline'; 'resampledPixelSpacing': None; 'weightingNorm': None; 'padDistance': 5}\n", - "\tgeneral_info_ImageHash : 5c9ce3ca174f0f8324aa4d277e0fef82dc5ac566\n", - "\tgeneral_info_ImageSpacing : (0.7812499999999999; 0.7812499999999999; 6.499999999999998)\n", - "\tgeneral_info_InputImages : {'Original': {}}\n", - "\tgeneral_info_MaskHash : 9dc2c3137b31fd872997d92c9a92d5178126d9d3\n", - "\tgeneral_info_Version : v1.0.post25.dev0+g00e9e5d\n", - "\tgeneral_info_VolumeNum : 2\n", - "\tgeneral_info_VoxelNum : 4137\n", - "\toriginal_shape_Maximum3DDiameter : 65.5366145873\n", - "\toriginal_shape_Compactness2 : 0.114127701901\n", - "\toriginal_shape_Maximum2DDiameterSlice : 47.2187913633\n", - "\toriginal_shape_Sphericity : 0.485061744222\n", - "\toriginal_shape_Compactness1 : 26.7546787215\n", - "\toriginal_shape_Elongation : 1.7789885567\n", - "\toriginal_shape_SurfaceVolumeRatio : 0.392308261863\n", - "\toriginal_shape_Volume : 16412.6586914\n", - "\toriginal_shape_Flatness : 1.21918505897\n", - "\toriginal_shape_SphericalDisproportion : 2.06159321347\n", - "\toriginal_shape_Roundness : 0.61469066615\n", - "\toriginal_shape_SurfaceArea : 6438.82160378\n", - "\toriginal_shape_Maximum2DDiameterColumn : 44.5487904052\n", - 
"\toriginal_shape_Maximum2DDiameterRow : 61.5801767135\n", - "\toriginal_firstorder_InterquartileRange : 253.0\n", - "\toriginal_firstorder_Skewness : 0.275650859086\n", - "\toriginal_firstorder_Uniformity : 0.0451569635559\n", - "\toriginal_firstorder_MeanAbsoluteDeviation : 133.447261953\n", - "\toriginal_firstorder_Energy : 33122817481.0\n", - "\toriginal_firstorder_RobustMeanAbsoluteDeviation : 103.00138343\n", - "\toriginal_firstorder_Median : 812.0\n", - "\toriginal_firstorder_TotalEnergy : 131407662126.0\n", - "\toriginal_firstorder_Maximum : 1266.0\n", - "\toriginal_firstorder_RootMeanSquared : 2829.57282108\n", - "\toriginal_firstorder_90Percentile : 1044.4\n", - "\toriginal_firstorder_Minimum : 468.0\n", - "\toriginal_firstorder_Entropy : 4.6019355539\n", - "\toriginal_firstorder_StandardDeviation : 156.611235894\n", - "\toriginal_firstorder_Range : 798.0\n", - "\toriginal_firstorder_Variance : 24527.0792084\n", - "\toriginal_firstorder_10Percentile : 632.0\n", - "\toriginal_firstorder_Kurtosis : 2.18077293939\n", - "\toriginal_firstorder_Mean : 825.235436307\n", - "\toriginal_glcm_SumVariance : 895.891808819\n", - "\toriginal_glcm_Homogeneity1 : 0.276140402104\n", - "\toriginal_glcm_Homogeneity2 : 0.189156155892\n", - "\toriginal_glcm_ClusterShade : -52.9707943386\n", - "\toriginal_glcm_MaximumProbability : 0.00792784235012\n", - "\toriginal_glcm_Idmn : 0.957796447609\n", - "\toriginal_glcm_SumVariance2 : 103.142793792\n", - "\toriginal_glcm_Contrast : 52.2310659277\n", - "\toriginal_glcm_DifferenceEntropy : 3.79686113536\n", - "\toriginal_glcm_InverseVariance : 0.188666637795\n", - "\toriginal_glcm_Entropy : 8.79428086119\n", - "\toriginal_glcm_Dissimilarity : 5.58932678922\n", - "\toriginal_glcm_DifferenceVariance : 17.6107741076\n", - "\toriginal_glcm_Idn : 0.866370546902\n", - "\toriginal_glcm_Idm : 0.189156155892\n", - "\toriginal_glcm_Correlation : 0.335214788202\n", - "\toriginal_glcm_Autocorrelation : 292.684050471\n", - 
"\toriginal_glcm_SumEntropy : 5.31547876648\n", - "\toriginal_glcm_AverageIntensity : 17.1242601309\n", - "\toriginal_glcm_Energy : 0.00290880217681\n", - "\toriginal_glcm_SumSquares : 39.9781084143\n", - "\toriginal_glcm_ClusterProminence : 26251.1709801\n", - "\toriginal_glcm_SumAverage : 33.4497492152\n", - "\toriginal_glcm_Imc2 : 0.692033706271\n", - "\toriginal_glcm_Imc1 : -0.091940840043\n", - "\toriginal_glcm_DifferenceAverage : 5.58932678922\n", - "\toriginal_glcm_Id : 0.276140402104\n", - "\toriginal_glcm_ClusterTendency : 103.142793792\n", - "\toriginal_glrlm_ShortRunLowGrayLevelEmphasis : 0.00822976624416\n", - "\toriginal_glrlm_GrayLevelVariance : 39.118151022\n", - "\toriginal_glrlm_LowGrayLevelRunEmphasis : 0.00860039789166\n", - "\toriginal_glrlm_GrayLevelNonUniformityNormalized : 0.0451412381498\n", - "\toriginal_glrlm_RunVariance : 0.0847945778959\n", - "\toriginal_glrlm_GrayLevelNonUniformity : 175.635192315\n", - "\toriginal_glrlm_LongRunEmphasis : 1.22684403826\n", - "\toriginal_glrlm_ShortRunHighGrayLevelEmphasis : 268.974179841\n", - "\toriginal_glrlm_RunLengthNonUniformity : 3500.04323157\n", - "\toriginal_glrlm_ShortRunEmphasis : 0.955939173141\n", - "\toriginal_glrlm_LongRunHighGrayLevelEmphasis : 341.286579098\n", - "\toriginal_glrlm_RunPercentage : 0.940406463249\n", - "\toriginal_glrlm_LongRunLowGrayLevelEmphasis : 0.0106011704787\n", - "\toriginal_glrlm_RunEntropy : 4.91503800316\n", - "\toriginal_glrlm_HighGrayLevelRunEmphasis : 281.066493909\n", - "\toriginal_glrlm_RunLengthNonUniformityNormalized : 0.895049465948\n", - "\toriginal_glszm_GrayLevelVariance : 40.6031399239\n", - "\toriginal_glszm_LowIntensityLargeAreaEmphasis : 0.127238415533\n", - "\toriginal_glszm_HighIntensitySmallAreaEmphasis : 193.438051926\n", - "\toriginal_glszm_SmallAreaEmphasis : 0.656447899959\n", - "\toriginal_glszm_LargeAreaEmphasis : 13.6155080214\n", - "\toriginal_glszm_ZoneVariance : 8.72123909749\n", - "\toriginal_glszm_SizeZoneVariabilityNormalized : 
0.399784380451\n", - "\toriginal_glszm_LowIntensitySmallAreaEmphasis : 0.0064169820551\n", - "\toriginal_glszm_HighIntensityEmphasis : 288.623529412\n", - "\toriginal_glszm_IntensityVariabilityNormalized : 0.0440573079013\n", - "\toriginal_glszm_ZonePercentage : 0.4520183708\n", - "\toriginal_glszm_LowIntensityEmphasis : 0.00910094202771\n", - "\toriginal_glszm_SizeZoneVariability : 747.596791444\n", - "\toriginal_glszm_IntensityVariability : 82.3871657754\n", - "\toriginal_glszm_ZoneEntropy : 6.5082149862\n", - "\toriginal_glszm_HighIntensityLargeAreaEmphasis : 3514.76149733\n" + "\t general_info_BoundingBox : (162; 84; 11; 47; 70; 7)\n", + "\t general_info_GeneralSettings : {'binWidth': 25; 'verbose': True; 'weightingNorm': None; 'label': 1; 'interpolator': 'sitkBSpline'; 'padDistance': 5; 'resampledPixelSpacing': None}\n", + "\t general_info_ImageHash : 5c9ce3ca174f0f8324aa4d277e0fef82dc5ac566\n", + "\t general_info_ImageSpacing : (0.7812499999999999; 0.7812499999999999; 6.499999999999998)\n", + "\t general_info_InputImages : {'Original': {}}\n", + "\t general_info_MaskHash : 9dc2c3137b31fd872997d92c9a92d5178126d9d3\n", + "\t general_info_Version : v1.0.1.post6.dev0+g5b1e8bb\n", + "\t general_info_VolumeNum : 2\n", + "\t general_info_VoxelNum : 4137\n", + "\t original_shape_SurfaceArea : 6438.82160378\n", + "\t original_shape_Maximum2DDiameterSlice : 47.2187913633\n", + "\t original_shape_Maximum2DDiameterRow : 61.5801767135\n", + "\t original_shape_Volume : 16412.65869140624\n", + "\t original_shape_Maximum3DDiameter : 65.53661458728622\n", + "\t original_shape_Elongation : 1.7789885567018646\n", + "\t original_shape_Flatness : 1.2191850589688844\n", + "\t original_shape_Maximum2DDiameterColumn : 44.5487904052\n", + "\t original_shape_SphericalDisproportion : 2.06159321347\n", + "\t original_shape_Roundness : 0.6146906661500379\n", + "\t original_shape_Compactness1 : 26.7546787215\n", + "\t original_shape_Sphericity : 0.485061744222\n", + "\t 
original_shape_Compactness2 : 0.114127701901\n", + "\t original_shape_SurfaceVolumeRatio : 0.392308261863\n", + "\t original_firstorder_Minimum : 468.0\n", + "\t original_firstorder_Maximum : 1266.0\n", + "\t original_firstorder_Median : 812.0\n", + "\t original_firstorder_10Percentile : 632.0\n", + "\t original_firstorder_RobustMeanAbsoluteDeviation : 103.00138343\n", + "\t original_firstorder_StandardDeviation : 156.611235894\n", + "\t original_firstorder_InterquartileRange : 253.0\n", + "\t original_firstorder_Range : 798.0\n", + "\t original_firstorder_MeanAbsoluteDeviation : 133.447261953\n", + "\t original_firstorder_90Percentile : 1044.4\n", + "\t original_firstorder_Uniformity : 0.0451569635559\n", + "\t original_firstorder_Kurtosis : 2.18077293939\n", + "\t original_firstorder_Entropy : 4.6019355539\n", + "\t original_firstorder_Skewness : 0.275650859086\n", + "\t original_firstorder_Variance : 24527.0792084\n", + "\t original_firstorder_RootMeanSquared : 2829.57282108\n", + "\t original_firstorder_TotalEnergy : 131407662126.0\n", + "\t original_firstorder_Mean : 825.235436307\n", + "\t original_firstorder_Energy : 33122817481.0\n", + "\t original_glrlm_LongRunLowGrayLevelEmphasis : 0.0104694333711\n", + "\t original_glrlm_ShortRunLowGrayLevelEmphasis : 0.0080944625119\n", + "\t original_glrlm_ShortRunHighGrayLevelEmphasis : 269.366654415\n", + "\t original_glrlm_LongRunEmphasis : 1.22741432468\n", + "\t original_glrlm_LongRunHighGrayLevelEmphasis : 341.908225705\n", + "\t original_glrlm_GrayLevelNonUniformityNormalized : 0.0451916226163\n", + "\t original_glrlm_RunLengthNonUniformity : 3473.88954354\n", + "\t original_glrlm_RunLengthNonUniformityNormalized : 0.894736783551\n", + "\t original_glrlm_RunEntropy : 4.91343459806\n", + "\t original_glrlm_RunPercentage : 0.934010152284\n", + "\t original_glrlm_GrayLevelNonUniformity : 174.640108304\n", + "\t original_glrlm_ShortRunEmphasis : 0.955813827306\n", + "\t original_glrlm_GrayLevelVariance : 
39.074009627\n", + "\t original_glrlm_RunVariance : 0.0849939088453\n", + "\t original_glrlm_HighGrayLevelRunEmphasis : 281.50156957\n", + "\t original_glrlm_LowGrayLevelRunEmphasis : 0.00846567390082\n", + "\t original_glszm_HighIntensityLargeAreaEmphasis : 3514.76149733\n", + "\t original_glszm_LowIntensityLargeAreaEmphasis : 0.127238415533\n", + "\t original_glszm_LargeAreaEmphasis : 13.6155080214\n", + "\t original_glszm_SizeZoneVariabilityNormalized : 0.399784380451\n", + "\t original_glszm_LowIntensitySmallAreaEmphasis : 0.0064169820551\n", + "\t original_glszm_IntensityVariability : 82.3871657754\n", + "\t original_glszm_ZoneEntropy : 6.5082149862\n", + "\t original_glszm_HighIntensityEmphasis : 288.623529412\n", + "\t original_glszm_GrayLevelVariance : 40.6031399239\n", + "\t original_glszm_IntensityVariabilityNormalized : 0.0440573079013\n", + "\t original_glszm_SizeZoneVariability : 747.596791444\n", + "\t original_glszm_ZonePercentage : 0.4520183708\n", + "\t original_glszm_ZoneVariance : 8.72123909749\n", + "\t original_glszm_LowIntensityEmphasis : 0.00910094202771\n", + "\t original_glszm_HighIntensitySmallAreaEmphasis : 193.438051926\n", + "\t original_glszm_SmallAreaEmphasis : 0.656447899959\n", + "\t original_glcm_Imc2 : 0.692033706271\n", + "\t original_glcm_MaximumProbability : 0.00792784235012\n", + "\t original_glcm_Autocorrelation : 292.684050471\n", + "\t original_glcm_ClusterTendency : 103.142793792\n", + "\t original_glcm_Idn : 0.866370546902\n", + "\t original_glcm_DifferenceEntropy : 3.79686113536\n", + "\t original_glcm_SumSquares : 39.9781084143\n", + "\t original_glcm_SumEntropy : 5.31547876648\n", + "\t original_glcm_DifferenceAverage : 5.58932678922\n", + "\t original_glcm_SumAverage : 33.4497492152\n", + "\t original_glcm_Energy : 0.00290880217681\n", + "\t original_glcm_ClusterShade : -52.9707943386\n", + "\t original_glcm_Entropy : 8.79428086119\n", + "\t original_glcm_Homogeneity1 : 0.276140402104\n", + "\t 
original_glcm_SumVariance2 : 103.142793792\n", + "\t original_glcm_Contrast : 52.2310659277\n", + "\t original_glcm_Dissimilarity : 5.58932678922\n", + "\t original_glcm_InverseVariance : 0.188666637795\n", + "\t original_glcm_DifferenceVariance : 17.6107741076\n", + "\t original_glcm_Idmn : 0.957796447609\n", + "\t original_glcm_Id : 0.276140402104\n", + "\t original_glcm_Imc1 : -0.091940840043\n", + "\t original_glcm_Idm : 0.189156155892\n", + "\t original_glcm_ClusterProminence : 26251.1709801\n", + "\t original_glcm_Homogeneity2 : 0.189156155892\n", + "\t original_glcm_AverageIntensity : 17.1242601309\n", + "\t original_glcm_Correlation : 0.335214788202\n", + "\t original_glcm_SumVariance : 895.891808819\n" ] } ], "source": [ - "print \"Result type:\", type(result) # result is returned in a Python ordered dictionary\n", - "print \"\"\n", - "print \"Calculated features\"\n", - "for key, value in result.iteritems():\n", - " print \"\\t\", key, \":\", value" + "print(\"Result type:\", type(result)) # result is returned in a Python ordered dictionary\n", + "print(\"\")\n", + "print(\"Calculated features\")\n", + "for key, value in result.items():\n", + " print(\"\\t\", key, \":\", value)" ] } ], "metadata": { + "anaconda-cloud": {}, "kernelspec": { - "display_name": "Python 2", + "display_name": "Python 3", "language": "python", - "name": "python2" + "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", - "version": 2 + "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.11" + "pygments_lexer": "ipython3", + "version": "3.5.2" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 0 } diff --git a/bin/Notebooks/Python3Notebook.ipynb b/bin/Notebooks/Python3Notebook.ipynb new file mode 100644 index 00000000..c7e3823f --- /dev/null +++ b/bin/Notebooks/Python3Notebook.ipynb @@ -0,0 +1,440 @@ +{ + "cells": [ + { + 
"cell_type": "code", + "execution_count": 1, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "from __future__ import print_function, unicode_literals, division, absolute_import\n", + "import matplotlib.pyplot as plt\n", + "import SimpleITK as sitk\n", + "import numpy as np\n", + "import pandas as pd" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import radiomics as pyrad\n", + "from radiomics.featureextractor import RadiomicsFeaturesExtractor" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Make Test Image and Mask\n", + "Here we make test images (a simple trianglular mask and a uniform progression)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "test_mask_arr = np.stack(32*[np.eye(32)],0).astype(int)\n", + "test_img_arr = np.linspace(0,100, num = np.prod(test_mask_arr.shape)).reshape(test_mask_arr.shape)\n", + "test_mask = sitk.GetImageFromArray(test_mask_arr)\n", + "test_img = sitk.GetImageFromArray(test_img_arr)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAeQAAADtCAYAAABu1gaFAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAEyFJREFUeJzt3V+s5GV5wPHvc85ZXOSPgLtsN0C7VkksMXVNNgSjFxRj\nQ40JmBoiTQ0XpOuFJpp4Q7jBmppoUqVeGJO1bNgmViT+KaQhbQkhQW+oZ5X6h62pEoiQ/YfsCiiy\nu2eeXswPe9zumTMz5/d75z2/+X6SzZn5nZnzPu+cec6zv5nnnTcyE0mSNFsLsw5AkiRZkCVJqoIF\nWZKkCliQJUmqgAVZkqQKWJAlSaqABVmSpApYkCVJqoAFWZKkCixt5M4RcSPwRWAR+MfM/Oyo22/b\nti137dr1/44fPHhwI2FIffR8Zm4vOeAk+bw1FvOijf35WD+eTn96WSXm0qvHq0eTOZKnxs7lqTMq\nIhaBLwHvBZ4FvhcRD2bmk2vdZ9euXSwvL5/rZ00bhtRXz5QcbNJ8vogl/pKdncZU6uW7xQJ/fhYL\n/I0rMY/hOM5lEp859dTYubyR5/y1wM8y86nMPAXcB9y0gZ8naXbMZ2nGNlKQrwB+ser6s82x3xMR\neyNiOSKWjx8/voHhJHVo3Xxencu/ZaVocNI86PxVoczcl5l7MnPP9u1F3xKT1KLVubyVxVmHI/XO\nRgryc8BVq65f2RyTtPmYz9KMbaRN8nvA1RHxJoaJ+yHgr0bd4eDBg+ds4Bq1J7MNX1IRE+XzloAd\n53V7llyi4WY4TokxbISabIzOh2jGKTDQqfFvOnVBzswzEfEx4N8ZLpPYn5k/mfbnSZod81mavQ0t\nJMzMh4CHWopF0gyZz9Js+UldkiRVwIIsSVIFLMiSJFXAgixJUgW6/XT4MY1a2rTWkiiXQ0mzs2Uh\n+IOt3f75KLXsqcRZSV+WVg3HKTFGf+YyCc+QJUmqgAVZkqQKWJAlSaqABVmSpApYkCVJqkAVXdaj\nrNVNbfe1NDtbItjxuq67rDv98b+zsND9QAuL3Z/7RKEHLHryeEGhx+z4+Df1DFmSpApYkCVJqoAF\nWZKkCliQJUmqgAVZkqQKVN9lvZZJu69H3UfSZBaWFrh42/mdjlGqa7hIB3SBzuRePV7F5lJgnJ+O\nf1PPkCVJqoAFWZKkCliQJUmqgAVZkqQKWJAlSaqABVmSpApsaNlTRDwNvASsAGcyc08bQW3EqKVN\nbkghrW2SfF5cWuCCyy/oNh6X8UykyBIeyswlFsqcK5Z6zMbVxjrkP8vM51v4OZJmz3yWZsSXrCVJ\nqsBGC3IC/xERByNi77luEBF7I2I5IpY3OJakbo3M59W5fOL06RmEJ/XbRl+yfndmPhcRlwMPR8R/\nZ+Zjq2+QmfuAfQARsfbnWkqatZH5vDqX/+Sii8xlqWUbOkPOzOear8eAbwPXthGUpPLMZ2m2pj5D\njogLgIXMfKm5/OfAp1uLrAOTbkhh97XmxaT5vLBlgQt2dNtl3aeu4TIbWJRpCYoiXemF5lLoMRvX\nRl6y3gF8uylaS8A/Z+a/tRKVpNLMZ2nGpi7ImfkU8PYWY5E0I+azNHt1na9LkjSnLMiSJFXAgixJ\nUgXa+OjMTW/S7utR95HmwcKWRV5/+cXdjtGjTttedSb3qGO81HNsXHVFI0nSnLIgS5JUAQuyJEkV\nsCBLklQBC7IkSRWwIEuSVAGXPY0wammTG1Joni0sLfH6yy/tdAw3S5jMgo9XteOMq65oJEmaUxZk\nSZIqYEGWJKkCFmRJkipgQZYkqQJ2WU9p0g0p7L5WnywsLXL+9ks6HaNUB2yJDQb6soHFcJzF7gdZ\nKDAGQKHO9HHVFY0kSXPKgixJUgUsyJIkVcCCLElSBSzIkiRVYN0u64jYD7wfOJaZb2uOXQZ8HdgF\nPA3ckpknugtz85i0+3rUfaS2tZXPsbTE67a9sdtge9RpGyXmU
ujxKvIZ4yU6uaHcc2xM4zyy9wI3\nnnXsDuCRzLwaeKS5Lql+92I+S1VatyBn5mPAC2cdvgk40Fw+ANzcclySOmA+S/Wa9rWHHZl5uLl8\nBNix1g0jYm9ELEfE8pRjSerWWPm8Opeff/HlctFJc2LDbwbk8M3RNd8gzcx9mbknM/dsdCxJ3RqV\nz6tzedvFFxaOTOq/aQvy0YjYCdB8PdZeSJIKM5+lCkxbkB8Ebmsu3wY80E44kmbAfJYqMM6yp68B\n1wPbIuJZ4C7gs8D9EXE78AxwS5dB9sGopU1uSKFS2srnWFpi8dLLuwy13Af/F1j6UmbZU48erzld\n9rRuQc7MW9f41ntajkVSx8xnqV5+UpckSRWwIEuSVAELsiRJFbAgS5JUgXWbutS9STeksPtaM7e4\nxMKl2zsdokhnMpTptO3LBhbQqy7rrOxvqWfIkiRVwIIsSVIFLMiSJFXAgixJUgUsyJIkVcAu64pN\n2n096j5Sm2JxiYU3bOt4kELnCwXGyZ6MAfTm8RqOU9ffS8+QJUmqgAVZkqQKWJAlSaqABVmSpApY\nkCVJqoAFWZKkCrjsaRMatbTJDSlUQi4sMtj6hm4HKbAhA9CfZTw9WvbUq7lMoK5oJEmaUxZkSZIq\nYEGWJKkCFmRJkipgQZYkqQLrdllHxH7g/cCxzHxbc+xTwN8Ax5ub3ZmZD3UVpMY36YYUdl/Pl9by\neWGRwfkdd1n3qdO2QMd4nzaX6NXvfgLjRHMvcOM5jt+dmbubfxZjaXO4F/NZqtK6BTkzHwNeKBCL\npI6Zz1K9NnK+/rGI+GFE7I+IS9e6UUTsjYjliFjewFiSurVuPq/O5ePPW9Oltk1bkL8MvBnYDRwG\nPr/WDTNzX2buycw9U44lqVtj5fPqXN6+7bKS8UlzYaqCnJlHM3MlMwfAV4Br2w1LUinms1SHqT7L\nOiJ2Zubh5uoHgB+3F5K6MGn39aj7qF+myeeMRQZbL+40rsHaT81WlRimxFxG5XKbisxl0P0YAINS\nA41pnGVPXwOuB7ZFxLPAXcD1EbGb4XP5aeAjHcYoqSXms1SvdQtyZt56jsP3dBCLpI6Zz1K96loV\nLUnSnLIgS5JUAQuyJEkVsCBLklSBqZY9qT9GLW1yQwqtZZDw8ulul4wUW/ZUYJxBgUEKPVyFlnB1\nPwbAoNijNh7PkCVJqoAFWZKkCliQJUmqgAVZkqQKWJAlSaqAXdZakxtSaC0rCS+f6kuXdfcDldjC\noFhncoku60Ldz6WeY+PyDFmSpApYkCVJqoAFWZKkCliQJUmqgAVZkqQKWJAlSaqAy540MTek0CCz\n82VPpVakrBRY+1Jic4mVQuueBgXWcBWbS6m1YmPyDFmSpApYkCVJqoAFWZKkCliQJUmqgAVZkqQK\nrNtlHRFXAf8E7GDY+LgvM78YEZcBXwd2AU8Dt2Tmie5C1WYw6YYUdl+X1VY+nxkkv3zldKexDgp9\n8n+Jjt4yXdadDwGU+b2U67IuMszYxjlDPgN8MjOvAa4DPhoR1wB3AI9k5tXAI811SXUzn6VKrVuQ\nM/NwZn6/ufwScAi4ArgJONDc7ABwc1dBSmqH+SzVa6L3kCNiF/AO4HFgR2Yebr51hOFLYOe6z96I\nWI6I5Q3EKallk+bz6lz+1Qu/LBanNC/GLsgRcSHwTeATmfni6u/l8A3Cc74an5n7MnNPZu7ZUKSS\nWjNNPq/O5Tdc9sZCkUrzY6yCHBFbGCbvVzPzW83hoxGxs/n+TuBYNyFKapP5LNVpnC7rAO4BDmXm\nF1Z960HgNuCzzdcHOolQvTBp9/Wo+2h6beXzmUHy/G9OdRYnlOuyLjFMXzq5odRnf3c+BFCum3tc\n42wu8S7gw8CPIuKJ5tidDBP3/oi4HXgGuKWbECW1yHyWKrVuQc7M7wJrnaq8p91wJHXJfJbq5Sd1\nSZJUAQuyJEkVsCBLklQBC
7IkSRUYp8ta6syopU1uSFGvM4Pklx0ve1oZdPrjf6fMxg8Fxii1GUeJ\nzSV6NJdJeIYsSVIFLMiSJFXAgixJUgUsyJIkVcCCLElSBeyyVrUm3ZDC7utyTq8MOPLiq52O0adO\n2zJjlGlL78vjVXKccXmGLElSBSzIkiRVwIIsSVIFLMiSJFXAgixJUgXsstamM2n39aj7aDpnVgYc\nOflKp2OU6oA905Ou4T51JvdpLpPwDFmSpApYkCVJqoAFWZKkCliQJUmqgAVZkqQKrFuQI+KqiHg0\nIp6MiJ9ExMeb45+KiOci4onm3/u6D1fStMxlqW7jLHs6A3wyM78fERcBByPi4eZ7d2fm33cXnjS+\nUUub3JACaDGXT60kh0/+tpMgX9OnpS9Fxhix7K9NgwJzyUK/+1LjjGvdgpyZh4HDzeWXIuIQcEXX\ngUlql7ks1W2i95AjYhfwDuDx5tDHIuKHEbE/Ii5tOTZJHTGXpfqMXZAj4kLgm8AnMvNF4MvAm4Hd\nDP/X/fk17rc3IpYjYrmFeCVtUBu5fOqlk8XilebFWAU5IrYwTOCvZua3ADLzaGauZOYA+Apw7bnu\nm5n7MnNPZu5pK2hJ02krl8+76JJyQUtzYpwu6wDuAQ5l5hdWHd+56mYfAH7cfniS2mIuS3Ubp8v6\nXcCHgR9FxBPNsTuBWyNiN5DA08BHOolQasGkG1L0tPu6tVxeWRnwwq+67bIetVlIq+MU6LTtVWdy\ngd9LiccLIAdFhhnbOF3W3wXO9dfpofbDkdQVc1mqm5/UJUlSBSzIkiRVwIIsSVIFLMiSJFVgnC5r\nqbcm7b4edZ95MlhJfvPiq92O0aMu6750ckOZLutSHeOlHrNxeYYsSVIFLMiSJFXAgixJUgUsyJIk\nVcCCLElSBSzIkiRVwGVP0jmMWto0ZxtSnNNgZcCvX+x4c4lCH/xfYnlVX5ZWQamNMsr88nOwUmSc\ncXmGLElSBSzIkiRVwIIsSVIFLMiSJFXAgixJUgXsspYmNOmGFH3svh6sDPjNr17udIxSHbAlxhkU\nGKNPj1exLusVu6wlSdJZLMiSJFXAgixJUgUsyJIkVcCCLElSBdbtso6IrcBjwOua238jM++KiDcB\n9wFvBA4CH87MU10GK9Vs0u7rUffpSlv5PDhzmldOHO001n51DfdjDOhZx/gm7LJ+FbghM98O7AZu\njIjrgM8Bd2fmW4ATwO3dhSmpJeazVKl1C3IOvbbgcEvzL4EbgG80xw8AN3cSoaTWmM9SvcZ6Dzki\nFiPiCeAY8DDwc+BkZp5pbvIscMUa990bEcsRsdxGwJI2Ztp8Xp3Lg1O/LhewNCfGKsiZuZKZu4Er\ngWuBt447QGbuy8w9mblnyhgltWjafF6dywvnXdBpjNI8mqjLOjNPAo8C7wQuiYjXmsKuBJ5rOTZJ\nHTKfpbqsW5AjYntEXNJcPh94L3CIYSJ/sLnZbcADXQUpqR3ms1SvcTaX2AkciIhFhgX8/sz814h4\nErgvIv4O+AFwT4dxSpvWqKVNM9iQopV8Hqyc5pUTR7qKEXDZU41jlBqn3FzKbGIxrnULcmb+EHjH\nOY4/xfD9J0mbhPks1ctP6pIkqQIWZEmSKmBBliSpAhZkSZIqEKM++L71wSKOA880V7cBzxcbvD7z\nPH/nvr4/ysztXQczrbNyGfydOvf5NM78x87logX59waOWJ7nT++a5/k79/7Nva/zGodzn8+5Q/vz\n9yVrSZIqYEGWJKkCsyzI+2Y4dg3mef7OvX/6Oq9xOPf51er8Z/YesiRJ+j++ZC1JUgUsyJIkVWAm\nBTkiboyIn0bEzyLijlnEUEpE7I+IYxHx41XHLouIhyPif5qvl84yxq5ExFUR8WhEPBkRP4mIjzfH\n52X+WyPiPyPiv5r5/21z/E0R8Xjz/P96RJw361inNU+5DObzvOZzqVwuXpCbbd++BPwFcA1
wa0Rc\nUzqOgu4Fbjzr2B3AI5l5NfBIc72PzgCfzMxrgOuAjza/63mZ/6vADZn5dmA3cGNEXAd8Drg7M98C\nnABun2GMU5vDXAbzeV7zuUguz+IM+VrgZ5n5VGaeAu4DbppBHEVk5mPAC2cdvgk40Fw+ANxcNKhC\nMvNwZn6/ufwScAi4gvmZf2bmy83VLc2/BG4AvtEc38zzn6tcBvN5XvO5VC7PoiBfAfxi1fVnm2Pz\nZEdmHm4uHwF2zDKYEiJiF8N9eB9njuYfEYsR8QRwDHgY+DlwMjPPNDfZzM9/c3lobp7Pr5nHfC6R\nyzZ1zVgO1531eu1ZRFwIfBP4RGa+uPp7fZ9/Zq5k5m7gSoZnlG+dcUjqUN+fzzC/+Vwil2dRkJ8D\nrlp1/crm2Dw5GhE7AZqvx2YcT2ciYgvD5P1qZn6rOTw3839NZp4EHgXeCVwSEUvNtzbz899cHpqb\n57P53G0uz6Igfw+4uulOOw/4EPDgDOKYpQeB25rLtwEPzDCWzkREAPcAhzLzC6u+NS/z3x4RlzSX\nzwfey/B9t0eBDzY328zzN5eH5uX5PLf5XCqXZ/JJXRHxPuAfgEVgf2Z+pngQhUTE14DrGW7TdRS4\nC/gX4H7gDxluYXdLZp7dKLLpRcS7ge8APwIGzeE7Gb7vNA/z/1OGjR6LDP/ze39mfjoi/phhA9Rl\nwA+Av87MV2cX6fTmKZfBfGZO87lULvvRmZIkVcCmLkmSKmBBliSpAhZkSZIqYEGWJKkCFmRJkipg\nQZYkqQIWZEmSKvC/TUjq9LvqvxIAAAAASUVORK5CYII=\n", + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "%matplotlib inline\n", + "fig, (ax1, ax2) = plt.subplots(1,2, figsize = (8, 4))\n", + "ax1.imshow(test_mask_arr[0], cmap = 'bone')\n", + "ax2.imshow(test_img_arr[0], cmap = 'RdBu')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Extract simple stats\n", + "Here is just the simple statistics for testing" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\t\tComputing firstorder\n" + ] + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
general_info_BoundingBoxgeneral_info_GeneralSettingsgeneral_info_ImageHashgeneral_info_ImageSpacinggeneral_info_InputImagesgeneral_info_MaskHashgeneral_info_Versiongeneral_info_VolumeNumgeneral_info_VoxelNumoriginal_firstorder_Uniformity...original_firstorder_Varianceoriginal_firstorder_RootMeanSquaredoriginal_firstorder_InterquartileRangeoriginal_firstorder_MeanAbsoluteDeviationoriginal_firstorder_RobustMeanAbsoluteDeviationoriginal_firstorder_Energyoriginal_firstorder_Skewnessoriginal_firstorder_Maximumoriginal_firstorder_StandardDeviationoriginal_firstorder_TotalEnergy
0(0; 0; 0; 32; 32; 32){'padDistance': 5; 'verbose': True; 'interpola...dd1063b4904affacbcfa769510e55d35690f6961(1.0; 1.0; 1.0){'Original': {}}1daf886d07071ceba6a23f52cdcb6460dc91c19av1.0.1.post6.dev0+g5b1e8bb110240.25...833.4350142050.20326750.025.00076319.970664.304213e+090.0100.028.8692754.304213e+09
\n", + "

1 rows × 28 columns

\n", + "
" + ], + "text/plain": [ + " general_info_BoundingBox general_info_GeneralSettings \\\n", + "0 (0; 0; 0; 32; 32; 32) {'padDistance': 5; 'verbose': True; 'interpola... \n", + "\n", + " general_info_ImageHash general_info_ImageSpacing \\\n", + "0 dd1063b4904affacbcfa769510e55d35690f6961 (1.0; 1.0; 1.0) \n", + "\n", + " general_info_InputImages general_info_MaskHash \\\n", + "0 {'Original': {}} 1daf886d07071ceba6a23f52cdcb6460dc91c19a \n", + "\n", + " general_info_Version general_info_VolumeNum general_info_VoxelNum \\\n", + "0 v1.0.1.post6.dev0+g5b1e8bb 1 1024 \n", + "\n", + " original_firstorder_Uniformity ... \\\n", + "0 0.25 ... \n", + "\n", + " original_firstorder_Variance original_firstorder_RootMeanSquared \\\n", + "0 833.435014 2050.203267 \n", + "\n", + " original_firstorder_InterquartileRange \\\n", + "0 50.0 \n", + "\n", + " original_firstorder_MeanAbsoluteDeviation \\\n", + "0 25.000763 \n", + "\n", + " original_firstorder_RobustMeanAbsoluteDeviation \\\n", + "0 19.97066 \n", + "\n", + " original_firstorder_Energy original_firstorder_Skewness \\\n", + "0 4.304213e+09 0.0 \n", + "\n", + " original_firstorder_Maximum original_firstorder_StandardDeviation \\\n", + "0 100.0 28.869275 \n", + "\n", + " original_firstorder_TotalEnergy \n", + "0 4.304213e+09 \n", + "\n", + "[1 rows x 28 columns]" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "extractor = RadiomicsFeaturesExtractor( verbose = True)\n", + "extractor.disableAllFeatures()\n", + "extractor.enableFeaturesByName(firstorder = [])\n", + "out_dict = extractor.execute(test_img, test_mask)\n", + "pd.DataFrame([out_dict])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Run using everything\n", + "Here we run the extractor with everything, which causes a few issues but returns over 100 columns worth of radiomics features!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\t\tComputing shape\n", + "\t\tComputing firstorder\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "calculate GLCM: 100%|███████████████████████████████████████████████████████████████████| 4/4 [00:00<00:00, 222.22it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\t\tComputing glcm\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "calculate GLSZM: 100%|███████████████████████████████████████████████████████████████████| 4/4 [00:00<00:00, 48.78it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\t\tComputing glszm\n", + "\t\tComputing glrlm\n" + ] + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
general_info_BoundingBoxgeneral_info_GeneralSettingsgeneral_info_ImageHashgeneral_info_ImageSpacinggeneral_info_InputImagesgeneral_info_MaskHashgeneral_info_Versiongeneral_info_VolumeNumgeneral_info_VoxelNumoriginal_shape_Maximum2DDiameterColumn...original_glrlm_HighGrayLevelRunEmphasisoriginal_glrlm_GrayLevelNonUniformityoriginal_glrlm_LongRunHighGrayLevelEmphasisoriginal_glrlm_LongRunEmphasisoriginal_glrlm_GrayLevelNonUniformityNormalizedoriginal_glrlm_ShortRunLowGrayLevelEmphasisoriginal_glrlm_RunVarianceoriginal_glrlm_RunPercentageoriginal_glrlm_ShortRunHighGrayLevelEmphasisoriginal_glrlm_ShortRunEmphasis
0(0; 0; 0; 32; 32; 32){'padDistance': 5; 'verbose': True; 'interpola...dd1063b4904affacbcfa769510e55d35690f6961(1.0; 1.0; 1.0){'Original': {}}1daf886d07071ceba6a23f52cdcb6460dc91c19av1.0.1.post6.dev0+g5b1e8bb1102431.0...7.49722528.9532892276.534562296.3993510.2502580.0169992.4217620.1130370.2987980.044432
\n", + "

1 rows × 102 columns

\n", + "
" + ], + "text/plain": [ + " general_info_BoundingBox general_info_GeneralSettings \\\n", + "0 (0; 0; 0; 32; 32; 32) {'padDistance': 5; 'verbose': True; 'interpola... \n", + "\n", + " general_info_ImageHash general_info_ImageSpacing \\\n", + "0 dd1063b4904affacbcfa769510e55d35690f6961 (1.0; 1.0; 1.0) \n", + "\n", + " general_info_InputImages general_info_MaskHash \\\n", + "0 {'Original': {}} 1daf886d07071ceba6a23f52cdcb6460dc91c19a \n", + "\n", + " general_info_Version general_info_VolumeNum general_info_VoxelNum \\\n", + "0 v1.0.1.post6.dev0+g5b1e8bb 1 1024 \n", + "\n", + " original_shape_Maximum2DDiameterColumn ... \\\n", + "0 31.0 ... \n", + "\n", + " original_glrlm_HighGrayLevelRunEmphasis \\\n", + "0 7.497225 \n", + "\n", + " original_glrlm_GrayLevelNonUniformity \\\n", + "0 28.953289 \n", + "\n", + " original_glrlm_LongRunHighGrayLevelEmphasis \\\n", + "0 2276.534562 \n", + "\n", + " original_glrlm_LongRunEmphasis \\\n", + "0 296.399351 \n", + "\n", + " original_glrlm_GrayLevelNonUniformityNormalized \\\n", + "0 0.250258 \n", + "\n", + " original_glrlm_ShortRunLowGrayLevelEmphasis original_glrlm_RunVariance \\\n", + "0 0.016999 2.421762 \n", + "\n", + " original_glrlm_RunPercentage original_glrlm_ShortRunHighGrayLevelEmphasis \\\n", + "0 0.113037 0.298798 \n", + "\n", + " original_glrlm_ShortRunEmphasis \n", + "0 0.044432 \n", + "\n", + "[1 rows x 102 columns]" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "extractor = RadiomicsFeaturesExtractor(verbose = True)\n", + "extractor.enableAllFeatures()\n", + "out_dict = extractor.execute(test_img, test_mask)\n", + "pd.DataFrame([out_dict])" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Calculating firstorder features\n", + "Calculating glcm features\n", + "Calculating glszm features\n", + "Calculating shape 
features\n", + "Calculating glrlm features\n" + ] + } + ], + "source": [ + "\n", + "for c_name in extractor.getFeatureClassNames():\n", + " print('Calculating {} features'.format(c_name))\n", + " featureClass = extractor.featureClasses[c_name](test_img, test_mask)" + ] + } + ], + "metadata": { + "anaconda-cloud": {}, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.5.2" + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} diff --git a/bin/Notebooks/helloFeatureClass.ipynb b/bin/Notebooks/helloFeatureClass.ipynb index 8477b5be..0316df6a 100644 --- a/bin/Notebooks/helloFeatureClass.ipynb +++ b/bin/Notebooks/helloFeatureClass.ipynb @@ -23,6 +23,7 @@ }, "outputs": [], "source": [ + "from __future__ import print_function, unicode_literals, division, absolute_import\n", "import os\n", "import collections\n", "import SimpleITK as sitk\n", @@ -63,9 +64,9 @@ "maskName = os.path.join(dataDir, testCase + '_label.nrrd')\n", "\n", "if not os.path.exists(imageName):\n", - " print 'Error: problem finding input image', imageName\n", + " print('Error: problem finding input image', imageName)\n", "if not os.path.exists(maskName):\n", - " print 'Error: problem finding input labelmap', maskName" + " print('Error: problem finding input labelmap', maskName)" ] }, { @@ -197,10 +198,10 @@ ], "source": [ "# Print out the docstrings of the enabled features\n", - "print 'Will calculate the following first order features: '\n", + "print('Will calculate the following first order features: ')\n", "for f in firstOrderFeatures.enabledFeatures.keys():\n", - " print f\n", - " print eval('firstOrderFeatures.get' + f + 'FeatureValue.__doc__')" + " print(f)\n", + " print(eval('firstOrderFeatures.get' + f + 
'FeatureValue.__doc__'))" ] }, { @@ -214,7 +215,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "Calculating first order features... done\n", + "Calculating first order features...\n", + "done\n", "Calculated first order features: \n", " Mean : 825.235436307\n" ] @@ -222,13 +224,13 @@ ], "source": [ "# Calculate the features and print out result\n", - "print 'Calculating first order features...',\n", + "print('Calculating first order features...')\n", "firstOrderFeatures.calculateFeatures()\n", - "print 'done'\n", + "print('done')\n", "\n", - "print 'Calculated first order features: '\n", - "for (key, val) in firstOrderFeatures.featureValues.iteritems():\n", - " print ' ', key, ':', val" + "print('Calculated first order features: ')\n", + "for (key, val) in firstOrderFeatures.featureValues.items():\n", + " print(' ', key, ':', val)" ] }, { @@ -265,21 +267,9 @@ "output_type": "stream", "text": [ "Will calculate the following shape features: \n", - "Maximum3DDiameter\n", - "\n", - " Calculate the largest pairwise euclidean distance between tumor surface voxels.\n", - " Also known as Feret Diameter.\n", - " \n", - "Compactness2\n", - "\n", - " Calculate the Compactness (2) of the tumor region.\n", - "\n", - " :math:`compactness\\ 2 = 36\\pi\\frac{V^2}{A^3}`\n", + "Maximum2DDiameterSlice\n", "\n", - " Compactness 2 is a measure of how compact the shape of the tumor is\n", - " relative to a sphere (most compact). It is a dimensionless measure,\n", - " independent of scale and orientation. This is a measure of the compactness\n", - " of the shape of the image ROI.\n", + " Calculate the largest pairwise euclidean distance between tumor surface voxels in the row-column plane.\n", " \n", "Compactness1\n", "\n", @@ -293,6 +283,15 @@ " ratio of volume to the :math:`\\sqrt{\\text{surface area}^3}`. 
This is a measure of the\n", " compactness of the shape of the image ROI\n", " \n", + "Maximum3DDiameter\n", + "\n", + " Calculate the largest pairwise euclidean distance between tumor surface voxels.\n", + " Also known as Feret Diameter.\n", + " \n", + "Flatness\n", + "\n", + "\n", + " \n", "Sphericity\n", "\n", " Calculate the Sphericity of the tumor region.\n", @@ -302,19 +301,27 @@ " Sphericity is a measure of the roundness of the shape of the tumor region\n", " relative to a sphere. This is another measure of the compactness of a tumor.\n", " \n", - "Maximum2DDiameterSlice\n", + "Maximum2DDiameterRow\n", "\n", - " Calculate the largest pairwise euclidean distance between tumor surface voxels in the row-column plane.\n", + " Calculate the largest pairwise euclidean distance between tumor surface voxels in the column-slice plane.\n", " \n", "Elongation\n", "\n", "\n", " \n", - "SurfaceVolumeRatio\n", + "SurfaceArea\n", "\n", - " Calculate the surface area to volume ratio of the tumor region\n", + " Calculate the surface area of the tumor region in square millimeters.\n", + "\n", + " :math:`A = \\displaystyle\\sum^{N}_{i=1}{\\frac{1}{2}|\\textbf{a}_i\\textbf{b}_i \\times \\textbf{a}_i\\textbf{c}_i|}`\n", + "\n", + " Where:\n", + "\n", + " :math:`N` is the number of triangles forming the surface of the volume\n", + "\n", + " :math:`a_ib_i` and :math:`a_ic_i` are the edges of the :math:`i`\\ :sup:`th` triangle formed by points :math:`a_i`,\n", + " :math:`b_i` and :math:`c_i`\n", "\n", - " :math:`surface\\ to\\ volume\\ ratio = \\frac{A}{V}`\n", " \n", "Volume\n", "\n", @@ -324,53 +331,48 @@ "\n", "\n", " \n", - "SphericalDisproportion\n", - "\n", - " Calculate the Spherical Disproportion of the tumor region.\n", + "Compactness2\n", "\n", - " :math:`spherical\\ disproportion = \\frac{A}{4\\pi R^2}`\n", + " Calculate the Compactness (2) of the tumor region.\n", "\n", - " Where :math:`R` is the radius of a sphere with the same volume as the tumor.\n", + " 
:math:`compactness\\ 2 = 36\\pi\\frac{V^2}{A^3}`\n", "\n", - " Spherical Disproportion is the ratio of the surface area of the\n", - " tumor region to the surface area of a sphere with the same\n", - " volume as the tumor region.\n", + " Compactness 2 is a measure of how compact the shape of the tumor is\n", + " relative to a sphere (most compact). It is a dimensionless measure,\n", + " independent of scale and orientation. This is a measure of the compactness\n", + " of the shape of the image ROI.\n", " \n", - "Flatness\n", - "\n", + "Maximum2DDiameterColumn\n", "\n", + " Calculate the largest pairwise euclidean distance between tumor surface voxels in the row-slice plane.\n", " \n", - "SurfaceArea\n", - "\n", - " Calculate the surface area of the tumor region in square millimeters.\n", - "\n", - " :math:`A = \\displaystyle\\sum^{N}_{i=1}{\\frac{1}{2}|\\textbf{a}_i\\textbf{b}_i \\times \\textbf{a}_i\\textbf{c}_i|}`\n", + "SurfaceVolumeRatio\n", "\n", - " Where:\n", + " Calculate the surface area to volume ratio of the tumor region\n", "\n", - " :math:`N` is the number of triangles forming the surface of the volume\n", + " :math:`surface\\ to\\ volume\\ ratio = \\frac{A}{V}`\n", + " \n", + "SphericalDisproportion\n", "\n", - " :math:`a_ib_i` and :math:`a_ic_i` are the edges of the :math:`i`\\ :sup:`th` triangle formed by points :math:`a_i`,\n", - " :math:`b_i` and :math:`c_i`\n", + " Calculate the Spherical Disproportion of the tumor region.\n", "\n", - " \n", - "Maximum2DDiameterColumn\n", + " :math:`spherical\\ disproportion = \\frac{A}{4\\pi R^2}`\n", "\n", - " Calculate the largest pairwise euclidean distance between tumor surface voxels in the row-slice plane.\n", - " \n", - "Maximum2DDiameterRow\n", + " Where :math:`R` is the radius of a sphere with the same volume as the tumor.\n", "\n", - " Calculate the largest pairwise euclidean distance between tumor surface voxels in the column-slice plane.\n", + " Spherical Disproportion is the ratio of the surface 
area of the\n", + " tumor region to the surface area of a sphere with the same\n", + " volume as the tumor region.\n", " \n" ] } ], "source": [ "# Print out the docstrings of the enabled features\n", - "print 'Will calculate the following shape features: '\n", + "print('Will calculate the following shape features: ')\n", "for f in shapeFeatures.enabledFeatures.keys():\n", - " print f\n", - " print eval('shapeFeatures.get' + f + 'FeatureValue.__doc__')" + " print(f)\n", + " print(eval('shapeFeatures.get' + f + 'FeatureValue.__doc__'))" ] }, { @@ -384,34 +386,35 @@ "name": "stdout", "output_type": "stream", "text": [ - "Calculating shape features... done\n", + "Calculating shape features...\n", + "done\n", "Calculated shape features: \n", - " Maximum3DDiameter : 65.5366145873\n", - " Compactness2 : 0.114127701901\n", " Maximum2DDiameterSlice : 47.2187913633\n", - " Sphericity : 0.485061744222\n", " Compactness1 : 26.7546787215\n", - " Elongation : 1.7789885567\n", - " SurfaceVolumeRatio : 0.392308261863\n", - " Volume : 16412.6586914\n", - " Flatness : 1.21918505897\n", - " SphericalDisproportion : 2.06159321347\n", - " Roundness : 0.61469066615\n", + " Maximum3DDiameter : 65.53661458728622\n", + " Flatness : 1.2191850589688844\n", + " Sphericity : 0.485061744222\n", + " Maximum2DDiameterRow : 61.5801767135\n", + " Elongation : 1.7789885567018646\n", " SurfaceArea : 6438.82160378\n", + " Volume : 16412.65869140624\n", + " Roundness : 0.6146906661500379\n", + " Compactness2 : 0.114127701901\n", " Maximum2DDiameterColumn : 44.5487904052\n", - " Maximum2DDiameterRow : 61.5801767135\n" + " SurfaceVolumeRatio : 0.392308261863\n", + " SphericalDisproportion : 2.06159321347\n" ] } ], "source": [ "# Calculate the features and print out result\n", - "print 'Calculating shape features...',\n", + "print('Calculating shape features...')\n", "shapeFeatures.calculateFeatures()\n", - "print 'done'\n", + "print('done')\n", "\n", - "print 'Calculated shape features: '\n", - "for 
(key, val) in shapeFeatures.featureValues.iteritems():\n", - " print ' ', key, ':', val" + "print('Calculated shape features: ')\n", + "for (key, val) in shapeFeatures.featureValues.items():\n", + " print(' ', key, ':', val)" ] }, { @@ -432,7 +435,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "calculate GLCM: 100%|██████████████████████████████████████████████████████████████████| 33/33 [00:01<00:00, 29.62it/s]\n" + "calculate GLCM: 100%|██████████████████████████████████████████████████████████████████| 33/33 [00:00<00:00, 51.40it/s]\n" ] } ], @@ -456,46 +459,70 @@ "output_type": "stream", "text": [ "Will calculate the following GLCM features: \n", - "SumVariance\n", + "SumSquares\n", "\n", - " Using coefficients pxAddy, kValuesSum, SumEntropy calculate and return the mean Sum Variance.\n", + " Using coefficients :math:`i` and math:`\\mu_x`, calculate and return the mean Sum of Squares (also known as\n", + " Variance) of the :math:`i` distribution.\n", "\n", - " :math:`sum\\ variance = \\displaystyle\\sum^{2N_g}_{k=2}{(k-SE)^2p_{x+y}(k)}`\n", + " :math:`sum\\ squares = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{(i-\\mu_x)^2p(i,j)}`\n", "\n", - " Sum Variance is a measure of heterogeneity that places higher weights on\n", - " neighboring intensity level pairs that deviate more from the mean.\n", + " Sum of Squares or Variance is a measure in the distribution of neigboring intensity level pairs\n", + " about the mean intensity level in the GLCM.\n", + "\n", + " N.B. This formula represents the variance of the distribution of :math:`i` and is independent from the distribution\n", + " of :math:`j`. 
Therefore, only use this formula if the GLCM is symmetrical, where VAR(i) is equal to VAR(j).\n", " \n", - "Homogeneity1\n", + "SumAverage\n", "\n", - " Using coefficients i, j, calculate and return the mean Homogeneity 1.\n", + " Coefficient :math:`p_{x+y}`, calculate and return the mean Sum Average.\n", "\n", - " :math:`homogeneity\\ 1 = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{\\frac{p(i,j)}{1+|i-j|}}`\n", + " :math:`sum\\ average = \\displaystyle\\sum^{2N_g}_{k=2}{p_{x+y}(k)k}`\n", "\n", - " Homogeneity 1 is a measure of the similarity in intensity values for\n", - " neighboring voxels. It is a measure of local homogeneity that increases\n", - " with less contrast in the window.\n", + " Sum Average measures the relationship between occurrences of pairs\n", + " with lower intensity values and occurrences of pairs with higher intensity\n", + " values.\n", " \n", - "Homogeneity2\n", + "Energy\n", "\n", - " Using coefficients i, j, calculate and return the mean Homogeneity 2.\n", + " Calculate and return the mean Energy.\n", "\n", - " :math:`homogeneity\\ 2 = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{\\frac{p(i,j)}{1+|i-j|^2}}`\n", + " :math:`energy = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{\\big(p(i,j)\\big)^2}`\n", "\n", - " Homogeneity 2 is a measure of the similarity in intensity values\n", - " for neighboring voxels.\n", + " Energy (or Angular Second Moment)is a measure of homogeneous patterns\n", + " in the image. 
A greater Energy implies that there are more instances\n", + " of intensity value pairs in the image that neighbor each other at\n", + " higher frequencies.\n", " \n", - "ClusterShade\n", + "SumEntropy\n", "\n", - " Using coefficients i, j, ux, uy, calculate and return the mean Cluster Shade.\n", + " Using coefficient :math:`p_{x+y}`, calculate and return the mean Sum Entropy.\n", "\n", - " :math:`cluster\\ shade = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{\\big(i+j-\\mu_x(i)-\\mu_y(j)\\big)^3p(i,j)}`\n", + " :math:`sum\\ entropy = \\displaystyle\\sum^{2N_g}_{k=2}{p_{x+y}(k)\\log_2\\big(p_{x+y}(k)+\\epsilon\\big)}`\n", "\n", - " Cluster Shade is a measure of the skewness and uniformity of the GLCM.\n", - " A higher cluster shade implies greater asymmetry about the mean.\n", + " Sum Entropy is a sum of neighborhood intensity value differences.\n", + " \n", + "AverageIntensity\n", + "\n", + " Return the mean gray level intensity of the :math:`i` distribution.\n", + "\n", + " :math:`\\mu_x = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{p(i,j)i}`\n", + "\n", + " N.B. As this formula represents the average of the distribution of :math:`i`, it is independent from the\n", + " distribution of :math:`j`. 
Therefore, only use this formula if the GLCM is symmetrical, where both distrubutions\n", + " are equal.\n", + " \n", + "Id\n", + "\n", + " Calculate and return the mean Inverse Difference.\n", + "\n", + " :math:`ID = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{ \\frac{\\textbf{P}(i,j)}{1+|i-j|} }`\n", + "\n", + " ID (inverse difference) is another measure of the local homogeneity of an image.\n", + " With more uniform gray levels, the denominator will remain low, resulting in a higher overall value.\n", " \n", "MaximumProbability\n", "\n", - " Using P_glcm, calculate and return the mean Maximum Probability.\n", + " Calculate and return the mean Maximum Probability.\n", "\n", " :math:`maximum\\ probability = \\max\\big(p(i,j)\\big)`\n", "\n", @@ -504,7 +531,7 @@ " \n", "Idmn\n", "\n", - " Using coefficients i, j, Ng, calculate and return the mean Inverse Difference Moment Normalized.\n", + " Calculate and return the mean Inverse Difference Moment Normalized.\n", "\n", " :math:`IDMN = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{ \\frac{p(i,j)}{1+\\left(\\frac{|i-j|^2}{N_g^2}\\right)} }`\n", "\n", @@ -515,154 +542,127 @@ " neighboring intensity values by dividing over the square of the total\n", " number of discrete intensity values.\n", " \n", - "SumVariance2\n", - "\n", - " Using coefficients pxAddy, kValuesSum, SumAvarage calculate and return the mean Sum Variance.\n", + "Entropy\n", "\n", - " :math:`sum\\ variance\\ 2 = \\displaystyle\\sum^{2N_g}_{k=2}{(k-SA)^2p_{x+y}(k)}`\n", + " Calculate and return the mean Entropy.\n", "\n", - " Sum Variance is a measure of heterogeneity that places higher weights on\n", - " neighboring intensity level pairs that deviate more from the mean.\n", + " :math:`entropy = -\\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{p(i,j)\\log_2\\big(p(i,j)+\\epsilon\\big)}`\n", "\n", - " This formula differs from SumVariance in that instead of subtracting the SumEntropy from the 
intensity,\n", - " it subtracts the SumAvarage, which is the mean of intensities and not its entropy\n", + " Entropy is a measure of the randomness/variability in neighborhood intensity values.\n", " \n", - "Contrast\n", + "Idn\n", "\n", - " Using coefficients i, j, calculate and return the mean Contrast.\n", + " Calculate and return the mean Inverse Difference Normalized.\n", "\n", - " :math:`contrast = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{(i-j)^2p(i,j)}`\n", + " :math:`IDN = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{ \\frac{p(i,j)}{1+\\left(\\frac{|i-j|}{N_g}\\right)} }`\n", "\n", - " Contrast is a measure of the local intensity variation, favoring :math:`P(i,j)`\n", - " values away from the diagonal :math:`(i = j)`. A larger value correlates with\n", - " a greater disparity in intensity values among neighboring voxels.\n", + " IDN (inverse difference normalized) is another measure of the local\n", + " homogeneity of an image. Unlike Homogeneity1, IDN normalizes the difference\n", + " between the neighboring intensity values by dividing over the total number\n", + " of discrete intensity values.\n", " \n", - "DifferenceEntropy\n", - "\n", - " Using coefficients pxSuby, eps, calculate and return the mean Difference Entropy.\n", + "ClusterShade\n", "\n", - " :math:`difference\\ entropy = \\displaystyle\\sum^{N_g-1}_{k=0}{p_{x-y}(k)\\log_2\\big(p_{x-y}(k)\\big)}`\n", + " Using coefficients :math:`\\mu_x` and :math:`\\mu_y`, calculate and return the mean Cluster Shade.\n", "\n", - " Difference Entropy is a measure of the randomness/variability\n", - " in neighborhood intensity value differences.\n", - " \n", - "InverseVariance\n", - "Using the i, j coeffients, calculate and return the mean Inverse Variance.\n", + " :math:`cluster\\ shade = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{\\big(i+j-\\mu_x(i)-\\mu_y(j)\\big)^3p(i,j)}`\n", "\n", - " :math:`inverse\\ variance = 
\\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{\\frac{p(i,j)}{|i-j|^2}}, i \\neq j`\n", + " Cluster Shade is a measure of the skewness and uniformity of the GLCM.\n", + " A higher cluster shade implies greater asymmetry about the mean.\n", " \n", - "Dissimilarity\n", - "\n", - " Using coefficients i, j, calculate and return the mean Dissimilarity.\n", + "Imc1\n", "\n", - " :math:`dissimilarity = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{|i-j|p(i,j)}`\n", + " Using coefficients :math:`HX`, :math:`HY`, :math:`HXY` and :math:`HXY1`, calculate and return the mean Informal\n", + " Measure of Correlation 1.\n", "\n", - " Dissimilarity is a measure of local intensity variation. A larger\n", - " value correlates with a greater disparity in intensity values\n", - " among neighboring voxels.\n", + " :math:`IMC\\ 1 = \\frac{HXY-HXY1}{\\max\\{HX,HY\\}}`\n", " \n", - "SumAverage\n", + "ClusterTendency\n", "\n", - " Using coefficients pxAddy, kValuesSum, calculate and return the mean Sum Average.\n", + " Using coefficients :math:`\\mu_x` and :math:`\\mu_y`, calculate and return the mean Cluster Tendency.\n", "\n", - " :math:`sum\\ average = \\displaystyle\\sum^{2N_g}_{k=2}{p_{x+y}(k)k}`\n", + " :math:`cluster\\ prominence = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{\\big(i+j-\\mu_x(i)-\\mu_y(j)\\big)^2p(i,j)}`\n", "\n", - " Sum Average measures the relationship between occurrences of pairs\n", - " with lower intensity values and occurrences of pairs with higher intensity\n", - " values.\n", + " Cluster Tendency is a measure of groupings of voxels with similar gray-level values.\n", " \n", "DifferenceVariance\n", "\n", - " Using coefficients pxSuby, kValuesDiff, DifferenceAverage calculate and return the mean Difference Variance.\n", + " Using coefficients :math:`p_{x-y}` and DifferenceAverage (DA) calculate and return the mean Difference Variance.\n", "\n", " :math:`Difference\\ variance = 
\\displaystyle\\sum^{N_g-1}_{k=0}{(1-DA)^2\\textbf{P}_{x-y}(k)}`\n", "\n", " Difference Variance is a measure of heterogeneity that places higher weights on\n", " differing intensity level pairs that deviate more from the mean.\n", " \n", - "Idn\n", - "\n", - " Using coefficients i, j, Ng, calculate and return the mean Inverse Difference Normalized.\n", - "\n", - " :math:`IDN = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{ \\frac{p(i,j)}{1+\\left(\\frac{|i-j|}{N_g}\\right)} }`\n", - "\n", - " IDN (inverse difference normalized) is another measure of the local\n", - " homogeneity of an image. Unlike Homogeneity1, IDN normalizes the difference\n", - " between the neighboring intensity values by dividing over the total number\n", - " of discrete intensity values.\n", - " \n", - "Idm\n", + "Homogeneity1\n", "\n", - " Using coefficients i, j, calculate and return the mean Inverse Difference Moment.\n", + " Calculate and return the mean Homogeneity 1.\n", "\n", - " :math:`IDM = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{ \\frac{\\textbf{P}(i,j)}{1+|i-j|^2} }`\n", + " :math:`homogeneity\\ 1 = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{\\frac{p(i,j)}{1+|i-j|}}`\n", "\n", - " IDM (inverse difference moment) is a measure of the local\n", - " homogeneity of an image. IDM weights are the inverse of the Contrast\n", - " weights (decreasing exponentially from the diagonal i=j in the GLCM).\n", + " Homogeneity 1 is a measure of the similarity in intensity values for\n", + " neighboring voxels. 
It is a measure of local homogeneity that increases\n", + " with less contrast in the window.\n", " \n", - "Correlation\n", + "Imc2\n", "\n", - " Using coefficients i, j, ux, uy, sigx, sigy, calculate and return the mean Correlation.\n", + " Using coefficients :math:`HXY` and :math:`HXY2`, calculate and return the mean Informal Measure of Correlation 2.\n", "\n", - " :math:`correlation = \\frac{\\sum^{N_g}_{i=1}\\sum^{N_g}_{j=1}{p(i,j)ij-\\mu_x(i)\\mu_y(j)}}{\\sigma_x(i)\\sigma_y(j)}`\n", - "\n", - " Correlation is a value between 0 (uncorrelated) and 1 (perfectly correlated) showing the\n", - " linear dependency of gray level values to their respective voxels in the GLCM.\n", + " :math:`IMC\\ 2 = \\sqrt{1-e^{-2(HXY2-HXY)}}`\n", " \n", - "Autocorrelation\n", + "SumVariance\n", "\n", - " Using the i and j arrays, calculate and return the mean Autocorrelation.\n", + " Using coefficients :math:`p_{x+y}` and SumEntropy (SE) calculate and return the mean Sum Variance.\n", "\n", - " :math:`autocorrelation = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{p(i,j)ij}`\n", + " :math:`sum\\ variance = \\displaystyle\\sum^{2N_g}_{k=2}{(k-SE)^2p_{x+y}(k)}`\n", "\n", - " Autocorrelation is a measure of the magnitude of the\n", - " fineness and coarseness of texture.\n", + " Sum Variance is a measure of heterogeneity that places higher weights on\n", + " neighboring intensity level pairs that deviate more from the mean.\n", " \n", - "SumEntropy\n", + "Contrast\n", "\n", - " Using coefficients pxAddy, eps, calculate and return the mean Sum Entropy.\n", + " Using the squared difference between gray values of neighbouring paris, calculate and return the mean Contrast.\n", "\n", - " :math:`sum\\ entropy = \\displaystyle\\sum^{2N_g}_{k=2}{p_{x+y}(k)\\log_2\\big(p_{x+y}(k)+\\epsilon\\big)}`\n", + " :math:`contrast = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{(i-j)^2p(i,j)}`\n", "\n", - " Sum Entropy is a sum of neighborhood intensity value 
differences.\n", + " Contrast is a measure of the local intensity variation, favoring :math:`P(i,j)`\n", + " values away from the diagonal :math:`(i = j)`. A larger value correlates with\n", + " a greater disparity in intensity values among neighboring voxels.\n", " \n", - "AverageIntensity\n", + "DifferenceAverage\n", "\n", - " Return the mean gray level intensity of the :math:`i` distribution.\n", + " Using coefficient :math:`p_{x-y}`, calculate and return the mean Difference Average.\n", "\n", - " :math:`\\mu_x = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{p(i,j)i}`\n", + " :math:`Difference\\ average = \\displaystyle\\sum^{N_g-1}_{k=0}{k\\textbf{P}_{x-y}(k)}`\n", "\n", - " N.B. As this formula represents the average of the distribution of :math:`i`, it is independent from the\n", - " distribution of :math:`j`. Therefore, only use this formula if the GLCM is symmetrical, where both distrubutions\n", - " are equal.\n", + " Difference Average measures the relationship between occurrences of pairs\n", + " with similar intensity values and occurrences of pairs with differing intensity\n", + " values.\n", " \n", - "Energy\n", + "SumVariance2\n", "\n", - " Using P_glcm, calculate and return the mean Energy.\n", + " Using coefficients :math:`p_{x+y}` and SumAvarage (SA) calculate and return the mean Sum Variance 2.\n", + " :math:`sum\\ variance\\ 2 = \\displaystyle\\sum^{2N_g}_{k=2}{(k-SA)^2p_{x+y}(k)}`\n", "\n", - " :math:`energy = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{\\big(p(i,j)\\big)^2}`\n", + " Sum Variance 2 is a measure of heterogeneity that places higher weights on\n", + " neighboring intensity level pairs that deviate more from the mean.\n", "\n", - " Energy (or Angular Second Moment)is a measure of homogeneous patterns\n", - " in the image. 
A greater Energy implies that there are more instances\n", - "    of intensity value pairs in the image that neighbor each other at\n", - "    higher frequencies.\n", + "    This formula differs from SumVariance in that instead of subtracting the SumEntropy from the intensity,\n", + "    it subtracts the SumAverage, which is the mean of intensities and not its entropy.\n", "    \n", - "SumSquares\n", - "\n", - "    Using coefficients i and ux, calculate and return the mean Sum of Squares (also known as Variance).\n", + "Correlation\n", "\n", - "    :math:`sum\\ squares = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{(i-\\mu_x)^2p(i,j)}`\n", + "    Using coefficients :math:`\\mu_x`, :math:`\\mu_y`, :math:`\\sigma_x` and :math:`\\sigma_y`, calculate and return the\n", + "    mean Correlation.\n", "\n", - "    Sum of Squares or Variance is a measure in the distribution of neigboring intensity level pairs\n", - "    about the mean intensity level in the GLCM.\n", + "    :math:`correlation = \\frac{\\sum^{N_g}_{i=1}\\sum^{N_g}_{j=1}{p(i,j)ij-\\mu_x(i)\\mu_y(j)}}{\\sigma_x(i)\\sigma_y(j)}`\n", "\n", - "    N.B. This formula represents the variance of the distribution of :math:`i` and is independent from the distribution\n", - "    of :math:`j`. 
Therefore, only use this formula if the GLCM is symmetrical, where VAR(i) to be equal to VAR(j).\n", + " Correlation is a value between 0 (uncorrelated) and 1 (perfectly correlated) showing the\n", + " linear dependency of gray level values to their respective voxels in the GLCM.\n", " \n", "ClusterProminence\n", "\n", - " Using coefficients i, j, ux, uy, calculate and return the mean Cluster Prominence.\n", + " Using coefficients :math:`\\mu_x` and :math:`\\mu_y`, calculate and return the mean Cluster Prominence.\n", "\n", " :math:`cluster\\ prominence = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{\\big( i+j-\\mu_x(i)-\\mu_y(j)\\big)^4p(i,j)}`\n", "\n", @@ -670,62 +670,68 @@ " A higher values implies more asymmetry about the mean while a lower value\n", " indicates a peak near the mean value and less variation about the mean.\n", " \n", - "Entropy\n", - "\n", - " Using coefficients eps, calculate and return the mean Entropy.\n", + "InverseVariance\n", "\n", - " :math:`entropy = -\\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{p(i,j)\\log_2\\big(p(i,j)+\\epsilon\\big)}`\n", + " Calculate and return the mean Inverse Variance.\n", "\n", - " Entropy is a measure of the randomness/variability in neighborhood intensity values.\n", + " :math:`inverse\\ variance = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{\\frac{p(i,j)}{|i-j|^2}}, i \\neq j`\n", " \n", - "Imc2\n", + "Homogeneity2\n", "\n", - " Using coefficients HXY, HXY2, calculate and return the mean Informal Measure of Correlation 2.\n", + " Calculate and return the mean Homogeneity 2.\n", "\n", - " :math:`IMC\\ 2 = \\sqrt{1-e^{-2(HXY2-HXY)}}`\n", + " :math:`homogeneity\\ 2 = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{\\frac{p(i,j)}{1+|i-j|^2}}`\n", + "\n", + " Homogeneity 2 is a measure of the similarity in intensity values\n", + " for neighboring voxels.\n", " \n", - "Imc1\n", + "DifferenceEntropy\n", "\n", - " Using coefficients HX, HY, HXY, 
HXY1, calculate and return the mean Informal Measure of Correlation 1.\n", + " Using coefficient :math:`p_{x-y}`, calculate and return the mean Difference Entropy.\n", "\n", - " :math:`IMC\\ 1 = \\frac{HXY-HXY1}{\\max\\{HX,HY\\}}`\n", + " :math:`difference\\ entropy = \\displaystyle\\sum^{N_g-1}_{k=0}{p_{x-y}(k)\\log_2\\big(p_{x-y}(k)+\\epsilon\\big)}`\n", + "\n", + " Difference Entropy is a measure of the randomness/variability\n", + " in neighborhood intensity value differences.\n", " \n", - "DifferenceAverage\n", + "Dissimilarity\n", "\n", - " Using coefficients pxMiny, kValuesDiff, calculate and return the mean Difference Average.\n", + " Calculate and return the mean Dissimilarity.\n", "\n", - " :math:`Difference\\ average = \\displaystyle\\sum^{N_g-1}_{k=0}{k\\textbf{P}_{x-y}(k)}`\n", + " :math:`dissimilarity = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{|i-j|p(i,j)}`\n", "\n", - " Difference Average measures the relationship between occurrences of pairs\n", - " with similar intensity values and occurrences of pairs with differing intensity\n", - " values.\n", + " Dissimilarity is a measure of local intensity variation defined as the mean absolute difference between the\n", + " neighbouring pairs. 
A larger value correlates with a greater disparity in intensity values\n", + " among neighboring voxels.\n", " \n", - "Id\n", + "Idm\n", "\n", - " Using coefficients i, j, Ng, calculate and return the mean Inverse Difference.\n", + " Calculate and return the mean Inverse Difference Moment.\n", "\n", - " :math:`ID = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{ \\frac{\\textbf{P}(i,j)}{1+|i-j|} }`\n", + " :math:`IDM = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{ \\frac{\\textbf{P}(i,j)}{1+|i-j|^2} }`\n", "\n", - " ID (inverse difference) is another measure of the local homogeneity of an image.\n", - " With more uniform gray levels, the denominator will remain low, resulting in a higher overall value.\n", + " IDM (inverse difference moment) is a measure of the local\n", + " homogeneity of an image. IDM weights are the inverse of the Contrast\n", + " weights (decreasing exponentially from the diagonal i=j in the GLCM).\n", " \n", - "ClusterTendency\n", + "Autocorrelation\n", "\n", - " Using coefficients i, j, ux, uy, calculate and return the mean Cluster Tendency.\n", + " Calculate and return the mean Autocorrelation.\n", "\n", - " :math:`cluster\\ prominence = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{\\big(i+j-\\mu_x(i)-\\mu_y(j)\\big)^2p(i,j)}`\n", + " :math:`autocorrelation = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_g}_{j=1}{p(i,j)ij}`\n", "\n", - " Cluster Tendency is a measure of groupings of voxels with similar gray-level values.\n", + " Autocorrelation is a measure of the magnitude of the\n", + " fineness and coarseness of texture.\n", " \n" ] } ], "source": [ "# Print out the docstrings of the enabled features\n", - "print 'Will calculate the following GLCM features: '\n", + "print('Will calculate the following GLCM features: ')\n", "for f in glcmFeatures.enabledFeatures.keys():\n", - " print f\n", - " print eval('glcmFeatures.get' + f + 'FeatureValue.__doc__')" + " print(f)\n", + " 
print(eval('glcmFeatures.get' + f + 'FeatureValue.__doc__'))" ] }, { @@ -739,48 +745,49 @@ "name": "stdout", "output_type": "stream", "text": [ - "Calculating GLCM features... done\n", + "Calculating GLCM features...\n", + "done\n", "Calculated GLCM features: \n", - " SumVariance : 895.891808819\n", - " Homogeneity1 : 0.276140402104\n", - " Homogeneity2 : 0.189156155892\n", - " ClusterShade : -52.9707943386\n", - " MaximumProbability : 0.00792784235012\n", - " Idmn : 0.957796447609\n", - " SumVariance2 : 103.142793792\n", - " Contrast : 52.2310659277\n", - " DifferenceEntropy : 3.79686113536\n", - " InverseVariance : 0.188666637795\n", - " Entropy : 8.79428086119\n", - " Dissimilarity : 5.58932678922\n", - " DifferenceVariance : 17.6107741076\n", - " Idn : 0.866370546902\n", - " Idm : 0.189156155892\n", - " Correlation : 0.335214788202\n", - " Autocorrelation : 292.684050471\n", - " SumEntropy : 5.31547876648\n", - " AverageIntensity : 17.1242601309\n", - " Energy : 0.00290880217681\n", " SumSquares : 39.9781084143\n", - " ClusterProminence : 26251.1709801\n", " SumAverage : 33.4497492152\n", + " Energy : 0.00290880217681\n", + " Idn : 0.866370546902\n", + " AverageIntensity : 17.1242601309\n", + " Id : 0.276140402104\n", + " SumVariance : 895.891808819\n", + " Dissimilarity : 5.58932678922\n", + " Entropy : 8.79428086119\n", + " SumEntropy : 5.31547876648\n", + " ClusterShade : -52.9707943386\n", " Imc2 : 0.692033706271\n", + " ClusterTendency : 103.142793792\n", + " DifferenceVariance : 17.6107741076\n", + " Homogeneity1 : 0.276140402104\n", " Imc1 : -0.091940840043\n", + " Contrast : 52.2310659277\n", + " DifferenceEntropy : 3.79686113536\n", " DifferenceAverage : 5.58932678922\n", - " Id : 0.276140402104\n", - " ClusterTendency : 103.142793792\n" + " SumVariance2 : 103.142793792\n", + " Correlation : 0.335214788202\n", + " ClusterProminence : 26251.1709801\n", + " InverseVariance : 0.188666637795\n", + " Homogeneity2 : 0.189156155892\n", + " MaximumProbability 
: 0.00792784235012\n", + " Idmn : 0.957796447609\n", + " Idm : 0.189156155892\n", + " Autocorrelation : 292.684050471\n" ] } ], "source": [ "# Calculate the features and print out result\n", - "print 'Calculating GLCM features...',\n", + "print('Calculating GLCM features...')\n", "glcmFeatures.calculateFeatures()\n", - "print 'done'\n", + "print('done')\n", "\n", - "print 'Calculated GLCM features: '\n", - "for (key, val) in glcmFeatures.featureValues.iteritems():\n", - " print ' ', key, ':', val" + "print('Calculated GLCM features: ')\n", + "for (key, val) in glcmFeatures.featureValues.items():\n", + " print(' ', key, ':', val)" ] }, { @@ -817,77 +824,64 @@ "output_type": "stream", "text": [ "Will calculate the following GLRLM features: \n", - "ShortRunLowGrayLevelEmphasis\n", - "\n", - " Calculate and return the mean Short Run Low Gray Level Emphasis (SRLGLE) value for all GLRLMs.\n", - "\n", - " :math:`SRLGLE = \\frac{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\frac{\\textbf{P}(i,j|\\theta)}{i^2j^2}}}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", - "\n", - " Measures the joint distribution of shorter run lengths with lower gray-level values.\n", - " \n", - "GrayLevelVariance\n", - "\n", - " Calculate and return the Gray Level Variance (GLV) value.\n", + "GrayLevelNonUniformityNormalized\n", "\n", - " :math:`GLV = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_r}_{j=1}{p(i,j|\\theta)(i - \\mu)^2}`, where\n", + " Calculate and return the Gray Level Non-Uniformity Normalized (GLNN) value.\n", "\n", - " :math:`\\mu = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_r}_{j=1}{p(i,j|\\theta)i}`\n", + " :math:`GLNN = \\frac{\\sum^{N_g}_{i=1}\\left(\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}\\right)^2}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}^2}`\n", "\n", - " Measures the variance in gray level intensity for the runs.\n", + " Measures the similarity of gray-level intensity values in the image, where a lower GLNN value\n", + 
" correlates with a greater similarity in intensity values. This is the normalized version of the GLN formula.\n", " \n", - "LowGrayLevelRunEmphasis\n", + "ShortRunHighGrayLevelEmphasis\n", "\n", - " Calculate and return the mean Low Gray Level Run Emphasis (LGLRE) value for all GLRLMs.\n", + " Calculate and return the mean Short Run High Gray Level Emphasis (SRHGLE) value for all GLRLMs.\n", "\n", - " :math:`LGLRE = \\frac{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\frac{\\textbf{P}(i,j|\\theta)}{i^2}}}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", + " :math:`SRHGLE = \\frac{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\frac{\\textbf{P}(i,j|\\theta)i^2}{j^2}}}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", "\n", - " Measures the distribution of low gray-level values, with a higher value indicating a greater\n", - " concentration of low gray-level values in the image.\n", + " Measures the joint distribution of shorter run lengths with higher gray-level values.\n", " \n", - "GrayLevelNonUniformityNormalized\n", + "RunLengthNonUniformityNormalized\n", "\n", - " Calculate and return the Gray Level Non-Uniformity Normalized (GLNN) value.\n", + " Calculate and return the mean Run Length Non-Uniformity Normalized (RLNN) value for all GLRLMs.\n", "\n", - " :math:`GLNN = \\frac{\\sum^{N_g}_{i=1}\\left(\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}\\right)^2}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}^2}`\n", + " :math:`RLNN = \\frac{\\sum^{N_r}_{j=1}\\left(\\sum^{N_g}_{i=1}{\\textbf{P}(i,j|\\theta)}\\right)^2}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", "\n", - " Measures the similarity of gray-level intensity values in the image, where a lower GLNN value\n", - " correlates with a greater similarity in intensity values. 
This is the normalized version of the GLN formula.\n", + " Measures the similarity of run lengths throughout the image, with a lower value indicating\n", + " more homogeneity among run lengths in the image. This is the normalized version of the RLN formula.\n", " \n", - "RunVariance\n", - "\n", - " Calculate and return the Run Variance (RV) value.\n", + "RunEntropy\n", "\n", - " :math:`RV = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_r}_{j=1}{p(i,j|\\theta)(j - \\mu)^2}`, where\n", + " Calculate and return the Run Entropy (RE) value.\n", "\n", - " :math:`\\mu = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_r}_{j=1}{p(i,j|\\theta)j}`\n", + " :math:`RE = -\\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_r}_{j=1}{p(i,j|\\theta)\\log_{2}(p(i,j|\\theta)+\\epsilon)}`\n", "\n", - " Measures the variance in runs for the run lengths.\n", + " Here, :math:`\\epsilon` is an arbitrarily small positive number (:math:`\\approx 2.2\\times10^{-16}`).\n", " \n", - "GrayLevelNonUniformity\n", + "HighGrayLevelRunEmphasis\n", "\n", - " Calculate and return the mean Gray Level Non-Uniformity (GLN) value for all GLRLMs.\n", + " Calculate and return the mean High Gray Level Run Emphasis (HGLRE) value for all GLRLMs.\n", "\n", - " :math:`GLN = \\frac{\\sum^{N_g}_{i=1}\\left(\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}\\right)^2}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", + " :math:`HGLRE = \\frac{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)i^2}}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", "\n", - " Measures the similarity of gray-level intensity values in the image, where a lower GLN value\n", - " correlates with a greater similarity in intensity values.\n", + " Measures the distribution of the higher gray-level values, with a higher value indicating\n", + " a greater concentration of high gray-level values in the image.\n", " \n", - "LongRunEmphasis\n", + "LongRunHighGrayLevelEmphasis\n", "\n", - " 
Calculate and return the mean Long Run Emphasis (LRE) value for all GLRLMs.\n", + " Calculate and return the mean Long Run High Gray Level Emphasis (LRHGLE) value for all GLRLMs.\n", "\n", - " :math:`LRE = \\frac{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)j^2}}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", + " :math:`LRHGLRE = \\frac{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)i^2j^2}}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", "\n", - " A measure of the distribution of long run lengths, with a greater value indicative\n", - " of longer run lengths and more coarse structural textures.\n", + " Measures the joint distribution of long run lengths with higher gray-level values.\n", " \n", - "ShortRunHighGrayLevelEmphasis\n", + "ShortRunLowGrayLevelEmphasis\n", "\n", - " Calculate and return the mean Short Run High Gray Level Emphasis (SRHGLE) value for all GLRLMs.\n", + " Calculate and return the mean Short Run Low Gray Level Emphasis (SRLGLE) value for all GLRLMs.\n", "\n", - " :math:`SRHGLE = \\frac{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\frac{\\textbf{P}(i,j|\\theta)i^2}{j^2}}}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", + " :math:`SRLGLE = \\frac{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\frac{\\textbf{P}(i,j|\\theta)}{i^2j^2}}}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", "\n", - " Measures the joint distribution of shorter run lengths with higher gray-level values.\n", + " Measures the joint distribution of shorter run lengths with lower gray-level values.\n", " \n", "RunLengthNonUniformity\n", "\n", @@ -898,22 +892,15 @@ " Measures the similarity of run lengths throughout the image, with a lower value indicating\n", " more homogeneity among run lengths in the image.\n", " \n", - "ShortRunEmphasis\n", - "\n", - " Calculate and return the mean Short Run Emphasis (SRE) value for all GLRLMs.\n", - "\n", - " :math:`SRE = 
\\frac{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\frac{\\textbf{P}(i,j|\\theta)}{i^2}}}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", + "GrayLevelVariance\n", "\n", - " A measure of the distribution of short run lengths, with a greater value indicative\n", - " of shorter run lengths and more fine textural textures.\n", - " \n", - "LongRunHighGrayLevelEmphasis\n", + " Calculate and return the Gray Level Variance (GLV) value.\n", "\n", - " Calculate and return the mean Long Run High Gray Level Emphasis (LRHGLE) value for all GLRLMs.\n", + " :math:`GLV = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_r}_{j=1}{p(i,j|\\theta)(i - \\mu)^2}`, where\n", "\n", - " :math:`LRHGLRE = \\frac{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)i^2j^2}}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", + " :math:`\\mu = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_r}_{j=1}{p(i,j|\\theta)i}`\n", "\n", - " Measures the joint distribution of long run lengths with higher gray-level values.\n", + " Measures the variance in gray level intensity for the runs.\n", " \n", "RunPercentage\n", "\n", @@ -923,6 +910,33 @@ "\n", " Measures the homogeneity and distribution of runs of an image.\n", " \n", + "GrayLevelNonUniformity\n", + "\n", + " Calculate and return the mean Gray Level Non-Uniformity (GLN) value for all GLRLMs.\n", + "\n", + " :math:`GLN = \\frac{\\sum^{N_g}_{i=1}\\left(\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}\\right)^2}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", + "\n", + " Measures the similarity of gray-level intensity values in the image, where a lower GLN value\n", + " correlates with a greater similarity in intensity values.\n", + " \n", + "LongRunEmphasis\n", + "\n", + " Calculate and return the mean Long Run Emphasis (LRE) value for all GLRLMs.\n", + "\n", + " :math:`LRE = 
\\frac{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)j^2}}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", + "\n", + " A measure of the distribution of long run lengths, with a greater value indicative\n", + " of longer run lengths and more coarse structural textures.\n", + " \n", + "LowGrayLevelRunEmphasis\n", + "\n", + " Calculate and return the mean Low Gray Level Run Emphasis (LGLRE) value for all GLRLMs.\n", + "\n", + " :math:`LGLRE = \\frac{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\frac{\\textbf{P}(i,j|\\theta)}{i^2}}}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", + "\n", + " Measures the distribution of low gray-level values, with a higher value indicating a greater\n", + " concentration of low gray-level values in the image.\n", + " \n", "LongRunLowGrayLevelEmphasis\n", "\n", " Calculate and return the mean Long Run Low Gray Level Emphasis (LRLGLE) value for all GLRLMs.\n", @@ -931,39 +945,34 @@ "\n", " Measures the joint distribution of long run lengths with lower gray-level values.\n", " \n", - "RunEntropy\n", - "1\n", - " Calculate and return the Run Entropy (RE) value.\n", + "RunVariance\n", "\n", - " :math:`RE = -\\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_r}_{j=1}{p(i,j|\\theta)\\log_{2}(p(i,j|\\theta)+\\epsilon)}`\n", - " \n", - "HighGrayLevelRunEmphasis\n", + " Calculate and return the Run Variance (RV) value.\n", "\n", - " Calculate and return the mean High Gray Level Run Emphasis (HGLRE) value for all GLRLMs.\n", + " :math:`RV = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_r}_{j=1}{p(i,j|\\theta)(j - \\mu)^2}`, where\n", "\n", - " :math:`HGLRE = \\frac{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)i^2}}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", + " :math:`\\mu = \\displaystyle\\sum^{N_g}_{i=1}\\displaystyle\\sum^{N_r}_{j=1}{p(i,j|\\theta)j}`\n", "\n", - " Measures the distribution of the higher gray-level values, with a higher value 
indicating\n", - " a greater concentration of high gray-level values in the image.\n", + " Measures the variance in runs for the run lengths.\n", " \n", - "RunLengthNonUniformityNormalized\n", + "ShortRunEmphasis\n", "\n", - " Calculate and return the mean Run Length Non-Uniformity Normalized (RLNN) value for all GLRLMs.\n", + " Calculate and return the mean Short Run Emphasis (SRE) value for all GLRLMs.\n", "\n", - " :math:`RLNN = \\frac{\\sum^{N_r}_{j=1}\\left(\\sum^{N_g}_{i=1}{\\textbf{P}(i,j|\\theta)}\\right)^2}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", + " :math:`SRE = \\frac{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\frac{\\textbf{P}(i,j|\\theta)}{i^2}}}{\\sum^{N_g}_{i=1}\\sum^{N_r}_{j=1}{\\textbf{P}(i,j|\\theta)}}`\n", "\n", - " Measures the similarity of run lengths throughout the image, with a lower value indicating\n", - " more homogeneity among run lengths in the image. This is the normalized version of the RLN formula.\n", + " A measure of the distribution of short run lengths, with a greater value indicative\n", + " of shorter run lengths and more fine textural textures.\n", " \n" ] } ], "source": [ "# Print out the docstrings of the enabled features\n", - "print 'Will calculate the following GLRLM features: '\n", + "print('Will calculate the following GLRLM features: ')\n", "for f in glrlmFeatures.enabledFeatures.keys():\n", - " print f\n", - " print eval('glrlmFeatures.get' + f + 'FeatureValue.__doc__')" + " print(f)\n", + " print(eval('glrlmFeatures.get' + f + 'FeatureValue.__doc__'))" ] }, { @@ -977,36 +986,37 @@ "name": "stdout", "output_type": "stream", "text": [ - "Calculating GLRLM features... 
done\n", + "Calculating GLRLM features...\n", + "done\n", "Calculated GLRLM features: \n", - " ShortRunLowGrayLevelEmphasis : 0.00822976624416\n", - " GrayLevelVariance : 39.118151022\n", - " LowGrayLevelRunEmphasis : 0.00860039789166\n", - " GrayLevelNonUniformityNormalized : 0.0451412381498\n", - " RunVariance : 0.0847945778959\n", - " GrayLevelNonUniformity : 175.635192315\n", - " LongRunEmphasis : 1.22684403826\n", - " ShortRunHighGrayLevelEmphasis : 268.974179841\n", - " RunLengthNonUniformity : 3500.04323157\n", - " ShortRunEmphasis : 0.955939173141\n", - " LongRunHighGrayLevelEmphasis : 341.286579098\n", - " RunPercentage : 0.940406463249\n", - " LongRunLowGrayLevelEmphasis : 0.0106011704787\n", - " RunEntropy : 4.91503800316\n", - " HighGrayLevelRunEmphasis : 281.066493909\n", - " RunLengthNonUniformityNormalized : 0.895049465948\n" + " GrayLevelNonUniformityNormalized : 0.0451916226163\n", + " ShortRunHighGrayLevelEmphasis : 269.366654415\n", + " RunLengthNonUniformityNormalized : 0.894736783551\n", + " RunEntropy : 4.91343459806\n", + " HighGrayLevelRunEmphasis : 281.50156957\n", + " LongRunHighGrayLevelEmphasis : 341.908225705\n", + " ShortRunLowGrayLevelEmphasis : 0.0080944625119\n", + " RunLengthNonUniformity : 3473.88954354\n", + " GrayLevelVariance : 39.074009627\n", + " RunPercentage : 0.934010152284\n", + " GrayLevelNonUniformity : 174.640108304\n", + " LongRunEmphasis : 1.22741432468\n", + " LowGrayLevelRunEmphasis : 0.00846567390082\n", + " LongRunLowGrayLevelEmphasis : 0.0104694333711\n", + " RunVariance : 0.0849939088453\n", + " ShortRunEmphasis : 0.955813827306\n" ] } ], "source": [ "# Calculate the features and print out result\n", - "print 'Calculating GLRLM features...',\n", + "print('Calculating GLRLM features...')\n", "glrlmFeatures.calculateFeatures()\n", - "print 'done'\n", + "print('done')\n", "\n", - "print 'Calculated GLRLM features: '\n", - "for (key, val) in glrlmFeatures.featureValues.iteritems():\n", - " print ' ', key, ':', val" 
+ "print('Calculated GLRLM features: ')\n", + "for (key, val) in glrlmFeatures.featureValues.items():\n", + " print(' ', key, ':', val)" ] }, { @@ -1050,7 +1060,7 @@ "source": [ "logFeatures = {}\n", "sigmaValues = [1.0, 3.0, 5.0]\n", - "for logImage, inputImageName, inputKwargs in imageoperations.applyFilterLoG(image, sigma=sigmaValues, verbose=True):\n", + "for logImage, inputImageName, inputKwargs in imageoperations.getLoGImage(image, sigma=sigmaValues, verbose=True):\n", " logImage, croppedMask, bb = imageoperations.cropToTumorMask(logImage, mask)\n", " logFirstorderFeatures = firstorder.RadiomicsFirstOrder(logImage, croppedMask, **inputKwargs)\n", " logFirstorderFeatures.enableAllFeatures()\n", @@ -1069,72 +1079,72 @@ "name": "stdout", "output_type": "stream", "text": [ - " log-sigma-3-0-mm-3D_InterquartileRange : 103.158138275\n", - " log-sigma-3-0-mm-3D_Skewness : -0.498386343995\n", - " log-sigma-3-0-mm-3D_Uniformity : 0.0906478492348\n", - " log-sigma-3-0-mm-3D_MeanAbsoluteDeviation : 64.3312024633\n", - " log-sigma-3-0-mm-3D_Energy : 15235011555.6\n", - " log-sigma-3-0-mm-3D_RobustMeanAbsoluteDeviation : 43.3779243984\n", - " log-sigma-3-0-mm-3D_Median : -73.3129653931\n", - " log-sigma-3-0-mm-3D_TotalEnergy : 60441635199.8\n", - " log-sigma-3-0-mm-3D_Maximum : 114.296691895\n", - " log-sigma-3-0-mm-3D_RootMeanSquared : 1919.01616706\n", - " log-sigma-3-0-mm-3D_90Percentile : 13.9173410416\n", - " log-sigma-3-0-mm-3D_Minimum : -354.335235596\n", - " log-sigma-3-0-mm-3D_Entropy : 3.72121444058\n", - " log-sigma-3-0-mm-3D_StandardDeviation : 81.9760118492\n", - " log-sigma-3-0-mm-3D_Range : 468.63192749\n", - " log-sigma-3-0-mm-3D_Variance : 6720.06651871\n", - " log-sigma-3-0-mm-3D_10Percentile : -197.017340088\n", - " log-sigma-3-0-mm-3D_Kurtosis : 3.18336583197\n", - " log-sigma-3-0-mm-3D_Mean : -82.7355469484\n", - " log-sigma-1-0-mm-3D_InterquartileRange : 81.8767185211\n", - " log-sigma-1-0-mm-3D_Skewness : -0.220905251704\n", - " 
log-sigma-1-0-mm-3D_Uniformity : 0.114235313372\n", + " log-sigma-1-0-mm-3D_Range : 419.98638916\n", " log-sigma-1-0-mm-3D_MeanAbsoluteDeviation : 49.6646616511\n", - " log-sigma-1-0-mm-3D_Energy : 16201455197.6\n", - " log-sigma-1-0-mm-3D_RobustMeanAbsoluteDeviation : 34.3094515237\n", - " log-sigma-1-0-mm-3D_Median : -18.9197921753\n", - " log-sigma-1-0-mm-3D_TotalEnergy : 64275792715.1\n", + " log-sigma-1-0-mm-3D_10Percentile : -104.93405304\n", + " log-sigma-1-0-mm-3D_Skewness : -0.220905251704\n", + " log-sigma-1-0-mm-3D_InterquartileRange : 81.8767185211\n", + " log-sigma-1-0-mm-3D_90Percentile : 54.7903442383\n", " log-sigma-1-0-mm-3D_Maximum : 164.726760864\n", " log-sigma-1-0-mm-3D_RootMeanSquared : 1978.94740333\n", - " log-sigma-1-0-mm-3D_90Percentile : 54.7903442383\n", - " log-sigma-1-0-mm-3D_Minimum : -255.259628296\n", - " log-sigma-1-0-mm-3D_Entropy : 3.37004955078\n", + " log-sigma-1-0-mm-3D_Median : -18.9197921753\n", " log-sigma-1-0-mm-3D_StandardDeviation : 62.6969503974\n", - " log-sigma-1-0-mm-3D_Range : 419.98638916\n", - " log-sigma-1-0-mm-3D_Variance : 3930.90758913\n", - " log-sigma-1-0-mm-3D_10Percentile : -104.93405304\n", + " log-sigma-1-0-mm-3D_Entropy : 3.37004955078\n", + " log-sigma-1-0-mm-3D_Minimum : -255.259628296\n", + " log-sigma-1-0-mm-3D_TotalEnergy : 64275792715.1\n", " log-sigma-1-0-mm-3D_Kurtosis : 3.07182438072\n", + " log-sigma-1-0-mm-3D_RobustMeanAbsoluteDeviation : 34.3094515237\n", " log-sigma-1-0-mm-3D_Mean : -22.0460274432\n", - " log-sigma-5-0-mm-3D_InterquartileRange : 106.342716217\n", - " log-sigma-5-0-mm-3D_Skewness : -0.30549686903\n", - " log-sigma-5-0-mm-3D_Uniformity : 0.0902675928609\n", + " log-sigma-1-0-mm-3D_Variance : 3930.90758913\n", + " log-sigma-1-0-mm-3D_Uniformity : 0.114235313372\n", + " log-sigma-1-0-mm-3D_Energy : 16201455197.6\n", + " log-sigma-3-0-mm-3D_Range : 468.63192749\n", + " log-sigma-3-0-mm-3D_MeanAbsoluteDeviation : 64.3312024633\n", + " log-sigma-3-0-mm-3D_10Percentile : 
-197.017340088\n", + " log-sigma-3-0-mm-3D_Skewness : -0.498386343995\n", + " log-sigma-3-0-mm-3D_InterquartileRange : 103.158138275\n", + " log-sigma-3-0-mm-3D_90Percentile : 13.9173410416\n", + " log-sigma-3-0-mm-3D_Maximum : 114.296691895\n", + " log-sigma-3-0-mm-3D_RootMeanSquared : 1919.01616706\n", + " log-sigma-3-0-mm-3D_Median : -73.3129653931\n", + " log-sigma-3-0-mm-3D_StandardDeviation : 81.9760118492\n", + " log-sigma-3-0-mm-3D_Entropy : 3.72121444058\n", + " log-sigma-3-0-mm-3D_Minimum : -354.335235596\n", + " log-sigma-3-0-mm-3D_TotalEnergy : 60441635199.8\n", + " log-sigma-3-0-mm-3D_Kurtosis : 3.18336583197\n", + " log-sigma-3-0-mm-3D_RobustMeanAbsoluteDeviation : 43.3779243984\n", + " log-sigma-3-0-mm-3D_Mean : -82.7355469484\n", + " log-sigma-3-0-mm-3D_Variance : 6720.06651871\n", + " log-sigma-3-0-mm-3D_Uniformity : 0.0906478492348\n", + " log-sigma-3-0-mm-3D_Energy : 15235011555.6\n", + " log-sigma-5-0-mm-3D_Range : 464.759513855\n", " log-sigma-5-0-mm-3D_MeanAbsoluteDeviation : 63.4364264458\n", - " log-sigma-5-0-mm-3D_Energy : 14878729370.4\n", - " log-sigma-5-0-mm-3D_RobustMeanAbsoluteDeviation : 43.2562957783\n", - " log-sigma-5-0-mm-3D_Median : -99.1174468994\n", - " log-sigma-5-0-mm-3D_TotalEnergy : 59028162175.1\n", + " log-sigma-5-0-mm-3D_10Percentile : -211.974316406\n", + " log-sigma-5-0-mm-3D_Skewness : -0.30549686903\n", + " log-sigma-5-0-mm-3D_InterquartileRange : 106.342716217\n", + " log-sigma-5-0-mm-3D_90Percentile : -10.6977561951\n", " log-sigma-5-0-mm-3D_Maximum : 117.414512634\n", " log-sigma-5-0-mm-3D_RootMeanSquared : 1896.44460614\n", - " log-sigma-5-0-mm-3D_90Percentile : -10.6977561951\n", - " log-sigma-5-0-mm-3D_Minimum : -347.345001221\n", - " log-sigma-5-0-mm-3D_Entropy : 3.71336391413\n", + " log-sigma-5-0-mm-3D_Median : -99.1174468994\n", " log-sigma-5-0-mm-3D_StandardDeviation : 80.5220089107\n", - " log-sigma-5-0-mm-3D_Range : 464.759513855\n", - " log-sigma-5-0-mm-3D_Variance : 6483.79391901\n", - " 
log-sigma-5-0-mm-3D_10Percentile : -211.974316406\n", + " log-sigma-5-0-mm-3D_Entropy : 3.71336391413\n", + " log-sigma-5-0-mm-3D_Minimum : -347.345001221\n", + " log-sigma-5-0-mm-3D_TotalEnergy : 59028162175.1\n", " log-sigma-5-0-mm-3D_Kurtosis : 3.11489160873\n", - " log-sigma-5-0-mm-3D_Mean : -105.265625411\n" + " log-sigma-5-0-mm-3D_RobustMeanAbsoluteDeviation : 43.2562957783\n", + " log-sigma-5-0-mm-3D_Mean : -105.265625411\n", + " log-sigma-5-0-mm-3D_Variance : 6483.79391901\n", + " log-sigma-5-0-mm-3D_Uniformity : 0.0902675928609\n", + " log-sigma-5-0-mm-3D_Energy : 14878729370.4\n" ] } ], "source": [ "# Show result\n", - "for sigma, features in logFeatures.iteritems():\n", - " for (key, val) in features.iteritems():\n", + "for sigma, features in logFeatures.items():\n", + " for (key, val) in features.items():\n", " laplacianFeatureName = '%s_%s' % (str(sigma), key)\n", - " print ' ', laplacianFeatureName, ':', val" + " print(' ', laplacianFeatureName, ':', val)" ] }, { @@ -1162,26 +1172,26 @@ "name": "stdout", "output_type": "stream", "text": [ - "Calculated firstorder features with wavelet-LHL\n", "Calculated firstorder features with wavelet-LHH\n", "Calculated firstorder features with wavelet-HLL\n", + "Calculated firstorder features with wavelet-LHL\n", "Calculated firstorder features with wavelet-LLH\n", "Calculated firstorder features with wavelet-HLH\n", - "Calculated firstorder features with wavelet-HHH\n", "Calculated firstorder features with wavelet-HHL\n", + "Calculated firstorder features with wavelet-HHH\n", "Calculated firstorder features with wavelet-LLL\n" ] } ], "source": [ "waveletFeatures = {}\n", - "for decompositionImage, decompositionName, inputKwargs in imageoperations.applyFilterWavelet(image):\n", + "for decompositionImage, decompositionName, inputKwargs in imageoperations.getWaveletImage(image):\n", " decompositionImage, croppedMask, bb = imageoperations.cropToTumorMask(decompositionImage, mask)\n", " waveletFirstOrderFeaturs = 
firstorder.RadiomicsFirstOrder(decompositionImage, croppedMask, **kwargs)\n", " waveletFirstOrderFeaturs.enableAllFeatures()\n", " waveletFirstOrderFeaturs.calculateFeatures()\n", "\n", - " print 'Calculated firstorder features with ', decompositionName\n", + " print('Calculated firstorder features with ', decompositionName)\n", " waveletFeatures[decompositionName] = waveletFirstOrderFeaturs.featureValues" ] }, @@ -1196,187 +1206,188 @@ "name": "stdout", "output_type": "stream", "text": [ - " wavelet-LLL_InterquartileRange : 139.421191409\n", - " wavelet-LLL_Skewness : -0.602284216139\n", - " wavelet-LLL_Uniformity : 0.0803482068616\n", - " wavelet-LLL_MeanAbsoluteDeviation : 74.7767053453\n", - " wavelet-LLL_Energy : 51254818952.9\n", - " wavelet-LLL_RobustMeanAbsoluteDeviation : 55.382748498\n", - " wavelet-LLL_Median : 1525.11598422\n", - " wavelet-LLL_TotalEnergy : 203342482418.0\n", - " wavelet-LLL_Maximum : 1699.41321411\n", - " wavelet-LLL_RootMeanSquared : 3519.85352748\n", - " wavelet-LLL_90Percentile : 1630.40959406\n", - " wavelet-LLL_Minimum : 1101.29425235\n", - " wavelet-LLL_Entropy : 3.81186803941\n", - " wavelet-LLL_StandardDeviation : 91.2394927085\n", - " wavelet-LLL_Range : 598.118961757\n", - " wavelet-LLL_Variance : 8324.64502971\n", - " wavelet-LLL_10Percentile : 1408.32497169\n", - " wavelet-LLL_Kurtosis : 3.30562101996\n", - " wavelet-LLL_Mean : 1518.67080158\n", - " wavelet-HHH_InterquartileRange : 10.9326520923\n", - " wavelet-HHH_Skewness : -0.161302587938\n", - " wavelet-HHH_Uniformity : 0.486414452921\n", - " wavelet-HHH_MeanAbsoluteDeviation : 6.88788741453\n", - " wavelet-HHH_Energy : 16547974728.9\n", - " wavelet-HHH_RobustMeanAbsoluteDeviation : 4.54697240555\n", - " wavelet-HHH_Median : 0.0513263224352\n", - " wavelet-HHH_TotalEnergy : 65650534507.8\n", - " wavelet-HHH_Maximum : 59.9134724023\n", - " wavelet-HHH_RootMeanSquared : 1999.99847286\n", - " wavelet-HHH_90Percentile : 10.735432567\n", - " wavelet-HHH_Minimum : 
-58.0273629729\n", - " wavelet-HHH_Entropy : 1.11204797433\n", - " wavelet-HHH_StandardDeviation : 9.19944859408\n", - " wavelet-HHH_Range : 117.940835375\n", - " wavelet-HHH_Variance : 84.6298544352\n", - " wavelet-HHH_10Percentile : -10.6879329892\n", - " wavelet-HHH_Kurtosis : 6.35651924152\n", - " wavelet-HHH_Mean : -0.0226847341909\n", - " wavelet-HLL_InterquartileRange : 11.2243394312\n", - " wavelet-HLL_Skewness : 0.206551182686\n", - " wavelet-HLL_Uniformity : 0.48329527556\n", - " wavelet-HLL_MeanAbsoluteDeviation : 7.05950814555\n", - " wavelet-HLL_Energy : 16547158112.3\n", - " wavelet-HLL_RobustMeanAbsoluteDeviation : 4.68136820947\n", - " wavelet-HLL_Median : -0.196353934925\n", - " wavelet-HLL_TotalEnergy : 65647294757.0\n", - " wavelet-HLL_Maximum : 74.8590093632\n", - " wavelet-HLL_RootMeanSquared : 1999.94912386\n", - " wavelet-HLL_90Percentile : 10.9187592195\n", - " wavelet-HLL_Minimum : -51.9965483949\n", - " wavelet-HLL_Entropy : 1.13005458526\n", - " wavelet-HLL_StandardDeviation : 9.42063441708\n", - " wavelet-HLL_Range : 126.855557758\n", - " wavelet-HLL_Variance : 88.7483528203\n", - " wavelet-HLL_10Percentile : -11.1822447049\n", - " wavelet-HLL_Kurtosis : 6.44193937557\n", - " wavelet-HLL_Mean : -0.0730639190171\n", - " wavelet-HHL_InterquartileRange : 11.4885048182\n", - " wavelet-HHL_Skewness : 0.00173347436935\n", - " wavelet-HHL_Uniformity : 0.482438705425\n", - " wavelet-HHL_MeanAbsoluteDeviation : 7.09468688619\n", - " wavelet-HHL_Energy : 16548297953.4\n", - " wavelet-HHL_RobustMeanAbsoluteDeviation : 4.73465210477\n", - " wavelet-HHL_Median : 0.0731800928294\n", - " wavelet-HHL_TotalEnergy : 65651816831.6\n", - " wavelet-HHL_Maximum : 52.750446198\n", - " wavelet-HHL_RootMeanSquared : 2000.01800532\n", - " wavelet-HHL_90Percentile : 11.1168222752\n", - " wavelet-HHL_Minimum : -52.3423184536\n", - " wavelet-HHL_Entropy : 1.13367844225\n", - " wavelet-HHL_StandardDeviation : 9.38156375064\n", - " wavelet-HHL_Range : 
105.092764652\n", - " wavelet-HHL_Variance : 88.0137384072\n", - " wavelet-HHL_10Percentile : -11.1708244039\n", - " wavelet-HHL_Kurtosis : 5.24887979579\n", - " wavelet-HHL_Mean : -0.00399803756091\n", - " wavelet-LLH_InterquartileRange : 51.7485179102\n", - " wavelet-LLH_Skewness : -0.514193775328\n", - " wavelet-LLH_Uniformity : 0.182181892142\n", - " wavelet-LLH_MeanAbsoluteDeviation : 31.7641825492\n", - " wavelet-LLH_Energy : 16405200866.5\n", - " wavelet-LLH_RobustMeanAbsoluteDeviation : 21.5262080555\n", - " wavelet-LLH_Median : -7.1948286426\n", - " wavelet-LLH_TotalEnergy : 65084109883.0\n", - " wavelet-LLH_Maximum : 167.364402698\n", - " wavelet-LLH_RootMeanSquared : 1991.35191339\n", - " wavelet-LLH_90Percentile : 38.2699123752\n", - " wavelet-LLH_Minimum : -209.872439978\n", - " wavelet-LLH_Entropy : 2.73966669562\n", - " wavelet-LLH_StandardDeviation : 41.4892105786\n", - " wavelet-LLH_Range : 377.236842676\n", - " wavelet-LLH_Variance : 1721.35459443\n", - " wavelet-LLH_10Percentile : -58.0702381448\n", - " wavelet-LLH_Kurtosis : 4.92884123547\n", - " wavelet-LLH_Mean : -9.08034105832\n", - " wavelet-HLH_InterquartileRange : 10.99821242\n", - " wavelet-HLH_Skewness : -0.216428246439\n", - " wavelet-HLH_Uniformity : 0.488298907219\n", - " wavelet-HLH_MeanAbsoluteDeviation : 6.77388628528\n", - " wavelet-HLH_Energy : 16547339501.6\n", - " wavelet-HLH_RobustMeanAbsoluteDeviation : 4.48592652835\n", - " wavelet-HLH_Median : -0.065165818915\n", - " wavelet-HLH_TotalEnergy : 65648014380.2\n", - " wavelet-HLH_Maximum : 54.031233446\n", - " wavelet-HLH_RootMeanSquared : 1999.96008551\n", - " wavelet-HLH_90Percentile : 10.5580383627\n", - " wavelet-HLH_Minimum : -59.7402845962\n", - " wavelet-HLH_Entropy : 1.09951676451\n", - " wavelet-HLH_StandardDeviation : 9.02836267104\n", - " wavelet-HLH_Range : 113.771518042\n", - " wavelet-HLH_Variance : 81.5113325198\n", - " wavelet-HLH_10Percentile : -10.5831673374\n", - " wavelet-HLH_Kurtosis : 6.47263183683\n", - " 
wavelet-HLH_Mean : -0.0602928335869\n", - " wavelet-LHL_InterquartileRange : 11.2243394312\n", - " wavelet-LHL_Skewness : 0.206551182686\n", - " wavelet-LHL_Uniformity : 0.48329527556\n", - " wavelet-LHL_MeanAbsoluteDeviation : 7.05950814555\n", - " wavelet-LHL_Energy : 16547158112.3\n", - " wavelet-LHL_RobustMeanAbsoluteDeviation : 4.68136820947\n", - " wavelet-LHL_Median : -0.196353934925\n", - " wavelet-LHL_TotalEnergy : 65647294757.0\n", - " wavelet-LHL_Maximum : 74.8590093632\n", - " wavelet-LHL_RootMeanSquared : 1999.94912386\n", - " wavelet-LHL_90Percentile : 10.9187592195\n", - " wavelet-LHL_Minimum : -51.9965483949\n", - " wavelet-LHL_Entropy : 1.13005458526\n", - " wavelet-LHL_StandardDeviation : 9.42063441708\n", - " wavelet-LHL_Range : 126.855557758\n", - " wavelet-LHL_Variance : 88.7483528203\n", - " wavelet-LHL_10Percentile : -11.1822447049\n", - " wavelet-LHL_Kurtosis : 6.44193937557\n", - " wavelet-LHL_Mean : -0.0730639190171\n", - " wavelet-LHH_InterquartileRange : 10.99821242\n", - " wavelet-LHH_Skewness : -0.216428246439\n", - " wavelet-LHH_Uniformity : 0.488298907219\n", - " wavelet-LHH_MeanAbsoluteDeviation : 6.77388628528\n", - " wavelet-LHH_Energy : 16547339501.6\n", - " wavelet-LHH_RobustMeanAbsoluteDeviation : 4.48592652835\n", - " wavelet-LHH_Median : -0.0651658189149\n", - " wavelet-LHH_TotalEnergy : 65648014380.2\n", - " wavelet-LHH_Maximum : 54.031233446\n", - " wavelet-LHH_RootMeanSquared : 1999.96008551\n", - " wavelet-LHH_90Percentile : 10.5580383627\n", - " wavelet-LHH_Minimum : -59.7402845962\n", - " wavelet-LHH_Entropy : 1.09951676451\n", - " wavelet-LHH_StandardDeviation : 9.02836267104\n", - " wavelet-LHH_Range : 113.771518042\n", - " wavelet-LHH_Variance : 81.5113325198\n", - " wavelet-LHH_10Percentile : -10.5831673374\n", - " wavelet-LHH_Kurtosis : 6.47263183683\n", - " wavelet-LHH_Mean : -0.0602928335869\n" + " wavelet-HLH_Range : 449.325547601\n", + " wavelet-HLH_MeanAbsoluteDeviation : 30.6419982661\n", + " 
wavelet-HLH_10Percentile : -48.7973652241\n", + " wavelet-HLH_Skewness : -0.109634215846\n", + " wavelet-HLH_InterquartileRange : 44.1166822627\n", + " wavelet-HLH_90Percentile : 48.8399961041\n", + " wavelet-HLH_Maximum : 195.252851267\n", + " wavelet-HLH_RootMeanSquared : 2000.3397095\n", + " wavelet-HLH_Median : -0.788893809524\n", + " wavelet-HLH_StandardDeviation : 42.5781351255\n", + " wavelet-HLH_Entropy : 2.75501833527\n", + " wavelet-HLH_Minimum : -254.072696334\n", + " wavelet-HLH_TotalEnergy : 65672938804.3\n", + " wavelet-HLH_Kurtosis : 5.96264006105\n", + " wavelet-HLH_RobustMeanAbsoluteDeviation : 18.8328798407\n", + " wavelet-HLH_Mean : -0.113489262994\n", + " wavelet-HLH_Variance : 1812.89759077\n", + " wavelet-HLH_Uniformity : 0.192969183516\n", + " wavelet-HLH_Energy : 16553621990.3\n", + " wavelet-LLH_Range : 1516.85940092\n", + " wavelet-LLH_MeanAbsoluteDeviation : 205.422769392\n", + " wavelet-LLH_10Percentile : -370.145218742\n", + " wavelet-LLH_Skewness : -0.525773815287\n", + " wavelet-LLH_InterquartileRange : 352.67245821\n", + " wavelet-LLH_90Percentile : 297.911610403\n", + " wavelet-LLH_Maximum : 733.850508276\n", + " wavelet-LLH_RootMeanSquared : 2025.7276121\n", + " wavelet-LLH_Median : 62.7368130339\n", + " wavelet-LLH_StandardDeviation : 252.406755064\n", + " wavelet-LLH_Entropy : 5.27888924024\n", + " wavelet-LLH_Minimum : -783.008892644\n", + " wavelet-LLH_TotalEnergy : 67350532534.4\n", + " wavelet-LLH_Kurtosis : 2.69674899921\n", + " wavelet-LLH_RobustMeanAbsoluteDeviation : 146.79059076\n", + " wavelet-LLH_Mean : 9.94109078503\n", + " wavelet-LLH_Variance : 63709.1700017\n", + " wavelet-LLH_Uniformity : 0.0304399667913\n", + " wavelet-LLH_Energy : 16976478846.8\n", + " wavelet-HLL_Range : 477.790085389\n", + " wavelet-HLL_MeanAbsoluteDeviation : 35.7539421526\n", + " wavelet-HLL_10Percentile : -62.825323625\n", + " wavelet-HLL_Skewness : -0.514166044802\n", + " wavelet-HLL_InterquartileRange : 53.6721220044\n", + " 
wavelet-HLL_90Percentile : 50.858801791\n", + " wavelet-HLL_Maximum : 186.246123128\n", + " wavelet-HLL_RootMeanSquared : 1995.50607096\n", + " wavelet-HLL_Median : -2.81720035764\n", + " wavelet-HLL_StandardDeviation : 48.2505612367\n", + " wavelet-HLL_Entropy : 2.95573220857\n", + " wavelet-HLL_Minimum : -291.543962261\n", + " wavelet-HLL_TotalEnergy : 65355936931.4\n", + " wavelet-HLL_Kurtosis : 5.09932613055\n", + " wavelet-HLL_RobustMeanAbsoluteDeviation : 22.7497539081\n", + " wavelet-HLL_Mean : -5.07735424173\n", + " wavelet-HLL_Variance : 2328.11665966\n", + " wavelet-HLL_Uniformity : 0.163587425574\n", + " wavelet-HLL_Energy : 16473718010.5\n", + " wavelet-LLL_Range : 1712.56194574\n", + " wavelet-LLL_MeanAbsoluteDeviation : 293.143995944\n", + " wavelet-LLL_10Percentile : 1812.68473489\n", + " wavelet-LLL_Skewness : 0.228846426465\n", + " wavelet-LLL_InterquartileRange : 550.594267427\n", + " wavelet-LLL_90Percentile : 2739.69052111\n", + " wavelet-LLL_Maximum : 3180.63918677\n", + " wavelet-LLL_RootMeanSquared : 4269.54365408\n", + " wavelet-LLL_Median : 2244.88673609\n", + " wavelet-LLL_StandardDeviation : 350.172190209\n", + " wavelet-LLL_Entropy : 5.78300489052\n", + " wavelet-LLL_Minimum : 1468.07724103\n", + " wavelet-LLL_TotalEnergy : 299186404755.0\n", + " wavelet-LLL_Kurtosis : 2.27365643067\n", + " wavelet-LLL_RobustMeanAbsoluteDeviation : 220.739697172\n", + " wavelet-LLL_Mean : 2255.1595095\n", + " wavelet-LLL_Variance : 122620.562796\n", + " wavelet-LLL_Uniformity : 0.0199077767278\n", + " wavelet-LLL_Energy : 75413385469.3\n", + " wavelet-HHL_Range : 197.650791431\n", + " wavelet-HHL_MeanAbsoluteDeviation : 14.4086961805\n", + " wavelet-HHL_10Percentile : -22.0062020433\n", + " wavelet-HHL_Skewness : 0.121890250304\n", + " wavelet-HHL_InterquartileRange : 22.3805938159\n", + " wavelet-HHL_90Percentile : 23.0361222477\n", + " wavelet-HHL_Maximum : 96.9275598214\n", + " wavelet-HHL_RootMeanSquared : 2000.43345645\n", + " wavelet-HHL_Median : 
0.0829386441652\n", + " wavelet-HHL_StandardDeviation : 19.2752766031\n", + " wavelet-HHL_Entropy : 1.75422760497\n", + " wavelet-HHL_Minimum : -100.72323161\n", + " wavelet-HHL_TotalEnergy : 65679094540.6\n", + " wavelet-HHL_Kurtosis : 4.71302154772\n", + " wavelet-HHL_RobustMeanAbsoluteDeviation : 9.30098915932\n", + " wavelet-HHL_Mean : 0.340590352112\n", + " wavelet-HHL_Variance : 371.536288126\n", + " wavelet-HHL_Uniformity : 0.358701481744\n", + " wavelet-HHL_Energy : 16555173614.7\n", + " wavelet-LHH_Range : 604.053834439\n", + " wavelet-LHH_MeanAbsoluteDeviation : 41.6368845931\n", + " wavelet-LHH_10Percentile : -69.1252699081\n", + " wavelet-LHH_Skewness : 0.197382131363\n", + " wavelet-LHH_InterquartileRange : 61.7174514323\n", + " wavelet-LHH_90Percentile : 63.2151706827\n", + " wavelet-LHH_Maximum : 279.350380756\n", + " wavelet-LHH_RootMeanSquared : 1999.33891946\n", + " wavelet-LHH_Median : -1.94009209997\n", + " wavelet-LHH_StandardDeviation : 56.4734198287\n", + " wavelet-LHH_Entropy : 3.17918664471\n", + " wavelet-LHH_Minimum : -324.703453682\n", + " wavelet-LHH_TotalEnergy : 65607241581.1\n", + " wavelet-LHH_Kurtosis : 5.10438678184\n", + " wavelet-LHH_RobustMeanAbsoluteDeviation : 26.4723285429\n", + " wavelet-LHH_Mean : -1.45881510799\n", + " wavelet-LHH_Variance : 3189.24714715\n", + " wavelet-LHH_Uniformity : 0.140809788318\n", + " wavelet-LHH_Energy : 16537062247.2\n", + " wavelet-HHH_Range : 151.165663688\n", + " wavelet-HHH_MeanAbsoluteDeviation : 13.0759756862\n", + " wavelet-HHH_10Percentile : -21.0383130256\n", + " wavelet-HHH_Skewness : -0.0688112737237\n", + " wavelet-HHH_InterquartileRange : 20.3192746422\n", + " wavelet-HHH_90Percentile : 20.9238433344\n", + " wavelet-HHH_Maximum : 76.4099650187\n", + " wavelet-HHH_RootMeanSquared : 2000.23985683\n", + " wavelet-HHH_Median : 0.109117292789\n", + " wavelet-HHH_StandardDeviation : 17.3191286736\n", + " wavelet-HHH_Entropy : 1.62781167475\n", + " wavelet-HHH_Minimum : -74.7556986689\n", 
+ " wavelet-HHH_TotalEnergy : 65666382462.8\n", + " wavelet-HHH_Kurtosis : 4.32455549814\n", + " wavelet-HHH_RobustMeanAbsoluteDeviation : 8.53027450692\n", + " wavelet-HHH_Mean : 0.164876359429\n", + " wavelet-HHH_Variance : 299.952218013\n", + " wavelet-HHH_Uniformity : 0.382919979814\n", + " wavelet-HHH_Energy : 16551969388.8\n", + " wavelet-LHL_Range : 641.465953025\n", + " wavelet-LHL_MeanAbsoluteDeviation : 48.9628822529\n", + " wavelet-LHL_10Percentile : -85.6408963111\n", + " wavelet-LHL_Skewness : -0.369538595422\n", + " wavelet-LHL_InterquartileRange : 71.4997770702\n", + " wavelet-LHL_90Percentile : 71.5796532291\n", + " wavelet-LHL_Maximum : 286.571219987\n", + " wavelet-LHL_RootMeanSquared : 1995.91519044\n", + " wavelet-LHL_Median : -4.09721168383\n", + " wavelet-LHL_StandardDeviation : 66.5740051389\n", + " wavelet-LHL_Entropy : 3.40407739189\n", + " wavelet-LHL_Minimum : -354.894733038\n", + " wavelet-LHL_TotalEnergy : 65382738281.1\n", + " wavelet-LHL_Kurtosis : 4.82046635261\n", + " wavelet-LHL_RobustMeanAbsoluteDeviation : 30.6452121553\n", + " wavelet-LHL_Mean : -5.19541075848\n", + " wavelet-LHL_Variance : 4432.09816024\n", + " wavelet-LHL_Uniformity : 0.121366697967\n", + " wavelet-LHL_Energy : 16480473600.0\n" ] } ], "source": [ "# Show result\n", - "for decompositionName, features in waveletFeatures.iteritems():\n", - " for (key, val) in features.iteritems():\n", + "for decompositionName, features in waveletFeatures.items():\n", + " for (key, val) in features.items():\n", " waveletFeatureName = '%s_%s' % (str(decompositionName), key)\n", - " print ' ', waveletFeatureName, ':', val" + " print(' ', waveletFeatureName, ':', val)" ] } ], "metadata": { + "anaconda-cloud": {}, "kernelspec": { - "display_name": "Python 2", + "display_name": "Python 3", "language": "python", - "name": "python2" + "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", - "version": 2.0 + "version": 3 }, "file_extension": ".py", "mimetype": 
"text/x-python", "name": "python", "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.11" + "pygments_lexer": "ipython3", + "version": "3.5.2" } }, "nbformat": 4, diff --git a/bin/Notebooks/helloRadiomics.ipynb b/bin/Notebooks/helloRadiomics.ipynb index 8b0e6bea..5484f33c 100644 --- a/bin/Notebooks/helloRadiomics.ipynb +++ b/bin/Notebooks/helloRadiomics.ipynb @@ -24,6 +24,7 @@ }, "outputs": [], "source": [ + "from __future__ import print_function, unicode_literals, division, absolute_import\n", "import sys\n", "import os\n", "import logging\n", @@ -96,9 +97,9 @@ "maskName = os.path.join(dataDir, testCase + '_label.nrrd')\n", "\n", "if not os.path.exists(imageName):\n", - " print 'Error: problem finding input image', imageName\n", + " print('Error: problem finding input image', imageName)\n", "if not os.path.exists(maskName):\n", - " print 'Error: problem finding input labelmap', maskName" + " print('Error: problem finding input labelmap', maskName)" ] }, { @@ -152,7 +153,7 @@ "output_type": "stream", "text": [ "Enabled input images:\n", - "\toriginal\n" + "\tOriginal\n" ] } ], @@ -172,9 +173,9 @@ "\n", "# extractor.enableInputImages(wavelet={}, log={'sigma':[3.0]})\n", "\n", - "print \"Enabled input images:\"\n", + "print(\"Enabled input images:\")\n", "for imageType in extractor.inputImages.keys():\n", - " print '\\t' + imageType" + " print('\\t' + imageType)" ] }, { @@ -246,6 +247,7 @@ "\n", " :math:`entropy = -\\displaystyle\\sum^{N_l}_{i=1}{p(i)\\log_2\\big(p(i)+\\epsilon\\big)}`\n", "\n", + " Here, :math:`\\epsilon` is an arbitrarily small positive number (:math:`\\approx 2.2\\times10^{-16}`).\n", " Entropy specifies the uncertainty/randomness in the\n", " image values. 
It measures the average amount of\n", " information required to encode the image values.\n", @@ -391,13 +393,13 @@ } ], "source": [ - "print \"Active features:\"\n", - "for cls, features in extractor.enabledFeatures.iteritems():\n", + "print(\"Active features:\")\n", + "for cls, features in extractor.enabledFeatures.items():\n", " if len(features) == 0:\n", " features = extractor.getFeatureNames(cls)\n", " for f in features:\n", - " print f\n", - " print eval('extractor.featureClasses[\"%s\"].get%sFeatureValue.__doc__' % (cls, f))" + " print(f)\n", + " print(eval('extractor.featureClasses[\"%s\"].get%sFeatureValue.__doc__' % (cls, f)))" ] }, { @@ -421,62 +423,63 @@ "Calculating features\n", "\t\tComputing firstorder\n", "Computed general_info_BoundingBox: (162; 84; 11; 47; 70; 7)\n", - "Computed general_info_GeneralSettings: {'verbose': True; 'binWidth': 25; 'label': 1; 'interpolator': 'sitkBSpline'; 'resampledPixelSpacing': None; 'padDistance': 5}\n", + "Computed general_info_GeneralSettings: {'verbose': True; 'label': 1; 'resampledPixelSpacing': None; 'binWidth': 25; 'interpolator': 'sitkBSpline'; 'padDistance': 5}\n", "Computed general_info_ImageHash: 5c9ce3ca174f0f8324aa4d277e0fef82dc5ac566\n", "Computed general_info_ImageSpacing: (0.7812499999999999; 0.7812499999999999; 6.499999999999998)\n", - "Computed general_info_InputImages: {'original': {}}\n", + "Computed general_info_InputImages: {'Original': {}}\n", "Computed general_info_MaskHash: 9dc2c3137b31fd872997d92c9a92d5178126d9d3\n", - "Computed general_info_Version: v1.0.post11.dev0+g610dffc\n", + "Computed general_info_Version: v1.0.1.post6.dev0+g5b1e8bb\n", "Computed general_info_VolumeNum: 2\n", "Computed general_info_VoxelNum: 4137\n", - "Computed original_firstorder_InterquartileRange: 253.0\n", - "Computed original_firstorder_Skewness: 0.275650859086\n", - "Computed original_firstorder_Uniformity: 0.0451569635559\n", - "Computed original_firstorder_MeanAbsoluteDeviation: 133.447261953\n", - "Computed 
original_firstorder_Energy: 33122817481.0\n", + "Computed original_firstorder_Kurtosis: 2.18077293939\n", + "Computed original_firstorder_StandardDeviation: 156.611235894\n", + "Computed original_firstorder_10Percentile: 632.0\n", + "Computed original_firstorder_90Percentile: 1044.4\n", + "Computed original_firstorder_Minimum: 468.0\n", "Computed original_firstorder_RobustMeanAbsoluteDeviation: 103.00138343\n", - "Computed original_firstorder_Median: 812.0\n", - "Computed original_firstorder_TotalEnergy: 131407662126.0\n", + "Computed original_firstorder_Range: 798.0\n", "Computed original_firstorder_Maximum: 1266.0\n", + "Computed original_firstorder_Uniformity: 0.0451569635559\n", + "Computed original_firstorder_Mean: 825.235436307\n", + "Computed original_firstorder_TotalEnergy: 131407662126.0\n", + "Computed original_firstorder_MeanAbsoluteDeviation: 133.447261953\n", "Computed original_firstorder_RootMeanSquared: 2829.57282108\n", - "Computed original_firstorder_90Percentile: 1044.4\n", - "Computed original_firstorder_Minimum: 468.0\n", + "Computed original_firstorder_InterquartileRange: 253.0\n", "Computed original_firstorder_Entropy: 4.6019355539\n", - "Computed original_firstorder_StandardDeviation: 156.611235894\n", - "Computed original_firstorder_Range: 798.0\n", "Computed original_firstorder_Variance: 24527.0792084\n", - "Computed original_firstorder_10Percentile: 632.0\n", - "Computed original_firstorder_Kurtosis: 2.18077293939\n", - "Computed original_firstorder_Mean: 825.235436307\n" + "Computed original_firstorder_Energy: 33122817481.0\n", + "Computed original_firstorder_Skewness: 0.275650859086\n", + "Computed original_firstorder_Median: 812.0\n" ] } ], "source": [ - "print \"Calculating features\"\n", + "print(\"Calculating features\")\n", "featureVector = extractor.execute(imageName, maskName)\n", "\n", "for featureName in featureVector.keys():\n", - " print \"Computed %s: %s\" % (featureName, featureVector[featureName])" + " print(\"Computed %s: 
%s\" % (featureName, featureVector[featureName]))" ] } ], "metadata": { + "anaconda-cloud": {}, "kernelspec": { - "display_name": "Python 2", + "display_name": "Python 3", "language": "python", - "name": "python2" + "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", - "version": 2 + "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.11" + "pygments_lexer": "ipython3", + "version": "3.5.2" } }, "nbformat": 4, diff --git a/bin/addClassToBaseline.py b/bin/addClassToBaseline.py index 2c9b0b33..92939803 100644 --- a/bin/addClassToBaseline.py +++ b/bin/addClassToBaseline.py @@ -34,17 +34,17 @@ def main(): if len(testCases) > 0: break if len(testCases) == 0: - print ("No baselinefiles containing testcases found, exiting...") + print("No baselinefiles containing testcases found, exiting...") exit(-1) newClasses = [cls for cls in featureClasses if not os.path.exists(os.path.join(baselineDir, 'baseline_%s.csv' % (cls)))] if len(newClasses) == 0: - print "No new classes to add, exiting..." 
+ print("No new classes to add, exiting...") exit(0) - print "Adding new classes: ", newClasses + print("Adding new classes: ", newClasses) newBaseline = {} @@ -59,21 +59,21 @@ def main(): for cls in newClasses: newBaseline[cls] = {} - print "Computing new baseline" + print("Computing new baseline") for testCase in testCases: - print "\tCalculating test case", testCase + print("\tCalculating test case", testCase) imagePath = os.path.join(dataDir, testCase + '_image.nrrd') maskPath = os.path.join(dataDir, testCase + '_label.nrrd') image, mask = extractor.loadImage(imagePath, maskPath) if image is None or mask is None: - print "Error during loading of image/mask, testcase:", testCase + print("Error during loading of image/mask, testcase:", testCase) continue # testImage or mask not found / error during loading provenance = extractor.getProvenance(imagePath, maskPath, mask) image, mask, bb = imageoperations.cropToTumorMask(image, mask) for cls in newClasses: - print "\t\tCalculating class", cls + print("\t\tCalculating class", cls) newBaseline[cls][testCase] = collections.OrderedDict() newBaseline[cls][testCase]["Patient ID"] = testCase newBaseline[cls][testCase].update(provenance) @@ -82,7 +82,7 @@ def main(): featureClass.calculateFeatures() newBaseline[cls][testCase].update(featureClass.featureValues) - print "Writing new baseline" + print("Writing new baseline") for cls in newClasses: baselineFile = os.path.join(baselineDir, 'baseline_%s.csv' % (cls)) with open(baselineFile, 'wb') as baseline: diff --git a/bin/batchExamples/DatasetHierarchyReader.py b/bin/batchExamples/DatasetHierarchyReader.py index 35199dad..dba4063d 100644 --- a/bin/batchExamples/DatasetHierarchyReader.py +++ b/bin/batchExamples/DatasetHierarchyReader.py @@ -52,7 +52,7 @@ def readReconstructionsDirectory(self, studyDirectory, subfolders, create=False) recDirectory = os.path.join(studyDirectory, "Reconstructions") if not os.path.exists(recDirectory): os.mkdir(recDirectory) - print "\tCreated:", 
recDirectory + print("\tCreated:", recDirectory) return recDirectory, images @@ -67,7 +67,7 @@ def readSegmentationsDirectory(self, studyDirectory, subfolders, create=False): segDirectory = os.path.join(studyDirectory, "Segmentations") if not os.path.exists(segDirectory): os.mkdir(segDirectory) - print "\tCreated:", segDirectory + print("\tCreated:", segDirectory) return segDirectory, labels @@ -82,7 +82,7 @@ def readResourcesDirectory(self, studyDirectory, subfolders, create=False): resDirectory = os.path.join(studyDirectory, "Resources") if not os.path.exists(resDirectory): os.mkdir(resDirectory) - print "\tCreated:", resDirectory + print("\tCreated:", resDirectory) return resDirectory, resources @@ -102,7 +102,7 @@ def findImageAndLabelPair(self, imageFilepaths, maskFilepaths, keywordSettings): conditions. """ - keywordSettings = {k: [str(keyword.strip()) for keyword in v.split(',')] for (k, v) in keywordSettings.iteritems()} + keywordSettings = {k: [str(keyword.strip()) for keyword in v.split(',')] for (k, v) in keywordSettings.items()} matchedImages = [] for imageFilepath in imageFilepaths: @@ -117,14 +117,14 @@ def findImageAndLabelPair(self, imageFilepaths, maskFilepaths, keywordSettings): matchedMasks.append(maskFilepath) if len(matchedImages) < 1: - print "ERROR: No Images Matched" + print("ERROR: No Images Matched") elif len(matchedImages) > 1: - print "ERROR: Multiple Images Matched" + print("ERROR: Multiple Images Matched") if len(matchedMasks) < 1: - print "ERROR: No Masks Matched" + print("ERROR: No Masks Matched") elif len(matchedMasks) > 1: - print "ERROR: Multiple Masks Matched" + print("ERROR: Multiple Masks Matched") if (len(matchedImages) == 1) and (len(matchedMasks) == 1): return matchedImages[0], matchedMasks[0] diff --git a/bin/batchExamples/GenerateInputCSV_Datasethierarchy.py b/bin/batchExamples/GenerateInputCSV_Datasethierarchy.py index 022fafe2..257706e1 100644 --- a/bin/batchExamples/GenerateInputCSV_Datasethierarchy.py +++ 
b/bin/batchExamples/GenerateInputCSV_Datasethierarchy.py @@ -1,6 +1,7 @@ +from __future__ import print_function, unicode_literals, division, absolute_import import os import csv -from DatasetHierarchyReader import DatasetHierarchyReader +from .DatasetHierarchyReader import DatasetHierarchyReader def main(): @@ -15,12 +16,12 @@ def main(): keywordSettings['mask'] = 'label' keywordSettings['maskExclusion'] = '' - print "Scanning files..." + print("Scanning files...") datasetReader = DatasetHierarchyReader(inputDirectory, filetype=filetype) datasetHierarchyDict = datasetReader.ReadDatasetHierarchy() - print "Found %s patients, writing csv" % (str(len(datasetHierarchyDict.keys()))) + print("Found %s patients, writing csv" % (str(len(datasetHierarchyDict.keys())))) try: with open(outputFile, 'wb') as outFile: @@ -42,8 +43,8 @@ def main(): # ReaderName is not extracted using DatasetHierarchyReader, set it to 'N/A' cw.writerow([patientID, studyDate, 'N/A', imageFilepath, maskFilepath]) - except Exception, e: - print e + except Exception as e: + print(e) if __name__ == '__main__': diff --git a/bin/batchExamples/GenerateInputCSV_Filename.py b/bin/batchExamples/GenerateInputCSV_Filename.py index 35678c33..557f65b0 100644 --- a/bin/batchExamples/GenerateInputCSV_Filename.py +++ b/bin/batchExamples/GenerateInputCSV_Filename.py @@ -1,3 +1,4 @@ +from __future__ import print_function, unicode_literals, division, absolute_import import os import csv @@ -32,22 +33,22 @@ def main(): outputFile = DATA_ROOT_PATH + r"/Included/FileList.csv" filetype = ".nrrd" - print "Scanning files..." 
+ print("Scanning files...") datasetHierarchyDict = scanpatients(inputDirectory, filetype) - print "Found %s patients, writing csv" % (len(datasetHierarchyDict.keys())) + print("Found %s patients, writing csv" % (len(datasetHierarchyDict.keys()))) try: with open(outputFile, 'wb') as outFile: cw = csv.writer(outFile, lineterminator='\n') - for patient, Studies in sorted(datasetHierarchyDict.iteritems(), key=lambda t: t[0]): - for Study, im_fileList in sorted(Studies['reconstructions'].iteritems(), key=lambda t: t[0]): + for patient, Studies in sorted(datasetHierarchyDict.items(), key=lambda t: t[0]): + for Study, im_fileList in sorted(Studies['reconstructions'].items(), key=lambda t: t[0]): for i_idx, im_file in enumerate(im_fileList): if Studies['segmentations'].has_key(Study): - for Reader, seg_fileList in sorted(Studies['segmentations'][Study].iteritems(), key=lambda t: t[0]): + for Reader, seg_fileList in sorted(Studies['segmentations'][Study].items(), key=lambda t: t[0]): for s_idx, seg_file in enumerate(sorted(seg_fileList)): i_name = Study @@ -57,8 +58,8 @@ def main(): if s_idx > 0: s_name += " (%s)" % (str(s_idx + 1)) cw.writerow([patient, i_name, s_name, im_file, seg_file]) - except Exception, e: - print e + except Exception as e: + print(e) def scanpatients(f, filetype): @@ -74,9 +75,9 @@ def scanpatients(f, filetype): outputDict[PtNo] = {'reconstructions': {}} outputDict[PtNo]['segmentations'] = {} - for SqKey, SqVal in SqDic.iteritems(): + for SqKey, SqVal in SqDic.items(): if ("ROI_" + SqVal) in fname: - for ReaderKey, ReaderVal in LabelDic.iteritems(): + for ReaderKey, ReaderVal in LabelDic.items(): if (ReaderKey + '_') in fname: if not outputDict[PtNo]['segmentations'].has_key(SqVal): outputDict[PtNo]['segmentations'][SqVal] = {} diff --git a/bin/batchExamples/batchprocessing.py b/bin/batchExamples/batchprocessing.py index 3483d7b7..a47ed776 100644 --- a/bin/batchExamples/batchprocessing.py +++ b/bin/batchExamples/batchprocessing.py @@ -33,7 +33,7 @@ 
def main(): radiomics.logger.handlers[0].setLevel(logging.WARNING) logging.info('Loading CSV') - print "Loading CSV" + print("Loading CSV") flists = [] try: @@ -43,8 +43,8 @@ def main(): except Exception: logging.error('CSV READ FAILED:\n%s', traceback.format_exc()) - print "Loading Done" - print ("Patients: " + str(len(flists))) + print("Loading Done") + print("Patients: " + str(len(flists))) kwargs = {} kwargs['binWidth'] = 25 @@ -60,7 +60,7 @@ def main(): # extractor.enableInputImages(wavelet= {'level': 2}) for idx, entry in enumerate(flists, start=1): - print "(%d/%d) Processing Patient: %s, Study: %s, Reader: %s" % (idx, len(flists), entry[0], entry[1], entry[2]) + print("(%d/%d) Processing Patient: %s, Study: %s, Reader: %s" % (idx, len(flists), entry[0], entry[1], entry[2])) logging.info("(%d/%d) Processing Patient: %s, Study: %s, Reader: %s", idx, len(flists), entry[0], entry[1], entry[2]) diff --git a/bin/helloFeatureClass.py b/bin/helloFeatureClass.py index 430920f2..931fee35 100644 --- a/bin/helloFeatureClass.py +++ b/bin/helloFeatureClass.py @@ -1,3 +1,4 @@ +from __future__ import print_function, unicode_literals, division, absolute_import import os import SimpleITK as sitk import numpy @@ -12,10 +13,10 @@ maskName = os.path.join(dataDir, testCase + '_label.nrrd') if not os.path.exists(imageName): - print 'Error: problem finding input image', imageName + print('Error: problem finding input image', imageName) exit() if not os.path.exists(maskName): - print 'Error: problem finding input image', maskName + print('Error: problem finding input image', maskName) exit() image = sitk.ReadImage(imageName) @@ -49,18 +50,18 @@ firstOrderFeatures.enableFeatureByName('Mean', True) # firstOrderFeatures.enableAllFeatures() -print 'Will calculate the following first order features: ' +print('Will calculate the following first order features: ') for f in firstOrderFeatures.enabledFeatures.keys(): - print ' ', f - print eval('firstOrderFeatures.get' + f + 
'FeatureValue.__doc__') + print(' ', f) + print(eval('firstOrderFeatures.get' + f + 'FeatureValue.__doc__')) -print 'Calculating first order features...', +print('Calculating first order features...') firstOrderFeatures.calculateFeatures() -print 'done' +print('done') -print 'Calculated first order features: ' -for (key, val) in firstOrderFeatures.featureValues.iteritems(): - print ' ', key, ':', val +print('Calculated first order features: ') +for (key, val) in firstOrderFeatures.featureValues.items(): + print(' ', key, ':', val) # # Show Shape features @@ -68,18 +69,18 @@ shapeFeatures = shape.RadiomicsShape(image, mask, **kwargs) shapeFeatures.enableAllFeatures() -print 'Will calculate the following Shape features: ' +print('Will calculate the following Shape features: ') for f in shapeFeatures.enabledFeatures.keys(): - print ' ', f - print eval('shapeFeatures.get' + f + 'FeatureValue.__doc__') + print(' ', f) + print(eval('shapeFeatures.get' + f + 'FeatureValue.__doc__')) -print 'Calculating Shape features...', +print('Calculating Shape features...') shapeFeatures.calculateFeatures() -print 'done' +print('done') -print 'Calculated Shape features: ' -for (key, val) in shapeFeatures.featureValues.iteritems(): - print ' ', key, ':', val +print('Calculated Shape features: ') +for (key, val) in shapeFeatures.featureValues.items(): + print(' ', key, ':', val) # # Show GLCM features @@ -87,18 +88,18 @@ glcmFeatures = glcm.RadiomicsGLCM(image, mask, **kwargs) glcmFeatures.enableAllFeatures() -print 'Will calculate the following GLCM features: ' +print('Will calculate the following GLCM features: ') for f in glcmFeatures.enabledFeatures.keys(): - print ' ', f - print eval('glcmFeatures.get' + f + 'FeatureValue.__doc__') + print(' ', f) + print(eval('glcmFeatures.get' + f + 'FeatureValue.__doc__')) -print 'Calculating GLCM features...', +print('Calculating GLCM features...') glcmFeatures.calculateFeatures() -print 'done' +print('done') -print 'Calculated GLCM features: ' 
-for (key, val) in glcmFeatures.featureValues.iteritems(): - print ' ', key, ':', val +print('Calculated GLCM features: ') +for (key, val) in glcmFeatures.featureValues.items(): + print(' ', key, ':', val) # # Show GLRLM features @@ -106,18 +107,18 @@ glrlmFeatures = glrlm.RadiomicsGLRLM(image, mask, **kwargs) glrlmFeatures.enableAllFeatures() -print 'Will calculate the following GLRLM features: ' +print('Will calculate the following GLRLM features: ') for f in glrlmFeatures.enabledFeatures.keys(): - print ' ', f - print eval('glrlmFeatures.get' + f + 'FeatureValue.__doc__') + print(' ', f) + print(eval('glrlmFeatures.get' + f + 'FeatureValue.__doc__')) -print 'Calculating GLRLM features...', +print('Calculating GLRLM features...') glrlmFeatures.calculateFeatures() -print 'done' +print('done') -print 'Calculated GLRLM features: ' -for (key, val) in glrlmFeatures.featureValues.iteritems(): - print ' ', key, ':', val +print('Calculated GLRLM features: ') +for (key, val) in glrlmFeatures.featureValues.items(): + print(' ', key, ':', val) # # Show GLSZM features @@ -125,18 +126,18 @@ glszmFeatures = glszm.RadiomicsGLSZM(image, mask, **kwargs) glszmFeatures.enableAllFeatures() -print 'Will calculate the following GLSZM features: ' +print('Will calculate the following GLSZM features: ') for f in glszmFeatures.enabledFeatures.keys(): - print ' ', f - print eval('glszmFeatures.get' + f + 'FeatureValue.__doc__') + print(' ', f) + print(eval('glszmFeatures.get' + f + 'FeatureValue.__doc__')) -print 'Calculating GLSZM features...', +print('Calculating GLSZM features...') glszmFeatures.calculateFeatures() -print 'done' +print('done') -print 'Calculated GLSZM features: ' -for (key, val) in glszmFeatures.featureValues.iteritems(): - print ' ', key, ':', val +print('Calculated GLSZM features: ') +for (key, val) in glszmFeatures.featureValues.items(): + print(' ', key, ':', val) # # Show FirstOrder features, calculated on a LoG filtered image @@ -147,9 +148,9 @@ 
logFirstorderFeatures = firstorder.RadiomicsFirstOrder(logImage, mask, **inputKwargs) logFirstorderFeatures.enableAllFeatures() logFirstorderFeatures.calculateFeatures() - for (key, val) in logFirstorderFeatures.featureValues.iteritems(): + for (key, val) in logFirstorderFeatures.featureValues.items(): laplacianFeatureName = '%s_%s' % (inputImageName, key) - print ' ', laplacianFeatureName, ':', val + print(' ', laplacianFeatureName, ':', val) # # Show FirstOrder features, calculated on a wavelet filtered image # @@ -158,7 +159,7 @@ waveletFirstOrderFeaturs = firstorder.RadiomicsFirstOrder(decompositionImage, mask, **inputKwargs) waveletFirstOrderFeaturs.enableAllFeatures() waveletFirstOrderFeaturs.calculateFeatures() - print 'Calculated firstorder features with wavelet ', decompositionName - for (key, val) in waveletFirstOrderFeaturs.featureValues.iteritems(): + print('Calculated firstorder features with wavelet ', decompositionName) + for (key, val) in waveletFirstOrderFeaturs.featureValues.items(): waveletFeatureName = 'wavelet-%s_%s' % (str(decompositionName), key) - print ' ', waveletFeatureName, ':', val + print(' ', waveletFeatureName, ':', val) diff --git a/bin/helloRadiomics.py b/bin/helloRadiomics.py index e476f621..079362ae 100644 --- a/bin/helloRadiomics.py +++ b/bin/helloRadiomics.py @@ -10,10 +10,10 @@ maskName = os.path.join(dataDir, testCase + '_label.nrrd') if not os.path.exists(imageName): - print 'Error: problem finding input image', imageName + print('Error: problem finding input image', imageName) exit() if not os.path.exists(maskName): - print 'Error: problem finding input labelmap', maskName + print('Error: problem finding input labelmap', maskName) exit() # Define settings for signature calculation @@ -53,16 +53,16 @@ handler.setFormatter(formatter) logger.addHandler(handler) -print "Active features:" -for cls, features in extractor.enabledFeatures.iteritems(): +print("Active features:") +for cls, features in 
extractor.enabledFeatures.items(): if len(features) == 0: features = extractor.getFeatureNames(cls) for f in features: - print f - print eval('extractor.featureClasses["%s"].get%sFeatureValue.__doc__' % (cls, f)) + print(f) + print(eval('extractor.featureClasses["%s"].get%sFeatureValue.__doc__' % (cls, f))) -print "Calculating features" +print("Calculating features") featureVector = extractor.execute(imageName, maskName) for featureName in featureVector.keys(): - print "Computed %s: %s" % (featureName, featureVector[featureName]) + print("Computed %s: %s" % (featureName, featureVector[featureName])) diff --git a/data/schemaFuncs.py b/data/schemaFuncs.py index 80765fe2..708e027e 100644 --- a/data/schemaFuncs.py +++ b/data/schemaFuncs.py @@ -1,11 +1,12 @@ import pywt from radiomics.featureextractor import RadiomicsFeaturesExtractor +from radiomics import c_str_type featureClasses = RadiomicsFeaturesExtractor.getFeatureClasses() def checkWavelet(value, rule_obj, path): - if not isinstance(value, basestring): + if not isinstance(value, c_str_type): raise TypeError('Wavelet not expected type (str)') wavelist = pywt.wavelist() if value not in wavelist: @@ -16,7 +17,7 @@ def checkWavelet(value, rule_obj, path): def checkInterpolator(value, rule_obj, path): if value is None: return True - if isinstance(value, basestring): + if isinstance(value, c_str_type): enum = {'sitkNearestNeighbor', 'sitkLinear', 'sitkBSpline', @@ -40,7 +41,7 @@ def checkInterpolator(value, rule_obj, path): def checkWeighting(value, rule_obj, path): if value is None: return True - elif isinstance(value, basestring): + elif isinstance(value, c_str_type): enum = ['euclidean', 'manhattan', 'infinity', 'no_weighting'] if value not in enum: raise ValueError('WeightingNorm value "%s" not valid, possible values: %s' % (value, enum)) @@ -53,7 +54,7 @@ def checkFeatureClass(value, rule_obj, path): global featureClasses if value is None: raise TypeError('featureClass dictionary cannot be None value') - for 
className, features in value.iteritems(): + for className, features in value.items(): if className not in featureClasses.keys(): raise ValueError( 'Feature Class %s is not recognized. Available feature classes are %s' % (className, featureClasses.keys())) diff --git a/radiomics/__init__.py b/radiomics/__init__.py index cf0c7410..2425a82f 100644 --- a/radiomics/__init__.py +++ b/radiomics/__init__.py @@ -1,9 +1,18 @@ +from __future__ import print_function, unicode_literals, division, absolute_import import sys if sys.version_info < (2, 6, 0): raise ImportError("pyradiomics > 0.9.7 requires python 2.6 or later") in_py3 = sys.version_info[0] > 2 +if in_py3: + c_str_type = str + safe_xrange = lambda *x, **kwargs: iter(range(*x, **kwargs)) +else: + c_str_type = basestring + safe_xrange = xrange + + import pkgutil import inspect import os diff --git a/radiomics/_version.py b/radiomics/_version.py index c6491c5f..e406c78a 100644 --- a/radiomics/_version.py +++ b/radiomics/_version.py @@ -74,8 +74,8 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, assert isinstance(commands, list) p = None for c in commands: + dispcmd = str([c] + args) try: - dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, diff --git a/radiomics/featureextractor.py b/radiomics/featureextractor.py index 80ed4cc6..fc94d764 100644 --- a/radiomics/featureextractor.py +++ b/radiomics/featureextractor.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +from __future__ import print_function, unicode_literals, division, absolute_import import os import logging import collections @@ -6,7 +7,8 @@ import SimpleITK as sitk import pykwalify.core import radiomics -from radiomics import getFeatureClasses, getInputImageTypes, imageoperations, generalinfo +from radiomics import getFeatureClasses, getInputImageTypes, imageoperations, generalinfo, c_str_type + class 
RadiomicsFeaturesExtractor: @@ -71,7 +73,7 @@ def __init__(self, *args, **kwargs): self.inputImages = {} self.enabledFeatures = {} - if len(args) == 1 and isinstance(args[0], basestring): + if len(args) == 1 and isinstance(args[0], c_str_type): self.loadParams(args[0]) else: # Set default settings and update with and changed settings contained in kwargs @@ -317,15 +319,15 @@ def execute(self, imageFilepath, maskFilepath, label=None): for feature in enabledFeatures: shapeClass.enableFeatureByName(feature) - if self.kwargs['verbose']: print "\t\tComputing shape" + if self.kwargs['verbose']: print("\t\tComputing shape") shapeClass.calculateFeatures() - for (featureName, featureValue) in shapeClass.featureValues.iteritems(): + for (featureName, featureValue) in shapeClass.featureValues.items(): newFeatureName = "original_shape_%s" % (featureName) featureVector[newFeatureName] = featureValue # Make generators for all enabled input image types imageGenerators = [] - for imageType, customKwargs in self.inputImages.iteritems(): + for imageType, customKwargs in self.inputImages.items(): args = self.kwargs.copy() args.update(customKwargs) self.logger.info("Applying filter: '%s' with settings: %s" % (imageType, str(args))) @@ -350,22 +352,22 @@ def loadImage(self, ImageFilePath, MaskFilePath): If resampling is enabled, both image and mask are resampled and cropped to the tumormask (with additional padding as specified in padDistance) after assignment of image and mask. 
""" - if isinstance(ImageFilePath, basestring) and os.path.exists(ImageFilePath): + if isinstance(ImageFilePath, c_str_type) and os.path.exists(ImageFilePath): image = sitk.ReadImage(ImageFilePath) elif isinstance(ImageFilePath, sitk.SimpleITK.Image): image = ImageFilePath else: self.logger.warning('Error reading image Filepath or SimpleITK object') - if self.kwargs['verbose']: print "Error reading image Filepath or SimpleITK object" + if self.kwargs['verbose']: print("Error reading image Filepath or SimpleITK object") image = None - if isinstance(MaskFilePath, basestring) and os.path.exists(MaskFilePath): + if isinstance(MaskFilePath, c_str_type) and os.path.exists(MaskFilePath): mask = sitk.ReadImage(MaskFilePath) elif isinstance(ImageFilePath, sitk.SimpleITK.Image): mask = MaskFilePath else: self.logger.warning('Error reading mask Filepath or SimpleITK object') - if self.kwargs['verbose']: print "Error reading mask Filepath or SimpleITK object" + if self.kwargs['verbose']: print("Error reading mask Filepath or SimpleITK object") mask = None if self.kwargs['interpolator'] is not None and self.kwargs['resampledPixelSpacing'] is not None: @@ -385,7 +387,7 @@ def getProvenance(self, imageFilepath, maskFilepath, mask): """ provenanceVector = collections.OrderedDict() generalinfoClass = generalinfo.GeneralInfo(imageFilepath, maskFilepath, mask, self.kwargs, self.inputImages) - for k, v in generalinfoClass.execute().iteritems(): + for k, v in generalinfoClass.execute().items(): provenanceVector['general_info_%s' % (k)] = v return provenanceVector @@ -400,7 +402,7 @@ def computeFeatures(self, image, mask, inputImageName, **kwargs): featureVector = collections.OrderedDict() # Calculate feature classes - for featureClassName, enabledFeatures in self.enabledFeatures.iteritems(): + for featureClassName, enabledFeatures in self.enabledFeatures.items(): # Handle calculation of shape features separately if featureClassName == 'shape': continue @@ -414,9 +416,9 @@ def 
computeFeatures(self, image, mask, inputImageName, **kwargs): for feature in enabledFeatures: featureClass.enableFeatureByName(feature) - if self.kwargs['verbose']: print "\t\tComputing %s" % (featureClassName) + if self.kwargs['verbose']: print("\t\tComputing %s" % (featureClassName)) featureClass.calculateFeatures() - for (featureName, featureValue) in featureClass.featureValues.iteritems(): + for (featureName, featureValue) in featureClass.featureValues.items(): newFeatureName = "%s_%s_%s" % (inputImageName, featureClassName, featureName) featureVector[newFeatureName] = featureValue diff --git a/radiomics/generalinfo.py b/radiomics/generalinfo.py index 04e966de..485ab35a 100644 --- a/radiomics/generalinfo.py +++ b/radiomics/generalinfo.py @@ -3,13 +3,14 @@ import numpy import SimpleITK as sitk import radiomics +from radiomics import c_str_type class GeneralInfo(): def __init__(self, imagePath, maskPath, resampledMask, kwargs, inputImages): self.logger = logging.getLogger(self.__module__) - if isinstance(imagePath, basestring): + if isinstance(imagePath, c_str_type): self.image = sitk.ReadImage(imagePath) elif isinstance(imagePath, sitk.Image): self.image = imagePath @@ -17,7 +18,7 @@ def __init__(self, imagePath, maskPath, resampledMask, kwargs, inputImages): self.logger.warning('Error reading image Filepath or SimpleITK object') self.image = None - if isinstance(maskPath, basestring): + if isinstance(maskPath, c_str_type): self.mask = sitk.ReadImage(maskPath) elif isinstance(maskPath, sitk.Image): self.mask = maskPath diff --git a/radiomics/glcm.py b/radiomics/glcm.py index 489027df..c8700591 100644 --- a/radiomics/glcm.py +++ b/radiomics/glcm.py @@ -1,6 +1,6 @@ import numpy import collections -from radiomics import base, imageoperations +from radiomics import base, imageoperations, safe_xrange import SimpleITK as sitk from tqdm import trange @@ -147,7 +147,7 @@ def _calculateGLCM(self): if self.verbose: bar = trange(Ng, desc='calculate GLCM') # iterate over 
gray levels for center voxel - for i in xrange(1, Ng + 1): + for i in safe_xrange(1, Ng + 1): # give some progress if self.verbose: bar.update() @@ -155,7 +155,7 @@ def _calculateGLCM(self): i_indices = numpy.where(self.matrix == i) # iterate over gray levels for neighbouring voxel - for j in xrange(1, Ng + 1): + for j in safe_xrange(1, Ng + 1): # get the indices to all voxels which have the current gray level j j_indices = set(zip(*numpy.where(self.matrix == j))) diff --git a/radiomics/glrlm.py b/radiomics/glrlm.py index 14ff999e..b3380dd9 100644 --- a/radiomics/glrlm.py +++ b/radiomics/glrlm.py @@ -1,7 +1,7 @@ from itertools import chain import numpy import SimpleITK as sitk -from radiomics import base, imageoperations +from radiomics import base, imageoperations, safe_xrange class RadiomicsGLRLM(base.RadiomicsFeaturesBase): @@ -119,17 +119,16 @@ def _calculateGLRLM(self): d2 = movingDims[1] direction = numpy.where(angle < 0, -1, 1) diags = chain.from_iterable([self.matrix[::direction[0], ::direction[1], ::direction[2]].diagonal(a, d1, d2) - for a in xrange(-self.matrix.shape[d1] + 1, self.matrix.shape[d2])]) + for a in safe_xrange(-self.matrix.shape[d1] + 1, self.matrix.shape[d2])]) else: # movement in 3 dimensions, e.g. 
angle (1, 1, 1) diags = [] direction = numpy.where(angle < 0, -1, 1) for h in [self.matrix[::direction[0], ::direction[1], ::direction[2]].diagonal(a, 0, 1) - for a in xrange(-self.matrix.shape[0] + 1, self.matrix.shape[1])]: - diags.extend([h.diagonal(b, 0, 1) for b in xrange(-h.shape[0] + 1, h.shape[1])]) - - matrixDiagonals.append(filter(lambda diag: numpy.nonzero(diag != padVal)[0].size > 0, diags)) + for a in safe_xrange(-self.matrix.shape[0] + 1, self.matrix.shape[1])]: + diags.extend([h.diagonal(b, 0, 1) for b in safe_xrange(-h.shape[0] + 1, h.shape[1])]) + matrixDiagonals.append(filter(lambda diag: numpy.any(diag != padVal), diags)) P_glrlm = numpy.zeros((Ng, Nr, int(len(matrixDiagonals)))) # Run-Length Encoding (rle) for the list of diagonals @@ -138,18 +137,17 @@ def _calculateGLRLM(self): P = P_glrlm[:, :, angle_idx] # Check whether delineation is 2D for current angle (all diagonals contain 0 or 1 non-pad value) isMultiElement = False - for d in angle: - if numpy.where(d != padVal)[0].shape[0] > 1: + for diagonal in angle: + if not isMultiElement and numpy.sum(diagonal != padVal) > 1: isMultiElement = True - break - if isMultiElement: - for diagonal in angle: - pos, = numpy.where(numpy.diff(diagonal) != 0) - pos = numpy.concatenate(([0], pos + 1, [len(diagonal)])) - rle = zip([int(n) for n in diagonal[pos[:-1]]], pos[1:] - pos[:-1]) - for level, run_length in rle: - if level != padVal: - P[level - 1, run_length - 1] += 1 + pos, = numpy.where(numpy.diff(diagonal) != 0) + pos = numpy.concatenate(([0], pos + 1, [len(diagonal)])) + rle = zip([int(n) for n in diagonal[pos[:-1]]], pos[1:] - pos[:-1]) + for level, run_length in rle: + if level != padVal: + P[level - 1, run_length - 1] += 1 + if not isMultiElement: + P[:] = 0 # Crop gray-level axis of GLRLMs to between minimum and maximum observed gray-levels # Crop run-length axis of GLRLMs up to maximum observed run-length diff --git a/radiomics/glszm.py b/radiomics/glszm.py index b4539421..755701f0 100644 --- 
a/radiomics/glszm.py +++ b/radiomics/glszm.py @@ -1,10 +1,12 @@ +import sys import numpy import SimpleITK as sitk -from radiomics import base, imageoperations +from radiomics import base, imageoperations, safe_xrange import pdb from tqdm import trange + class RadiomicsGLSZM(base.RadiomicsFeaturesBase): r""" A Gray Level Size Zone (GLSZM) quantifies gray level zones in an image. @@ -87,7 +89,7 @@ def _calculateGLSZM(self): if self.verbose: bar = trange(numGrayLevels - 1, desc='calculate GLSZM') - for i in xrange(1, numGrayLevels): + for i in safe_xrange(1, numGrayLevels): # give some progress if self.verbose: bar.update() diff --git a/radiomics/imageoperations.py b/radiomics/imageoperations.py index 2db38b12..2f04c9a3 100644 --- a/radiomics/imageoperations.py +++ b/radiomics/imageoperations.py @@ -4,6 +4,7 @@ import SimpleITK as sitk import numpy import pywt +from radiomics import safe_xrange logger = logging.getLogger(__name__) @@ -64,11 +65,11 @@ def generateAngles(size, maxDistance=1): angles = [] - for z in xrange(1, maxDistance + 1): + for z in safe_xrange(1, maxDistance + 1): angles.append((0, 0, z)) - for y in xrange(-maxDistance, maxDistance + 1): + for y in safe_xrange(-maxDistance, maxDistance + 1): angles.append((0, z, y)) - for x in xrange(-maxDistance, maxDistance + 1): + for x in safe_xrange(-maxDistance, maxDistance + 1): angles.append((z, y, x)) angles = numpy.array(angles) @@ -119,8 +120,13 @@ def cropToTumorMask(imageNode, maskNode, label=1, boundingBox=None): # Crop Image logger.debug('Cropping to size %s', (boundingBox[1::2] - boundingBox[0::2]) + 1) cif = sitk.CropImageFilter() - cif.SetLowerBoundaryCropSize(ijkMinBounds) - cif.SetUpperBoundaryCropSize(ijkMaxBounds) + try: + cif.SetLowerBoundaryCropSize(ijkMinBounds) + cif.SetUpperBoundaryCropSize(ijkMaxBounds) + except TypeError: + # newer versions of SITK/python want a tuple or list + cif.SetLowerBoundaryCropSize(ijkMinBounds.tolist()) + cif.SetUpperBoundaryCropSize(ijkMaxBounds.tolist()) 
croppedImageNode = cif.Execute(imageNode) croppedMaskNode = cif.Execute(maskNode) @@ -274,14 +280,14 @@ def getLoGImage(inputImage, **kwargs): if numpy.min(size) < 4: logger.warning('Image too small to apply LoG filter, size: %s', size) - if kwargs.get('verbose', False): print 'Image too small to apply LoG filter' + if kwargs.get('verbose', False): print('Image too small to apply LoG filter') return sigmaValues = kwargs.get('sigma', numpy.arange(5., 0., -.5)) for sigma in sigmaValues: logger.debug('Computing LoG with sigma %g', sigma) - if kwargs.get('verbose', False): print "\tComputing LoG with sigma %g" % (sigma) + if kwargs.get('verbose', False): print("\tComputing LoG with sigma %g" % (sigma)) if sigma > 0.0: if numpy.all(size >= numpy.ceil(sigma / spacing) + 1): @@ -334,7 +340,7 @@ def getWaveletImage(inputImage, **kwargs): for idx, wl in enumerate(ret, start=1): for decompositionName, decompositionImage in wl.items(): logger.debug('Computing Wavelet %s', decompositionName) - if kwargs.get('verbose', False): print "\tComputing Wavelet %s" % (decompositionName) + if kwargs.get('verbose', False): print("\tComputing Wavelet %s" % (decompositionName)) if idx == 1: inputImageName = 'wavelet-%s' % (decompositionName) @@ -391,7 +397,7 @@ def _swt3(inputImage, wavelet="coif1", level=1, start_level=0): 'LHH': LHH, 'LHL': LHL, 'LLH': LLH} - for decName, decImage in dec.iteritems(): + for decName, decImage in dec.items(): decTemp = decImage.copy() decTemp = numpy.resize(decTemp, original_shape) sitkImage = sitk.GetImageFromArray(decTemp) diff --git a/radiomics/scripts/commandline.py b/radiomics/scripts/commandline.py index 7a4ac506..ccff36d1 100644 --- a/radiomics/scripts/commandline.py +++ b/radiomics/scripts/commandline.py @@ -71,7 +71,7 @@ def main(): json.dump(featureVector, args.out) args.out.write('\n') else: - for k, v in featureVector.iteritems(): + for k, v in featureVector.items(): args.out.write('%s: %s\n' % (k, v)) except Exception: logging.error('FEATURE 
EXTRACTION FAILED:\n%s', traceback.format_exc()) diff --git a/radiomics/shape.py b/radiomics/shape.py index 23b9b152..8ca1854e 100644 --- a/radiomics/shape.py +++ b/radiomics/shape.py @@ -1,7 +1,7 @@ import numpy import operator import collections -from radiomics import base, imageoperations +from radiomics import base, imageoperations, safe_xrange import SimpleITK as sitk @@ -27,8 +27,13 @@ def __init__(self, inputImage, inputMask, **kwargs): cpif = sitk.ConstantPadImageFilter() padding = numpy.tile(1, 3) - cpif.SetPadLowerBound(padding) - cpif.SetPadUpperBound(padding) + try: + cpif.SetPadLowerBound(padding) + cpif.SetPadUpperBound(padding) + except TypeError: + # newer versions of SITK/python want a tuple or list + cpif.SetPadLowerBound(padding.tolist()) + cpif.SetPadUpperBound(padding.tolist()) self.inputMask = cpif.Execute(self.inputMask) @@ -56,9 +61,9 @@ def _calculateSurfaceArea(self): S_A = 0.0 # iterate over all voxels which may border segmentation or are a part of it - for v_z in xrange(minBounds[0] - 1, maxBounds[0] + 1): - for v_y in xrange(minBounds[1] - 1, maxBounds[1] + 1): - for v_x in xrange(minBounds[2] - 1, maxBounds[2] + 1): + for v_z in safe_xrange(minBounds[0] - 1, maxBounds[0] + 1): + for v_y in safe_xrange(minBounds[1] - 1, maxBounds[1] + 1): + for v_x in safe_xrange(minBounds[2] - 1, maxBounds[2] + 1): # indices to corners of current sampling cube gridCell = gridAngles + [v_z, v_y, v_x] @@ -114,7 +119,7 @@ def _calculateSurfaceArea(self): def _getMaximum2Ddiameter(self, dim): otherDims = tuple(set([0, 1, 2]) - set([dim])) - a = numpy.array(zip(*self.matrixCoordinates)) + a = numpy.array(list(zip(*self.matrixCoordinates))) maxDiameter = 0 # Check maximum diameter in every slice, retain the overall maximum diff --git a/tests/testUtils.py b/tests/testUtils.py index be866044..c8ae445b 100644 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -1,3 +1,4 @@ +from __future__ import print_function, unicode_literals, division, absolute_import 
import SimpleITK as sitk import sys, os import ast @@ -6,11 +7,12 @@ import math import numpy from nose_parameterized import parameterized -from radiomics import imageoperations +from radiomics import imageoperations, in_py3 # Get the logger. This is done outside the class, as it is needed by both the class and the custom_name_func logger = logging.getLogger('testUtils') + def custom_name_func(testcase_func, param_num, param): """ A custom test name function that will ensure that the tests are run such that they're batched with all tests for a @@ -37,10 +39,10 @@ def custom_name_func(testcase_func, param_num, param): logger.debug('custom_name_func: function name = %s, param_num = {0:0>3}, param.args = %s'.format(param_num), testcase_func.__name__, param.args) - return "%s_%s" % ( + return str("%s_%s" % ( testcase_func.__name__, parameterized.to_safe_name("_".join(str(x) for x in param.args)), - ) + )) class RadiomicsTestUtils: @@ -49,6 +51,7 @@ class RadiomicsTestUtils: It provides utility methods to get the baseline feature value for a feature class and compare it to the result generated by the test. """ + def __init__(self): global logger @@ -104,14 +107,14 @@ class or test case is changed, function returns True. 
self._featureClassName = className # Check if test settings have changed - if cmp(self._kwargs, self.getBaselineDict(className, testCase)) != 0: + if self._kwargs != self.getBaselineDict(className, testCase): self._kwargs = self.getBaselineDict(className, testCase) self._testCase = None # forces image to be reloaded (as settings have changed) # Next, set testCase if necessary if self._testCase != testCase: - imageName = os.path.join(self._dataDir, testCase + '_image.nrrd') - maskName = os.path.join(self._dataDir, testCase + '_label.nrrd') + imageName = str(os.path.join(self._dataDir, testCase + '_image.nrrd')) + maskName = str(os.path.join(self._dataDir, testCase + '_label.nrrd')) self._logger.info("Reading the image and mask for test case %s", testCase) self._image = sitk.ReadImage(imageName) @@ -155,7 +158,7 @@ def getTestCases(self): """ Return all the test cases for which there are baseline information. """ - return self._baseline[self._baseline.keys()[0]].keys() + return self._baseline[list(self._baseline.keys())[0]].keys() def getFeatureClasses(self): """ @@ -175,9 +178,9 @@ def readBaselineFiles(self): cls = baselineFile[9:-4] self._logger.debug('Reading baseline for class %s', cls) self._baseline[cls] = {} - with open(os.path.join(self._baselineDir, baselineFile), 'rb') as baselineReader: + with open(os.path.join(self._baselineDir, baselineFile), 'r' if in_py3 else 'rb') as baselineReader: csvReader = csv.reader(baselineReader) - headers = csvReader.next() + headers = next(csvReader) for testRow in csvReader: self._baseline[cls][testRow[0]] = {} for val_idx, val in enumerate(testRow[1:], start=1): @@ -187,21 +190,21 @@ def checkResult(self, featureName, value): """ Use utility methods to get and test the results against the expected baseline value for this key. 
""" - + longName = '%s_%s' % (self._featureClassName, featureName) if value is None: - self._diffs[self._testCase][featureName] = None - self._results[self._testCase][featureName] = None - assert (value != None) + self._diffs[self._testCase][longName] = None + self._results[self._testCase][longName] = None + assert (value is not None) if math.isnan(value): - self._diffs[self._testCase][featureName] = numpy.nan - self._results[self._testCase][featureName] = numpy.nan + self._diffs[self._testCase][longName] = numpy.nan + self._results[self._testCase][longName] = numpy.nan assert (not math.isnan(value)) # save the result using the baseline class and feature names self._logger.debug('checkResults: featureName = %s', featureName) - self._results[self._testCase][featureName] = value + self._results[self._testCase][longName] = value assert featureName in self._baseline[self._featureClassName][self._testCase] baselineValue = float(self._baseline[self._featureClassName][self._testCase][featureName]) @@ -217,7 +220,7 @@ def checkResult(self, featureName, value): percentDiff = abs(1.0 - (value / baselineValue)) # save the difference - self._diffs[self._testCase][featureName] = percentDiff + self._diffs[self._testCase][longName] = percentDiff # check for a less than three percent difference if (percentDiff >= 0.03): @@ -241,10 +244,10 @@ def writeCSV(self, data, fileName): {'id1' : {'f1':n1, 'f2':n2}, 'id2' : {'f1':n3, 'f2':n4}} """ - csvFile = open(fileName, 'wb') + csvFile = open(fileName, 'w') csvFileWriter = csv.writer(csvFile) # get the headers from the first row - header = sorted(data[data.keys()[0]].keys()) + header = list(data[list(data.keys())[0]].keys()) header = ['testCase'] + header csvFileWriter.writerow(header) for testCase in sorted(data.keys()): @@ -252,7 +255,7 @@ def writeCSV(self, data, fileName): thisCase['testCase'] = testCase row = [] for h in header: - row = row + [thisCase[h]] + row = row + [thisCase.get(h, "N/A")] csvFileWriter.writerow(row) 
csvFile.close() self._logger.info('Wrote to file %s', fileName) diff --git a/tests/test_docstrings.py b/tests/test_docstrings.py index 12353f8e..a61f80d5 100644 --- a/tests/test_docstrings.py +++ b/tests/test_docstrings.py @@ -12,31 +12,31 @@ def setup_module(module): # runs before anything in this file - print ("") # this is to get a newline after the dots + print("") # this is to get a newline after the dots return class TestDocStrings: def setup(self): # setup before each test method - print ("") # this is to get a newline after the dots + print("") # this is to get a newline after the dots @classmethod def setup_class(self): # called before any methods in this class - print ("") # this is to get a newline after the dots + print("") # this is to get a newline after the dots @classmethod def teardown_class(self): # run after any methods in this class - print ("") # this is to get a newline after the dots + print("") # this is to get a newline after the dots def generate_scenarios(): global featureClasses - for featureClassName, featureClass in featureClasses.iteritems(): + for featureClassName, featureClass in featureClasses.items(): logging.info('generate_scenarios %s', featureClassName) doc = featureClass.__doc__ - assert (doc != None) + assert (doc is not None) featureNames = featureClass.getFeatureNames() for f in featureNames: @@ -49,4 +49,4 @@ def test_class(self, featureClassName, featureName): features = featureClasses[featureClassName] doc = eval('features.get' + featureName + 'FeatureValue.__doc__') logging.info('%s', doc) - assert (doc != None) + assert (doc is not None) diff --git a/tests/test_features.py b/tests/test_features.py index deda4ac2..3050c126 100644 --- a/tests/test_features.py +++ b/tests/test_features.py @@ -1,6 +1,7 @@ # to run this test, from directory above: # setenv PYTHONPATH /path/to/pyradiomics/radiomics # nosetests --nocapture -v tests/test_features.py +from __future__ import print_function, unicode_literals, division from 
radiomics.featureextractor import RadiomicsFeaturesExtractor from testUtils import RadiomicsTestUtils, custom_name_func @@ -26,7 +27,7 @@ def generate_scenarios(): for testCase in testCases: for featureClassName in featureClassNames: featureNames = extractor.featureClasses[featureClassName].getFeatureNames() - assert (featureNames != None) + assert (featureNames is not None) assert (len(featureNames) > 0) logging.debug('generate_scenarios: featureNames = %s', featureNames) for featureName in featureNames: @@ -52,7 +53,7 @@ def test_scenario(self, testCase, featureClassName, featureName): logging.debug('Init %s' % (featureClassName)) featureClass = extractor.featureClasses[featureClassName](testImage, testMask, **testUtils.getKwargs()) - assert (featureClass != None) + assert featureClass is not None featureClass.disableAllFeatures() featureClass.enableFeatureByName(featureName) @@ -65,15 +66,15 @@ def test_scenario(self, testCase, featureClassName, featureName): def teardown_module(): print("") res = testUtils.getResults() - print 'Results:' - print res + print('Results:') + print(res) resultsFile = os.path.join(testUtils.getDataDir(), 'PyradiomicsFeatures.csv') testUtils.writeCSV(res, resultsFile) diff = testUtils.getDiffs() - print 'Differences from baseline:' - print diff + print('Differences from baseline:') + print(diff) diffFile = os.path.join(testUtils.getDataDir(), 'Baseline2PyradiomicsFeaturesDiff.csv') testUtils.writeCSV(diff, diffFile) logging.info( - "Wrote calculated features to %s, and the differences between the matlab features and the pyradiomics ones to %s.", + "Wrote calculated features to %s, and the differences between the baseline features and the calculated ones to %s.", resultsFile, diffFile) diff --git a/versioneer.py b/versioneer.py index f250cde5..0116069f 100644 --- a/versioneer.py +++ b/versioneer.py @@ -339,9 +339,20 @@ def get_config_from_root(root): # configparser.NoOptionError (if it lacks "VCS="). 
See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . setup_cfg = os.path.join(root, "setup.cfg") - parser = configparser.SafeConfigParser() + try: + # python 2 + parser = configparser.SafeConfigParser() + except: + # python 3.2+ + parser = configparser.ConfigParser() + with open(setup_cfg, "r") as f: - parser.readfp(f) + try: + # python 2 + parser.readfp(f) + except: + # python 3.2+ + parser.read_file(f) VCS = parser.get("versioneer", "VCS") # mandatory def get(parser, name):