
Converted hash.rs into a module. Refactored lib.rs and the tests within.

Branch: develop
Drew Short committed 8 years ago
commit 188fbe9101
Changed files (lines changed):
  1. Cargo.toml (2)
  2. src/cache.rs (54)
  3. src/hash.rs (484)
  4. src/hash/ahash.rs (61)
  5. src/hash/dhash.rs (62)
  6. src/hash/mod.rs (197)
  7. src/hash/phash.rs (219)
  8. src/lib.rs (693)

Cargo.toml (2)

@@ -1,6 +1,6 @@
[package]
name = "pihash"
version = "0.2.6"
version = "0.2.7"
authors = ["Drew Short <warrick@sothr.com>"]
description = "A simple library for generating perceptual hashes for images and comparing images based on their perceptual hashes."
repository = "https://github.com/warricksothr/Perceptual-Image-Hashing/"

src/cache.rs (54)

@@ -3,12 +3,16 @@
// Licensed under the MIT license<LICENSE-MIT or http://opensource.org/licenses/MIT>.
// This file may not be copied, modified, or distributed except according to those terms.
use super::image;
use super::image::ImageBuffer;
use super::sha1::Sha1;
use super::flate2::Compression;
use super::flate2::write::ZlibEncoder;
use super::flate2::read::ZlibDecoder;
extern crate complex;
extern crate flate2;
extern crate image;
extern crate sha1;
use self::image::ImageBuffer;
use self::sha1::Sha1;
use self::flate2::Compression;
use self::flate2::write::ZlibEncoder;
use self::flate2::read::ZlibDecoder;
use std::str::FromStr;
use std::path::Path;
use std::fs::{File, create_dir_all, remove_dir_all};
@@ -69,26 +73,30 @@ impl<'a> Cache<'a> {
Ok(mut file) => {
// Metadata file exists, compare them
let mut loaded_metadata_string = String::new();
let _ = file.read_to_string(&mut loaded_metadata_string);
let loaded_metadata: CacheMetadata = match json::decode(&loaded_metadata_string) {
Ok(data) => data,
Err(_) => CacheMetadata { cache_version: 0 },
};
match file.read_to_string(&mut loaded_metadata_string) {
Ok(_) => {
let loaded_metadata: CacheMetadata = match json::decode(&loaded_metadata_string) {
Ok(data) => data,
Err(_) => CacheMetadata { cache_version: 0 },
};
// If they match, continue
if current_metadata != loaded_metadata {
// If they don't wipe the cache to start new
match remove_dir_all(self.cache_dir) {
Ok(_) => {
match create_dir_all(self.cache_dir) {
Ok(_) => (),
// If they match, continue
if current_metadata != loaded_metadata {
// If they don't wipe the cache to start new
match remove_dir_all(self.cache_dir) {
Ok(_) => {
match create_dir_all(self.cache_dir) {
Ok(_) => (),
Err(e) => println!("Error: {}", e),
}
},
Err(e) => println!("Error: {}", e),
}
},
Err(e) => println!("Error: {}", e),
};
};
};
},
Err(e) => println!("Error: {}", e),
};
}
},
// Metadata file doesn't exist, do nothing assume all is well, create new metadata file
Err(_) => {},
};
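
The hunk above amounts to: read the stored metadata file, fall back to a default version on any read or decode error, and wipe and recreate the cache directory when the stored version no longer matches the current one. A minimal, std-only sketch of that flow (the file name, helper name, and plain-integer version handling are illustrative, not the crate's actual API):

use std::fs::{create_dir_all, remove_dir_all, File};
use std::io::Read;
use std::path::Path;

// Hypothetical helper mirroring the cache-invalidation logic above:
// an unreadable or undecodable metadata file is treated as version 0,
// and a version mismatch wipes and recreates the cache directory.
fn ensure_cache_version(cache_dir: &Path, current_version: u32) -> std::io::Result<()> {
    let mut stored = String::new();
    if let Ok(mut f) = File::open(cache_dir.join("cache.meta")) {
        let _ = f.read_to_string(&mut stored);
    }
    let stored_version: u32 = stored.trim().parse().unwrap_or(0);
    if stored_version != current_version {
        remove_dir_all(cache_dir)?;
        create_dir_all(cache_dir)?;
    }
    Ok(())
}

fn main() {
    let dir = Path::new("./pihash_cache_demo");
    create_dir_all(dir).expect("create cache dir");
    ensure_cache_version(dir, 1).expect("check cache version");
}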

src/hash.rs (484)

@@ -1,484 +0,0 @@
// Copyright 2015 Drew Short <drew@sothr.com>.
//
// Licensed under the MIT license<LICENSE-MIT or http://opensource.org/licenses/MIT>.
// This file may not be copied, modified, or distributed except according to those terms.
use std::path::Path;
use std::f64;
use super::image;
use super::image::{GenericImage, Pixel, FilterType};
use super::dft;
use super::dft::Transform;
use cache::Cache;
// Used to get ranges for the precision of rounding floats
// Can round to 1 significant factor of precision
const FLOAT_PRECISION_MAX_1: f64 = f64::MAX / 10_f64;
const FLOAT_PRECISION_MIN_1: f64 = f64::MIN / 10_f64;
// Can round to 2 significant factors of precision
const FLOAT_PRECISION_MAX_2: f64 = f64::MAX / 100_f64;
const FLOAT_PRECISION_MIN_2: f64 = f64::MIN / 100_f64;
// Can round to 3 significant factors of precision
const FLOAT_PRECISION_MAX_3: f64 = f64::MAX / 1000_f64;
const FLOAT_PRECISION_MIN_3: f64 = f64::MIN / 1000_f64;
// Can round to 4 significant factors of precision
const FLOAT_PRECISION_MAX_4: f64 = f64::MAX / 10000_f64;
const FLOAT_PRECISION_MIN_4: f64 = f64::MIN / 10000_f64;
// Can round to 5 significant factors of precision
const FLOAT_PRECISION_MAX_5: f64 = f64::MAX / 100000_f64;
const FLOAT_PRECISION_MIN_5: f64 = f64::MIN / 100000_f64;
/**
* Prepared image that can be used to generate hashes
*/
pub struct PreparedImage<'a> {
orig_path: &'a str,
image: image::ImageBuffer<image::Luma<u8>, Vec<u8>>,
cache: &'a Cache<'a>,
}
/**
* Wraps the various perceptual hashes
*/
pub struct PerceptualHashes<'a> {
pub orig_path: &'a str,
pub ahash: u64,
pub dhash: u64,
pub phash: u64,
}
/**
* All the supported precision types
*
* Low aims for 32 bit precision
* Medium aims for 64 bit precision
* High aims for 128 bit precision
*/
#[allow(dead_code)]
pub enum Precision {
Low,
Medium,
High,
}
// Get the size of the required image
//
impl Precision {
fn get_size(&self) -> u32 {
match *self {
Precision::Low => 4,
Precision::Medium => 8,
Precision::High => 16,
}
}
}
/**
* Types of hashes supported
*/
pub enum HashType {
Ahash,
Dhash,
Phash,
}
/**
* Responsible for parsing a path, converting an image, and packaging it to be
* hashed.
*
* # Arguments
*
* * 'path' - The path to the image requested to be hashed
* 'size' - The size that the image should be resized to, in the form of size x size
*
* # Returns
*
* A PreparedImage struct with the required information for performing hashing
*
*/
pub fn prepare_image<'a>(path: &'a Path,
hash_type: &HashType,
precision: &Precision,
cache: &'a Cache<'a>)
-> PreparedImage<'a> {
let image_path = path.to_str().unwrap();
let size: u32 = match *hash_type {
HashType::Phash => precision.get_size() * 4,
_ => precision.get_size(),
};
// Check if we have the already converted image in a cache and use that if possible.
match cache.get_image_from_cache(&path, size) {
Some(image) => {
PreparedImage {
orig_path: &*image_path,
image: image,
cache: &cache
}
}
None => {
// Otherwise let's do that work now and store it.
let image = image::open(path).unwrap();
let small_image = image.resize_exact(size, size, FilterType::Lanczos3);
let grey_image = small_image.to_luma();
match cache.put_image_in_cache(&path, size, &grey_image) {
Ok(_) => {}
Err(e) => println!("Unable to store image in cache. {}", e),
};
PreparedImage {
orig_path: &*image_path,
image: grey_image,
cache: &cache,
}
}
}
}
/**
* Get all perceptual hashes for an image
*/
pub fn get_perceptual_hashes<'a>(path: &'a Path, precision: &Precision, cache: &Cache) -> PerceptualHashes<'a> {
let image_path = path.to_str().unwrap();
let ahash = AHash::new(&path, &precision, &cache).get_hash();
let dhash = DHash::new(&path, &precision, &cache).get_hash();
let phash = PHash::new(&path, &precision, &cache).get_hash();
PerceptualHashes {
orig_path: &*image_path,
ahash: ahash,
dhash: dhash,
phash: phash,
}
}
/**
* Calculate the number of bits different between two hashes
* Add to the PerceptualHashTrait
*/
pub fn calculate_hamming_distance(hash1: u64, hash2: u64) -> u64 {
// The binary xor of the two hashes should give us a number representing
// the differences between the two hashes. All that's left is to count
// the number of 1's in the difference to determine the hamming distance
let bin_diff = hash1 ^ hash2;
let bin_diff_str = format!("{:b}", bin_diff);
let mut hamming = 0u64;
for bit in bin_diff_str.chars() {
match bit {
'1' => hamming += 1,
_ => continue,
}
}
hamming
}
pub trait PerceptualHash {
fn get_hash(&self) -> u64;
}
pub struct AHash<'a> {
prepared_image: Box<PreparedImage<'a>>,
}
impl<'a> AHash<'a> {
pub fn new(path: &'a Path, precision: &Precision, cache: &'a Cache) -> Self {
AHash { prepared_image: Box::new(prepare_image(&path, &HashType::Ahash, &precision, &cache)) }
}
}
impl<'a> PerceptualHash for AHash<'a> {
/**
* Calculate the ahash of the provided prepared image.
*
* # Returns
*
* A u64 representing the value of the hash
*/
fn get_hash(&self) -> u64 {
let (width, height) = self.prepared_image.image.dimensions();
// calculating the average pixel value
let mut total = 0u64;
for pixel in self.prepared_image.image.pixels() {
let channels = pixel.channels();
// println!("Pixel is: {}", channels[0]);
total += channels[0] as u64;
}
let mean = total / (width * height) as u64;
// println!("Mean for {} is {}", prepared_image.orig_path, mean);
// Calculating a hash based on the mean
let mut hash = 0u64;
for pixel in self.prepared_image.image.pixels() {
let channels = pixel.channels();
let pixel_sum = channels[0] as u64;
if pixel_sum >= mean {
hash |= 1;
// println!("Pixel {} is >= {} therefore {:b}", pixel_sum, mean, hash);
} else {
hash |= 0;
// println!("Pixel {} is < {} therefore {:b}", pixel_sum, mean, hash);
}
hash <<= 1;
}
// println!("Hash for {} is {}", prepared_image.orig_path, hash);
hash
}
}
pub struct DHash<'a> {
prepared_image: Box<PreparedImage<'a>>,
}
impl<'a> DHash<'a> {
pub fn new(path: &'a Path, precision: &Precision, cache: &'a Cache) -> Self {
DHash { prepared_image: Box::new(prepare_image(&path, &HashType::Dhash, &precision, &cache)) }
}
}
impl<'a> PerceptualHash for DHash<'a> {
/**
* Calculate the dhash of the provided prepared image
*
* # Return
*
* Returns a u64 representing the value of the hash
*/
fn get_hash(&self) -> u64 {
// Stored for later
let first_pixel_val = self.prepared_image.image.pixels().nth(0).unwrap().channels()[0];
let last_pixel_val = self.prepared_image.image.pixels().last().unwrap().channels()[0];
// Calculate the dhash
let mut previous_pixel_val = 0u64;
let mut hash = 0u64;
for (index, pixel) in self.prepared_image.image.pixels().enumerate() {
if index == 0 {
previous_pixel_val = pixel.channels()[0] as u64;
continue;
}
let channels = pixel.channels();
let pixel_val = channels[0] as u64;
if pixel_val >= previous_pixel_val {
hash |= 1;
} else {
hash |= 0;
}
hash <<= 1;
previous_pixel_val = channels[0] as u64;
}
if first_pixel_val >= last_pixel_val {
hash |= 1;
} else {
hash |= 0;
}
hash
}
}
pub struct PHash<'a> {
prepared_image: Box<PreparedImage<'a>>,
}
impl<'a> PHash<'a> {
pub fn new(path: &'a Path, precision: &Precision, cache: &'a Cache) -> Self {
PHash { prepared_image: Box::new(prepare_image(&path, &HashType::Phash, &precision, &cache)) }
}
}
impl<'a> PerceptualHash for PHash<'a> {
/**
* Calculate the phash of the provided prepared image
*
* # Return
*
* Returns a u64 representing the value of the hash
*/
fn get_hash(&self) -> u64 {
// Get the image data into a vector to perform the DFT on.
let width = self.prepared_image.image.width() as usize;
let height = self.prepared_image.image.height() as usize;
// Get the 2D data for the 2D FFT/DFT,
// either from the cache or calculate it.
// Pretty fast already, so caching doesn't make a huge difference,
// at least compared to opening and processing the images.
let mut data_matrix: Vec<Vec<f64>> = Vec::new();
match self.prepared_image.cache.get_matrix_from_cache(&Path::new(self.prepared_image.orig_path),
width as u32) {
Some(matrix) => data_matrix = matrix,
None => {
// Preparing the results
for x in 0..width {
data_matrix.push(Vec::new());
for y in 0..height {
let pos_x = x as u32;
let pos_y = y as u32;
data_matrix[x]
.push(self.prepared_image
.image
.get_pixel(pos_x, pos_y)
.channels()[0] as f64);
}
}
// Perform the 2D DFT operation on our matrix
calculate_2d_dft(&mut data_matrix);
// Store this DFT in the cache
match self.prepared_image.cache.put_matrix_in_cache(&Path::new(self.prepared_image.orig_path),
width as u32,
&data_matrix) {
Ok(_) => {}
Err(e) => println!("Unable to store matrix in cache. {}", e),
};
}
}
// Only need the top left quadrant
let target_width = (width / 4) as usize;
let target_height = (height / 4) as usize;
let dft_width = (width / 4) as f64;
let dft_height = (height / 4) as f64;
// Calculate the mean
let mut total = 0f64;
for x in 0..target_width {
for y in 0..target_height {
total += data_matrix[x][y];
}
}
let mean = total / (dft_width * dft_height);
// Calculating a hash based on the mean
let mut hash = 0u64;
for x in 0..target_width {
// println!("Mean: {} Values: {:?}",mean,data_matrix[x]);
for y in 0..target_height {
if data_matrix[x][y] >= mean {
hash |= 1;
// println!("Pixel {} is >= {} therefore {:b}", pixel_sum, mean, hash);
} else {
hash |= 0;
// println!("Pixel {} is < {} therefore {:b}", pixel_sum, mean, hash);
}
hash <<= 1;
}
}
// println!("Hash for {} is {}", prepared_image.orig_path, hash);
hash
}
}
// Use a 1D DFT to calculate the 2D DFT.
//
// This is achieved by calculating the DFT for each row, then calculating the
// DFT for each column of DFT row data. This means that a 32x32 image will have
// 64 1D DFT operations (one per row and one per column) performed on it. (Somewhat calculation intensive)
//
// This operation is in place on the data in the provided vector
//
// Inspired by:
// http://www.inf.ufsc.br/~visao/khoros/html-dip/c5/s2/front-page.html
//
// Checked with:
// http://calculator.vhex.net/post/calculator-result/2d-discrete-fourier-transform
//
fn calculate_2d_dft(data_matrix: &mut Vec<Vec<f64>>) {
// println!("{:?}", data_matrix);
let width = data_matrix.len();
let height = data_matrix[0].len();
let mut complex_data_matrix = Vec::with_capacity(width);
// Perform the DFT on the columns of data
for x in 0..width {
let mut column: Vec<f64> = Vec::with_capacity(height);
for y in 0..height {
column.push(data_matrix[x][y]);
}
// Perform the DFT on this column
// println!("column[{}] before: {:?}", x, column);
let forward_plan = dft::Plan::new(dft::Operation::Forward, column.len());
column.transform(&forward_plan);
let complex_column = dft::unpack(&column);
// println!("column[{}] after: {:?}", x, complex_column);
complex_data_matrix.push(complex_column);
}
// Perform the DFT on the rows of data
for y in 0..height {
let mut row = Vec::with_capacity(width);
for x in 0..width {
row.push(complex_data_matrix[x][y]);
}
// Perform the DFT on the row
// println!("row[{}] before: {:?}", y, row);
let forward_plan = dft::Plan::new(dft::Operation::Forward, row.len());
row.transform(&forward_plan);
// println!("row[{}] after: {:?}", y, row);
// Put the row values back
for x in 0..width {
data_matrix[x][y] = round_float(row[x].re);
}
}
}
fn round_float(f: f64) -> f64 {
if f >= FLOAT_PRECISION_MAX_1 || f <= FLOAT_PRECISION_MIN_1 {
f
} else if f >= FLOAT_PRECISION_MAX_2 || f <= FLOAT_PRECISION_MIN_2 {
(f * 10_f64).round() / 10_f64
} else if f >= FLOAT_PRECISION_MAX_3 || f <= FLOAT_PRECISION_MIN_3 {
(f * 100_f64).round() / 100_f64
} else if f >= FLOAT_PRECISION_MAX_4 || f <= FLOAT_PRECISION_MIN_4 {
(f * 1000_f64).round() / 1000_f64
} else if f >= FLOAT_PRECISION_MAX_5 || f <= FLOAT_PRECISION_MIN_5 {
(f * 10000_f64).round() / 10000_f64
} else {
(f * 100000_f64).round() / 100000_f64
}
}
#[test]
fn test_2d_dft() {
let mut test_matrix: Vec<Vec<f64>> = Vec::new();
test_matrix.push(vec![1f64, 1f64, 1f64, 3f64]);
test_matrix.push(vec![1f64, 2f64, 2f64, 1f64]);
test_matrix.push(vec![1f64, 2f64, 2f64, 1f64]);
test_matrix.push(vec![3f64, 1f64, 1f64, 1f64]);
println!("{:?}", test_matrix[0]);
println!("{:?}", test_matrix[1]);
println!("{:?}", test_matrix[2]);
println!("{:?}", test_matrix[3]);
println!("Performing 2d DFT");
calculate_2d_dft(&mut test_matrix);
println!("{:?}", test_matrix[0]);
println!("{:?}", test_matrix[1]);
println!("{:?}", test_matrix[2]);
println!("{:?}", test_matrix[3]);
assert!(test_matrix[0][0] == 24_f64);
assert!(test_matrix[0][1] == 0_f64);
assert!(test_matrix[0][2] == 0_f64);
assert!(test_matrix[0][3] == 0_f64);
assert!(test_matrix[1][0] == 0_f64);
assert!(test_matrix[1][1] == 0_f64);
assert!(test_matrix[1][2] == -2_f64);
assert!(test_matrix[1][3] == 2_f64);
assert!(test_matrix[2][0] == 0_f64);
assert!(test_matrix[2][1] == -2_f64);
assert!(test_matrix[2][2] == -4_f64);
assert!(test_matrix[2][3] == -2_f64);
assert!(test_matrix[3][0] == 0_f64);
assert!(test_matrix[3][1] == 2_f64);
assert!(test_matrix[3][2] == -2_f64);
assert!(test_matrix[3][3] == 0_f64);
}

src/hash/ahash.rs (61)

@@ -0,0 +1,61 @@
// Copyright 2016 Drew Short <drew@sothr.com>.
//
// Licensed under the MIT license<LICENSE-MIT or http://opensource.org/licenses/MIT>.
// This file may not be copied, modified, or distributed except according to those terms.
use super::{HashType, PerceptualHash, Precision, PreparedImage};
use super::prepare_image;
use super::image::{GenericImage, Pixel};
use std::path::Path;
use cache::Cache;
pub struct AHash<'a> {
prepared_image: Box<PreparedImage<'a>>,
}
impl<'a> AHash<'a> {
pub fn new(path: &'a Path, precision: &Precision, cache: &'a Cache) -> Self {
AHash { prepared_image: Box::new(prepare_image(&path, &HashType::AHash, &precision, &cache)) }
}
}
impl<'a> PerceptualHash for AHash<'a> {
/**
* Calculate the ahash of the provided prepared image.
*
* # Returns
*
* A u64 representing the value of the hash
*/
fn get_hash(&self) -> u64 {
let (width, height) = self.prepared_image.image.dimensions();
// calculating the average pixel value
let mut total = 0u64;
for pixel in self.prepared_image.image.pixels() {
let channels = pixel.channels();
// println!("Pixel is: {}", channels[0]);
total += channels[0] as u64;
}
let mean = total / (width * height) as u64;
// println!("Mean for {} is {}", prepared_image.orig_path, mean);
// Calculating a hash based on the mean
let mut hash = 0u64;
for pixel in self.prepared_image.image.pixels() {
let channels = pixel.channels();
let pixel_sum = channels[0] as u64;
if pixel_sum >= mean {
hash |= 1;
// println!("Pixel {} is >= {} therefore {:b}", pixel_sum, mean, hash);
} else {
hash |= 0;
// println!("Pixel {} is < {} therefore {:b}", pixel_sum, mean, hash);
}
hash <<= 1;
}
// println!("Hash for {} is {}", prepared_image.orig_path, hash);
hash
}
}
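
For reference, the loop above reduces to: average the luma values, then emit one bit per pixel, set when the pixel is at or above the mean. A dependency-free sketch of the same idea (illustrative only; note it shifts before setting each bit, whereas the loop above shifts after):

// Illustrative restatement of the aHash bit loop; not the crate's API.
fn ahash_from_luma(pixels: &[u8]) -> u64 {
    let mean = pixels.iter().map(|&p| p as u64).sum::<u64>() / pixels.len() as u64;
    let mut hash = 0u64;
    for &p in pixels {
        hash <<= 1;
        if p as u64 >= mean {
            hash |= 1;
        }
    }
    hash
}

fn main() {
    // 2x2 toy image: bright, dark, bright, dark -> 0b1010.
    println!("{:04b}", ahash_from_luma(&[200, 10, 220, 15]));
}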

src/hash/dhash.rs (62)

@@ -0,0 +1,62 @@
// Copyright 2016 Drew Short <drew@sothr.com>.
//
// Licensed under the MIT license<LICENSE-MIT or http://opensource.org/licenses/MIT>.
// This file may not be copied, modified, or distributed except according to those terms.
use super::{HashType, PerceptualHash, Precision, PreparedImage};
use super::prepare_image;
use super::image::{GenericImage, Pixel};
use std::path::Path;
use cache::Cache;
pub struct DHash<'a> {
prepared_image: Box<PreparedImage<'a>>,
}
impl<'a> DHash<'a> {
pub fn new(path: &'a Path, precision: &Precision, cache: &'a Cache) -> Self {
DHash { prepared_image: Box::new(prepare_image(&path, &HashType::DHash, &precision, &cache)) }
}
}
impl<'a> PerceptualHash for DHash<'a> {
/**
* Calculate the dhash of the provided prepared image
*
* # Return
*
* Returns a u64 representing the value of the hash
*/
fn get_hash(&self) -> u64 {
// Stored for later
let first_pixel_val = self.prepared_image.image.pixels().nth(0).unwrap().channels()[0];
let last_pixel_val = self.prepared_image.image.pixels().last().unwrap().channels()[0];
// Calculate the dhash
let mut previous_pixel_val = 0u64;
let mut hash = 0u64;
for (index, pixel) in self.prepared_image.image.pixels().enumerate() {
if index == 0 {
previous_pixel_val = pixel.channels()[0] as u64;
continue;
}
let channels = pixel.channels();
let pixel_val = channels[0] as u64;
if pixel_val >= previous_pixel_val {
hash |= 1;
} else {
hash |= 0;
}
hash <<= 1;
previous_pixel_val = channels[0] as u64;
}
if first_pixel_val >= last_pixel_val {
hash |= 1;
} else {
hash |= 0;
}
hash
}
}
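
The dHash above emits one bit per comparison of a pixel with its predecessor, plus a final wrap-around bit comparing the first pixel with the last. A dependency-free sketch of the same idea (illustrative only; the resulting bit ordering matches the loop above):

// Illustrative restatement of the dHash loop; not the crate's API.
fn dhash_from_luma(pixels: &[u8]) -> u64 {
    let mut hash = 0u64;
    for window in pixels.windows(2) {
        hash <<= 1;
        if window[1] >= window[0] {
            hash |= 1;
        }
    }
    // Wrap-around bit: first pixel compared against the last one.
    hash <<= 1;
    if pixels[0] >= pixels[pixels.len() - 1] {
        hash |= 1;
    }
    hash
}

fn main() {
    // 4 pixels -> 3 neighbour bits + 1 wrap-around bit.
    println!("{:04b}", dhash_from_luma(&[10, 20, 15, 30]));
}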

src/hash/mod.rs (197)

@@ -0,0 +1,197 @@
// Copyright 2016 Drew Short <drew@sothr.com>.
//
// Licensed under the MIT license<LICENSE-MIT or http://opensource.org/licenses/MIT>.
// This file may not be copied, modified, or distributed except according to those terms.
extern crate dft;
extern crate image;
mod ahash;
mod dhash;
mod phash;
use std::path::Path;
use std::f64;
use self::image::{Pixel, FilterType};
use cache::Cache;
// Constants //
// Used to get ranges for the precision of rounding floats
// Can round to 1 significant factor of precision
const FLOAT_PRECISION_MAX_1: f64 = f64::MAX / 10_f64;
const FLOAT_PRECISION_MIN_1: f64 = f64::MIN / 10_f64;
// Can round to 2 significant factors of precision
const FLOAT_PRECISION_MAX_2: f64 = f64::MAX / 100_f64;
const FLOAT_PRECISION_MIN_2: f64 = f64::MIN / 100_f64;
// Can round to 3 significant factors of precision
const FLOAT_PRECISION_MAX_3: f64 = f64::MAX / 1000_f64;
const FLOAT_PRECISION_MIN_3: f64 = f64::MIN / 1000_f64;
// Can round to 4 significant factors of precision
const FLOAT_PRECISION_MAX_4: f64 = f64::MAX / 10000_f64;
const FLOAT_PRECISION_MIN_4: f64 = f64::MIN / 10000_f64;
// Can round to 5 significant factors of precision
const FLOAT_PRECISION_MAX_5: f64 = f64::MAX / 100000_f64;
const FLOAT_PRECISION_MIN_5: f64 = f64::MIN / 100000_f64;
// Structs/Enums //
/**
* Prepared image that can be used to generate hashes
*/
pub struct PreparedImage<'a> {
orig_path: &'a str,
image: image::ImageBuffer<image::Luma<u8>, Vec<u8>>,
cache: &'a Cache<'a>,
}
/**
* Wraps the various perceptual hashes
*/
pub struct PerceptualHashes<'a> {
pub orig_path: &'a str,
pub ahash: u64,
pub dhash: u64,
pub phash: u64,
}
/**
* All the supported precision types
*
* Low aims for 32 bit precision
* Medium aims for 64 bit precision
* High aims for 128 bit precision
*/
#[allow(dead_code)]
pub enum Precision {
Low,
Medium,
High,
}
// Get the size of the required image
//
impl Precision {
fn get_size(&self) -> u32 {
match *self {
Precision::Low => 4,
Precision::Medium => 8,
Precision::High => 16,
}
}
}
/**
* Types of hashes supported
*/
pub enum HashType {
AHash,
DHash,
PHash,
}
// Traits //
pub trait PerceptualHash {
fn get_hash(&self) -> u64;
}
// Functions //
/**
* Responsible for parsing a path, converting an image, and packaging it to be
* hashed.
*
* # Arguments
*
* * 'path' - The path to the image requested to be hashed
* 'size' - The size that the image should be resized to, in the form of size x size
*
* # Returns
*
* A PreparedImage struct with the required information for performing hashing
*
*/
pub fn prepare_image<'a>(path: &'a Path,
hash_type: &HashType,
precision: &Precision,
cache: &'a Cache<'a>)
-> PreparedImage<'a> {
let image_path = path.to_str().unwrap();
let size: u32 = match *hash_type {
HashType::PHash => precision.get_size() * 4,
_ => precision.get_size(),
};
// Check if we have the already converted image in a cache and use that if possible.
match cache.get_image_from_cache(&path, size) {
Some(image) => {
PreparedImage {
orig_path: &*image_path,
image: image,
cache: &cache
}
}
None => {
// Otherwise let's do that work now and store it.
let image = image::open(path).unwrap();
let small_image = image.resize_exact(size, size, FilterType::Lanczos3);
let grey_image = small_image.to_luma();
match cache.put_image_in_cache(&path, size, &grey_image) {
Ok(_) => {}
Err(e) => println!("Unable to store image in cache. {}", e),
};
PreparedImage {
orig_path: &*image_path,
image: grey_image,
cache: &cache,
}
}
}
}
/**
* Get a specific HashType hash
*/
pub fn get_perceptual_hash<'a>(path: &'a Path, precision: &Precision, hash_type: &HashType, cache: &Cache) -> u64 {
match *hash_type {
HashType::AHash => ahash::AHash::new(&path, &precision, &cache).get_hash(),
HashType::DHash => dhash::DHash::new(&path, &precision, &cache).get_hash(),
HashType::PHash => phash::PHash::new(&path, &precision, &cache).get_hash()
}
}
/**
* Get all perceptual hashes for an image
*/
pub fn get_perceptual_hashes<'a>(path: &'a Path, precision: &Precision, cache: &Cache) -> PerceptualHashes<'a> {
let image_path = path.to_str().unwrap();
let ahash = ahash::AHash::new(&path, &precision, &cache).get_hash();
let dhash = dhash::DHash::new(&path, &precision, &cache).get_hash();
let phash = phash::PHash::new(&path, &precision, &cache).get_hash();
PerceptualHashes {
orig_path: &*image_path,
ahash: ahash,
dhash: dhash,
phash: phash,
}
}
/**
* Calculate the number of bits different between two hashes
* Add to the PerceptualHashTrait
*/
pub fn calculate_hamming_distance(hash1: u64, hash2: u64) -> u64 {
// The binary xor of the two hashes should give us a number representing
// the differences between the two hashes. All that's left is to count
// the number of 1's in the difference to determine the hamming distance
let bin_diff = hash1 ^ hash2;
let bin_diff_str = format!("{:b}", bin_diff);
let mut hamming = 0u64;
for bit in bin_diff_str.chars() {
match bit {
'1' => hamming += 1,
_ => continue,
}
}
hamming
}
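
One note on calculate_hamming_distance above: formatting the XOR as a binary string and counting '1' characters gives the same answer as the standard library's popcount. An equivalent expression, shown only for illustration and not part of this commit:

// Equivalent expression of the XOR-and-count idea using u64::count_ones().
fn hamming(hash1: u64, hash2: u64) -> u32 {
    (hash1 ^ hash2).count_ones()
}

fn main() {
    // 0b1011 ^ 0b0010 == 0b1001 -> two differing bits.
    assert_eq!(hamming(0b1011, 0b0010), 2);
    println!("ok");
}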

src/hash/phash.rs (219)

@@ -0,0 +1,219 @@
// Copyright 2016 Drew Short <drew@sothr.com>.
//
// Licensed under the MIT license<LICENSE-MIT or http://opensource.org/licenses/MIT>.
// This file may not be copied, modified, or distributed except according to those terms.
use super::dft;
use super::dft::Transform;
use super::{HashType, PerceptualHash, Precision, PreparedImage};
use super::prepare_image;
use super::image::{GenericImage, Pixel};
use std::path::Path;
use cache::Cache;
pub struct PHash<'a> {
prepared_image: Box<PreparedImage<'a>>,
}
impl<'a> PHash<'a> {
pub fn new(path: &'a Path, precision: &Precision, cache: &'a Cache) -> Self {
PHash { prepared_image: Box::new(prepare_image(&path, &HashType::PHash, &precision, &cache)) }
}
}
impl<'a> PerceptualHash for PHash<'a> {
/**
* Calculate the phash of the provided prepared image
*
* # Return
*
* Returns a u64 representing the value of the hash
*/
fn get_hash(&self) -> u64 {
// Get the image data into a vector to perform the DFT on.
let width = self.prepared_image.image.width() as usize;
let height = self.prepared_image.image.height() as usize;
// Get the 2D data for the 2D FFT/DFT,
// either from the cache or calculate it.
// Pretty fast already, so caching doesn't make a huge difference,
// at least compared to opening and processing the images.
let mut data_matrix: Vec<Vec<f64>> = Vec::new();
match self.prepared_image.cache.get_matrix_from_cache(&Path::new(self.prepared_image.orig_path),
width as u32) {
Some(matrix) => data_matrix = matrix,
None => {
// Preparing the results
for x in 0..width {
data_matrix.push(Vec::new());
for y in 0..height {
let pos_x = x as u32;
let pos_y = y as u32;
data_matrix[x]
.push(self.prepared_image
.image
.get_pixel(pos_x, pos_y)
.channels()[0] as f64);
}
}
// Perform the 2D DFT operation on our matrix
calculate_2d_dft(&mut data_matrix);
// Store this DFT in the cache
match self.prepared_image.cache.put_matrix_in_cache(&Path::new(self.prepared_image.orig_path),
width as u32,
&data_matrix) {
Ok(_) => {}
Err(e) => println!("Unable to store matrix in cache. {}", e),
};
}
}
// Only need the top left quadrant
let target_width = (width / 4) as usize;
let target_height = (height / 4) as usize;
let dft_width = (width / 4) as f64;
let dft_height = (height / 4) as f64;
// Calculate the mean
let mut total = 0f64;
for x in 0..target_width {
for y in 0..target_height {
total += data_matrix[x][y];
}
}
let mean = total / (dft_width * dft_height);
// Calculating a hash based on the mean
let mut hash = 0u64;
for x in 0..target_width {
// println!("Mean: {} Values: {:?}",mean,data_matrix[x]);
for y in 0..target_height {
if data_matrix[x][y] >= mean {
hash |= 1;
// println!("Pixel {} is >= {} therefore {:b}", pixel_sum, mean, hash);
} else {
hash |= 0;
// println!("Pixel {} is < {} therefore {:b}", pixel_sum, mean, hash);
}
hash <<= 1;
}
}
// println!("Hash for {} is {}", prepared_image.orig_path, hash);
hash
}
}
// Use a 1D DFT to calculate the 2D DFT.
//
// This is achieved by calculating the DFT for each row, then calculating the
// DFT for each column of DFT row data. This means that a 32x32 image will have
// 64 1D DFT operations (one per row and one per column) performed on it. (Somewhat calculation intensive)
//
// This operation is in place on the data in the provided vector
//
// Inspired by:
// http://www.inf.ufsc.br/~visao/khoros/html-dip/c5/s2/front-page.html
//
// Checked with:
// http://calculator.vhex.net/post/calculator-result/2d-discrete-fourier-transform
//
fn calculate_2d_dft(data_matrix: &mut Vec<Vec<f64>>) {
// println!("{:?}", data_matrix);
let width = data_matrix.len();
let height = data_matrix[0].len();
let mut complex_data_matrix = Vec::with_capacity(width);
// Perform the DFT on the columns of data
for x in 0..width {
let mut column: Vec<f64> = Vec::with_capacity(height);
for y in 0..height {
column.push(data_matrix[x][y]);
}
// Perform the DFT on this column
// println!("column[{}] before: {:?}", x, column);
let forward_plan = dft::Plan::new(dft::Operation::Forward, column.len());
column.transform(&forward_plan);
let complex_column = dft::unpack(&column);
// println!("column[{}] after: {:?}", x, complex_column);
complex_data_matrix.push(complex_column);
}
// Perform the DFT on the rows of data
for y in 0..height {
let mut row = Vec::with_capacity(width);
for x in 0..width {
row.push(complex_data_matrix[x][y]);
}
// Perform the DFT on the row
// println!("row[{}] before: {:?}", y, row);
let forward_plan = dft::Plan::new(dft::Operation::Forward, row.len());
row.transform(&forward_plan);
// println!("row[{}] after: {:?}", y, row);
// Put the row values back
for x in 0..width {
data_matrix[x][y] = round_float(row[x].re);
}
}
}
fn round_float(f: f64) -> f64 {
if f >= super::FLOAT_PRECISION_MAX_1 || f <= super::FLOAT_PRECISION_MIN_1 {
f
} else if f >= super::FLOAT_PRECISION_MAX_2 || f <= super::FLOAT_PRECISION_MIN_2 {
(f * 10_f64).round() / 10_f64
} else if f >= super::FLOAT_PRECISION_MAX_3 || f <= super::FLOAT_PRECISION_MIN_3 {
(f * 100_f64).round() / 100_f64
} else if f >= super::FLOAT_PRECISION_MAX_4 || f <= super::FLOAT_PRECISION_MIN_4 {
(f * 1000_f64).round() / 1000_f64
} else if f >= super::FLOAT_PRECISION_MAX_5 || f <= super::FLOAT_PRECISION_MIN_5 {
(f * 10000_f64).round() / 10000_f64
} else {
(f * 100000_f64).round() / 100000_f64
}
}
#[test]
fn test_2d_dft() {
let mut test_matrix: Vec<Vec<f64>> = Vec::new();
test_matrix.push(vec![1f64, 1f64, 1f64, 3f64]);
test_matrix.push(vec![1f64, 2f64, 2f64, 1f64]);
test_matrix.push(vec![1f64, 2f64, 2f64, 1f64]);
test_matrix.push(vec![3f64, 1f64, 1f64, 1f64]);
println!("{:?}", test_matrix[0]);
println!("{:?}", test_matrix[1]);
println!("{:?}", test_matrix[2]);
println!("{:?}", test_matrix[3]);
println!("Performing 2d DFT");
calculate_2d_dft(&mut test_matrix);
println!("{:?}", test_matrix[0]);
println!("{:?}", test_matrix[1]);
println!("{:?}", test_matrix[2]);
println!("{:?}", test_matrix[3]);
assert!(test_matrix[0][0] == 24_f64);
assert!(test_matrix[0][1] == 0_f64);
assert!(test_matrix[0][2] == 0_f64);
assert!(test_matrix[0][3] == 0_f64);
assert!(test_matrix[1][0] == 0_f64);
assert!(test_matrix[1][1] == 0_f64);
assert!(test_matrix[1][2] == -2_f64);
assert!(test_matrix[1][3] == 2_f64);
assert!(test_matrix[2][0] == 0_f64);
assert!(test_matrix[2][1] == -2_f64);
assert!(test_matrix[2][2] == -4_f64);
assert!(test_matrix[2][3] == -2_f64);
assert!(test_matrix[3][0] == 0_f64);
assert!(test_matrix[3][1] == 2_f64);
assert!(test_matrix[3][2] == -2_f64);
assert!(test_matrix[3][3] == 0_f64);
}
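
Size bookkeeping implied by the code above: prepare_image resizes to four times the precision size for pHash, and get_hash then reads only the top-left quarter of the DFT matrix, so Medium precision (8) means a 32x32 image and an 8x8 = 64-bit hash. A small sketch of that arithmetic (the helper name is illustrative):

// Hypothetical helper spelling out the pHash dimensions used above.
fn phash_dimensions(precision_size: u32) -> (u32, u32) {
    let resized = precision_size * 4; // e.g. Medium: 8 * 4 = 32
    let quadrant = resized / 4;       // 32 / 4 = 8 -> 8 * 8 = 64 hash bits
    (resized, quadrant)
}

fn main() {
    assert_eq!(phash_dimensions(8), (32, 8));
    println!("ok");
}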

src/lib.rs (693)

@@ -5,17 +5,11 @@
extern crate libc;
extern crate rustc_serialize;
extern crate image;
extern crate dft;
extern crate complex;
extern crate sha1;
extern crate flate2;
mod hash;
mod cache;
use std::path::Path;
use hash::PerceptualHash;
use std::ffi::CStr;
use cache::Cache;
@@ -26,403 +20,298 @@ static LIB_CACHE: Cache<'static> = Cache { cache_dir: cache::CACHE_DIR, use_cach
*
* Not performing this step may cause parts to fail.
*/
#[no_mangle]
pub extern "C" fn init() {
match LIB_CACHE.init() {
Ok(_) => {}
Err(e) => println!("Error: {}", e),
}
}
#[no_mangle]
pub extern "C" fn init() {
match LIB_CACHE.init() {
Ok(_) => {}
Err(e) => println!("Error: {}", e),
}
}
/**
* Teardown for the library
*/
#[no_mangle]
pub extern "C" fn teardown() {
match LIB_CACHE.clean() {
Ok(_) => {}
Err(e) => println!("Error: {}", e),
}
}
pub fn get_phashes(path: &Path) -> hash::PerceptualHashes {
hash::get_perceptual_hashes(path, &hash::Precision::Medium, &LIB_CACHE)
}
pub fn get_ahash(path: &Path) -> u64 {
hash::AHash::new(&path, &hash::Precision::Medium, &LIB_CACHE).get_hash()
}
pub fn get_dhash(path: &Path) -> u64 {
hash::DHash::new(&path, &hash::Precision::Medium, &LIB_CACHE).get_hash()
}
pub fn get_phash(path: &Path) -> u64 {
hash::PHash::new(&path, &hash::Precision::Medium, &LIB_CACHE).get_hash()
}
pub fn get_hamming_distance(hash1: u64, hash2: u64) -> u64 {
hash::calculate_hamming_distance(hash1, hash2)
}
// External proxies for the get_*hash methods
#[no_mangle]
pub extern "C" fn ext_get_ahash(path_char: *const libc::c_char) -> libc::uint64_t {
unsafe {
let path_str = CStr::from_ptr(path_char);
let image_path = match path_str.to_str() {
Ok(result) => result,
Err(e) => {
println!("Error: {}. Unable to parse '{}'",
e,
to_hex_string(path_str.to_bytes()));
panic!("Unable to parse path")
}
};
let path = Path::new(&image_path);
get_ahash(&path)
}
}
#[no_mangle]
pub extern "C" fn ext_get_dhash(path_char: *const libc::c_char) -> libc::uint64_t {
unsafe {
let path_str = CStr::from_ptr(path_char);
let image_path = match path_str.to_str() {
Ok(result) => result,
Err(e) => {
println!("Error: {}. Unable to parse '{}'",
e,
to_hex_string(path_str.to_bytes()));
panic!("Unable to parse path")
}
};
let path = Path::new(&image_path);
get_dhash(&path)
}
}
#[no_mangle]
pub extern "C" fn ext_get_phash(path_char: *const libc::c_char) -> libc::uint64_t {
unsafe {
let path_str = CStr::from_ptr(path_char);
let image_path = match path_str.to_str() {
Ok(result) => result,
Err(e) => {
println!("Error: {}. Unable to parse '{}'",
e,
to_hex_string(path_str.to_bytes()));
panic!("Unable to parse path")
}
};
let path = Path::new(&image_path);
get_phash(&path)
}
}
fn to_hex_string(bytes: &[u8]) -> String {
println!("length: {}", bytes.len());
let mut strs: Vec<String> = Vec::new();
for byte in bytes {
// println!("{:02x}", byte);
strs.push(format!("{:02x}", byte));
}
strs.join("\\x")
}
// Module for the tests
//
#[cfg(test)]
mod tests {
use super::*;
use std::fs;
use std::path;
use hash;
#[test]
fn test_can_get_test_images() {
let paths = fs::read_dir(&path::Path::new("./test_images")).unwrap();
let mut num_paths = 0;
for path in paths {
let orig_path = path.unwrap().path();
let ext = path::Path::new(&orig_path).extension();
match ext {
Some(_) => {
if ext.unwrap() == "jpg" {
num_paths += 1;
println!("Is an image {}: {:?}", num_paths, orig_path);
}
}
_ => {
println!("Not an image: {:?}", orig_path) ;
continue;
}
}
// println!("Name: {}", path.unwrap().path().display())
}
// Currently 12 images in the test images directory
assert!(num_paths == 12);
}
// Simple function for the unit tests to succinctly test a set of images
// that are organized in the fashion of large->medium->small
fn test_imageset_hash(large_phash: &hash::PerceptualHash,
medium_phash: &hash::PerceptualHash,
small_phash: &hash::PerceptualHash,
expected_large_hash: u64,
expected_medium_hash: u64,
expected_small_hash: u64,
expected_large_medium_hamming: u64,
expected_large_small_hamming: u64,
expected_medium_small_hamming: u64) {
let actual_large_hash = large_phash.get_hash();
let actual_medium_hash = medium_phash.get_hash();
let actual_small_hash = small_phash.get_hash();
// println for the purpose of debugging
println!("Large Image: expected: {} actual: {}",
expected_large_hash,
actual_large_hash);
println!("Medium Image: expected: {} actual: {}",
expected_medium_hash,
actual_medium_hash);
println!("Small Image: expected: {} actual: {}",
expected_small_hash,
actual_small_hash);
let actual_large_medium_hamming = hash::calculate_hamming_distance(actual_large_hash,
actual_medium_hash);
let actual_large_small_hamming = hash::calculate_hamming_distance(actual_large_hash,
actual_small_hash);
let actual_medium_small_hamming = hash::calculate_hamming_distance(actual_medium_hash,
actual_small_hash);
println!("Large-Medium Hamming Distance: expected: {} actual: {}",
expected_large_medium_hamming,
actual_large_medium_hamming);
println!("Large-Small Hamming Distance: expected: {} actual: {}",
expected_large_small_hamming,
actual_large_small_hamming);
println!("Medium-Small Hamming Distance: expected: {} actual: {}",
expected_medium_small_hamming,
actual_medium_small_hamming);
// Doing the asserts
assert!(actual_large_hash == expected_large_hash);
assert!(actual_medium_hash == expected_medium_hash);
assert!(actual_small_hash == expected_small_hash);
assert!(actual_large_medium_hamming == expected_large_medium_hamming);
assert!(actual_large_small_hamming == expected_large_small_hamming);
assert!(actual_medium_small_hamming == expected_medium_small_hamming);
}
#[test]
fn test_confirm_ahash_results() {
// Prep_Cache
super::init();
// Sample_01 tests
test_imageset_hash(&hash::AHash::new(path::Path::new("./test_images/sample_01_large.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::AHash::new(path::Path::new("./test_images/sample_01_medium.\
jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::AHash::new(path::Path::new("./test_images/sample_01_small.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
857051991849750,
857051991849750,
857051991849750,
0u64,
0u64,
0u64);
// Sample_02 tests
test_imageset_hash(&hash::AHash::new(path::Path::new("./test_images/sample_02_large.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::AHash::new(path::Path::new("./test_images/sample_02_medium.\
jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::AHash::new(path::Path::new("./test_images/sample_02_small.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
18446744073441116160,
18446744073441116160,
18446744073441116160,
0u64,
0u64,
0u64);
// Sample_03 tests
test_imageset_hash(&hash::AHash::new(path::Path::new("./test_images/sample_03_large.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::AHash::new(path::Path::new("./test_images/sample_03_medium.\
jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::AHash::new(path::Path::new("./test_images/sample_03_small.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
135670932300497406,
135670932300497406,
135670932300497406,
0u64,
0u64,
0u64);
// Sample_04 tests
test_imageset_hash(&hash::AHash::new(path::Path::new("./test_images/sample_04_large.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::AHash::new(path::Path::new("./test_images/sample_04_medium.\
jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::AHash::new(path::Path::new("./test_images/sample_04_small.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
18446460933225054208,
18446460933090836480,
18446460933090836480,
1u64,
1u64,
0u64);
// Clean_Cache
// super::teardown();
}
#[test]
fn test_confirm_dhash_results() {
// Prep_Cache
super::init();
// Sample_01 tests
test_imageset_hash(&hash::DHash::new(path::Path::new("./test_images/sample_01_large.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::DHash::new(path::Path::new("./test_images/sample_01_medium.\
jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::DHash::new(path::Path::new("./test_images/sample_01_small.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
7937395827556495926,
7937395827556495926,
7939647627370181174,
0u64,
1u64,
1u64);
// Sample_02 tests
test_imageset_hash(&hash::DHash::new(path::Path::new("./test_images/sample_02_large.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::DHash::new(path::Path::new("./test_images/sample_02_medium.\
jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::DHash::new(path::Path::new("./test_images/sample_02_small.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
11009829669713008949,
11009829670249879861,
11009829669713008949,
1u64,
0u64,
1u64);
// Sample_03 tests
test_imageset_hash(&hash::DHash::new(path::Path::new("./test_images/sample_03_large.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::DHash::new(path::Path::new("./test_images/sample_03_medium.\
jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::DHash::new(path::Path::new("./test_images/sample_03_small.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
225528496439353286,
225528496439353286,
226654396346195908,
0u64,
2u64,
2u64);
// Sample_04 tests
test_imageset_hash(&hash::DHash::new(path::Path::new("./test_images/sample_04_large.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::DHash::new(path::Path::new("./test_images/sample_04_medium.\
jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::DHash::new(path::Path::new("./test_images/sample_04_small.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
14620651386429567209,
14620651386429567209,
14620651386429567209,
0u64,
0u64,
0u64);
// Clean_Cache
// super::teardown();
}
#[test]
fn test_confirm_phash_results() {
// Prep_Cache
super::init();
// Sample_01 tests
test_imageset_hash(&hash::PHash::new(path::Path::new("./test_images/sample_01_large.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::PHash::new(path::Path::new("./test_images/sample_01_medium.\
jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::PHash::new(path::Path::new("./test_images/sample_01_small.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
72357778504597504,
72357778504597504,
72357778504597504,
0u64,
0u64,
0u64);
// Sample_02 tests
test_imageset_hash(&hash::PHash::new(path::Path::new("./test_images/sample_02_large.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::PHash::new(path::Path::new("./test_images/sample_02_medium.\
jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::PHash::new(path::Path::new("./test_images/sample_02_small.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
5332332327550844928,
5332332327550844928,
5332332327550844928,
0u64,
0u64,
0u64);
// Sample_03 tests
test_imageset_hash(&hash::PHash::new(path::Path::new("./test_images/sample_03_large.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::PHash::new(path::Path::new("./test_images/sample_03_medium.\
jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::PHash::new(path::Path::new("./test_images/sample_03_small.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
6917529027641081856,
6917529027641081856,
6917529027641081856,
0u64,
0u64,
0u64);
// Sample_04 tests
test_imageset_hash(&hash::PHash::new(path::Path::new("./test_images/sample_04_large.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::PHash::new(path::Path::new("./test_images/sample_04_medium.\
jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
&hash::PHash::new(path::Path::new("./test_images/sample_04_small.jpg"),
&hash::Precision::Medium, &super::LIB_CACHE),
10997931646002397184,
10997931646002397184,
11142046834078253056,
0u64,
1u64,
1u64);
// Clean_Cache
// super::teardown();
}
}
#[no_mangle]
pub extern "C" fn teardown() {
match LIB_CACHE.clean() {
Ok(_) => {}
Err(e) => println!("Error: {}", e),
}
}
pub fn get_phashes(path: &Path) -> hash::PerceptualHashes {
hash::get_perceptual_hashes(path, &hash::Precision::Medium, &LIB_CACHE)
}
pub fn get_ahash(path: &Path) -> u64 {
hash::get_perceptual_hash(&path, &hash::Precision::Medium, &hash::HashType::AHash, &LIB_CACHE)
}
pub fn get_dhash(path: &Path) -> u64 {
hash::get_perceptual_hash(&path, &hash::Precision::Medium, &hash::HashType::DHash, &LIB_CACHE)
}
pub fn get_phash(path: &Path) -> u64 {
hash::get_perceptual_hash(&path, &hash::Precision::Medium, &hash::HashType::PHash, &LIB_CACHE)
}
pub fn get_hamming_distance(hash1: u64, hash2: u64) -> u64 {
hash::calculate_hamming_distance(hash1, hash2)
}
// External proxies for the get_*hash methods
#[no_mangle]
pub extern "C" fn ext_get_ahash(path_char: *const libc::c_char) -> libc::uint64_t {
unsafe {
let path_str = CStr::from_ptr(path_char);
let image_path = match path_str.to_str() {
Ok(result) => result,
Err(e) => {
println!("Error: {}. Unable to parse '{}'",
e,
to_hex_string(path_str.to_bytes()));
panic!("Unable to parse path")
}
};
let path = Path::new(&image_path);
get_ahash(&path)
}
}
#[no_mangle]
pub extern "C" fn ext_get_dhash(path_char: *const libc::c_char) -> libc::uint64_t {
unsafe {
let path_str = CStr::from_ptr(path_char);
let image_path = match path_str.to_str() {
Ok(result) => result,
Err(e) => {
println!("Error: {}. Unable to parse '{}'",
e,
to_hex_string(path_str.to_bytes()));
panic!("Unable to parse path")
}
};
let path = Path::new(&image_path);
get_dhash(&path)
}
}
#[no_mangle]
pub extern "C" fn ext_get_phash(path_char: *const libc::c_char) -> libc::uint64_t {
unsafe {
let path_str = CStr::from_ptr(path_char);
let image_path = match path_str.to_str() {
Ok(result) => result,
Err(e) => {
println!("Error: {}. Unable to parse '{}'",
e,
to_hex_string(path_str.to_bytes()));
panic!("Unable to parse path")
}
};
let path = Path::new(&image_path);
get_phash(&path)
}
}
fn to_hex_string(bytes: &[u8]) -> String {
println!("length: {}", bytes.len());
let mut strs: Vec<String> = Vec::new();
for byte in bytes {
// println!("{:02x}", byte);
strs.push(format!("{:02x}", byte));
}
strs.join("\\x")
}
// Module for the tests
//
#[cfg(test)]
mod tests {
use super::*;
use std::fs;
use std::path::Path;
use hash;
#[test]
fn test_can_get_test_images() {
let paths = fs::read_dir(&Path::new("./test_images")).unwrap();
let mut num_paths = 0;
for path in paths {
let orig_path = path.unwrap().path();
let ext = Path::new(&orig_path).extension();
match ext {
Some(_) => {
if ext.unwrap() == "jpg" {
num_paths += 1;
println!("Is an image {}: {:?}", num_paths, orig_path);
}
}
_ => {
println!("Not an image: {:?}", orig_path) ;
continue;
}
}
// println!("Name: {}", path.unwrap().path().display())
}
// Currently 12 images in the test images directory
assert!(num_paths == 12);
}
/**
* Updated test function. Assumes 3 images per set and performs no hamming distance checks.
* We don't need to confirm that the hamming distance calculation works in these tests.
*/
fn test_imageset_hash(hash_type: hash::HashType,
hash_precision: hash::Precision,
image_paths: [&Path; 3],
image_hashes: [u64; 3]) {
for index in 0..image_paths.len() {
let image_path = image_paths[index];
let calculated_hash = hash::get_perceptual_hash(&image_path, &hash_precision, &hash_type, &super::LIB_CACHE);
println!("Image hashes for '{}': expected: {} actual: {}",
image_path.to_str().unwrap(),
image_hashes[index],
calculated_hash);
assert!(calculated_hash == image_hashes[index]);
}
}
#[test]
fn test_confirm_ahash_results() {
// Prep_Cache
super::init();
// Sample_01 tests
let sample_01_images: [&Path; 3] = [&Path::new("./test_images/sample_01_large.jpg"),
&Path::new("./test_images/sample_01_medium.jpg"),
&Path::new("./test_images/sample_01_small.jpg")];
let sample_01_hashes: [u64; 3] = [857051991849750,
857051991849750,
857051991849750];
test_imageset_hash(hash::HashType::AHash, hash::Precision::Medium, sample_01_images, sample_01_hashes);
// Sample_02 tests
let sample_02_images: [&Path; 3] = [&Path::new("./test_images/sample_02_large.jpg"),
&Path::new("./test_images/sample_02_medium.jpg"),
&Path::new("./test_images/sample_02_small.jpg")];
let sample_02_hashes: [u64; 3] = [18446744073441116160,
18446744073441116160,
18446744073441116160];
test_imageset_hash(hash::HashType::AHash, hash::Precision::Medium, sample_02_images, sample_02_hashes);
// Sample_03 tests
let sample_03_images: [&Path; 3] = [&Path::new("./test_images/sample_03_large.jpg"),
&Path::new("./test_images/sample_03_medium.jpg"),
&Path::new("./test_images/sample_03_small.jpg")];
let sample_03_hashes: [u64; 3] = [135670932300497406,
135670932300497406,
135670932300497406];
test_imageset_hash(hash::HashType::AHash, hash::Precision::Medium, sample_03_images, sample_03_hashes);
// Sample_04 tests
let sample_04_images: [&Path; 3] = [&Path::new("./test_images/sample_04_large.jpg"),
&Path::new("./test_images/sample_04_medium.jpg"),
&Path::new("./test_images/sample_04_small.jpg")];
let sample_04_hashes: [u64; 3] = [18446460933225054208,
18446460933090836480,
18446460933090836480];
test_imageset_hash(hash::HashType::AHash, hash::Precision::Medium, sample_04_images, sample_04_hashes);
// Clean_Cache
// super::teardown();
}
#[test]
fn test_confirm_dhash_results() {
// Prep_Cache
super::init();
// Sample_01 tests
let sample_01_images: [&Path; 3] = [&Path::new("./test_images/sample_01_large.jpg"),
&Path::new("./test_images/sample_01_medium.jpg"),
&Path::new("./test_images/sample_01_small.jpg")];
let sample_01_hashes: [u64; 3] = [7937395827556495926,
7937395827556495926,
7939647627370181174];
test_imageset_hash(hash::HashType::DHash, hash::Precision::Medium, sample_01_images, sample_01_hashes);
// Sample_02 tests
let sample_02_images: [&Path; 3] = [&Path::new("./test_images/sample_02_large.jpg"),
&Path::new("./test_images/sample_02_medium.jpg"),
&Path::new("./test_images/sample_02_small.jpg")];
let sample_02_hashes: [u64; 3] = [11009829669713008949,
11009829670249879861,
11009829669713008949];
test_imageset_hash(hash::HashType::DHash, hash::Precision::Medium, sample_02_images, sample_02_hashes);
// Sample_03 tests
let sample_03_images: [&Path; 3] = [&Path::new("./test_images/sample_03_large.jpg"),
&Path::new("./test_images/sample_03_medium.jpg"),
&Path::new("./test_images/sample_03_small.jpg")];
let sample_03_hashes: [u64; 3] = [225528496439353286,
225528496439353286,
226654396346195908];
test_imageset_hash(hash::HashType::DHash, hash::Precision::Medium, sample_03_images, sample_03_hashes);
// Sample_04 tests
let sample_04_images: [&Path; 3] = [&Path::new("./test_images/sample_04_large.jpg"),
&Path::new("./test_images/sample_04_medium.jpg"),
&Path::new("./test_images/sample_04_small.jpg")];
let sample_04_hashes: [u64; 3] = [14620651386429567209,
14620651386429567209,
14620651386429567209];
test_imageset_hash(hash::HashType::DHash, hash::Precision::Medium, sample_04_images, sample_04_hashes);
// Clean_Cache
// super::teardown();
}
#[test]
fn test_confirm_phash_results() {
// Prep_Cache
super::init();
// Sample_01 tests
let sample_01_images: [&Path; 3] = [&Path::new("./test_images/sample_01_large.jpg"),
&Path::new("./test_images/sample_01_medium.jpg"),
&Path::new("./test_images/sample_01_small.jpg")];
let sample_01_hashes: [u64; 3] = [72357778504597504,
72357778504597504,
72357778504597504];
test_imageset_hash(hash::HashType::PHash, hash::Precision::Medium, sample_01_images, sample_01_hashes);
// Sample_02 tests
let sample_02_images: [&Path; 3] = [&Path::new("./test_images/sample_02_large.jpg"),
&Path::new("./test_images/sample_02_medium.jpg"),
&Path::new("./test_images/sample_02_small.jpg")];
let sample_02_hashes: [u64; 3] = [5332332327550844928,
5332332327550844928,
5332332327550844928];
test_imageset_hash(hash::HashType::PHash, hash::Precision::Medium, sample_02_images, sample_02_hashes);
// Sample_03 tests
let sample_03_images: [&Path; 3] = [&Path::new("./test_images/sample_03_large.jpg"),
&Path::new("./test_images/sample_03_medium.jpg"),
&Path::new("./test_images/sample_03_small.jpg")];
let sample_03_hashes: [u64; 3] = [6917529027641081856,
6917529027641081856,
6917529027641081856];
test_imageset_hash(hash::HashType::PHash, hash::Precision::Medium, sample_03_images, sample_03_hashes);
// Sample_04 tests
let sample_04_images: [&Path; 3] = [&Path::new("./test_images/sample_04_large.jpg"),
&Path::new("./test_images/sample_04_medium.jpg"),
&Path::new("./test_images/sample_04_small.jpg")];
let sample_04_hashes: [u64; 3] = [10997931646002397184,
10997931646002397184,
11142046834078253056];
test_imageset_hash(hash::HashType::PHash, hash::Precision::Medium, sample_04_images, sample_04_hashes);
// Clean_Cache
// super::teardown();
}
}
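
For context, a hedged sketch of how the refactored public API reads after this commit when pihash is pulled in as a Rust dependency (C callers go through the ext_get_* functions instead); the image path is a placeholder from the test set:

extern crate pihash;

use std::path::Path;

fn main() {
    // Warm up the on-disk cache before hashing.
    pihash::init();
    let hashes = pihash::get_phashes(Path::new("./test_images/sample_01_large.jpg"));
    println!("ahash={} dhash={} phash={}", hashes.ahash, hashes.dhash, hashes.phash);
    // Compare any two hashes by their hamming distance.
    println!("distance={}", pihash::get_hamming_distance(hashes.ahash, hashes.dhash));
    pihash::teardown();
}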