From 17b8b8af364054dd08864ca4aac938ebc1c32c9f Mon Sep 17 00:00:00 2001
From: Bradlee Speice
Date: Sun, 27 May 2018 13:51:23 -0400
Subject: [PATCH] Move tests to pyo3

Codegen wasn't doing much for me
---
 Cargo.toml                       |  5 +-
 build_tests.py                   | 79 --------------------------------
 src/lib.rs                       |  3 ++
 src/tests.rs                     | 28 +++++++++++
 src/tests/compat_parse.rs        | 41 -----------------
 src/tests/compat_split_string.rs | 50 --------------------
 src/tests/mod.rs                 |  2 -
 7 files changed, 35 insertions(+), 173 deletions(-)
 delete mode 100644 build_tests.py
 create mode 100644 src/tests.rs
 delete mode 100644 src/tests/compat_parse.rs
 delete mode 100644 src/tests/compat_split_string.rs
 delete mode 100644 src/tests/mod.rs

diff --git a/Cargo.toml b/Cargo.toml
index 08d86f8..124b157 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -7,4 +7,7 @@ authors = ["Bradlee Speice "]
 chrono = "0.4"
 lazy_static = "*"
 num-traits = "0.2"
-rust_decimal = "0.8"
\ No newline at end of file
+rust_decimal = "0.8"
+
+[dev-dependencies]
+pyo3 = "0.2"
\ No newline at end of file
diff --git a/build_tests.py b/build_tests.py
deleted file mode 100644
index 5d85fdc..0000000
--- a/build_tests.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#import dateutil.parser._timelex.split as time_split
-from dateutil.parser import _timelex
-from dateutil.parser import parse as duparse
-import pytz
-
-# The TEST_STRINGS list should be the only thing that actually needs changing
-TEST_STRINGS = [
-    '2018.5.15',
-    'May 5, 2018',
-    'Mar. 5, 2018',
-    '19990101T23',
-    '19990101T2359',
-]
-
-AUTOGEN_HEADER = '''
-// WARNING
-// This file was auto-generated using the `build_tests.py` script.
-// Please do not edit it manually.
-
-'''
-
-S4 = ' ' * 4
-S8 = ' ' * 8
-S12 = ' ' * 12
-
-def rust_tokenize(time_string):
-    split_array = _timelex.split(time_string)
-    return ['"{}".to_owned()'.format(token) for token in split_array]
-
-def build_split_string_tests():
-    header = '''use tokenize;
-
-#[test]
-fn test_python_compat() {\n'''
-
-    tests = []
-
-    for test_string in TEST_STRINGS:
-        token_string = '\n'.join(['{}{},'.format(S12, s)
-                                  for s in rust_tokenize(test_string)])
-        tests.append('    assert_eq!(\n{}tokenize("{}"),\n{}vec![\n{}\n{}]\n{});'
-                     .format(S8, test_string, S8, token_string, S8, S4))
-
-    body = '\n'.join(tests)
-
-    footer = '\n}\n'
-
-    return header + body + footer
-
-def test_parse(time_string):
-    dt = duparse(time_string)
-    # TODO: Don't make this dependent on New_York
-    iso8601 = pytz.timezone('America/New_York').localize(dt).astimezone(pytz.utc)
-    return 'assert_eq!(\n{}parse("{}".to_owned())\n{}.unwrap()\n{}.to_rfc3339_opts(SecondsFormat::Micros, false),\n{}"{}"\n{});'.format(
-        S8, time_string, S12, S12, S8, iso8601, S4)
-
-def build_parse_tests():
-    header = '''use chrono::SecondsFormat;
-
-use parse;
-
-#[test]
-fn test_python_compat() {\n'''
-
-    asserts = ['    {}'.format(test_parse(a)) for a in TEST_STRINGS]
-    body = '\n'.join(asserts)
-
-    footer = '\n}\n'
-
-    return header + body + footer
-
-if __name__ == '__main__':
-    split_string_test = build_split_string_tests()
-    with open('src/tests/compat_split_string.rs', 'w+') as handle:
-        handle.write(AUTOGEN_HEADER + split_string_test)
-
-    parse_test = build_parse_tests()
-    with open('src/tests/compat_parse.rs', 'w+') as handle:
-        handle.write(AUTOGEN_HEADER + parse_test)
\ No newline at end of file
diff --git a/src/lib.rs b/src/lib.rs
index d9cc129..2568414 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -8,6 +8,9 @@ extern crate chrono;
 extern crate num_traits;
 extern crate rust_decimal;
 
+#[cfg(test)]
+extern crate pyo3;
+
 use chrono::DateTime;
 use chrono::Datelike;
 use chrono::FixedOffset;
diff --git a/src/tests.rs b/src/tests.rs
new file mode 100644
index 0000000..e662cb5
--- /dev/null
+++ b/src/tests.rs
@@ -0,0 +1,28 @@
+use pyo3::ObjectProtocol;
+use pyo3::PyDict;
+use pyo3::PyList;
+use pyo3::PyObject;
+use pyo3::Python;
+use pyo3::FromPyObject;
+
+macro_rules! test_split {
+    ($py: ident, $timelex: ident, $s: expr, $expected: expr) => {
+        let f = $timelex.call_method1($py, "split", $s).unwrap();
+        let l: &PyList = f.extract($py).unwrap();
+        let s: Vec<String> = l.iter().map(|i| format!("{}", i)).collect();
+
+        assert_eq!(s, $expected);
+    };
+}
+
+#[test]
+fn test_split() {
+    let gil = Python::acquire_gil();
+    let py = gil.python();
+
+    let module = py.import("dateutil.parser").unwrap();
+    let t: PyObject = module.get("_timelex").unwrap().extract().unwrap();
+
+    test_split!(py, t, "24, 50, ABC", vec!["24", ",", " ", "50", ",", " ", "ABC"]);
+    test_split!(py, t, "2018.5.15", vec!["2018", ".", "5", ".", "15"]);
+}
\ No newline at end of file
diff --git a/src/tests/compat_parse.rs b/src/tests/compat_parse.rs
deleted file mode 100644
index be886f3..0000000
--- a/src/tests/compat_parse.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-// WARNING
-// This file was auto-generated using the `build_tests.py` script.
-// Please do not edit it manually.
-
-use chrono::SecondsFormat;
-
-use parse;
-
-#[test]
-fn test_python_compat() {
-    assert_eq!(
-        parse("2018.5.15".to_owned())
-            .unwrap()
-            .to_rfc3339_opts(SecondsFormat::Micros, false),
-        "2018-05-15 04:00:00+00:00"
-    );
-    assert_eq!(
-        parse("May 5, 2018".to_owned())
-            .unwrap()
-            .to_rfc3339_opts(SecondsFormat::Micros, false),
-        "2018-05-05 04:00:00+00:00"
-    );
-    assert_eq!(
-        parse("Mar. 5, 2018".to_owned())
-            .unwrap()
-            .to_rfc3339_opts(SecondsFormat::Micros, false),
-        "2018-03-05 05:00:00+00:00"
-    );
-    assert_eq!(
-        parse("19990101T23".to_owned())
-            .unwrap()
-            .to_rfc3339_opts(SecondsFormat::Micros, false),
-        "1999-01-02 04:00:00+00:00"
-    );
-    assert_eq!(
-        parse("19990101T2359".to_owned())
-            .unwrap()
-            .to_rfc3339_opts(SecondsFormat::Micros, false),
-        "1999-01-02 04:59:00+00:00"
-    );
-}
diff --git a/src/tests/compat_split_string.rs b/src/tests/compat_split_string.rs
deleted file mode 100644
index cd3c9a5..0000000
--- a/src/tests/compat_split_string.rs
+++ /dev/null
@@ -1,50 +0,0 @@
-// WARNING
-// This file was auto-generated using the `build_tests.py` script.
-// Please do not edit it manually.
-
-use tokenize;
-
-#[test]
-fn test_python_compat() {
-    assert_eq!(
-        tokenize("2018.5.15"),
-        vec![
-            "2018".to_owned(),
-            ".".to_owned(),
-            "5".to_owned(),
-            ".".to_owned(),
-            "15".to_owned(),
-        ]
-    );
-    assert_eq!(
-        tokenize("May 5, 2018"),
-        vec![
-            "May".to_owned(),
-            " ".to_owned(),
-            "5".to_owned(),
-            ",".to_owned(),
-            " ".to_owned(),
-            "2018".to_owned(),
-        ]
-    );
-    assert_eq!(
-        tokenize("Mar. 5, 2018"),
-        vec![
-            "Mar".to_owned(),
-            ".".to_owned(),
-            " ".to_owned(),
-            "5".to_owned(),
-            ",".to_owned(),
-            " ".to_owned(),
-            "2018".to_owned(),
-        ]
-    );
-    assert_eq!(
-        tokenize("19990101T23"),
-        vec!["19990101".to_owned(), "T".to_owned(), "23".to_owned()]
-    );
-    assert_eq!(
-        tokenize("19990101T2359"),
-        vec!["19990101".to_owned(), "T".to_owned(), "2359".to_owned()]
-    );
-}
diff --git a/src/tests/mod.rs b/src/tests/mod.rs
deleted file mode 100644
index 83f972a..0000000
--- a/src/tests/mod.rs
+++ /dev/null
@@ -1,2 +0,0 @@
-mod compat_parse;
-mod compat_split_string;