Move tests to pyo3

Codegen wasn't doing much for me
pull/1/head
Bradlee Speice 2018-05-27 13:51:23 -04:00
parent b2626d971a
commit 17b8b8af36
7 changed files with 35 additions and 173 deletions

View File

@ -7,4 +7,7 @@ authors = ["Bradlee Speice <bspeice@kcg.com>"]
chrono = "0.4"
lazy_static = "*"
num-traits = "0.2"
rust_decimal = "0.8"
[dev-dependencies]
pyo3 = "0.2"

View File

@ -1,79 +0,0 @@
#import dateutil.parser._timelex.split as time_split
from dateutil.parser import _timelex
from dateutil.parser import parse as duparse
import pytz
# The TEST_STRINGS list should be the only thing that actually needs changing
# when adding python-compatibility cases; everything below is derived from it.
TEST_STRINGS = [
'2018.5.15',
'May 5, 2018',
'Mar. 5, 2018',
'19990101T23',
'19990101T2359',
]
# Banner prepended to every generated .rs file so reviewers know not to edit it.
AUTOGEN_HEADER = '''
// WARNING
// This file was auto-generated using the `build_tests.py` script.
// Please do not edit it manually.
'''
# Indentation snippets (4/8/12 spaces) used when pretty-printing Rust source.
S4 = ' ' * 4
S8 = ' ' * 8
S12 = ' ' * 12
def rust_tokenize(time_string):
    """Tokenize `time_string` with dateutil's _timelex and render each token
    as a Rust `"…".to_owned()` expression for splicing into generated source."""
    rendered = []
    for token in _timelex.split(time_string):
        rendered.append('"{}".to_owned()'.format(token))
    return rendered
def build_split_string_tests():
    """Render the Rust source for the tokenizer python-compat test file:
    one assert_eq! per entry in TEST_STRINGS, comparing our `tokenize`
    against dateutil's token stream."""
    header = '''use tokenize;
#[test]
fn test_python_compat() {\n'''
    rendered = []
    for case in TEST_STRINGS:
        # One `S12`-indented, comma-terminated line per expected token.
        token_lines = ['{}{},'.format(S12, tok) for tok in rust_tokenize(case)]
        token_string = '\n'.join(token_lines)
        rendered.append(' assert_eq!(\n{}tokenize("{}"),\n{}vec![\n{}\n{}]\n{});'
                        .format(S8, case, S8, token_string, S8, S4))
    return header + '\n'.join(rendered) + '\n}\n'
def test_parse(time_string):
    """Render one Rust assert_eq! comparing our `parse` against dateutil's
    result for `time_string`.

    BUG FIX: the Rust side formats with
    `to_rfc3339_opts(SecondsFormat::Micros, false)`, which emits a 'T'
    separator, six fractional digits, and a "+00:00" offset
    (e.g. "2018-05-15T04:00:00.000000+00:00"). The old code interpolated
    `str(datetime)` ("2018-05-15 04:00:00+00:00"), so every generated
    assertion was guaranteed to fail. `isoformat(timespec='microseconds')`
    matches chrono's output byte-for-byte.
    """
    dt = duparse(time_string)
    # TODO: Don't make this dependent on New_York
    utc_dt = pytz.timezone('America/New_York').localize(dt).astimezone(pytz.utc)
    iso8601 = utc_dt.isoformat(timespec='microseconds')
    return 'assert_eq!(\n{}parse("{}".to_owned())\n{}.unwrap()\n{}.to_rfc3339_opts(SecondsFormat::Micros, false),\n{}"{}"\n{});'.format(
        S8, time_string, S12, S12, S8, iso8601, S4)
def build_parse_tests():
    """Render the Rust source for the parser python-compat test file:
    one assert (from `test_parse`) per entry in TEST_STRINGS."""
    header = '''use chrono::SecondsFormat;
use parse;
#[test]
fn test_python_compat() {\n'''
    body = '\n'.join(' {}'.format(test_parse(case)) for case in TEST_STRINGS)
    return header + body + '\n}\n'
# Regenerate both auto-generated Rust compat test files from TEST_STRINGS.
if __name__ == '__main__':
    split_string_test = build_split_string_tests()
    # 'w+' truncates any previous generated file before writing.
    with open('src/tests/compat_split_string.rs', 'w+') as handle:
        handle.write(AUTOGEN_HEADER + split_string_test)
    parse_test = build_parse_tests()
    with open('src/tests/compat_parse.rs', 'w+') as handle:
        handle.write(AUTOGEN_HEADER + parse_test)

View File

@ -8,6 +8,9 @@ extern crate chrono;
extern crate num_traits;
extern crate rust_decimal;
#[cfg(test)]
extern crate pyo3;
use chrono::DateTime;
use chrono::Datelike;
use chrono::FixedOffset;

28
src/tests.rs Normal file
View File

@ -0,0 +1,28 @@
use pyo3::ObjectProtocol;
use pyo3::PyDict;
use pyo3::PyList;
use pyo3::PyObject;
use pyo3::Python;
use pyo3::FromPyObject;
// Call `$timelex.split($s)` through pyo3 and assert that the returned
// Python list, stringified element by element, equals `$expected`.
macro_rules! test_split {
    ($py: ident, $timelex: ident, $s: expr, $expected: expr) => {
        // `call_method1` invokes `split` with `$s` as the single positional arg.
        let f = $timelex.call_method1($py, "split", $s).unwrap();
        let l: &PyList = f.extract($py).unwrap();
        // Stringify each Python object so we can compare against plain &strs.
        let s: Vec<String> = l.iter().map(|i| format!("{}", i)).collect();
        assert_eq!(s, $expected);
    };
}
/// Sanity-check our expectations directly against dateutil's
/// `_timelex.split`, driven through an embedded Python interpreter.
#[test]
fn test_split() {
    let gil_guard = Python::acquire_gil();
    let python = gil_guard.python();
    let parser_mod = python.import("dateutil.parser").unwrap();
    let timelex: PyObject = parser_mod.get("_timelex").unwrap().extract().unwrap();
    test_split!(python, timelex, "24, 50, ABC", vec!["24", ",", " ", "50", ",", " ", "ABC"]);
    test_split!(python, timelex, "2018.5.15", vec!["2018", ".", "5", ".", "15"]);
}

View File

@ -1,41 +0,0 @@
// WARNING
// This file was auto-generated using the `build_tests.py` script.
// Please do not edit it manually.
use chrono::SecondsFormat;
use parse;
// Python-compat tests: expected values are dateutil's results for the same
// inputs, localized to America/New_York and converted to UTC.
//
// NOTE(review): `to_rfc3339_opts(SecondsFormat::Micros, false)` emits a 'T'
// separator, six fractional digits, and a "+00:00" offset. The previous
// expected strings were in Python's `str(datetime)` shape
// ("2018-05-15 04:00:00+00:00") and could never match — corrected below.
#[test]
fn test_python_compat() {
    assert_eq!(
        parse("2018.5.15".to_owned())
            .unwrap()
            .to_rfc3339_opts(SecondsFormat::Micros, false),
        "2018-05-15T04:00:00.000000+00:00"
    );
    assert_eq!(
        parse("May 5, 2018".to_owned())
            .unwrap()
            .to_rfc3339_opts(SecondsFormat::Micros, false),
        "2018-05-05T04:00:00.000000+00:00"
    );
    assert_eq!(
        parse("Mar. 5, 2018".to_owned())
            .unwrap()
            .to_rfc3339_opts(SecondsFormat::Micros, false),
        "2018-03-05T05:00:00.000000+00:00"
    );
    assert_eq!(
        parse("19990101T23".to_owned())
            .unwrap()
            .to_rfc3339_opts(SecondsFormat::Micros, false),
        "1999-01-02T04:00:00.000000+00:00"
    );
    assert_eq!(
        parse("19990101T2359".to_owned())
            .unwrap()
            .to_rfc3339_opts(SecondsFormat::Micros, false),
        "1999-01-02T04:59:00.000000+00:00"
    );
}

View File

@ -1,50 +0,0 @@
// WARNING
// This file was auto-generated using the `build_tests.py` script.
// Please do not edit it manually.
use tokenize;
/// Python-compat tests: each expected token list mirrors the output of
/// dateutil's `_timelex.split` for the same input string.
#[test]
fn test_python_compat() {
    // Build an owned Vec<String> from expected &str tokens.
    let owned = |tokens: &[&str]| -> Vec<String> {
        tokens.iter().map(|t| String::from(*t)).collect()
    };
    assert_eq!(tokenize("2018.5.15"), owned(&["2018", ".", "5", ".", "15"]));
    assert_eq!(
        tokenize("May 5, 2018"),
        owned(&["May", " ", "5", ",", " ", "2018"])
    );
    assert_eq!(
        tokenize("Mar. 5, 2018"),
        owned(&["Mar", ".", " ", "5", ",", " ", "2018"])
    );
    assert_eq!(tokenize("19990101T23"), owned(&["19990101", "T", "23"]));
    assert_eq!(tokenize("19990101T2359"), owned(&["19990101", "T", "2359"]));
}

View File

@ -1,2 +0,0 @@
mod compat_parse;
mod compat_split_string;