#!/usr/bin/python
#  Copyright (C) 2012 by Carnegie Mellon University.
#  
#  @OPENSOURCE_HEADER_START@
#  Use of Rayon and related source
#  code is subject to the terms of the following licenses:
#  
#  GNU Public License (GPL) Rights pursuant to Version 2, June 1991
#  Government Purpose License Rights (GPLR) pursuant to DFARS 252.227.7013
#  
#  NO WARRANTY
#  
#  ANY INFORMATION, MATERIALS, SERVICES, INTELLECTUAL PROPERTY OR OTHER 
#  PROPERTY OR RIGHTS GRANTED OR PROVIDED BY CARNEGIE MELLON UNIVERSITY 
#  PURSUANT TO THIS LICENSE (HEREINAFTER THE "DELIVERABLES") ARE ON AN 
#  "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY 
#  KIND, EITHER EXPRESS OR IMPLIED AS TO ANY MATTER INCLUDING, BUT NOT 
#  LIMITED TO, WARRANTY OF FITNESS FOR A PARTICULAR PURPOSE, 
#  MERCHANTABILITY, INFORMATIONAL CONTENT, NONINFRINGEMENT, OR ERROR-FREE 
#  OPERATION. CARNEGIE MELLON UNIVERSITY SHALL NOT BE LIABLE FOR INDIRECT, 
#  SPECIAL OR CONSEQUENTIAL DAMAGES, SUCH AS LOSS OF PROFITS OR INABILITY 
#  TO USE SAID INTELLECTUAL PROPERTY, UNDER THIS LICENSE, REGARDLESS OF 
#  WHETHER SUCH PARTY WAS AWARE OF THE POSSIBILITY OF SUCH DAMAGES. 
#  LICENSEE AGREES THAT IT WILL NOT MAKE ANY WARRANTY ON BEHALF OF 
#  CARNEGIE MELLON UNIVERSITY, EXPRESS OR IMPLIED, TO ANY PERSON 
#  CONCERNING THE APPLICATION OF OR THE RESULTS TO BE OBTAINED WITH THE 
#  DELIVERABLES UNDER THIS LICENSE.
#  
#  Licensee hereby agrees to defend, indemnify, and hold harmless Carnegie 
#  Mellon University, its trustees, officers, employees, and agents from 
#  all claims or demands made against them (and any related losses, 
#  expenses, or attorney's fees) arising out of, or relating to Licensee's 
#  and/or its sub licensees' negligent use or willful misuse of or 
#  negligent conduct or willful misconduct regarding the Software, 
#  facilities, or other rights or assistance granted by Carnegie Mellon 
#  University under this License, including, but not limited to, any 
#  claims of product liability, personal injury, death, damage to 
#  property, or violation of any laws or regulations.
#  
#  Carnegie Mellon University Software Engineering Institute authored 
#  documents are sponsored by the U.S. Department of Defense under 
#  Contract FA8721-05-C-0003. Carnegie Mellon University retains 
#  copyrights in all material produced under this contract. The U.S. 
#  Government retains a non-exclusive, royalty-free license to publish or 
#  reproduce these documents, or allow others to do so, for U.S. 
#  Government purposes only pursuant to the copyright license under the 
#  contract clause at 252.227.7013.
#  @OPENSOURCE_HEADER_END@
from __future__ import division

import sys
import re
import datetime
from datetime import timedelta
import calendar
from itertools import cycle, chain, izip, takewhile
from pprint import pprint

from rayon.rytools import *
from rayon import toolbox
from rayon.miscutils import infer_type_from_string, dedent, parse_color_string
from rayon.tickspec_parse import parse_tickspec
from rayon.datatransformers import Classify, Project, StReorder
from rayon.datautils import (
    percentile,
    population_statistics, 
    trend_data, 
    window_timeseries)
from rayon.backcompat import OrderedDict


from netsa.data.times import (
    make_datetime,
    make_timedelta,
    divmod_timedelta,
    bin_datetime)

from netsa.data.nice import nice_floor, nice_ceil

from netsa.data.format import num_prefix



# Sentinel distinguishing "caller supplied no value" from a genuine
# None value (see supplied() below).  Constant is expected to be
# provided by the star import of rayon.rytools.
NO_VALUE = Constant("NO_VALUE")

# Default sentinel labels for the top and bottom sides of charts.
TOP = Constant("TOP")
BOTTOM = Constant("BOTTOM")

def run_some_tests(classname, testname):
    """
    Run the tests in this module whose test class matches *classname*
    and whose test name contains *testname* (both matched by
    substring against the string form of the test).

    Raises IndexError if no test class matches *classname*.
    """
    # NOTE(review): unittest is never imported at module level in
    # this file; import it locally so this helper works regardless of
    # what the star import from rayon.rytools happens to provide.
    import unittest
    suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
    # Narrow to the requested test class.
    subsuite = [t for t in suite if str(t).find(classname) != -1][0]
    newsuite = unittest.TestSuite(tests=[t for t in subsuite
                                         if str(t).find(testname) != -1])
    unittest.TextTestRunner(verbosity=2).run(newsuite)
    

def runtests():
    """Run every unittest test defined in this module, verbosely."""
    # NOTE(review): unittest is never imported at module level in
    # this file; import it locally so this helper is self-contained.
    import unittest
    suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
    unittest.TextTestRunner(verbosity=2).run(suite)

def debug(msg):
    # Print *msg* to stdout with an HH:MM:SS timestamp, but only when
    # rayon.DEBUG is on.  ("rayon" is assumed to be in scope via the
    # star import above -- confirm rytools exports it.)
    if rayon.DEBUG:
        now = datetime.datetime.now()
        print("%s: %s" % (now.strftime("%H:%M:%S"), msg))

def dbg_pprint(msg, d):
    # When debugging is enabled, pretty-print *d* framed by a message
    # and dashed separator lines.
    if not rayon.DEBUG:
        return
    debug(msg)
    debug("-" * 25)
    pprint(d)
    debug("-" * 25)

def dbg_dataset(msg, d):
    # When debugging is enabled, dump dataset *d* (via its to_string
    # method) framed by a message and dashed separator lines.
    if not rayon.DEBUG:
        return
    debug(msg)
    debug("-" * 25)
    print(d.to_string())
    debug("-" * 25)

def pprint_compare(e, a):
    # Dump expected (*e*) and actual (*a*) structures one after the
    # other, for eyeballing test failures.
    for heading, value in (("expected:", e), ("actual:", a)):
        print(heading)
        pprint(value)


def supplied(v):
    # True when *v* is an actual caller-provided value rather than the
    # NO_VALUE sentinel.  (None is a legitimate supplied value here --
    # that is the whole point of the sentinel.)
    return not (v is NO_VALUE)

def as_seconds(d):
    """Return datetime *d* as seconds since the Unix epoch (UTC)."""
    time_tuple = d.timetuple()
    return calendar.timegm(time_tuple)
    

def gen_time_ticks(start, end):
    """
    Choose minor and major tick instants plus a label formatter for
    the interval [start, end), based on its duration.

    Returns (minor_ticks, major_ticks, labeler): lists of datetimes
    and a function mapping a datetime to its label string.
    """
    span = end - start

    def ticks_every(step_seconds):
        # Tick instants from start (inclusive) to end (exclusive) at
        # the given spacing, converted back to datetimes.
        return [make_datetime(s)
                for s in xrange(int(as_seconds(start)),
                                int(as_seconds(end)), step_seconds)]

    if span < timedelta(hours=25):
        # Up to a day: hourly ticks, major every 4 hours, HH:MM labels.
        ticks = ticks_every(3600)
        minor = [t for t in ticks if t.hour % 4 != 0]
        major = [t for t in ticks if t.hour % 4 == 0]
        labeler = lambda x: "%02d:%02d" % (x.hour, x.minute)
    elif span < timedelta(days=15):
        # Up to two weeks: 4-hour ticks, major at midnight, MM/DD labels.
        ticks = ticks_every(4 * 3600)
        minor = [t for t in ticks if t.hour != 0]
        major = [t for t in ticks if t.hour == 0]
        labeler = lambda x: "%02d/%02d" % (x.month, x.day)
    elif span < timedelta(days=61):
        # Up to two months: daily ticks, major on Sundays, MM/DD labels.
        ticks = ticks_every(86400)
        minor = [t for t in ticks if t.weekday() != 6]
        major = [t for t in ticks if t.weekday() == 6]
        labeler = lambda x: "%02d/%02d" % (x.month, x.day)
    else:
        # Longer: Sundays are minor, month starts are major, YYYY/MM.
        ticks = ticks_every(86400)
        minor = [t for t in ticks if t.weekday() == 6]
        major = [t for t in ticks if t.day == 1]
        labeler = lambda x: "%04d/%02d" % (x.year, x.month)
    return (minor, major, labeler)


######################################################################
# Working with charts_data
######################################################################

# charts_data is a list. Each element of the list describes a
# chart. 
#
#   charts_data = [chart, ...]
#
# Each chart has a string label and two sides (top and bottom). 
#
#   chart = (chart_lbl, top, bottom)
#
# Each side is a flat triple of a string label, its time data (a
# list) and its value data (a list of equal length to the time
# data).
#
#   top = (top_lbl, top_times, top_values)
#   bottom = (bottom_lbl, bottom_times, bottom_values)
#
#
# If Python had real types, we might be able to describe this as:
#
#  ChartsData := [Chart]
#  Chart := (Str, Side a, Side a)
#  Side a := (Str, [Datetime], [a])
#
# where Str and Datetime are the Python str and datetime, [x] means "a
# list of items of type x", and (x, y) means "a tuple of types x and
# y". The "a" in "Side a := ..." is a type variable; the data for a
# side can be anything, but it must all be the same thing. (The data
# is later rejected if it is not numeric, but that happens elsewhere,
# in the main function.)
#
#
# As an example, this is how one iterates over a charts_data:
#
#   for (lbl, 
#        (top_lbl, top_times, top_vals),
#        (btm_lbl, btm_times, btm_vals)) in charts_data:
#       # ...

def apply_to_all_subcharts(fn, charts_data):
    """
    Apply *fn* to every chart in *charts_data*, returning a structure
    of the same shape with each chart replaced by fn's result.

    *fn* receives (label, top_side, bottom_side) and must return an
    iterable yielding the replacement (label, top_side, bottom_side).
    """
    rewritten = []
    for chart_label, top_side, bottom_side in charts_data:
        rewritten.append(tuple(fn(chart_label, top_side, bottom_side)))
    return tuple(rewritten)

def map_series(fn, charts_data):
    """
    Apply *fn* to every time series in *charts_data* (both sides of
    every chart, top before bottom) and return the results as a
    tuple, in chart order.

    *fn* receives (times, values).
    """
    results = []
    for _lbl, top, bottom in charts_data:
        for side in (top, bottom):
            results.append(fn(side[1], side[2]))
    return tuple(results)


#######################################################################
# Parsing input
######################################################################

# All the following functions take a Dataset and return a charts_data
# structure.


def parse_data_columnar(d, time_colname, top_colname, bottom_colname):
    """
    Build a charts_data structure from columnar Dataset *d*.

    Every column except *time_colname* is named "<label>_<direction>",
    where direction is *top_colname* or *bottom_colname*; the two
    columns sharing a label become the two sides of one chart.
    Charts appear in the order their labels are first seen.
    """
    if d.get_num_rows() == 0:
        return tuple()

    labels = []
    sides_by_label = {}
    for name in d.get_column_names():
        if name == time_colname:
            continue
        # NOTE(review): a label containing "_" would make this raise
        # ValueError; presumably labels never do -- confirm upstream.
        lbl, direction = name.split("_")
        if lbl not in labels:
            labels.append(lbl)
        sides_by_label.setdefault(lbl, {})[direction] = d.get_column(name)

    def _side(lbl, colname):
        # One side: (column name, time tuple, value tuple).
        return (colname,
                tuple(d.get_column(time_colname)),
                tuple(sides_by_label[lbl][colname]))

    return tuple((lbl,
                  _side(lbl, top_colname),
                  _side(lbl, bottom_colname))
                 for lbl in labels)



# Matches the comparison operator ("==" or "!=") in a filter expression.
re_operators = re.compile(r'(==)|(!=)')
# Matches a bracketed column index like "[3]"; group 1 is the digits.
re_colidx = re.compile(r'\[([0-9]+)\]')

def parse_filter_expr(expr):
    """
    Compile the filter string *expr* (e.g. "[0] == foo" or
    "name != 3") into a predicate over rows.

    The left-hand side is either a bracketed column index or a plain
    column name; the right-hand side is coerced to a Python value via
    infer_type_from_string.  Raises RuntimeError for a malformed
    expression.
    """
    def _coerce(s):
        # Convert the textual RHS to its inferred Python type.
        return infer_type_from_string(s)(s)

    def _column_key(text):
        # "[n]" becomes the integer n; anything else is used verbatim
        # as a column name.
        idx_match = re_colidx.search(text)
        if idx_match:
            return int(text[idx_match.start(1):idx_match.end(1)])
        return text

    oper_match = re_operators.search(expr)
    if oper_match is None:
        raise RuntimeError("Malformed filter '%s'" % expr)

    lhs = _column_key(expr[:oper_match.start()].strip())
    oper = expr[oper_match.start():oper_match.end()].strip()
    rhs = _coerce(expr[oper_match.end():].strip())

    # TODO: check that rhs is bounded by quotation marks if it
    # contains spaces.

    if oper == "==":
        return lambda row: row[lhs] == rhs
    elif oper == "!=":
        return lambda row: row[lhs] != rhs
    # Unreachable given the operator regex, but kept for safety.
    raise RuntimeError("Operator '%s' not supported" % oper)

# Parsing relational-style data

# 4 steps: 
#
# 1. Filter the main dataset into the data to be plotted on the
# top and the data to be plotted on the bottom.
#
# 2. Eliminate the columns that aren't time_colname, top_colname
# or bottom_colname (as appropriate) or key_colnames.
#
# 3. Partition the data on the key to get smaller per-plot datasets.
#
# 4. Merge the top and bottom of each per-plot dataset.

# TODO: normalize top_columns/top_filter and
# bottom_column/bottom_filter so that if there's only one, it's in
# top_*, no matter what. This should be done in main()
#
# TODO: synthesize plot_label_map from --show and --labels



# What we're shooting for:
#
# OrderedDict([
#     # Double-sided chart
#     ('a', 
#      OrderedDict(
#          [('foo', 
#            Dataset.from_rows(
#                [(0, 1, 2), 
#                 (10, 20, 30)],
#                colnames="time val"))
#           ('bar', 
#            Dataset.from_rows(
#                [(4, 5, 6), 
#                 (40, 50, 60)],
#                colnames="time val"))]),
#      # Single-sided chart
#      ('b', 
#       OrderedDict(
#           [("" , 
#             Dataset.from_rows(
#                 [(0, 1, 4), 
#                  (10, 20, 10)],
#                 colnames="time val"))])))])
#
#
# # Which becomes:
#
# (('a', ('foo', (0, 1, 2), (10, 20, 30)), 
#        ('bar', (4, 5, 6), (40, 50, 60))),
#  ('b', ("" , (0, 1, 4), (10, 20, 10)),
#        ("" , (,)      , (,))))
def parse_data_relational(full_data, 
                          plot_label_map,
                          time_colname,
                          key_colnames, # i.e., --group-by
                          top_colname, 
                          bottom_colname, 
                          top_filter, 
                          bottom_filter, 
                          top_label, 
                          bottom_label):
    """
    Split relational input *full_data* into a charts_data structure.

    Rows are classified twice: first into charts, keyed on the values
    of *key_colnames* (joined with "/"), then into top/bottom sides
    via *top_filter* / *bottom_filter* (compiled with
    parse_filter_expr when supplied).  Finally each side is reduced
    to its (*time_colname*, value-column) pair.

    *plot_label_map*, when not None, is an ordered sequence of
    (key, display label) pairs: it both restricts output to the
    listed keys and fixes chart order.  When None, charts appear in
    alphabetical key order.  An unsupplied *bottom_colname* produces
    single-sided charts.
    """

    if not supplied(top_filter):
        # Default: get everything
        top_filter = lambda _: True
    else:
        top_filter = parse_filter_expr(top_filter)

    if not supplied(bottom_colname):
        # no bottom_colname == no bottom side
        bottom_filter = lambda r: False
    elif not supplied(bottom_filter):
        bottom_filter = lambda r: True
    else:
        bottom_filter = parse_filter_expr(bottom_filter)

    # Use constants to represent the default top and bottom
    # labels. The default in the arg parsing code is for both to be
    # "".
    if top_label == "":
        top_label = TOP
    if bottom_label == "":
        bottom_label = BOTTOM

    # plot_label_map is a set of pairs of values in key columns in the
    # incoming data and their corresponding display labels. Turn them
    # into an OrderedDict, which is what they should be anyway.
    if plot_label_map is not None:
        plot_label_map = OrderedDict(plot_label_map)

    # Use the order of labels in plot_label_map to determine display
    # order. If it's None, use alphabetical order.
    if plot_label_map is None:
        def chart_ordering(odict):
            return sorted(odict.keys())
    else:
        # NOTE(review): in this branch chart_ordering is a *list* of
        # display labels rather than a callable as in the branch
        # above -- presumably Classify's ordering argument accepts
        # both forms; confirm against rayon.datatransformers.
        chart_ordering = [v for k, v in plot_label_map.items()]

    def _classify_by_chart(row):
        # Chart key: the row's --group-by column values joined by "/".
        keyval = "/".join(str(row[x]) for x in key_colnames)
        
        # If we have a plot->label map, only return rows that can be
        # resolved in the map, and return the label. Otherwise, return
        # every row, based on the raw value of the value in its
        # --group-by column.  (Falling off the end returns None,
        # which drops the row.)
        if plot_label_map is None:
            return [keyval]
        else:
            if keyval in plot_label_map:
                return [plot_label_map[keyval]]

    classify_by_chart = Classify(_classify_by_chart, ordering=chart_ordering)


    def _classify_by_side(row):
        # A row can be in both top and bottom. (In that case, we're
        # usually using different columns for top and bottom value.)
        #
        # Also: only populate the bottom side if a bottom column name was
        # supplied
        if top_filter(row):
            if supplied(bottom_colname) and bottom_filter(row):
                return [top_label, bottom_label]
            else:
                return [top_label]
        elif supplied(bottom_colname) and bottom_filter(row):
            return [bottom_label]
        else:
            # Row matched neither side; log it and drop it (implicit
            # None return).
            debug("dropping row: %s" % str([s for s in row]))

    
    def side_ordering(odict):
        # Top side always renders before the bottom side.
        return top_label, bottom_label

    classify_by_side = Classify(_classify_by_side, ordering=side_ordering)

    
    # Use different column names in the final output if we're on top
    # or bottom
    def _reorder(dset, state):
        label = state['path'][-1]
        if label == top_label:
            return [time_colname, top_colname]
        else:
            return [time_colname, bottom_colname]

    reorder_columns = StReorder(_reorder)
                                
    
    # Get the data in the native datatransformer format (a tree of
    # OrderedDicts and Datasets).
    # NOTE(review): this local name "chain" shadows itertools.chain
    # (imported at module top) for the rest of this function.
    chain = (classify_by_chart + classify_by_side + reorder_columns)

    processed_data = chain(full_data)

    # Convert to rytimeseries' native format (tuples of charts/sides/data)
    def gen_chart(chart_tree):
        debug("Keys of chart_tree are %s" % chart_tree.keys())
        for chart_name, chart in chart_tree.items():
            debug("Keys of chart are %s" % str(chart.keys()))
            # The TOP/BOTTOM sentinels mean "no explicit label was
            # given": emit "" for them in the output structure.
            top_side = (
                ("" if top_label == TOP else top_label),
                tuple(chart[top_label].get_column(0)),
                tuple(chart[top_label].get_column(1)))
            if bottom_label in chart:
                bottom_side = (
                    ("" if bottom_label == BOTTOM else bottom_label),
                    tuple(chart[bottom_label].get_column(0)),
                    tuple(chart[bottom_label].get_column(1)))
            else:
                # Single-sided chart: empty bottom.
                bottom_side = ('', (), ())
                                
            yield (chart_name, top_side, bottom_side)

    return tuple(gen_chart(processed_data))
        

def OLDparse_data_relational(full_data, 
                          plot_label_map,
                          time_colname,
                          key_colnames,
                          top_colname, 
                          bottom_colname, 
                          top_filter, 
                          bottom_filter, 
                          top_label, 
                          bottom_label):
    """
    Split input data into charts and sides (top and bottom halves of
    a chart).

    *plot_label_map* is an ordered mapping of chart names in the data
    to labels to be used on the visualization. This specifies both the
    names and the order in which they appear on the visualization. If
    *plot_label_map* is ``None``, the names of the charts from the
    input data are used, in alphabetical order.

    NOTE(review): superseded by parse_data_relational above;
    presumably kept only for reference.
    """

    def _mangle_side(side_colname, side_filter):
        # Filter, project, and partition full_data for one side.
        # Output is a dictionary of (key, dataset) pairs as with
        # partition(), but the key has been stringified so it can be
        # compared to plot_label_map.
        def _partition(d):
            if d.get_num_rows() == 0:
                return dict()
            else:
                # After _eliminate(), columns 0 and 1 are time and
                # value; everything from column 2 on is key.
                key_colids = list(xrange(2, d.get_num_columns()))
                return dict(
                    ("/".join(str(i) for i in keyval), subdata) 
                    for keyval, subdata in d.partition(key_colids))
        def _eliminate(d):
            # Output is a dataset with columns in the following order
            # (kn == key column #n):
            #
            #   (time, value, k1, k2, ... kn)
            if d.get_num_rows() == 0:
                return d
            else:
                # NOTE(review): despite the name, kept_colids holds
                # column *names*, not numeric ids.
                kept_colids = ((time_colname, side_colname) 
                                 + tuple(iter(key_colnames)))
                kept_columns = [d.get_column(i) for i in kept_colids]
                # NOTE(review): the bare name "rayon" is not bound by
                # this module's explicit imports (only "from rayon
                # import toolbox"); presumably the star import of
                # rayon.rytools provides it -- confirm.
                return rayon.data.Dataset.from_columns(kept_columns)
        def _filter():
            if supplied(side_filter):
                return full_data.filter_pass(parse_filter_expr(side_filter))
            else:
                return full_data
        # First, filter out extraneous rows. Then eliminate the
        # columns we don't care about (and order the
        # dataset columns). Finally, partition the remaining data based on the
        # key.
        return _partition(_eliminate(_filter()))

    # Note: Having no bottom_colname implies that there will be no
    # bottom data, (i.e., a single-sided plot). top_colname and
    # top_filter are processed normally.

    top_mangled = _mangle_side(top_colname, top_filter)

    if supplied(bottom_colname):
        bottom_mangled = _mangle_side(bottom_colname, bottom_filter)
    else:
        bottom_mangled = dict()

    # Format output
    if plot_label_map is None:
        # Create a plot_label map by merging key values from top and
        # bottom, sorted lexicographically.
        # NOTE(review): bottom_mangled is always a dict at this point
        # (never None), so this first branch is dead code.
        if bottom_mangled is None:
            plot_labels = set(top_mangled.keys())
        else:
            plot_labels = set(top_mangled.keys()) | set(bottom_mangled.keys())
        plot_label_map = tuple((k, k) for k in sorted(list(plot_labels)))

    def _row_gen():
        # Yield one (label, top, bottom) triple per mapped chart key;
        # a key missing on either side becomes an empty series rather
        # than an error.
        def _tuples_from_dataset(d, side_label):
            return (side_label,
                    tuple(d.get_column(0)), 
                    tuple(d.get_column(1)))
        for (keyval, label) in plot_label_map:
            try:
                top  = _tuples_from_dataset(top_mangled[keyval], 
                                            top_label)
            except KeyError:
                top = (top_label, tuple(), tuple())
            try:
                bottom = _tuples_from_dataset(bottom_mangled[keyval], 
                                              bottom_label)
            except KeyError:
                bottom = (bottom_label, tuple(), tuple())
            yield label, top, bottom

    return tuple(_row_gen())
        
        

def parse_data_functional(d, 
                          state_initializer,
                          parse_fn, 
                          top_lbl, 
                          btm_lbl):
    """
    Build a charts_data structure by running a user-supplied stateful
    classifier over every row of *d*.

    parse_fn :: Row -> State -> ((Label, IsTop, Time, Value), State)
    state_initializer :: () -> State

    Rows for which parse_fn returns None in the first slot are
    dropped.
    """
    if len(d) == 0:
        return tuple()

    def classified_rows():
        # Thread the user-defined state through parse_fn row by row.
        state = state_initializer()
        for row in d:
            parsed, state = parse_fn(row, state)
            if parsed is not None:
                yield parsed

    # NOTE(review): relies on the bare name "rayon" being in scope
    # via the star import of rayon.rytools -- confirm.
    classified = rayon.data.Dataset.from_rows(
        classified_rows(),
        colnames="lbl is_top time val".split())

    def as_chart(lbl, subset):
        # Split one chart's rows into top/bottom on the is_top flag.
        top, btm = subset.filter_both(lambda r: r.is_top)
        def series(part):
            return (tuple(part.get_column('time')),
                    tuple(part.get_column('val')))
        top_times, top_vals = series(top)
        btm_times, btm_vals = series(btm)
        return (lbl,
                (top_lbl, top_times, top_vals),
                (btm_lbl, btm_times, btm_vals))

    return tuple(as_chart(lbl, subset)
                 for lbl, subset in classified.partition("lbl"))


def get_classifier(f, initializer_sym=None, classifier_sym=None):
    """
    Looks for the symbols required for functional data parsing in *f*,
    a file object that will be ``exec``'ed. *initializer_sym* is the
    name of the initialization function in the module, and
    *classifier_sym* is the name of the classifier.

    Returns the (initializer, classifier) pair.  When
    *initializer_sym* is None and the file defines no ``initialize``,
    a no-op default is used; a missing classifier always raises
    NameError.

    Note: executing an arbitrary user-supplied file is by design
    here, but callers must only pass trusted files.
    """
    def _symbol_getter(sym, default):
        # Build a lookup for *sym* in an exec'd namespace, falling
        # back to *default*, or raising NameError when default is
        # None.
        def _get_symbol(env):
            try:
                return env[sym]
            except KeyError:
                if default is not None: 
                    return default
                else: 
                    raise NameError("No such symbol '%s' in '%s'" % 
                                    (sym, f.name))
        return _get_symbol
                
    if initializer_sym is None:
        get_initializer_fn = _symbol_getter("initialize", lambda: None)
    else:
        get_initializer_fn = _symbol_getter(initializer_sym, None)

    if classifier_sym is None:
        get_classifier_fn = _symbol_getter("classify", None)
    else:
        get_classifier_fn = _symbol_getter(classifier_sym, None)
    
    # Python 2 exec statement form; it accepts an open file object
    # directly and populates env with the file's definitions.
    env = {}
    exec f in env
    return get_initializer_fn(env), get_classifier_fn(env)





# Trend lines

def get_trend_data(opts, raw_data):
    """
    Compute trend-line data for *raw_data* per the options in *opts*,
    or return None when no trend line was requested.

    opts['trend_line'] names the trend algorithm; opts['trend_args'],
    when not None, overrides that algorithm's default keyword
    arguments.
    """
    if not supplied(opts['trend_line']):
        return None

    default_trend_arguments = {
        'kernel': {'output_size': 100},
        'ols': {},
        'moving_avg': {},
    }
    trend_args = opts['trend_args']
    if trend_args is None:
        trend_args = default_trend_arguments[opts['trend_line']]
    return trend_data(opts['trend_line'], raw_data, **trend_args)

def new_trend_plot(opts, 
                   tools, 
                   time_data,
                   time_scale, 
                   value_data,
                   value_scale):
    """
    Build a line plot of the trend of (time_data, value_data), using
    the trend algorithm selected in *opts*.
    """
    # Trend computation works on plain numbers, so convert datetimes
    # to Unix seconds first and back again afterwards.
    epoch_times = [as_seconds(t) for t in time_data]
    numeric = tools.new_dataset_from_columns([epoch_times, value_data])
    trended = get_trend_data(opts, numeric)
    trend_times = [make_datetime(s) for s in trended.get_column(0)]
    return tools.new_plot(
        'line',
        x_data = trend_times,
        x_scale = time_scale,
        y_data = trended.get_column(1),
        y_scale = value_scale,
        line_color_scale = opts['trend_line_color'],
        line_width_scale = opts['trend_line_width'],
        line_style_scale = "solid")
        


# Variation (standard deviation) fields

def new_variation_plot(opts,
                       tools,
                       time_data,
                       time_scale,
                       value_data,
                       value_scale):
    """
    Build a filled "field" plot spanning one standard deviation above
    and below the windowed mean of the series.
    """
    debug("variation plot: value_scale.get_input_min() == %s" 
          % value_scale.get_input_min())
    debug("variation plot: value_scale.get_input_max() == %s" 
          % value_scale.get_input_max())
    # Window the series and compute per-window population statistics;
    # each window's stats are (mean, <middle value>, stdev).
    new_times, new_vals = window_timeseries(
        time_data,
        value_data,
        lambda ts, vs: population_statistics(vs))
    if new_vals:
        means, _, stdevs = zip(*new_vals)
    else:
        means, stdevs = (), ()
    dbg_pprint("raw means[:20]", means[:20])
    dbg_pprint("raw stdevs[:20]", stdevs[:20])
    # Upper/lower band: mean +/- one standard deviation.
    hi_band = tuple(m + s for m, s in izip(means, stdevs))
    # TODO: this won't hold up for non-count data
    lo_band = tuple(m - s for m, s in izip(means, stdevs))
    dbg_pprint("lo[:20]", lo_band[:20])
    dbg_pprint("hi[:20]", hi_band[:20])
    return tools.new_plot(
        'field',
        x_data = new_times,
        x_scale = time_scale,
        y1_data = hi_band,
        y1_scale = value_scale,
        y2_data = lo_band,
        y2_scale = value_scale,
        line_color_scale = opts['variation_line_color'],
        line_width_scale = opts['variation_line_width'],
        line_style_scale = opts['variation_line_style'],
        fill_color = opts['variation_field_color'])
        
    


# NOTE: It is possible in Rayon to plot data on many more axes than I
# am doing here. (E.g., putting data on marker size.). The argument
# list for this command is already huge, and I don't want to add to it
# unnecessarily, so that is not available in this tool. 
#
# TODO: Review with analysts and other users and determine if it is
# desirable to add.
            
    

def plot_as_points(opts, tools, side, x_scales, y_scale, top_bottom):
    """
    Render *side* (a (label, times, values) triple) as a scatter
    plot on the point-style time scale (x_scales[0]).

    *top_bottom* is accepted for signature parity with the other
    plot_as_* functions but is unused here.
    """
    # FIX(review): the original unpacked *side* twice (once into
    # "label, times, values" and again into "lbl, times, values");
    # the first unpacking was dead code and has been removed.
    lbl, times, values = side

    # Use the point scale, not the ranged scale used by bar plots.
    x_scale = x_scales[0]

    # TODO: marker-size, marker-color and marker-style should all have
    # sensible defaults. marker-size-scale, marker-color-scale and
    # marker-style-scale should all have NO_VALUE by default. I may
    # add marker-*-args at some point in the future.

    return tools.new_plot(
        'scatter',
        x_data=times,
        x_scale=x_scale,
        y_data=values,
        y_scale=y_scale,
        marker_size_scale=opts['marker_size'],
        marker_color_scale=opts['marker_color'],
        marker_shape_scale=opts['marker_shape'])



def plot_as_lines(opts, tools, side, time_scales, value_scale, top_bottom):
    """
    Render *side* (a (label, times, values) triple) as a line plot
    on the point-style time scale (time_scales[0]).
    """
    lbl, xs, ys = side
    return tools.new_plot(
        'line',
        x_data = xs,
        x_scale = time_scales[0],
        y_data = ys,
        y_scale = value_scale,
        line_color_scale = opts['line_color'],
        line_width_scale = opts['line_width'],
        line_style_scale = opts['line_style'])


    
def plot_as_bars(opts, tools, side, time_scales, value_scale, top_bottom):
    """
    Render *side* (a (label, times, values) triple) as a bar plot.
    Bars use the ranged time scale (time_scales[1]), not the point
    scale.
    """
    lbl, xs, ys = side
    # bar_fixup_scale is enabled only for 50 or fewer bars --
    # presumably a rendering adjustment that would be too slow or too
    # visible on dense data; confirm in the rayon docs.
    fixup = len(xs) <= 50
    return tools.new_plot(
        'bar',
        x_data=xs,
        x_scale=time_scales[1],
        y_data=ys,
        y_scale=value_scale,
        bar_color_scale=opts['bar_fill_color'],
        bar_width_scale=opts['bar_width'],
        border_color_scale=opts['bar_border_color'],
        border_width_scale=opts['bar_border_width'],
        bar_fixup_scale=fixup)



def plot_as_filled_lines(
    opts, tools, side, time_scales, value_scale, top_bottom):
    """
    Render *side* as a line plot with the area under it filled.  The
    fill color comes from opts['top_field_color'] or
    opts['bottom_field_color'] depending on *top_bottom*.

    Raises ValueError if *top_bottom* is neither 'top' nor 'bottom'.
    """
    lbl, xs, ys = side

    if top_bottom not in ('top', 'bottom'):
        raise ValueError("Odd value for top_bottom: '%s'" % top_bottom)
    fill_color = opts['%s_field_color' % top_bottom]

    return tools.new_plot(
        'filledline',
        x_data=xs,
        x_scale=time_scales[0],
        y_data=ys,
        y_scale=value_scale,
        line_color_scale = opts['line_color'],
        line_width_scale = opts['line_width'],
        line_style_scale = opts['line_style'],
        fill_color=fill_color)
        
        



def new_time_series_plot(opts, tools, side, x_scales, y_scale, top_bottom):
    """
    Dispatch to the plot builder selected by opts['style'].

    Known styles: 'bars', 'lines', 'dots', 'filled_lines'.  Raises
    ValueError for anything else (the option parser should already
    have rejected unknown styles).
    """
    style = opts['style']
    if style == 'bars':
        builder = plot_as_bars
    elif style == 'lines':
        builder = plot_as_lines
    elif style == 'dots':
        builder = plot_as_points
    elif style == 'filled_lines':
        builder = plot_as_filled_lines
    else:
        # Shouldn't get here
        raise ValueError("Unknown plot style '%s'" % style)
    return builder(opts, tools, side, x_scales, y_scale, top_bottom)


def new_plots(opts, 
              tools, 
              lbl, 
              time_data, 
              time_scales, 
              value_data, 
              value_scale,
              top_bottom):
    """
    Return the main time series plot, plus the variation and trend
    line plots as appropriate.
    """
    time_point_scale, _time_range_scale = time_scales

    # TODO: remove useless "side" abstraction from
    # new_time_series_plot, etc.
    plots = [new_time_series_plot(opts,
                                  tools,
                                  (lbl, time_data, value_data), 
                                  time_scales, 
                                  value_scale,
                                  top_bottom)]

    # Optional overlays, both drawn against the point-style time
    # scale: trend line first, then the variation field.
    if supplied(opts['trend_line']):
        plots.append(new_trend_plot(opts, tools,
                                    time_data, time_point_scale,
                                    value_data, value_scale))

    if supplied(opts['variation_field']):
        plots.append(new_variation_plot(opts, tools,
                                        time_data, time_point_scale,
                                        value_data, value_scale))

    return plots




def get_precision(td_data, min_precision=0, max_precision=3):
    """
    Return the number of decimal places (between *min_precision* and
    *max_precision*, inclusive) needed to display every value in
    *td_data* exactly: the maximum over the data, clamped to
    *max_precision*.
    """
    def _is_whole(x):
        # Floating-point-tolerant "has no fractional part" test.
        return abs(int(x) - x) < .00001

    if min_precision >= max_precision:
        return min_precision

    precision = min_precision
    for d in td_data:
        tmp = d
        # FIX(review): the original scaled by (10 * min_precision)
        # whenever the *running max* was positive, which both used
        # the wrong factor (multiplication instead of
        # exponentiation) and, with the default min_precision=0,
        # zeroed out every value after the first imprecise one.
        # Scale by 10**min_precision, and only because tmp_precision
        # starts counting at min_precision.
        if min_precision > 0:
            tmp = tmp * 10 ** min_precision
        tmp_precision = min_precision
        while tmp_precision <= max_precision:
            if _is_whole(tmp):
                break
            tmp = tmp * 10
            tmp_precision += 1
        # Clamp: the loop overshoots by one when the value never
        # became whole within max_precision digits.
        precision = max(precision, min(tmp_precision, max_precision))
        if precision == max_precision:
            break
    return precision

def get_tick_positions(col, scale, tick_fns):
    """
    Run each tick-generating function in *tick_fns* against *col* and
    *scale*, concatenating all the resulting positions into one
    tuple.  Returns (positions, precision), where precision is the
    number of decimal digits needed to render the positions; returns
    ([], 0) when *col* is empty.
    """
    if not col:
        return [], 0
    positions = tuple(pos
                      for fn in tick_fns
                      for pos in fn([col], scale))
    return positions, get_precision(positions)

    



def get_time_ticksets(opts, tools, time_scale, format_for_bottom):
    """
    Returns the time ticksets to be used in the visualization as a
    2-tuple ``(minor_tickset, major_tickset)``, or ``(None, None)``
    if time ticksets won't be used.

    When *format_for_bottom* is true, tick positions carry formatted
    time labels (for a labeled bottom axis); otherwise the labels are
    empty strings.
    """
    # We'll only need these if the user wants a centerline drawn, or
    # if they want a vertical grid and haven't specified any other way
    # to draw it.
    will_use_time_ticksets = (
        opts['draw_centerline']
        or (opts['vgrid'] 
            and not (supplied(opts['vgrid_lines_at'])
                     or supplied(opts['vgrid_num_lines']))))

    if not will_use_time_ticksets:
        return None, None


    # Derive tick positions and the label formatter from the time
    # scale's input range.
    x_minor_ticks, x_major_ticks, x_labeler = gen_time_ticks(
        time_scale.get_input_min(), time_scale.get_input_max())


    def _time_tickmarker(size, spacing, angle, halign, valign, font_size):
        # Build a labeled vertical tick marker with the given
        # geometry and label styling.
        def _get_above():
            # NOTE(review): both branches return 0, so ticks never
            # extend above the axis line; the commented-out
            # "return size" suggests an above-axis tick was once
            # intended for the non-bottom case -- confirm.
            if format_for_bottom:
                return 0
            else:
                return 0
                #return size
        return tools.new_labeled_marker(
            marker=tools.new_marker('vline',
                                      below=size,
                                      above=_get_above(),
                                      width=2),
            labeler=tools.new_labeler(font_size=font_size,
                                        halign=halign,
                                        valign=valign,
                                        angle=angle),
            label_position='s',
            label_spacing=spacing)

    def _get_tick_tuples(ticks):
        # Pair each tick position with its label; blank labels when
        # this isn't the labeled bottom axis.
        if format_for_bottom:
            return tuple((t, x_labeler(t)) for t in ticks)
        else:
            return tuple((t, "") for t in ticks)

    def _get_major_tickset():
        marker = _time_tickmarker(
            opts['time_major_tick_size'],
            opts['time_major_tick_label_spacing'],
            opts['time_tick_label_angle'],
            opts['time_tick_label_halign'],
            opts['time_tick_label_valign'],
            opts['time_major_tick_label_size'])
        return tools.new_tickset_from_tuples(
                'default',
                tick_tuples=_get_tick_tuples(x_major_ticks),
                scale=time_scale,
                labeledmarker=marker)

    def _get_minor_tickset():
        marker = _time_tickmarker(
            opts['time_minor_tick_size'],
            opts['time_minor_tick_label_spacing'],
            opts['time_tick_label_angle'],
            opts['time_tick_label_halign'],
            opts['time_tick_label_valign'],
            opts['time_minor_tick_label_size'])
        return tools.new_tickset_from_tuples(
                'default',
                tick_tuples=_get_tick_tuples(x_minor_ticks),
                scale=time_scale,
                labeledmarker=marker)

    return _get_minor_tickset(), _get_major_tickset()

def get_horizontal_gridlines(opts, tools, value_data, value_scale):
    """
    Returns the Gridlines object corresponding to the horizontal
    gridlines, or ``None`` if no horizontal gridlines should be drawn.
    """
    if not opts['hgrid']:
        return None

    if supplied(opts['hgrid_lines_at']):
        # User-specified line positions are in data space, so plot
        # them against the value scale.
        hgrid_data, hgrid_precision = get_tick_positions(
            value_data, value_scale, opts['hgrid_lines_at'])
        hgrid_scale = value_scale
    else:
        # Plot the grid lines scaled 1:1 with the
        # visualization space, as they're being plotted
        # independently of the data.

        # Requirements:
        #  - Evenly subdivide the visualization space
        #
        #  - Don't put lines on the top or bottom (There may be other
        #    lines there, like borders.)
        #
        # Example: if hgrid_num_lines == 2, put lines at 1/3 and 2/3

        # float() guards against Python 2 integer division, which
        # would truncate every position to 0 and stack all the lines
        # at the bottom.
        hgrid_data = [float(x + 1) / (opts['hgrid_num_lines'] + 1)
                      for x in xrange(opts['hgrid_num_lines'])]

        hgrid_scale = tools.new_scale('linear',
                                      input_min=0,
                                      input_max=1,
                                      output_min=value_scale.get_output_min(),
                                      output_max=value_scale.get_output_max())
    return tools.new_gridlines(
        'horizontal',
        pos_data=hgrid_data,
        pos_scale=hgrid_scale,
        line_style_scale=opts['hgrid_style'],
        line_width_scale=opts["hgrid_width"],
        line_color_scale=opts["hgrid_color"])

def get_vertical_gridlines(
    opts, tools, time_scale, minor_time_tickset, major_time_tickset):
    """
    Return the Gridlines object corresponding to the vertical
    gridlines, or ``None`` if no vertical gridlines should be
    drawn. *minor_time_tickset* and *major_time_tickset* may be
    ``None``.
    """
    if not opts['vgrid']:
        return None

    def vgrid_data_from_num(num):
        # Evenly subdivide the unit interval, leaving the edges free
        # for border lines.  float() guards against Python 2 integer
        # division, which would truncate every position to 0.
        return [float(x + 1) / (num + 1) for x in xrange(num)]


    if supplied(opts['vgrid_lines_at']):
        # Use the user-supplied options
        vgrid_data = tuple(
            opts['vgrid_lines_at'].iter_tick_positions())
        vgrid_scale = time_scale
    elif supplied(opts['vgrid_num_lines']):
        # Plot the grid lines scaled 1:1 with the
        # visualization space, as they're being plotted
        # independently of the data.
        vgrid_data = vgrid_data_from_num(opts['vgrid_num_lines'])
        vgrid_scale = lambda x: x
    elif major_time_tickset is not None:
        # Use the major time ticks as grid lines. (Don't use the
        # first, as it will compete with the left border line.)
        vgrid_data = list(major_time_tickset.iter_tick_positions())[1:]
        vgrid_scale = time_scale
    else:
        # This really shouldn't happen. By rights I should throw an
        # exception or something, but....oh, just make it three.
        vgrid_data = vgrid_data_from_num(3)
        vgrid_scale = lambda x: x

    return tools.new_gridlines(
        'vertical',
        pos_data=vgrid_data,
        pos_scale=vgrid_scale,
        line_style_scale=opts['vgrid_style'],
        line_width_scale=opts["vgrid_width"],
        line_color_scale=opts["vgrid_color"])

def format_ticks(tick_positions, tick_format, value_units, precision):
    """
    Return a list of (position, label) tuples for *tick_positions*.

    *tick_format* may name one of the built-in formatters
    ('autofloat', 'metric', 'binary'), in which case *value_units*
    (if non-blank) is appended to each label; otherwise it is treated
    as a %-style template receiving the keys 'value', 'autofloat' and
    'units'.
    """
    def autofloat(v):
        # Render as an integer when no fractional digits are wanted.
        if precision == 0:
            return "%d" % v
        return ("%%.%df" % precision) % v

    formatters = {
        'autofloat': autofloat,
        'metric'   : num_prefix,
        'binary'   : lambda v: num_prefix(v, use_binary=True)}

    # Don't append units to the default formatter; instead provide it
    # as an argument to the format string.
    if tick_format not in formatters:
        def labeler(v):
            return tick_format % {'value': v,
                                  'autofloat': autofloat(v),
                                  'units': value_units}
    elif value_units.strip():
        def labeler(v):
            return formatters[tick_format](v) + value_units
    else:
        labeler = formatters[tick_format]

    return [(pos, labeler(pos)) for pos in tick_positions]
    
        
    

def get_value_tickset(opts, tools, value_data, value_scale):
    """
    Build the tickset for the value (Y) axis: tick positions derived
    from *value_data* via opts['value_ticks'], labeled according to
    the value tick label options.
    """
    positions, precision = get_tick_positions(
        value_data, value_scale, opts['value_ticks'])

    labeled_ticks = format_ticks(positions,
                                 opts['value_tick_label_format'],
                                 opts['value_units'],
                                 precision)

    # Horizontal tick marks extending into the plot, labeled to the
    # west of the axis.
    tick_marker = tools.new_marker('hline',
                                   before=opts['value_tick_size'],
                                   after=0,
                                   width=2)
    tick_labeler = tools.new_labeler(
        font_size=opts['value_tick_label_size'],
        halign=opts['value_tick_label_halign'],
        valign=opts['value_tick_label_valign'],
        angle=opts['value_tick_label_angle'])
    labeled_marker = tools.new_labeled_marker(
        marker=tick_marker,
        labeler=tick_labeler,
        label_position='w',
        label_spacing=opts['value_tick_label_spacing'])

    return tools.new_tickset_from_tuples(
        'default',
        tick_tuples=labeled_ticks,
        scale=value_scale,
        labeledmarker=labeled_marker)


def get_bottom_border(opts, 
                      tools, 
                      minor_time_tickset, 
                      major_time_tickset):
    """
    Build the bottom (time-axis) border, attaching the minor and
    major time ticksets when they exist and their corresponding
    option is set to 'auto'.  Either tickset may be ``None``.
    """
    border = tools.new_border(
        'hline',
        y_offset=.25,
        line=tools.new_line(opts['horizontal_border_line_style']))

    # Attach minor ticks first, then major, mirroring the option
    # names that gate each one.
    for opt_name, tickset in (('time_minor_ticks', minor_time_tickset),
                              ('time_major_ticks', major_time_tickset)):
        if tickset is not None and opts[opt_name] == 'auto':
            border.add_tickset(tickset)

    return border

def get_annotation_plot(opts, tools, side, time_scale, value_scale, style):
    """
    Build a labeled-scatter plot annotating the first occurrence of
    the minimum and/or maximum value in *side* (a (label, time_data,
    value_data) triple), per the annotate_min/annotate_max options.
    *style* is 'top' or 'bottom' and selects which side of the marker
    the label sits on.  Returns ``None`` when nothing should be
    annotated.
    """
    lbl, time_data, value_data = side

    if (
        # Don't annotate if there's no data...
        len(value_data) == 0
        # ...or if we're told not to...
        or not (opts['annotate_max'] or opts['annotate_min'])
        # ...or it doesn't make sense for the visualization.
        or opts['style'] == 'bars'):
        return None

    debug("Adding annotations")

    def _find_first_instance_of(val):
        # Locate the first observation equal to *val* and build its
        # "value, HH:MM" annotation text.
        # NOTE(review): num_prefix is called with opts['value_units']
        # as its second positional argument; elsewhere in this file
        # the second argument is use_binary -- confirm the intended
        # signature.
        for t, v in izip(time_data, value_data):
            if val == v:
                annotation = "%s, %d:%02d" % (
                    num_prefix(v, opts['value_units']),
                    t.hour, t.minute)
                return t, v, annotation

    min_time, min_val, min_label = _find_first_instance_of(min(value_data))
    max_time, max_val, max_label = _find_first_instance_of(max(value_data))

    font_properties_template = {
            'font_size' : opts['annotation_label_size'],
            'color'     : opts['annotation_label_color'],
            'valign'    : 'center'}

    def _over_half_p(scale, val):
        # Return True if val is past the midpoint of the
        # scale. False if not, or if the scale is really a
        # function.
        if not hasattr(scale, "get_output_max"):
            return False
        if scale.get_output_max() == 0:
            return False
        return (scale(val) / scale.get_output_max() > .5)

    def font_properties_scale(observation):
        # Right-align labels past the midpoint so they hang back over
        # the plot instead of running off the edge.
        timestamp, value = observation
        props = dict(font_properties_template)
        if _over_half_p(time_scale, timestamp):
            props['halign'] = "right"
        else:
            props['halign'] = "left"
        return props

    def label_position_scale(observation):
        # Return "ne", "nw", "se" or "sw", depending on (1)
        # whether this is a top or bottom visualization, and (2)
        # whether the annotation is on a value to the left or
        # right of the midpoint of the visualization.
        timestamp, value = observation

        if style == 'top':
            northsouth = 'n'
        else:
            northsouth = 's'

        if _over_half_p(time_scale, timestamp):
            eastwest = 'w'
        else:
            eastwest = 'e'
        return northsouth + eastwest

    # Figure out which annotations to use
    x_data, y_data, text_data = [], [], []

    if opts['annotate_max']:
        x_data.append(max_time)
        y_data.append(max_val)
        text_data.append(max_label)

    if opts['annotate_min']:
        x_data.append(min_time)
        y_data.append(min_val)
        text_data.append(min_label)

    zipped_data = zip(x_data, y_data)

    # (An unreachable "return ann_plot" that followed this return --
    # referencing an undefined name -- has been removed.)
    return tools.new_plot(
        'labeledscatter',
        x_scale = time_scale,
        y_scale = value_scale,
        x_data = x_data,
        y_data = y_data,
        text_data = text_data,
        marker_color_scale = opts['annotation_marker_color'],
        marker_shape_scale = "circle",
        marker_size_scale = opts['annotation_marker_size'],
        label_position_data = zipped_data,
        label_position_scale = label_position_scale,
        label_spacing_scale = opts['annotation_label_spacing'],
        font_properties_data = zipped_data,
        font_properties_scale = font_properties_scale,
        bgcolor_scale = opts['annotation_label_background_color'])
        
        
    


def decorate_one_sided(opts, 
                       tools, 
                       subchart, 
                       side,
                       time_scale, # always the point scale, not the range scale
                       value_scale,
                       draw_bottom_border):
    """
    Add decorations to *subchart* as appropriate for a one-sided plot:
    min/max annotations, horizontal and vertical gridlines, a left
    border carrying the value ticks, and -- when *draw_bottom_border*
    is true -- a bottom time-axis border.
    """
    (lbl, time_data, value_data) = side

    # Annotations (Add blank borders in case the annotations float off
    # the plot)
    top_annotation = get_annotation_plot(
        opts, tools, side, time_scale, value_scale, 'top')
    if top_annotation is not None:
        subchart.add_top_border(
            tools.new_border('none'), height=.085)
        subchart.add_plot(top_annotation, "annotation")


    # Gridlines.  format_for_bottom=True: these ticksets carry labels
    # since they may also be reused for the bottom border below.
    minor_time_tickset, major_time_tickset = get_time_ticksets(
        opts, tools, time_scale, True) 

    hgridlines = get_horizontal_gridlines(
        opts, tools, value_data, value_scale)
    if hgridlines is not None:
        subchart.add_rear_gridlines(hgridlines)

    vgridlines = get_vertical_gridlines(
        opts, tools, time_scale, minor_time_tickset, major_time_tickset)
    if vgridlines is not None:
        subchart.add_rear_gridlines(vgridlines)
            

    # Left border, carrying the value (Y) ticks
    value_tickset = get_value_tickset(
        opts, tools, value_data, value_scale)

    left_border = tools.new_border(
        'vline', 
        ticksets=[value_tickset],
        x_offset=.25,
        line=tools.new_line(opts['vertical_border_line_style']))
        
    subchart.add_left_border(left_border, width=.1)

    # Bottom border
    if draw_bottom_border:
        subchart.add_bottom_border(
            get_bottom_border(opts, 
                              tools, 
                              minor_time_tickset, 
                              major_time_tickset),
            height=.1)





    
def decorate_two_sided(opts,
                       tools,
                       subchart,
                       top_side,
                       bottom_side,
                       time_scale, # point scale, not range scale
                       top_value_scale,
                       bottom_value_scale,
                       draw_bottom_border):
    """
    Add decorations to *subchart* as appropriate for a two-sided
    (top/bottom) plot: min/max annotations for each side, gridlines,
    an optional ticked center line, a left border with both sides'
    value ticks and labels, and -- when *draw_bottom_border* is true
    -- a labeled bottom time axis.
    """
    (top_lbl, top_time_data, top_value_data) = top_side
    (bottom_lbl, bottom_time_data, bottom_value_data) = bottom_side

    # Annotations (Add blank borders in case the annotations float off
    # the plot)
    top_annotation = get_annotation_plot(opts, 
                                         tools, 
                                         top_side, 
                                         time_scale, 
                                         top_value_scale, 
                                         'top')
    if top_annotation is not None:
        subchart.add_top_border(
            tools.new_border('none'), height=.085)
        subchart.add_plot(top_annotation, "top_annotation")


    bottom_annotation = get_annotation_plot(opts, 
                                            tools, 
                                            bottom_side, 
                                            time_scale, 
                                            bottom_value_scale, 
                                            'bottom')
    if bottom_annotation is not None:
        subchart.add_bottom_border(
            tools.new_border('none'), height=.085)
        subchart.add_plot(bottom_annotation, "bottom_annotation")

    # Gridlines and ticked center line. (Both of which conditionally
    # depend on time ticksets.)

    # format_for_bottom=False: labels are suppressed here; the bottom
    # border (below) requests its own labeled ticksets.
    minor_time_tickset, major_time_tickset = get_time_ticksets(
        opts, tools, time_scale, False) 

    hgridlines_top = get_horizontal_gridlines(
        opts, tools, top_value_data, top_value_scale)

    hgridlines_bottom = get_horizontal_gridlines(
        opts, tools, bottom_value_data, bottom_value_scale)

    if hgridlines_top is not None:
        subchart.add_rear_gridlines(hgridlines_top)
    if hgridlines_bottom is not None:
        subchart.add_rear_gridlines(hgridlines_bottom)

    
    vgridlines = get_vertical_gridlines(opts, 
                                        tools, 
                                        time_scale, 
                                        minor_time_tickset, 
                                        major_time_tickset)

    if vgridlines is not None:
        subchart.add_rear_gridlines(vgridlines)

    # Center line halfway up the chart, carrying the (unlabeled) time
    # ticksets.  Only drawn when both ticksets were built.
    if (opts['draw_centerline'] and (None not in (minor_time_tickset,
                                                  major_time_tickset))):
        centerline = tools.new_gridlines(
            'horizontal',
            pos_data=[.5],
            pos_scale=lambda x: x,
            line_style_scale=opts['horizontal_border_line_style'],
            line_width_scale=2,
            line_color_scale="black",
            ticksets_scale=(minor_time_tickset, major_time_tickset))

        subchart.add_rear_gridlines(centerline)


    # Left border

    # -- The "proper" border

    top_y_tickset = get_value_tickset(
        opts, tools, top_value_data, top_value_scale)
    bottom_y_tickset = get_value_tickset(
        opts, tools, bottom_value_data, bottom_value_scale)
        
    left_border = tools.new_border(
        'vline',
        ticksets=[top_y_tickset, bottom_y_tickset],
        x_offset=.25,
        line=tools.new_line(opts['vertical_border_line_style']))

    # This guarantees the label border will overlap with the line
    # border. This is Okay(tm). (If fairly hacktastic.) The width of
    # the label border should be the one you want to adjust if you
    # want to put additional stuff outside the label/line border
    # complex.
    subchart.add_left_border(left_border, width="15px")


    # -- Labels

    top_label_border = tools.new_border("vlabel", 
                                        label=opts['top_label'],
                                        font_size="10px",
                                        halign="center",
                                        valign="bottom")

    bottom_label_border = tools.new_border("vlabel", 
                                           label=opts['bottom_label'],
                                           font_size="10px",
                                           halign="center",
                                           valign="bottom")


    uber_label_border = tools.new_border("vsplit", 
                                         children=[top_label_border,
                                                   bottom_label_border],
                                         rpad="1px")

    subchart.add_left_border(uber_label_border, width="1px")

    # Add a final border for spacing
    subchart.add_left_border(tools.new_border("none"), width=.1)
                                        


    if draw_bottom_border:
        # Draw bottom border like one-sided chart
        # TODO: Figure out spacing that looks good
        labeled_ticks = get_time_ticksets(
            opts, tools, time_scale, True) 

        subchart.add_bottom_border(
            get_bottom_border(opts, 
                              tools, 
                              labeled_ticks[0], 
                              labeled_ticks[1]),
            height=.1)

        

def split_outliers(value_max, value_min, time_data, value_data):
    """
    Split *time_data* and *value_data* on the thresholds *value_min*
    and *value_max* (either of which may be ``None``, meaning no
    bound on that end).  Returns a triple (inliers, low_outliers,
    high_outliers) of pairs (time_data, value_data) containing the
    time and value columns of the inliers and outliers.
    """
    debug("Trimming data")

    # Normalize reversed thresholds.
    if (value_min is not None 
        and value_max is not None 
        and value_min > value_max):
        value_min, value_max = value_max, value_min

    def _is_low(v):
        return value_min is not None and v < value_min

    def _is_high(v):
        return value_max is not None and v > value_max

    low_outliers, inliers, high_outliers = [], [], []
    for pair in zip(time_data, value_data):
        if _is_low(pair[1]):
            low_outliers.append(pair)
        elif _is_high(pair[1]):
            high_outliers.append(pair)
        else:
            inliers.append(pair)

    def _as_columns(pairs):
        # Turn a list of (time, value) pairs back into a time column
        # and a value column; two empty columns when there are none.
        return zip(*pairs) if pairs else [[], []]

    return (_as_columns(inliers), 
            _as_columns(low_outliers),
            _as_columns(high_outliers))
    

def compute_y_scale_limits(computed_value_max,
                           computed_value_min,
                           inliers,
                           low_outliers,
                           high_outliers):
    """
    Decide the (max, min) limits of the value scale.

    *inliers* is a (time_column, value_column) pair; its value column
    supplies whichever limit wasn't computed.  *low_outliers* and
    *high_outliers* are accepted for interface symmetry but are not
    consulted.
    """
    values = inliers[1]
    has_max = computed_value_max is not None
    has_min = computed_value_min is not None

    if has_max and has_min:
        return computed_value_max, computed_value_min

    if not has_max and not has_min:
        if values:
            return max(values), min(values)
        return 0, 0

    if has_max:
        # Only the max was computed; derive a min below it.
        if values:
            low = min(values)
        elif computed_value_max > 0:
            low = 0
        else:
            # All solutions are bad here; 1 may be way too big of a
            # separation, but computing some epsilon based on the
            # data would make the scale's lower bound shift whenever
            # the data changes, which is weird.
            #
            # This hack is as good as any for now. Perhaps make it
            # settable in the future. (Because this tool needs more
            # switches.)
            low = computed_value_max - 1
        return computed_value_max, low

    # Only the min was computed; derive a max above it.
    if values:
        high = max(values)
    elif computed_value_min < 0:
        high = 0
    else:
        # See the lament directly above.
        high = computed_value_min + 1
    return high, computed_value_min





def draw_single(opts,
                tools,
                top_side, 
                bottom_side, 
                x_scales,
                draw_bottom_border):
    """
    Build and return one 'square' subchart plotting *top_side* and,
    when opts['bottom_column'] is supplied, *bottom_side* mirrored
    beneath it.  Values beyond the computed thresholds are trimmed;
    high outliers may be marked ("bings") at the edge of the plot.
    """
    # Some of the plots (notably the bar plot) use a range scale for
    # the time (X) scale. x_scales is a double containing both scales
    # for the plot to use as needed.
    #
    # x_scales[0] == the point scale
    # x_scales[1] == the range scale

    def add_plots_to_subchart(side, value_scale, top_bottom):
        # Add one side's plots (main sequence plus any trend or
        # variation overlays produced by new_plots).
        debug("Adding plots to subchart")
        (lbl, time_data, value_data) = side
        # Plot the main data sequence
        plots = new_plots(opts, 
                          tools, 
                          lbl, 
                          time_data, 
                          x_scales, 
                          value_data, 
                          value_scale, 
                          top_bottom)
        for p in plots:
            subchart.add_plot(p)

    def add_bings_to_subchart(outliers, scale_pos):
        # Add bings at the "top" of the chart (whatever scale_pos is)
        time_data, value_data = outliers
        debug("%d outliers" % len(value_data))
        bing_plot = tools.new_plot(
            'scatter',
            x_data=time_data,
            x_scale=x_scales[0],
            y_data=value_data,
            y_scale=scale_pos,
            marker_shape_scale=opts['outlier_marker_shape'],
            marker_color_scale=opts['outlier_marker_color'],
            marker_size_scale=opts['outlier_marker_size'])
        subchart.add_plot(bing_plot)


    def decompose(s1, s2):
        # Decompose the two sides s1 and s2 into "splits" (a pair
        # containing the new side with outliers extracted and those
        # extracted outliers). Return the splits for s1 and s2 and the
        # limits of the value (Y) scale.
        #
        # s2 may be None. If so, the split for s2 will also be None.
        #
        # If there are no inliers, the Y min and max may be None. It
        # is expected that the caller will replace them with
        # appropriate values (e.g., --value-min/max or the value of
        # --value-min/max-pct)
        def _decompose_side(s):
            lbl, time_data, val_data = s

            def _compute_min_threshold(fixedval, absval, pctval):
                # Absolute bound wins over percentile; a fixed scale
                # bound only counts when it actually trims data.
                # NOTE(review): min()/max() below assume val_data is
                # non-empty whenever fixedval is supplied -- confirm.
                if supplied(absval):
                    return absval
                elif supplied(pctval):
                    return percentile(sorted(val_data), pctval)
                elif supplied(fixedval) and fixedval > min(val_data):
                    return fixedval
                else:
                    return None

            def _compute_max_threshold(fixedval, absval, pctval):
                if supplied(absval):
                    return absval
                elif supplied(pctval):
                    return percentile(sorted(val_data), pctval)
                elif supplied(fixedval) and fixedval < max(val_data):
                    return fixedval
                else:
                    return None

            debug("Computing min value")
            computed_value_min = _compute_min_threshold(
                opts['fix_scale_min'],
                opts['value_min'], 
                opts['value_min_pct'])
            debug("Computing max value")
            computed_value_max = _compute_max_threshold(
                opts['fix_scale_max'],
                opts['value_max'], 
                opts['value_max_pct'])

            debug("Computed max value: %s" % computed_value_max)
            debug("Computed min value: %s" % computed_value_min)

            (inliers, 
             low_outliers, 
             high_outliers) = split_outliers(computed_value_max, 
                                             computed_value_min,
                                             time_data, 
                                             val_data)

            y_max_val, y_min_val = compute_y_scale_limits(
                computed_value_max,
                computed_value_min,
                inliers,
                low_outliers,
                high_outliers)
                            
            inlier_times, inlier_vals = inliers
            new_side = lbl, inlier_times, inlier_vals
 
            # Discard the low outliers, as they will not be plotted.
            return (new_side, high_outliers), y_max_val, y_min_val
        
        if s2 is None:
            split, y_max, y_min = _decompose_side(s1)
            return split, None, y_max, y_min
        else:
            splits, y_maxes, y_mins = zip(_decompose_side(s1),
                                          _decompose_side(s2))

            def maybe_minmax(minmax, pair):
                # min/max tolerating None entries: one None yields
                # the other value; both None yields None.
                a, b = pair
                if a is None and b is None: 
                    return None
                elif a is None:
                    return b
                elif b is None: 
                    return a
                else: 
                    return minmax(pair)


            return (splits[0], 
                    splits[1], 
                    maybe_minmax(max, y_maxes), 
                    maybe_minmax(min, y_mins))

    def _draw_top_bottom(subchart):
        debug("Drawing top and bottom")
        # Draw both sides. The top plot has a scale going from the
        # midpoint of the space to the top. The bottom plot has a
        # scale going from the midpoint of the space to the bottom.
        top_split, bottom_split, y_max, y_min = decompose(top_side, bottom_side)

        # Override the data limits with fixed limits, if desired
        if supplied(opts['fix_scale_max']):
            y_max = opts['fix_scale_max']
        if supplied(opts['fix_scale_min']):
            y_min = opts['fix_scale_min']

        debug("decomposed output:")
        debug("y_max, y_min: %s, %s" % (y_max, y_min))

        trimmed_top_side, top_outliers = top_split
        trimmed_bottom_side, bottom_outliers = bottom_split

        # Compose the top side (output range .5 -> 1)
        
        top_value_scale = tools.new_scale(opts['value_scale'],  
                                            input_max=y_max,
                                            input_min=y_min,
                                            output_max=1, 
                                            output_min=.5)
        add_plots_to_subchart(trimmed_top_side,
                              top_value_scale,
                              "top")

        if opts['plot_high_outliers']:
            add_bings_to_subchart(top_outliers, 
                                  top_value_scale.get_output_max())


        # Compose the bottom side (mirrored: output range .5 -> 0)

        bottom_value_scale = tools.new_scale(opts['value_scale'],
                                               input_max=y_max,
                                               input_min=y_min,
                                               output_max=0,
                                               output_min=.5)
        add_plots_to_subchart(trimmed_bottom_side,
                              bottom_value_scale,
                              "bottom")

        if opts['plot_high_outliers']:
            add_bings_to_subchart(bottom_outliers, 
                                  bottom_value_scale.get_output_max())


        decorate_two_sided(opts,
                           tools,
                           subchart,
                           trimmed_top_side,
                           trimmed_bottom_side,
                           x_scales[0],
                           top_value_scale,
                           bottom_value_scale,
                           draw_bottom_border)

    def _draw_top_only(subchart):
        debug("Drawing top only")
        # Just draw the top. One value scale, using all available
        # space.
        top_split, _, y_max, y_min = decompose(top_side, None)
        debug("decomposed output:")
        #dbg_pprint("top_split", top_split)
        debug("y_max, y_min: %s, %s" % (y_max, y_min))

        # Override the data limits with fixed limits, if desired
        if supplied(opts['fix_scale_max']):
            y_max = opts['fix_scale_max']
        if supplied(opts['fix_scale_min']):
            y_min = opts['fix_scale_min']

        trimmed_top_side, top_outliers = top_split

        value_scale = tools.new_scale(opts['value_scale'],
                                      input_min=y_min,
                                      input_max=y_max)
        add_plots_to_subchart(trimmed_top_side, value_scale, "top")

        if opts['plot_high_outliers']:
            add_bings_to_subchart(top_outliers,
                                  value_scale.get_output_max())

        decorate_one_sided(opts, 
                           tools, 
                           subchart, 
                           trimmed_top_side,
                           x_scales[0],
                           value_scale,
                           draw_bottom_border)

    subchart = tools.new_chart('square')

    if supplied(opts['bottom_column']):
        _draw_top_bottom(subchart)
    else:
        _draw_top_only(subchart)

    subchart.set_plot_background(opts['plot_background_color'])
    return subchart



def draw_multiple(opts,
                  tools,
                  charts_data,
                  x_scales):
    """Draw one framed subchart per entry in *charts_data*.

    Each subchart is wrapped in a frame with a left-hand group label.
    When opts['draw_timeline'] is set, the shared time axis is
    attached to the bottom-most subchart only. Returns a tuple of
    framed charts, in input order.
    """
    def _add_bottom_border(single_chart):
        # Attach the shared time axis (minor/major ticks) below
        # *single_chart*.
        minor_time_tickset, major_time_tickset = get_time_ticksets(
            opts, tools, x_scales[0], True)

        bottom_border = get_bottom_border(opts,
                                          tools,
                                          minor_time_tickset,
                                          major_time_tickset)

        # TODO: pad between this and rest of charts
        single_chart.add_bottom_border(bottom_border, height=.1)

    def _add_frame(lbl, single_chart):
        # Wrap *single_chart* in a two-column frame: a narrow label
        # column on the left, the chart itself on the right.
        frame = tools.new_chart(
            'tiled_adv', col_weights=[1,5])

        lbl_chart = tools.new_chart('square')
        lbl_chart.add_left_border(
            tools.new_border('hlabel',
                               label=lbl,
                               font_size=opts['group_label_size'],
                               halign='right',
                               valign='center'),
            1)
        frame.add_chart(lbl_chart)
        frame.add_chart(single_chart)

        # Padding
        frame.set_padding(tpad="10px", bpad="10px")

        return frame

    single_charts = tuple((lbl, draw_single(opts,
                                            tools,
                                            top,
                                            bottom,
                                            x_scales,
                                            False))
                          for (lbl, top, bottom) in charts_data)

    # The original's len==1 and len>1 branches were duplicated
    # verbatim; both attach the timeline to the last subchart, so the
    # cases collapse to a single emptiness check.
    if single_charts and opts['draw_timeline']:
        debug("drawing timeline")
        _, last_single_chart = single_charts[-1]
        _add_bottom_border(last_single_chart)

    return tuple(_add_frame(lbl, single_chart)
                 for (lbl, single_chart) in single_charts)





def no_data_plot(tools, title, title_size, caption):
    """Build a placeholder chart for the no-data case.

    Used when there's absolutely no data for anything and no ranges
    have been supplied, so we can't even draw an empty plot. *title*
    and *caption* may be NO_VALUE, in which case blank borders are
    used instead. Returns the assembled chart.
    """
    chart = tools.new_chart("square")
    border = tools.new_border("hlabel", "No Data")
    chart.add_bottom_border(border, height=.6)
    if title != NO_VALUE:
        title = tools.new_border("hlabel", title, font_size=title_size)
    else:
        title = tools.new_border("none")
    chart.add_top_title(title, height=.2)

    if caption != NO_VALUE:
        caption = tools.new_border("hlabel", caption)
    else:
        caption = tools.new_border("none")
    chart.add_bottom_title(caption, height=.2)

    # Fix: the original built the chart and then discarded it (the
    # trailing "TODO: draw"). Return it so the caller can render it.
    return chart







######################################################################
# Time binning
######################################################################

def choose_bin_size(start_time, end_time, opts_bin_size, target=150):
    """Choose a time-bin size for the range [start_time, end_time].

    If *opts_bin_size* was supplied by the user it wins outright.
    Otherwise pick, from a list of predetermined intervals (minimum
    one second, maximum one year), the one whose resulting bin count
    is closest to *target*.
    """
    breaks = (
        make_timedelta("PT1S"),
        make_timedelta("PT5S"),
        make_timedelta("PT10S"),
        make_timedelta("PT15S"),
        make_timedelta("PT20S"),
        make_timedelta("PT30S"),
        make_timedelta("PT1M"),
        make_timedelta("PT2M"),
        make_timedelta("PT3M"),
        make_timedelta("PT5M"),
        make_timedelta("PT10M"),
        make_timedelta("PT15M"),
        make_timedelta("PT20M"),
        make_timedelta("PT30M"),
        make_timedelta("PT1H"),
        make_timedelta("PT2H"),
        make_timedelta("PT3H"),
        make_timedelta("PT4H"),
        make_timedelta("PT6H"),
        make_timedelta("PT8H"),
        make_timedelta("PT12H"),
        make_timedelta("P1D"),
        make_timedelta("P7D"),
        make_timedelta("P1M"),
        make_timedelta("P1Y"),
        )

    def _get_num_bins(dur, bin_size):
        # Number of bin_size bins needed to cover dur, rounding up.
        zero_seconds = make_timedelta("P0D")
        div, mod = divmod_timedelta(dur, bin_size)
        if mod == zero_seconds: return div
        else                  : return div + 1

    # Note: the original versions of these helpers took an unused
    # second parameter `t` that also shadowed the key-lambda's tuple
    # element; both have been removed.
    def _gen_bins(dur):
        # Yield (bin_size, bin_count) for each candidate size.
        for bin_size in breaks:
            if bin_size > dur:
                # Since bins only get larger, there's no point in
                # continuing here.
                yield bin_size, 1
                break
            yield bin_size, _get_num_bins(dur, bin_size)

    def _search(dur):
        # Return the bin size whose bin count is closest to target.
        # (Tuple-parameter lambdas are Python-2-only, PEP 3113; use
        # subscripting instead.)
        cand = min(_gen_bins(dur), key=lambda bn: abs(target - bn[1]))
        return cand[0]

    if opts_bin_size != NO_VALUE:
        return opts_bin_size
    else:
        assert(end_time >= start_time)
        if end_time == start_time:
            # Heyo, a zero-width range! Give them the smallest bin
            # size for their (likely) one-element data series.
            return breaks[0]

        return _search(end_time - start_time)

def decipher_time_bounds(
    opts_start_time,
    opts_end_time,
    data_start_time,
    data_end_time,
    is_binned,
    opts_bin_size):
    """Resolve the chart's time range and bin size.

    Combines the user-supplied bounds (opts_*) with the observed data
    bounds. Returns a (start_time, end_time, bin_size) triple;
    bin_size is None for unbinned data, and all three are None when
    there is no data and fewer than both bounds were fixed by the
    user.
    """
    # NOTE: All the _time_bounds_* functions assume is_binned.

    def _time_bounds_fixed(bin_size):
        # Both bounds fixed by the user: they must already lie exactly
        # on bin boundaries; user-specified bounds are never shifted
        # silently.
        if opts_start_time > opts_end_time:
            raise ToolRuntimeError(
                "--start-time occurs after --end-time")
        else:
            binned_start = bin_datetime(bin_size, opts_start_time)
            binned_end = bin_datetime(bin_size, opts_end_time)
            if binned_start != opts_start_time or binned_end != opts_end_time:
                if supplied(opts_bin_size):
                    raise ToolRuntimeError(
                        "Can't cleanly bin --start-time and --end-time "
                        "into --bin-size")
                else:
                    raise ToolRuntimeError(
                        "Can't cleanly bin --start-time and --end-time")
            else:
                return opts_start_time, opts_end_time, bin_size


    def _time_bounds_fixed_start(bin_size):
        # Start fixed, end taken from the data, extended by one bin.
        # NOTE(review): assumes bin_datetime's optional third argument
        # is the binning origin -- confirm against its definition.
        return (opts_start_time,
                bin_datetime(bin_size,
                             data_end_time,
                             opts_start_time) + bin_size,
                bin_size)

    def _time_bounds_fixed_end(bin_size):
        # End fixed, start taken from the data.
        return (bin_datetime(bin_size, data_start_time, opts_end_time),
                opts_end_time,
                bin_size)

    def _time_bounds_unfixed(bin_size):
        # Neither bound fixed: derive both from the data, extending
        # the end by one bin.
        return (bin_datetime(bin_size, data_start_time),
                bin_datetime(bin_size, data_end_time) + bin_size,
                bin_size)

    bin_size = choose_bin_size(data_start_time,
                               data_end_time,
                               opts_bin_size)


    if supplied(opts_start_time) and supplied(opts_end_time):
        if is_binned: return _time_bounds_fixed(bin_size)
        else        : return opts_start_time, opts_end_time, None
    elif data_start_time is None or data_end_time is None:
        # No data at all, and at most one user-supplied bound: no
        # range can be constructed.
        return None, None, None
    elif supplied(opts_start_time):
        if is_binned: return _time_bounds_fixed_start(bin_size)
        else        : return opts_start_time, data_end_time, None
    elif supplied(opts_end_time):
        if is_binned: return _time_bounds_fixed_end(bin_size)
        else        : return data_start_time, opts_end_time, None
    else:
        if is_binned: return _time_bounds_unfixed(bin_size)
        else        : return data_start_time, data_end_time, None
        


class Putback(object):
    """An iterable that chains *head* (a list of items that were
    consumed from a stream and pushed back) in front of *tail* (the
    remaining iterator or sequence). Used by span() to return the
    unconsumed remainder of a stream."""

    def __init__(self, head, tail):
        self.head = head
        self.tail = tail

    def internal_iter(self):
        # Yield (item, remaining_head, tail) triples: while walking
        # the head, remaining_head is the not-yet-yielded suffix of
        # head; once into the tail, it is empty.
        for idx, item in enumerate(self.head):
            yield item, self.head[idx + 1:], self.tail
        for item in self.tail:
            yield item, [], self.tail

    def __iter__(self):
        for item in self.head:
            yield item
        for item in self.tail:
            yield item

    def __repr__(self):
        # Fix: the original said "_Putback", a stale name that no
        # longer matches the class.
        return "Putback(%s, %s)" % (str(self.head), str(self.tail))


def span(predicate, stream):
    """
    Partition a stream into a pair. The first element is a tuple
    containing the longest prefix (possibly empty) of *stream* whose
    elements satisfy *predicate*. The second element is the
    remainder of the stream, starting with the first element that does
    not satisfy *predicate*.

    *predicate* is a function that takes one argument (an item in the
    stream) and returns ``True`` or ``False`` to indicate if the
    predicate is satisfied.

    Inspired by the Haskell ``Data.List.span`` function.

    An example of use::

       >>> prefix, rest = span(lambda x: x == 1, (1,1,2,3,4))
       >>> prefix, tuple(rest)
       ((1, 1), (2, 3, 4))
    """
    matched = []
    if isinstance(stream, Putback):
        # A Putback knows its own head/tail split; use it so the
        # returned remainder preserves the underlying iterator.
        for head, tail, terminal in stream.internal_iter():
            if not predicate(head):
                return tuple(matched), Putback([head] + tail, terminal)
            matched.append(head)
    else:
        # take an iterator; the "stream" may be a collection
        remainder = iter(stream)
        for item in remainder:
            if not predicate(item):
                return tuple(matched), Putback([item], remainder)
            matched.append(item)

    # Every element satisfied the predicate.
    return tuple(matched), tuple()








def rebin_single(time_col, val_col, start_time, end_time, bin_size):
    """Rebin a single time series into (times, values) columns.

    *start_time* and *end_time* are datetimes such that for all x in
    the range of bins, ``start_time <= x < end_time``. The bins will
    start on *start_time* exactly and end with the last bin that
    does not contain end_time, assuming bin_size sizes. Callers
    should ensure that *start_time* and *end_time* are aligned
    appropriately. Values falling in the same bin are summed.
    """
    def _binned_pairs(b_start, b_end, b_size):
        # Generate a stream of (time, summed-value) pairs from the
        # data in time_col and val_col, using b_start, b_end and
        # b_size to compute the desired bins.
        def _bin_iter():
            # A generator of bins, as half-open pairs (s, e). For all
            # observations o in the bin, s <= o < e.
            a = b = b_start
            b += b_size
            yield a, b
            while b < b_end:
                a = b
                b += b_size
                yield a, b

        # Fix: the originals used tuple-parameter unpacking
        # (``lambda (t, _): ...``), which is Python-2-only syntax
        # (PEP 3113); subscripting is equivalent.
        def _bound(n):
            return lambda pair: pair[0] < n
        def _sum_pairs(acc, pair):
            return acc + pair[1]

        pairs = izip(time_col, val_col)
        for s, e in _bin_iter():
            # Run off any leading observations that aren't in this bin
            # (bound(s)), and capture all the rest of the
            # observations that are (bound(e)).
            my_pairs, rest = span(
                _bound(e),
                span(_bound(s), pairs)[1])
            yield s, reduce(_sum_pairs, my_pairs, 0)
            pairs = rest

    # Roll the (time, val) pairs into a pair of columns (time, val),
    # like the input.
    the_pairs = tuple(_binned_pairs(start_time, end_time, bin_size))
    return tuple(zip(*the_pairs))
    


def rebin_all(charts_data, start_time, end_time, bin_size):
    """Rebin every series in *charts_data*; the remaining arguments
    are forwarded to rebin_single."""
    def _rebin_side(side_data):
        side, times, values = side_data
        rebinned_times, rebinned_values = rebin_single(
            times, values, start_time, end_time, bin_size)
        return side, rebinned_times, rebinned_values

    def _to_apply(lbl, top, bottom):
        return lbl, _rebin_side(top), _rebin_side(bottom)

    return apply_to_all_subcharts(_to_apply, charts_data)




def filter_all(charts_data, predicate):
    """Filter the data in *charts_data* according to *predicate*.

    *predicate* takes a time and a value and returns ``True`` if the
    pair should be kept, ``False`` if it should be dropped. Returns a
    *charts_data*-type structure with all failing pairs removed.
    """
    def _filter_side(side, time_col, val_col):
        kept = [pair for pair in zip(time_col, val_col)
                if predicate(*pair)]
        if kept:
            new_times, new_values = zip(*kept)
        else:
            # No survivors (or no input at all): empty columns.
            new_times, new_values = tuple(), tuple()
        return side, new_times, new_values

    def _to_apply(lbl, top, bottom):
        return lbl, _filter_side(*top), _filter_side(*bottom)

    return apply_to_all_subcharts(_to_apply, charts_data)

    

           
def pad_chart(chart, opts):
    """Apply the user's padding options to *chart*.

    --padding (allpad) is mutually exclusive with the per-side
    --pad-* options; if no padding option is supplied at all, a set
    of defaults is used. Raises ValueError on conflicting options.

    Fixes: removed the dead `spec_from_opt` helper (defined but never
    called) and the Python-2-only, long-deprecated `dict.has_key`.
    """
    pad_opts = dict()
    for argname, optname in (('allpad','padding'),
                             ('tpad', 'pad_top'), ('bpad', 'pad_bottom'),
                             ('lpad', 'pad_left'), ('rpad', 'pad_right')):
        if opts[optname] is not None:
            pad_opts[argname] = "%dpx" % opts[optname]

    if 'allpad' in pad_opts and len(pad_opts) > 1:
        raise ValueError("Cannot specify both --padding "
                         "and other --pad-* options")

    # Are any of the values set? Then use the user-supplied opts
    if len(pad_opts) == 0:
        chart.set_padding(lpad="25px", rpad="25px", tpad="15px", bpad="25px")
    else:
        chart.set_padding(**pad_opts)
    return

       
def main(opts):
    """Tool entry point.

    Validates options, loads and sorts the input data, selects one of
    the three parsing modes (legacy / functional / relational), bins
    and scales the series, draws the chart(s), and writes the output
    page. Returns 0 on success.

    Fixes: opts['classifer_fn'] typo (KeyError at runtime; the
    correct key, used elsewhere, is 'classifier_fn'), and the
    no_data_plot() call, which omitted the title_size argument
    (TypeError on the empty-data path).
    """
    if opts['run_tests']:
        runtests()
        return

    debug("Checking options")
    #if opts['style'] in ('bars', 'filled_lines'):
    if opts['style'] == 'bars':
        if not supplied(opts['bin_size']):
            raise RuntimeError("Must specify --bin-size to draw '%s' style" %
                               opts['style'])
    else:
        debug("opts['style'] is %s" % opts['style'])
        debug("opts['bin_size'] is %s" % opts['bin_size'])
        debug("supplied(opts['bin_size']) is %s" % supplied(opts['bin_size']))

    debug("Starting")
    tools = toolbox.Toolbox.for_file()

    debug("Fetching data")
    debug("first_line_colnames is %s" % opts['first_line_colnames'])
    input_data = tools.new_dataset_from_stream(
        istream_from_str(opts['input_path']),
        first_line_is_colnames=opts['first_line_colnames'])

    debug("Column names: %s" % str(input_data.get_column_names()))


    # Sort data. Some of the logic below relies on the data being
    # sorted.
    if not opts['presorted_input']:
        # TODO: validate that input is sorted?
        debug("Input is unsorted. Sorting.")
        input_data.sort(key=lambda r: r[opts['time_column']])
    else:
        debug("Input is pre-sorted.")


    # Figure out how to parse data
    input_options_usage = dedent(
        """|Ambiguous options. Valid sets of classifying options are:
           |
           |   Relational:
           |      --group-by
           |      --top-column
           |      --bottom-column
           |      --top-filter
           |      --bottom-filter
           |
           |   Functional:
           |      --classifier-file
           |      --classifier-fn
           |      --initializer-fn
           |
           |   Legacy:
           |      --legacy
           |   
           |   For more info, see the rytimeseries man page.""")

    # If the user specifies only --bottom-column, it's an error. If
    # the user specified neither, assume they want --top-column=1, no
    # --bottom-column. (This is a basic 1-sided time series.)
    if not supplied(opts['top_column']):
        if not supplied(opts['bottom_column']):
            # Supplied neither
            opts['top_column'] = 1
        else:
            # Supplied bottom, but no top. Move bottom to top.
            raise ValueError(
                "Cannot specify --bottom-column without --top-column")


    if opts['legacy']:
        # Legacy mode: no other classifying options may be present.
        for o in """classifier_module
                    classifier_fn
                    initializer_fn
                    group_by
                    top_column
                    bottom_column
                    top_filter
                    bottom_filter""".split():
            if o in opts and supplied(opts[o]):
                raise ValueError(input_options_usage)

        debug("Parsing data with legacy parser")
        charts_data = parse_data_columnar(input_data,
                                          opts['time_column'],
                                          opts['top_label'],
                                          opts['bottom_label'])
    elif supplied(opts['classifier_module']):
        debug("Parsing data with functional parser")
        for o in """group_by
                    top_column
                    bottom_column
                    top_filter
                    bottom_filter""".split():
            if o in opts and supplied(opts[o]):
                raise ValueError(input_options_usage)

        parser_file = open(opts['classifier_module'])

        try:
            # Fix: was opts['classifer_fn'] (misspelled key).
            init, classify = get_classifier(parser_file,
                                            opts['classifier_fn'],
                                            opts['initializer_fn'])
        finally:
            parser_file.close()

        charts_data = parse_data_functional(input_data,
                                            init,
                                            classify,
                                            opts['top_label'],
                                            opts['bottom_label'])
    else:
        debug("Parsing data with relational parser")

        if "classifier_fn" in opts and supplied(opts['classifier_fn']):
            raise ValueError(input_options_usage)
        if "initializer_fn" in opts and supplied(opts['initializer_fn']):
            raise ValueError(input_options_usage)

        # Get mapping between subchart names (in input data) and
        # user-visible labels on the visualization (herewith, the
        # plot_label_map)
        if supplied(opts['show']):
            if supplied(opts['labels']):
                if len(opts['show']) != len(opts['labels']):
                    raise RuntimeError(
                        "If supplied, --labels must have equal length "
                        "to --show")
                else:
                    plot_label_map = zip(opts['show'], opts['labels'])
            else:
                plot_label_map = zip(opts['show'], opts['show'])
        else:
            if supplied(opts['labels']):
                print >>sys.stderr, (
                    "Warning: --labels will have no effect without --show")
            plot_label_map = None


        charts_data = parse_data_relational(input_data,
                                            plot_label_map,
                                            opts['time_column'],
                                            opts['group_by'],
                                            opts['top_column'],
                                            opts['bottom_column'],
                                            opts['top_filter'],
                                            opts['bottom_filter'],
                                            opts['top_label'],
                                            opts['bottom_label'])

    # Check types for charts_data. It would be more efficient to check
    # input_data, but all the default options and such are inferred in
    # the process of building charts_data, so we do it here.
    def _check_col_type(colname, col, types):
        if len(col) == 0:
            debug("no data in '%s'" % colname)
        elif not isinstance(col[0], types):
            raise TypeError("Unexpected type for %s column: '%s'" %
                               (colname, type(col[0]).__name__))
        else:
            debug("%s is of type '%s', as expected" %
                  (colname, type(col[0]).__name__))

    for (lbl,
         (top_lbl, top_times, top_vals),
         (btm_lbl, btm_times, btm_vals)) in charts_data:
        _check_col_type('top time', top_times, datetime.datetime)
        _check_col_type('top value', top_vals, (int, float))
        _check_col_type('bottom time', btm_times, datetime.datetime)
        _check_col_type('bottom value', btm_vals, (int, float))

    # Identify the earliest and latest points in the whole
    # dataset. (Note that we're relying here on the data having
    # already been sorted.)
    def _get_min_max(times, vals):
        if len(times) == 0: return None
        else              : return [times[0], times[-1]]

    time_bounds = map_series(_get_min_max, charts_data)
    data_time_start = min(t[0] for t in time_bounds if t is not None)
    data_time_end = max(t[1] for t in time_bounds if t is not None)


    debug("start: %s" % str(data_time_start))
    debug("end  : %s" % str(data_time_end))


    # If --bin-size is specified, input will be binned. If
    # --style=(bars|filled_lines), input must be binned, so --bin-size
    # is a required argument. Therefore, --bin-size must always be
    # specified if we have binned data.
    is_binned = supplied(opts['bin_size'])


    # Compute the default height based on the number of subcharts
    if not supplied(opts['height']):
        opts['height'] = opts['height_per_subchart'] * len(charts_data)

    # Generate a time scale to be shared across all the plots.
    start_time, end_time, bin_size = decipher_time_bounds(
        opts['start_time'],
        opts['end_time'],
        data_time_start,
        data_time_end,
        is_binned,
        opts['bin_size'])


    if start_time is None:
        # No input data. Fix: the original call omitted
        # opts['title_size'], so the caption landed in the title_size
        # parameter and the call raised TypeError.
        no_data_plot(tools, opts['title'], opts['title_size'],
                     opts['caption'])
        return 0

    # TODO: It might be necessary to change how TimeBinnedScale
    # handles end times.
    if is_binned:
        time_range_scale = tools.new_scale('timebins',
                                             input_min=start_time,
                                             input_max=end_time,
                                             bin_size=bin_size)
        time_point_scale = time_range_scale.to_time_scale()
    else:
        time_range_scale = None
        time_point_scale = tools.new_scale('time',
                                             input_min=start_time,
                                             input_max=end_time)

    x_scales = (time_point_scale, time_range_scale)

    if is_binned:
        # Bin everything to make sure it's all on the same bin
        # size. (But skip if the user says everything's binned up
        # already.)
        if not opts['prebinned_input']:
            debug("Binning data")
            charts_data = rebin_all(charts_data,
                                    start_time=start_time,
                                    end_time=end_time,
                                    bin_size=bin_size)
        else:
            debug("Data is pre-binned")
    else:
        debug("Will not bin data")

    # if supplied(opts['value_min']) or supplied(opts['value_max']):
    #     debug("Trimming data")
    #     if supplied(opts['value_min']):
    #         min_pred = lambda t, v: v >= opts['value_min']
    #     else:
    #         min_pred = lambda t, v: True

    #     if supplied(opts['value_max']):
    #         max_pred = lambda t, v: v <= opts['value_max']
    #     else:
    #         max_pred = lambda t, v: True

    #     pred = lambda t, v: min_pred(t, v) and max_pred(t, v)
    #     charts_data = filter_all(charts_data, pred)
    # else:
    #     debug("Will not trim data")



    if len(charts_data) == 1:
        chart = tools.new_chart('square')
        if opts['draw_as_multiple']:
            # Draw as a multi-subchart visualization, with the title
            # and spacing on the left.
            subchart = draw_multiple(opts,
                                     tools,
                                     charts_data,
                                     x_scales)[0]

        else:
            # Draw as a singleton visualization.
            label, top, bottom = charts_data[0]
            subchart = draw_single(opts,
                                   tools,
                                   top,
                                   bottom,
                                   x_scales,
                                   opts['draw_timeline'])
        chart.add_chart(subchart)
    else:
        # Many subcharts.
        chart = tools.new_chart('tiled',
                                  num_rows=len(charts_data))
        subcharts = draw_multiple(opts,
                                  tools,
                                  charts_data,
                                  x_scales)

        for s in subcharts:
            chart.add_chart(s)



    # add title, caption, padding, etc.
    pad_chart(chart, opts)

    if supplied(opts['title']):
        title = tools.new_border('hlabel',
                                 label=opts['title'],
                                 font_size=opts['title_size'],
                                 bpad=.10,
                                 valign="top")
        chart.add_top_title(title, height=.15)

    if supplied(opts['caption']):
        title = tools.new_border('hlabel', label=opts['caption'], tpad=.05)
        chart.add_bottom_title(title, height=.075)


    chart.set_chart_background(opts['chart_background_color'])


    # TODO: draw
    page = tools.new_page_from_filename(
        opts['output_path'], opts['width'], opts['height'])
    page.write(chart)


    return 0




##################################################
# Tests
##################################################

import rayon.data
import unittest
import textwrap
from StringIO import StringIO
import tempfile




# Fixed reference datetime used as the default stream start throughout
# the tests below.
origin = make_datetime("2000/01/01T00:00:00")


def seconds(n):
    """A timedelta of *n* seconds."""
    return timedelta(seconds=n)

def minutes(n):
    """A timedelta of *n* minutes."""
    return timedelta(minutes=n)

def hours(n):
    """A timedelta of *n* hours."""
    return timedelta(hours=n)

def days(n):
    """A timedelta of *n* days."""
    return timedelta(days=n)

def weeks(n):
    """A timedelta of *n* weeks."""
    return timedelta(weeks=n)


def dstr(d):
    """Format datetime *d* as YYYY/MM/DDTHHMMSSZ."""
    return d.strftime("%Y/%m/%dT%H%M%SZ")


class StreamSource(object):
    def __init__(self, fn): self._fn = fn
    def __call__(self): return self._fn()
    def __add__(self, right):
        return StreamSource(fn=lambda: right(self()))
    def __iter__(self):
        return self()

class StreamProcessor(object):
    """A stream transformer: called with an iterable, returns a new
    iterable. Processors chain with ``+`` into a single processor
    that applies the left operand first, then the right."""

    def __init__(self, fn):
        self._fn = fn

    def __call__(self, xs):
        return self._fn(xs)

    def __add__(self, right):
        # Composition: (self + right)(xs) == right(self(xs)).
        return StreamProcessor(fn=lambda xs: right(self(xs)))

class every(StreamSource):
    """Source yielding an endless run of datetimes spaced *dur* apart,
    beginning at *starting* (default: the module-level origin). *dur*
    may be an ISO-8601 duration string."""

    def __init__(self, dur, starting=None):
        if isinstance(dur, basestring):
            dur = make_timedelta(dur)
        if starting is None:
            start_at = origin
        else:
            start_at = make_datetime(starting)

        def fn():
            # Bind a fresh local each call so repeated iterations of
            # the source restart from the beginning (Python's scoping
            # would otherwise share state across streams).
            current = start_at
            while True:
                yield current
                current += dur

        StreamSource.__init__(self, fn)

class cycling(StreamProcessor):
    """Processor that repeats its input stream forever.

    Fix: this originally declared ``StreamSource`` as its base while
    initializing via ``StreamProcessor.__init__`` with a one-argument
    fn -- so ``+`` composition (inherited from StreamSource) called
    the fn with no arguments and failed. It is a processor; subclass
    StreamProcessor.
    """
    def __init__(self):
        def fn(xs):
            return cycle(xs)
        StreamProcessor.__init__(self, fn)


class where(StreamProcessor):
    """Processor that keeps only the elements satisfying *pred*."""
    def __init__(self, pred):
        StreamProcessor.__init__(
            self, lambda xs: (x for x in xs if pred(x)))

class until(StreamProcessor):
    """Processor that truncates the stream at the first value >=
    base + *dur*, where base is *start* (parsed as a datetime) or the
    module-level origin when *start* is None. *dur* may be an
    ISO-8601 duration string."""

    def __init__(self, dur, start=None):
        if isinstance(dur, basestring):
            dur = make_timedelta(dur)
        base = origin if start is None else make_datetime(start)
        end = base + dur

        def fn(xs):
            # Exclusive cutoff: values equal to end are dropped.
            return takewhile(lambda x: x < end, xs)

        StreamProcessor.__init__(self, fn)

class until_including(StreamProcessor):
    """Processor that truncates the stream at the first value > the
    cutoff (i.e. the cutoff itself is kept, unlike ``until``).

    Fix: when *start* was supplied, the original computed
    ``start + dur`` and discarded the result, leaving ``end`` unbound
    (NameError) -- and never parsed *start*. Mirror ``until``.
    """
    def __init__(self, dur, start=None):
        if isinstance(dur, basestring):
            dur = make_timedelta(dur)
        if start is None:
            end = origin + dur
        else:
            end = make_datetime(start) + dur
        def fn(xs):
            return takewhile(lambda x: x <= end, xs)
        StreamProcessor.__init__(self, fn)

class excluding(StreamProcessor):
    """Processor dropping every element found in *exclusions*."""
    def __init__(self, exclusions):
        # Materialize once up front: set membership is O(1), and
        # exclusions must not be an infinite stream. (Easier to
        # forget than you might think.)
        banned = set(exclusions)

        def fn(xs):
            return (x for x in xs if x not in banned)

        StreamProcessor.__init__(self, fn)

class as_interval(StreamProcessor):
    """Processor turning (a, b, c, d, ...) into consecutive pairs
    ((a, b), (b, c), (c, d), ...).

    TODO: (maybe) - support streams with 0 or 1 element.
    """
    def __init__(self):
        def fn(xs):
            it = iter(xs)
            prev = next(it)
            for cur in it:
                yield prev, cur
                prev = cur
        StreamProcessor.__init__(self, fn)

def time_tick_test(duration, major_ticks, minor_ticks, label, dbg=False):
    """Build a TestCase method that checks gen_time_ticks over
    [origin, origin + duration] against the expected tick streams and
    the expected label for a fixed sample date."""
    # The minor ticks should never include the major ticks.
    minor_ticks += excluding(major_ticks())
    sample_date = make_datetime("2000/01/02T12:34:56")

    def t(self):
        out_minor, out_major, labeler = gen_time_ticks(
            origin, origin + duration)
        checks = ((minor_ticks(), out_minor),
                  (major_ticks(), out_major))
        for expected_stream, actual_stream in checks:
            for exp, act in zip(expected_stream, actual_stream):
                self.assertEqual(exp, act)
        self.assertEqual(labeler(sample_date), label)

    return t

    
class t_gen_time_ticks(unittest.TestCase):
    # Table-driven tests for gen_time_ticks: each case supplies the
    # expected major/minor tick streams (built with the stream DSL
    # defined above) plus the label expected for a fixed sample date,
    # at a given chart duration.

    # 24 hours: minor ticks hourly, major every 4h, HH:MM labels.
    test_hour_scale_1 = time_tick_test(
        duration=hours(24),
        major_ticks=every(hours(4)) + until(days(1)),
        minor_ticks=every(hours(1)) + until(days(1)),
        label="12:34")

    # Two weeks: minor every 4h, major daily, MM/DD labels.
    test_day_scale_1 = time_tick_test(
        duration=days(14),
        major_ticks=every(hours(24)) + until(days(14)),
        minor_ticks=every(hours(4)) + until(days(14)),
        label="01/02")

    # Two months: minor daily, major on Sundays (weekday() == 6).
    test_month_scale_1 = time_tick_test(
        duration=days(60),
        major_ticks=(every(days(1))
                     + where(lambda d: d.weekday() == 6)
                     + until(days(60))),
        minor_ticks=every(days(1)) + until(days(60)),
        label="01/02")

    # Four months: minor on Sundays, major on the 1st, YYYY/MM labels.
    test_longer_scale_1 = time_tick_test(
        duration=days(120),
        major_ticks=(every(days(1))
                     + where(lambda d: d.day == 1)
                     + until(days(120))),
        minor_ticks=(every(days(1))
                     + where(lambda d: d.weekday() == 6)
                     + until(days(120))),
        label="2000/01")
           

 
def charts_data_from_parse_test_format(str_d):
    # Create dataset from special text input format used in parse
    # tests. Mostly this means handling some strings indicating no
    # data properly.
    #
    # Put another way, turns this:
    #
    #   """|a|foo|0,1,2|10,20,30|bar|4,5,6|40,50,60
    #      |b|   |0,1,4|10,20,10|   |     |        """
    #
    # Into this:
    #
    # (('a', ('foo', (0, 1, 2), (10, 20, 30)), 
    #        ('bar', (4, 5, 6), (40, 50, 60))),
    #  ('b', ("" , (0, 1, 4), (10, 20, 10)),
    #        ("" , (,)      , (,))))
    #
    # Note that this function only handles int values.
    def to_tuple(converter):
        # Build a cell converter: a blank cell becomes an empty tuple,
        # otherwise the cell is split on commas and each piece is run
        # through *converter*.
        def fn(i):
            try:
                if i.strip() == "":
                    return tuple()
                else:
                    return tuple(converter(x.strip()) 
                                 for x in i.split(","))
            except:
                # Diagnostic breadcrumb only; the exception is
                # re-raised unchanged.
                print "oops"
                raise
        return fn
    # Columns: key, then (label, times, values) twice (top and bottom).
    d = rayon.data.Dataset.from_string(
        dedent(str_d),
        typemap=(str,
                 str, to_tuple(int), to_tuple(int),
                 str, to_tuple(int), to_tuple(int)))

    tuples = tuple(tuple(row) for row in d)
    # Divvy up into key, time series and data series
    return tuple((t[0], t[1:4], t[4:]) for t in tuples)



class t_parse_data_columnar(unittest.TestCase):
    # Tests for parse_data_columnar. Each test_* attribute below is a
    # test method manufactured by _.passes_1 from an input dataset
    # string, an expected-output string (in the format accepted by
    # charts_data_from_parse_test_format) and the parser's keyword
    # arguments.
    class _(object):
        @staticmethod
        def passes_1(str_d_in, str_d_out, **kwargs):
            dataset = rayon.data.Dataset.from_string(dedent(str_d_in))
            expected = charts_data_from_parse_test_format(str_d_out)
            def test_(self):
                actual = parse_data_columnar(dataset, **kwargs)
                self.assertEqual(expected, actual)
            return test_

    # Basic test
    test_1 = _.passes_1(
        """|#time|foo_in|foo_out
           |    1|    10|    100
           |    2|    20|    200
           |    3|    30|    300""",
        """|foo|in|1,2,3|10,20,30|out|1,2,3|100,200,300""",
        time_colname="time",
        top_colname="in",
        bottom_colname="out")

    # Works with more than one set of series
    test_multi_1 = _.passes_1(
        """|#time|foo_up|foo_down|bar_up|bar_down
           |    1|    10|     100|  1000|   10000
           |    2|    20|     200|  2000|   20000
           |    3|    30|     300|  3000|   30000""",
        """|foo|up|1,2,3|10,20,30      |down|1,2,3|100,200,300
           |bar|up|1,2,3|1000,2000,3000|down|1,2,3|10000,20000,30000""",
        time_colname="time",
        top_colname="up",
        bottom_colname="down")

    # Works with a different name and position for the time column
    test_time_col_1 = _.passes_1(
        """|#bar_black|bar_white|timestamp
           |         1|       10|      100
           |         2|       20|      200
           |         3|       30|      300""",
        """|bar|black|100,200,300|1,2,3|white|100,200,300|10,20,30""",
        time_colname="timestamp",
        top_colname="black",
        bottom_colname="white")

    # Header-only input yields empty output
    test_empty_1 = _.passes_1(
        """|#t|foo_in|foo_out""",
        """ """,
        time_colname="t",
        top_colname="in",
        bottom_colname="out")

class t_parse_data_relational(unittest.TestCase):
    # Tests for parse_data_relational: rolling relational rows up into
    # per-key charts data. Test methods are generated by _.passes_1.
    class _(object):
        @staticmethod
        def passes_1(str_d_in, str_d_out, **kargs):
            # Build a test method asserting that parsing str_d_in with
            # **kargs produces the charts data described by str_d_out.
            d_in = rayon.data.Dataset.from_string(dedent(str_d_in))
            d_out = charts_data_from_parse_test_format(str_d_out)
            def test_(self):
                rc = parse_data_relational(d_in, **kargs)
                if d_out != rc:
                    # Debugging aid: dump both sides before the
                    # assertion fires. (print(...) with a single
                    # argument behaves identically under Python 2's
                    # print statement and Python 3's function.)
                    print("-----expected:")
                    pprint(d_out)
                    print("-----actual:")
                    pprint(rc)
                # FIX: was the deprecated alias assertEquals.
                self.assertEqual(d_out, rc)
            return test_


    d_in_str_1 = """|#time|sip|sport|bytes|packets
                    |0    |a  |1    |10   |123
                    |1    |a  |1    |20   |234
                    |2    |a  |1    |30   |125
                    |0    |b  |1    |10   |175
                    |1    |b  |1    |20   |500
                    |4    |b  |2    |10   |42"""

    d_in_str_2 = """|#time|sip|sport|sensor|bytes|packets
                    |0    |a  |1    |A     |10   |123
                    |1    |a  |1    |A     |20   |234
                    |2    |a  |1    |A     |30   |125
                    |0    |a  |1    |B     |10   |123
                    |1    |a  |1    |B     |20   |234
                    |2    |a  |1    |B     |30   |195
                    |0    |b  |1    |A     |10   |175
                    |1    |b  |1    |A     |20   |500
                    |0    |b  |1    |B     |10   |175
                    |1    |b  |1    |B     |20   |502
                    |4    |b  |2    |A     |10   | 42
                    |4    |b  |2    |B     |10   | 42"""

    d_in_str_3 = """|#time|bytes
                    |0    |10   
                    |1    |20   
                    |2    |30   
                    |0    |10   
                    |1    |20   
                    |4    |10"""


    # Roll up the data on sip, top half shows 'bytes', no bottom half

    test_1half_1 = _.passes_1(
        d_in_str_1,
        """|a| |0,1,2|10,20,30| | | 
           |b| |0,1,4|10,20,10| | | """,
        time_colname="time",
        key_colnames=("sip",),
        plot_label_map=None,
        top_colname="bytes",
        bottom_colname=NO_VALUE,
        top_filter=NO_VALUE,
        bottom_filter=NO_VALUE,
        top_label="",
        bottom_label="",)

    # Tests that bottom_filter is ignored if bottom_colname == NO_VALUE

    test_1half_2 = _.passes_1(
        d_in_str_1,
        """|a| |0,1,2|10,20,30| | | 
           |b| |0,1,4|10,20,10| | | """,
        time_colname="time",
        key_colnames=("sip",),
        plot_label_map=None,
        top_colname="bytes",
        bottom_colname=NO_VALUE,
        top_filter=NO_VALUE,
        bottom_filter=None, # NOTE: invalid value for bottom_filter
        top_label="",
        bottom_label="",)


    # Don't roll up the data, just put all of time and 'bytes' in top
    # half, no bottom half. (NOTE: no sorting, aggregating or deduping
    # takes place. There are multiple obervations for the same time
    # value, get over it.)

    test_1half_nokey_1 = _.passes_1(
        d_in_str_1,
        """| | |0,1,2,0,1,4|10,20,30,10,20,10| | | """,
        time_colname="time",
        key_colnames=tuple(),
        plot_label_map=None,
        top_colname="bytes",
        bottom_colname=NO_VALUE,
        top_filter=NO_VALUE,
        bottom_filter=NO_VALUE,
        top_label="",
        bottom_label="")

    # Main usage scenario #1: Value data for the top and bottom are in
    # different columns.

    test_group_by_1 = _.passes_1(
        d_in_str_1,
        """|a|Bytes|0,1,2|10,20,30|Packets|0,1,2|123,234,125
           |b|Bytes|0,1,4|10,20,10|Packets|0,1,4|175,500,42""",
        time_colname="time",
        key_colnames=("sip",),
        plot_label_map=None,
        top_colname="bytes",
        bottom_colname="packets",
        top_filter=NO_VALUE,
        bottom_filter=NO_VALUE,
        top_label="Bytes",
        bottom_label="Packets")


    # Main usage scenario #2: Value data for top and bottom are in the
    # same columns, but use different data via top-filter and
    # bottom-filter

    test_group_by_2 = _.passes_1(
        d_in_str_2,
        """|a|A|0,1,2|123,234,125|B|0,1,2|123,234,195
           |b|A|0,1,4|175,500,42 |B|0,1,4|175,502,42""",
        time_colname="time",
        key_colnames=("sip",),
        plot_label_map=None,
        top_colname="packets",
        bottom_colname="packets",
        top_filter="sensor==A", #lambda r: r.sensor == 'A',
        bottom_filter="sensor==B", #lambda r: r.sensor == 'B',
        top_label="A",
        bottom_label="B")

    # Header-only input yields empty output
    test_empty_1 = _.passes_1(
        """|#t|foo|bar""",
        """ """,
        time_colname="t",
        key_colnames=("bar",),
        plot_label_map=None,
        top_colname="foo",
        bottom_colname=NO_VALUE,
        top_filter=NO_VALUE,
        bottom_filter=NO_VALUE,
        top_label="",
        bottom_label="",)

    # plot_label_map renames keys in the output...
    test_plot_label_map_1 = _.passes_1(
        d_in_str_1,
        """|Alpha|Bytes|0,1,2|10,20,30|Packets|0,1,2|123,234,125
           |Beta|Bytes|0,1,4|10,20,10|Packets|0,1,4|175,500,42""",
        time_colname="time",
        key_colnames=("sip",),
        plot_label_map=(('a', 'Alpha'), ('b', 'Beta')),
        top_colname="bytes",
        bottom_colname="packets",
        top_filter=NO_VALUE,
        bottom_filter=NO_VALUE,
        top_label="Bytes",
        bottom_label="Packets")

    # ...and its order determines the order of the output plots.
    test_plot_label_map_2 = _.passes_1(
        d_in_str_1,
        """|Beta|Bytes|0,1,4|10,20,10|Packets|0,1,4|175,500,42
           |Alpha|Bytes|0,1,2|10,20,30|Packets|0,1,2|123,234,125""",
        time_colname="time",
        key_colnames=("sip",),
        plot_label_map=(('b', 'Beta'), ('a', 'Alpha')),
        top_colname="bytes",
        bottom_colname="packets",
        top_filter=NO_VALUE,
        bottom_filter=NO_VALUE,
        top_label="Bytes",
        bottom_label="Packets")

    # Column indices work in place of names; an empty key_colnames list
    # behaves like an empty tuple.
    test_empty_params_1 = _.passes_1(
        d_in_str_3,
        """| | |0,1,2,0,1,4|10,20,30,10,20,10| | | """,
        time_colname=0,
        key_colnames=[],
        plot_label_map=None,
        top_colname=1,
        bottom_colname=NO_VALUE,
        top_filter=NO_VALUE,
        bottom_filter=NO_VALUE,
        top_label="",
        bottom_label="")
        
        
        
class t_parse_data_functional(unittest.TestCase):
    # Tests for parse_data_functional(dataset, state_initializer,
    # parsing_function, top_label, bottom_label). The parsing function
    # receives (row, state) and returns ((key, is_top, time, value),
    # new_state); rows are grouped into per-key charts with the given
    # top/bottom labels.
    data_1 = rayon.data.Dataset.from_string(
        dedent("""|#name|rank|date|count
                  |aaron|maj |2000/01/01:00:00:00|1
                  |aaron|maj |2000/01/01:01:00:00|1
                  |aaron|maj |2000/01/01:02:00:00|1
                  |aaron|maj |2000/01/01:03:00:00|1
                  |abel |maj |2000/01/01:00:00:00|1
                  |abel |maj |2000/01/01:01:00:00|1
                  |barb |maj |2000/01/01:00:00:00|1
                  |barb |maj |2000/01/01:01:00:00|1
                  |barb |maj |2000/01/01:02:00:00|1
                  |bob  |prv |2000/01/01:00:00:00|1
                  |bob  |prv |2000/01/01:01:00:00|1
                  |bob  |prv |2000/01/01:02:00:00|2
                  |bob  |prv |2000/01/01:03:00:00|1"""))

    def test_1(self):
        # Classify rows into charts "A"/"B" by first letter of the
        # name, top half for majors, bottom for everyone else.

        def parsing_function(row, state):
            # State is ignored
            def cell(lbl, half):
                return (lbl, half, row.date, row.count), None
            if row.name.startswith("a"):
                if row.rank == "maj": return cell("A", True)
                else                : return cell("A", False)
            elif row.name.startswith("b"):
                if row.rank == "maj": return cell("B", True)
                else                : return cell("B", False)

        # "A" has no privates, so its bottom half is a pair of empty
        # tuples; the labels come from the last two arguments below.
        expected = (("A", 
                     ("Majors", 
                      (make_datetime("2000/01/01:00:00:00"),
                       make_datetime("2000/01/01:01:00:00"),
                       make_datetime("2000/01/01:02:00:00"),
                       make_datetime("2000/01/01:03:00:00"),
                       make_datetime("2000/01/01:00:00:00"),
                       make_datetime("2000/01/01:01:00:00")),
                      (1, 1, 1, 1, 1, 1)),
                     ("Privates", tuple(), tuple())),
                    ("B",
                     ('Majors',
                      (make_datetime("2000/01/01:00:00:00"),
                       make_datetime("2000/01/01:01:00:00"),
                       make_datetime("2000/01/01:02:00:00")),
                       (1, 1, 1)),
                      ('Privates',
                       (make_datetime("2000/01/01:00:00:00"),
                        make_datetime("2000/01/01:01:00:00"),
                        make_datetime("2000/01/01:02:00:00"),
                        make_datetime("2000/01/01:03:00:00")),
                       (1, 1, 2, 1))))

        actual = parse_data_functional(
            self.data_1, lambda: None, parsing_function, "Majors", "Privates")

        self.assertEqual(expected, actual)

    def test_empty_1(self):
        # An empty dataset yields an empty result and never invokes
        # the parsing function.
        def parsing_function(row, state):
            raise AssertionError("I shouldn't be called")
        
        self.assertEqual(
            tuple(),
            parse_data_functional(
                rayon.data.Dataset(), 
                lambda: None, 
                parsing_function, 
                "foo", 
                "bar"))
        

    def test_state_1(self):
        # The state returned by parsing_function for one row is passed
        # in for the next row; here it is a simple row counter whose
        # value is used as the data value.
        d = rayon.data.Dataset.from_string(
            dedent("""|#name|rank|date
                      |a |maj |2000/01/01:00:00:00
                      |a |min |2000/01/01:01:00:00
                      |b |maj |2000/01/01:02:00:00
                      |b |min |2000/01/01:03:00:00
                   """))


        def state_initializer():
            return 0

        def parsing_function(row, state):
            return ((row.name, row.rank == "maj", row.date, state),
                    state + 1)

        expected = (("a", 
                     ("Top", 
                      (make_datetime("2000/01/01:00:00:00"),), (0,)),
                     ("Bottom", 
                      (make_datetime("2000/01/01:01:00:00"),), (1,))),
                    ("b",
                     ('Top',
                      (make_datetime("2000/01/01:02:00:00"),), (2,)),
                     ('Bottom',
                      (make_datetime("2000/01/01:03:00:00"),), (3,))))

        actual = parse_data_functional(
            d, state_initializer, parsing_function, "Top", "Bottom")

        self.assertEqual(expected, actual)
            

    def test_error_1(self):
        # Exceptions raised by the parsing function propagate.
        d = rayon.data.Dataset.from_string(
            dedent("""|#name|rank|date
                      |a |maj |2000/01/01:00:00:00
                      |a |min |2000/01/01:01:00:00
                      |b |maj |2000/01/01:02:00:00
                      |b |min |2000/01/01:03:00:00
                   """))
        class FooError(Exception): pass
        def parsing_function(row, state): raise FooError()
        self.assertRaises(
            FooError,
            lambda: parse_data_functional(
                d, lambda: 0, parsing_function, "top", "bottom"))
            

    def test_error_2(self):
        # Exceptions raised by the state initializer propagate.
        d = rayon.data.Dataset.from_string(
            dedent("""|#name|rank|date
                      |a |maj |2000/01/01:00:00:00
                      |a |min |2000/01/01:01:00:00
                      |b |maj |2000/01/01:02:00:00
                      |b |min |2000/01/01:03:00:00
                   """))
        class FooError(Exception): pass
        def parsing_function(row, state): return ((1, 2, 3), 4)
        def state_initializer(): raise FooError()
        self.assertRaises(
            FooError,
            lambda: parse_data_functional(
                d, state_initializer, parsing_function, "top", "bottom"))

    def test_error_3(self):
        # With an empty dataset, neither callback runs, so neither
        # exception is raised.
        d = rayon.data.Dataset()
        class FooError(Exception): pass
        def parsing_function(row, state): raise FooError()
        def state_initializer(): raise FooError()
        self.assertEqual(
            tuple(),
            parse_data_functional(
                d, state_initializer, parsing_function, "top", "bottom"))






                

class t_get_classifier(unittest.TestCase):
    # Tests for get_classifier(file, init_sym, klass_sym): load Python
    # source from an open file and return (initializer, classifier)
    # callables looked up by the given symbol names.
    class _(object):
        @staticmethod
        def with_temp_file(contents, fn):
                # Write dedent(contents) to a temp file, rewind it, and
                # hand it to fn; the file is closed afterwards.
                tf = tempfile.TemporaryFile()
                try:
                    tf.write(dedent(contents))
                    tf.seek(0)
                    fn(tf)
                finally:
                    tf.close()
            

        @staticmethod
        def passes_1(kode, 
                     init_sym=None, 
                     klass_sym=None,
                     init_val=0,
                     klass_val=((1, 2, 3), 4)):
            # Build a test method: load `kode` via get_classifier and
            # check that calling the returned initializer/classifier
            # yields init_val/klass_val.
            def test_(self):
                def _do_test(tf):
                    # NOTE: get_classifier doesn't check that the
                    # symbols returned are in any way suitable for use
                    # in parse_data_functional. We're writing and
                    # calling them as if they are for convenience.
                    initializer, classifier = get_classifier(
                        tf, init_sym, klass_sym)
                    self.assertEqual(init_val, initializer())
                    self.assertEqual(klass_val, classifier(None, None))
                self._.with_temp_file(kode, _do_test)
            return test_

        @staticmethod
        def fails_1(kode, exc, init_sym=None, klass_sym=None):
            # Build a test method: loading `kode` must raise exc.
            def test_(self):
                def _do_test(tf):
                    self.assertRaises(
                        exc,
                        lambda: get_classifier(tf, init_sym, klass_sym))
                self._.with_temp_file(kode, _do_test)
            return test_
        
                    

                    

    # Default symbol names are "initialize" and "classify".
    test_1 = _.passes_1(
        """|def classify(row, state): return (1, 2, 3), 4
           |def initialize(): return 0""")

    test_custom_names_1 = _.passes_1(
        """|def cla(row, state): return (1, 2, 3), 4
           |def ini(): return 0""",
        init_sym="ini",
        klass_sym="cla")

    # A custom name takes precedence over the default symbol.
    test_custom_names_2 = _.passes_1(
        """|def ini(): return 0
           |def initialize(): raise RuntimeError()
           |def classify(row, state): return (1, 2, 3), 4""",
        init_sym="ini")

    test_custom_names_3 = _.passes_1(
        """|def cla(row, state): return (1, 2, 3), 4
           |def initialize(): return 0
           |def classify(row, state): raise RuntimeError()""",
        klass_sym="cla")

    test_error_1 = _.fails_1(
        "no way in hell this is gonna parse",
        SyntaxError)

    test_error_2 = _.fails_1(
        """|# valid, but no symbols
           |""",
        NameError)

    # If and only if initialize doesn't exist and an initializer symbol
    # wasn't supplied, supply a default. Otherwise, raise NameError
    test_missing_symbols_1 = _.passes_1(
        """|# valid, but missing initializer
           |def classify(row, state): return ((1, 2, 3), 4)
           |""",
        init_val=None)
        
    test_missing_symbols_2 = _.fails_1(
        """|# valid, but missing classifier
           |def initialize(): return 0
           |""",
        NameError)

    test_missing_symbols_3 = _.fails_1(
        """|# valid, but missing custom names
           |def classify(row, state): return ((1, 2, 3), 4)
           |def initialize(): return 0
           |""",
        NameError,
        init_sym="wrong")
    
    test_missing_symbols_4 = _.fails_1(
        """|# valid, but missing custom names
           |def classify(row, state): return ((1, 2, 3), 4)
           |def initialize(): return 0
           |""",
        NameError,
        klass_sym="wrong")

    test_missing_symbols_5 = _.fails_1(
        """|# valid, but missing custom names
           |def classify(row, state): return ((1, 2, 3), 4)
           |def initialize(): return 0
           |""",
        NameError,
        init_sym="wrong",
        klass_sym="also_wrong")


def date_xrange(num,
                start=make_datetime("2000/01/01:00:00:00"),
                step="PT1H"):
    # Generate num + 1 datetimes: `start`, then `num` further values,
    # each `step` later than the last. start and step may be anything
    # make_datetime / make_timedelta accept.
    current = make_datetime(start)
    delta = make_timedelta(step)
    yield current
    remaining = num
    while remaining > 0:
        current += delta
        remaining -= 1
        yield current
                          


class t_split_outliers(unittest.TestCase):
    # TODO: This test should probably be rewritten. Right now, the
    # test function pastes time data onto the input sequence. This is
    # because the time is really incidental; it's along for the ride,
    # but the function does nothing with it. So we ought to test that
    # it's kept along, but that's about it.
    #
    # I thought it would make things simple if the caller didn't have
    # to deal with time, but I think I was wrong; the result is a
    # little weird, in that changing the partitions changes the
    # output. Strictly speaking this is okay, since all the input
    # should show up in one of the three partitions. Nevertheless, it
    # should probably be rewritten so the input test data includes the
    # times, and the test input is sent in independently of the
    # partitions.
    class _(object):
        @staticmethod
        def passes_1(value_min, 
                     value_max, 
                     inliers, 
                     low_outliers, 
                     high_outliers):
            # Build a test method: splitting the concatenation of the
            # three partitions on [value_min, value_max] must
            # reconstruct them (paired with their pasted-on times).
            def test_(self):
                # Add time values in such a way that we can verify the
                # time->value mappings are correctly maintained.
                # NOTE: the single iterator is deliberately shared by
                # the three paste_times_on calls below, so each
                # partition gets a distinct, consecutive run of times.
                time_data_iter = date_xrange(len(low_outliers) +
                                             len(inliers) +
                                             len(high_outliers))
                def paste_times_on(seq):
                    return list(izip(time_data_iter, seq))
                low_pairs  = paste_times_on(low_outliers)
                in_pairs     = paste_times_on(inliers)
                high_pairs = paste_times_on(high_outliers)
        
                
                # Input is a single set of columnized data
                time_data, value_data = zip(*(low_pairs
                                              + in_pairs 
                                              + high_pairs))
        
                # NOTE: split_outliers takes max before min here.
                rc = split_outliers(value_max, 
                                    value_min, 
                                    time_data, 
                                    value_data)
        
                # rc is (inliers, low outliers, high outliers).
                for expected, actual in zip((in_pairs, 
                                             low_pairs, 
                                             high_pairs),
                                            rc):
                    # The expected data is row-major. Sort it and columnize it.
                    expected_as_cols = zip(*(sorted(expected)))
                    # The actual data is column-major. Rowize it, sort it and
                    # re-columnize it.
                    actual_as_cols = zip(*(sorted(zip(*actual))))
                    self.assertEqual(expected_as_cols, actual_as_cols)

            return test_
            
    
    example_1 = list(xrange(100))
                    
    # Unlike slice notation, both value_min and value_max are
    # inclusive; both 10 and 90 are inliers.
    test_1 = _.passes_1(10, 90,
                        example_1[10:91],
                        example_1[:10],
                        example_1[91:])

    # If min > max, they are switched.
    test_2 = _.passes_1(90, 10,
                        example_1[10:91],
                        example_1[:10],
                        example_1[91:])

    # No lower bound
    test_3 = _.passes_1(None, 90,
                        example_1[:91],
                        [],
                        example_1[91:])

    # No upper bound
    test_4 = _.passes_1(10, None,
                        example_1[10:],
                        example_1[:10],
                        [])

    # No bounds
    test_5 = _.passes_1(None, None,
                        example_1,
                        [],
                        [])

    # max == min
    test_6 = _.passes_1(50, 50,
                        [example_1[50]],
                        example_1[:50],
                        example_1[51:])

    # Fractional bounds between integer values: nothing is an inlier.
    test_7 = _.passes_1(50.5, 50.5,
                        [],
                        example_1[:51],
                        example_1[51:])


class t_compute_y_scale_limits(unittest.TestCase):
    # Tests for compute_y_scale_limits(max, min, inliers, low_outliers,
    # high_outliers) -> (scale_max, scale_min). The first item in each
    # in/outlier tuple is a column of time values; they aren't used and
    # don't figure in the return value, so they're mocked up here as
    # None.

    def test_1(self):
        # Both limits supplied: returned unchanged.
        limits = compute_y_scale_limits(
            20, 10,
            (None, [11, 12, 13]),
            (None, [0, 1, 2]),
            (None, [21, 22, 23]))
        self.assertEqual((20, 10), limits)

    def test_2(self):
        limits = compute_y_scale_limits(
            None, 10,
            (None, [11, 12, 13]),
            (None, [0, 1, 2]),
            (None, []))
        self.assertEqual((13, 10), limits)

    def test_2b(self):
        limits = compute_y_scale_limits(
            None, 10,
            (None, [11, 12, 13]),
            (None, []),
            (None, []))
        self.assertEqual((13, 10), limits)

    def test_2c(self):
        limits = compute_y_scale_limits(
            None, 10,
            (None, []),
            (None, []),
            (None, []))
        self.assertEqual((11, 10), limits)

    def test_2d(self):
        limits = compute_y_scale_limits(
            None, 10,
            (None, []),
            (None, []),
            (None, [-1, -2, -3, 4]))
        self.assertEqual((11, 10), limits)

    def test_2e(self):
        limits = compute_y_scale_limits(
            None, -0.5,
            (None, []),
            (None, []),
            (None, [-1, -2, -3]))
        self.assertEqual((0, -0.5), limits)

    def test_2f(self):
        limits = compute_y_scale_limits(
            None, 0,
            (None, []),
            (None, []),
            (None, [-1, -2, -3]))
        self.assertEqual((1, 0), limits)

    def test_3(self):
        limits = compute_y_scale_limits(
            20, None,
            (None, [11, 12, 13]),
            (None, []),
            (None, [21, 22, 23]))
        self.assertEqual((20, 11), limits)

    def test_3b(self):
        limits = compute_y_scale_limits(
            20, None,
            (None, [11, 12, 13]),
            (None, []),
            (None, []))
        self.assertEqual((20, 11), limits)

    def test_3c(self):
        limits = compute_y_scale_limits(
            20, None,
            (None, []),
            (None, []),
            (None, []))
        self.assertEqual((20, 0), limits)

    def test_3d(self):
        limits = compute_y_scale_limits(
            20, None,
            (None, []),
            (None, []),
            (None, [21, 22, 23, 24]))
        self.assertEqual((20, 0), limits)

    def test_3e(self):
        limits = compute_y_scale_limits(
            0, None,
            (None, []),
            (None, []),
            (None, [21, 22, 23, 24]))
        self.assertEqual((0, -1), limits)

    def test_4(self):
        # Neither limit supplied: both derived from the inliers.
        limits = compute_y_scale_limits(
            None, None,
            (None, [11, 12, 13]),
            (None, []),
            (None, []))
        self.assertEqual((13, 11), limits)
            


class t_choose_bin_size(unittest.TestCase):
    # Tests for choose_bin_size(start_time, end_time, opts_bin_size):
    # pick a bin size for the given time range, honoring an explicitly
    # supplied opts_bin_size.
    class _(object):
        @staticmethod
        def passes_1(start_time, end_time, opts_bin_size, out):
            # Expect choose_bin_size(...) == make_timedelta(out).
            start_time = make_datetime(start_time)
            end_time = make_datetime(end_time)
            out = make_timedelta(out)
            def test_(self):
                self.assertEqual(
                    out,
                    choose_bin_size(start_time, end_time, opts_bin_size))
            return test_

        @staticmethod
        def passes_2(in_range, out_bin_size):
            # Expect a range of duration in_range (from a fixed origin)
            # with no explicit bin size to yield out_bin_size.
            start_time = make_datetime("2000/01/01T00:00:00")
            end_time = start_time + make_timedelta(in_range)
            out = make_timedelta(out_bin_size)
            def test_(self):
                self.assertEqual(
                    out,
                    choose_bin_size(start_time, end_time, NO_VALUE))
            return test_

        @staticmethod
        def fails(start_time, end_time, opts_bin_size, err):
            # Expect choose_bin_size(...) to raise err.
            #
            # BUG FIX: the original never returned test_ (so the
            # generated "tests" were None and silently never ran),
            # passed the assertRaises arguments in the wrong order,
            # misspelled AssertionError as Assertionerror (a NameError
            # had the lambda ever been invoked), and ignored both the
            # err and opts_bin_size parameters.
            start_time = make_datetime(start_time)
            end_time = make_datetime(end_time)
            def test_(self):
                self.assertRaises(
                    err,
                    lambda: choose_bin_size(
                        start_time, end_time, opts_bin_size))
            return test_

            

    # An explicitly supplied bin size wins, whatever the range.
    test_option_supplied_1 = _.passes_1(
        "2000/01/01T00:00:00", "2010/01/01T00:00:00", make_timedelta("PT1H"),
        "PT1H")

    # start_time must precede end_time.
    test_start_before_end_1 = _.fails(
        "2000/01/01T00:00:00", "2000/01/01T00:00:00", NO_VALUE, AssertionError)
    test_start_before_end_2 = _.fails(
        "2001/01/01T00:00:00", "2000/01/01T00:00:00", NO_VALUE, AssertionError)

    # Table of range -> automatically chosen bin size.
    test_01 = _.passes_2("PT1M"    , "PT1S")
    test_02 = _.passes_2("PT2M"    , "PT1S")
    test_03 = _.passes_2("PT5M"    , "PT5S")
    test_04 = _.passes_2("PT10M"   , "PT5S")
    test_05 = _.passes_2("PT15M"   , "PT5S")
    test_06 = _.passes_2("PT20M"   , "PT10S")
    test_07 = _.passes_2("PT25M"   , "PT10S")
    test_08 = _.passes_2("PT30M"   , "PT10S")
    test_09 = _.passes_2("PT35M"   , "PT15S")
    test_10 = _.passes_2("PT40M"   , "PT15S")
    test_11 = _.passes_2("PT45M"   , "PT20S")
    test_12 = _.passes_2("PT50M"   , "PT20S")
    test_13 = _.passes_2("PT55M"   , "PT20S")
    test_14 = _.passes_2("PT1H"    , "PT20S")
    test_15 = _.passes_2("PT1H30M" , "PT30S")
    test_16 = _.passes_2("PT2H"    , "PT1M")
    test_17 = _.passes_2("PT3H"    , "PT1M")
    test_18 = _.passes_2("PT4H"    , "PT2M")
    test_19 = _.passes_2("PT5H"    , "PT2M")
    test_20 = _.passes_2("PT6H"    , "PT2M")
    test_21 = _.passes_2("PT8H"    , "PT3M")
    test_22 = _.passes_2("PT10H"   , "PT5M")
    test_23 = _.passes_2("PT12H"   , "PT5M")
    test_24 = _.passes_2("PT14H"   , "PT5M")
    test_25 = _.passes_2("PT16H"   , "PT5M")
    test_26 = _.passes_2("PT18H"   , "PT10M")
    test_27 = _.passes_2("PT20H"   , "PT10M")
    test_28 = _.passes_2("PT22H"   , "PT10M")
    test_29 = _.passes_2("P1D"     , "PT10M")
    test_30 = _.passes_2("PT36H"   , "PT15M")
    test_31 = _.passes_2("P2D"     , "PT20M")
    test_32 = _.passes_2("P3D"     , "PT30M")
    test_33 = _.passes_2("P4D"     , "PT30M")
    test_34 = _.passes_2("P5D"     , "PT1H")
    test_35 = _.passes_2("P6D"     , "PT1H")
    test_36 = _.passes_2("P7D"     , "PT1H")
    test_37 = _.passes_2("P10D"    , "PT2H")
    test_38 = _.passes_2("P14D"    , "PT2H")
    test_39 = _.passes_2("P21D"    , "PT3H")
    test_40 = _.passes_2("P25D"    , "PT4H")
    test_41 = _.passes_2("P28D"    , "PT4H")
    test_42 = _.passes_2("P1M"     , "PT4H")
    test_43 = _.passes_2("P2M"     , "PT8H")
    test_44 = _.passes_2("P3M"     , "PT12H")
    test_45 = _.passes_2("P6M"     , "P1D")
    test_46 = _.passes_2("P1Y"     , "P7D")

class t_rebin_single(unittest.TestCase):
    # Tests for rebin_single(time_col, val_col, start_time, end_time,
    # bin_size): re-bin one (time, value) column pair onto regular
    # bins between start_time and end_time.
    class _(object):
        @staticmethod
        def passes(in_cols, start_time, end_time, bin_size, out):
            # in_cols and out are (time_column, value_column) pairs.
            # FIX: replaced Python-2-only tuple parameter unpacking in
            # the signatures (removed by PEP 3113) with explicit
            # unpacking; callers still pass the same tuples.
            in_time_col, in_val_col = in_cols
            bin_size = make_timedelta(bin_size)
            def reify(cols):
                time_col, val_col = cols
                # Doing a zip/unzip so one of the input "columns" can
                # be an infinite stream. The zip cuts it off at the
                # size of the smaller/finite stream.
                pairs = zip(time_col, val_col)
                # FIX: dropped a redundant iter() wrapper around the
                # zip -- tuple() consumes any iterable directly.
                return tuple(zip(*pairs))

            def test_(self):
                # FIX: was the deprecated alias assertEquals.
                self.assertEqual(
                    reify(out),
                    rebin_single(in_time_col,
                                 in_val_col,
                                 start_time,
                                 end_time,
                                 bin_size))
            return test_

    test_1 = _.passes(
        # Input: 1,2,3,1,2,3,... every 20 seconds.
        (every("PT20S") + until("PT5M"), cycle([1,2,3])),
        # bin_size: 1 minute
        origin, origin + make_timedelta("PT5M"), "PT1M",
        # Expected output: 6,6,6,... every minute.
        (every("PT1M") + until("PT5M"), cycle([6])))

    # What controls the bins that are emitted is start_time and
    # end_time. If you want bins that start on a nice, round number,
    # pass round-numbered values in for start_time, end_time and
    # bin_size.
    test_single_element_1 = _.passes(
        ([make_datetime("2000/01/01:00:00:12")], [23]),
        make_datetime("2000/01/01:00:00:12"),
        make_datetime("2000/01/01:00:00:12"),
        "PT1H",
        ([make_datetime("2000/01/01:00:00:12")], [23]))

    test_single_element_2 = _.passes(
        ([make_datetime("2000/01/01:00:00:12")], [23]),
        make_datetime("2000/01/01:00:00:00"),
        make_datetime("2000/01/01:00:00:00"),
        "PT1H",
        ([make_datetime("2000/01/01:00:00:00")], [23]))

    # Bins with no observations are filled with zero.
    test_sparse_1 = _.passes(
        ([make_datetime("2000/01/01:00:00:00"),
          make_datetime("2000/01/01:03:00:00")],
         [10, 20]),
        make_datetime("2000/01/01:00:00:00"),
        make_datetime("2000/01/01:04:00:00"),
        "PT1H",
        (tuple(every("PT1H") + until("PT4H")),
         (10, 0, 0, 20)))

    # rebin will always emit at least one bin, of size bin_size.
    test_empty_1 = _.passes(([], []),
                            make_datetime("2000/01/01:00:00:00"),
                            make_datetime("2000/01/01:00:00:00"),
                            "PT1H",
                            ([make_datetime("2000/01/01:00:00:00")], [0]))


# This test fails! And that's okay, because span() goes out of its way
# never to use it like this. It's here as a cautionary tale; Putback
# exists to make span() work, and has problems as a general purpose
# pushback stream.
#
# class t_Putback(unittest.TestCase):
#     def test_recursion_omg_stack_death(self):
#         acc = []
#         for x in xrange(1000):
#             acc = Putback([999 - x], acc)
#         self.assertEquals(tuple(acc),
#                           tuple(xrange(1000)))
        
                  


class t_span(unittest.TestCase):
    """Tests for span(), which splits an iterable into the leading run
    of elements satisfying a predicate and the remainder."""

    class _(object):
        @staticmethod
        def passes(predicate, in_data, out_data):
            # Build a test method asserting that span() splits in_data
            # into exactly the (prefix, remainder) pair out_data.
            def check(self):
                head, tail = span(predicate, in_data)
                self.assertEqual((head, tuple(tail)), out_data)
            return check

    # The split behaves identically on tuples, lists and bare
    # iterators.
    test_1 = _.passes(
        lambda i: i == 1,
        (1, 1, 1, 2, 3, 4, 5, 6),
        ((1, 1, 1), (2, 3, 4, 5, 6)))

    test_2 = _.passes(
        lambda i: i == 1,
        [1, 1, 1, 2, 3, 4, 5, 6],
        ((1, 1, 1), (2, 3, 4, 5, 6)))

    test_3 = _.passes(
        lambda i: i == 1,
        iter([1, 1, 1, 2, 3, 4, 5, 6]),
        ((1, 1, 1), (2, 3, 4, 5, 6)))

    # Boundary cases: everything matches, nothing matches, no input.
    test_all_pass_1 = _.passes(
        lambda i: i == 1, (1, 1, 1), ((1, 1, 1), tuple()))

    test_all_fail_1 = _.passes(
        lambda i: i == 1,
        (2, 3, 4, 5, 6),
        (tuple(), (2, 3, 4, 5, 6)))

    test_empty_1 = _.passes(
        lambda i: i == 1, tuple(), (tuple(), tuple()))

    def test_recycling_1(self):
        # Feed span()'s remainder back into span() many times; this
        # must not blow the recursion limit or accumulate state.
        rounds = 25000
        remainder = xrange(rounds)
        for current in xrange(rounds):
            matched, remainder = span(lambda x: x == current, remainder)



class t_apply_to_all_subcharts(unittest.TestCase):
    """Tests for apply_to_all_subcharts(), which maps a function over
    each (label, top-series, bottom-series) triple in a chart."""

    # Shared fixture: two subcharts, each a (label, top, bottom)
    # triple.
    in_data = (("lbl1", 
                ("t", (1, 2, 3), ('a', 'b', 'c')),
                ("b", (10, 20, 30), ('d', 'e', 'f'))),
               ("lbl2",
                ("foo", (4, 5, 6), ('z', 'y', 'x')),
                ("bar", (40, 50, 60), ('w', 'v', 'u'))))

    def test_1(self):
        # The mapping function may return a plain tuple.
        def relabel(lbl, top, bottom):
            return lbl + "_changed", top, bottom
        want = (("lbl1_changed",
                 ("t", (1, 2, 3), ('a', 'b', 'c')),
                 ("b", (10, 20, 30), ('d', 'e', 'f'))),
                ("lbl2_changed",
                 ("foo", (4, 5, 6), ('z', 'y', 'x')),
                 ("bar", (40, 50, 60), ('w', 'v', 'u'))))
        self.assertEqual(want,
                         apply_to_all_subcharts(relabel, self.in_data))

    def test_2(self):
        # The mapping function may rewrite the series contents, not
        # just the label.
        def shift(lbl, top, bottom):
            def bump_times(side, times, values):
                return side, tuple(t + 1 for t in times), values
            return lbl, bump_times(*top), bump_times(*bottom)
        want = (("lbl1",
                 ("t", (2, 3, 4), ('a', 'b', 'c')),
                 ("b", (11, 21, 31), ('d', 'e', 'f'))),
                ("lbl2",
                 ("foo", (5, 6, 7), ('z', 'y', 'x')),
                 ("bar", (41, 51, 61), ('w', 'v', 'u'))))
        self.assertEqual(want,
                         apply_to_all_subcharts(shift, self.in_data))

    def test_list_1(self):
        # A list result is accepted as well as a tuple.
        def relabel(lbl, top, bottom):
            return [lbl + "_changed", top, bottom]
        want = (("lbl1_changed",
                 ("t", (1, 2, 3), ('a', 'b', 'c')),
                 ("b", (10, 20, 30), ('d', 'e', 'f'))),
                ("lbl2_changed",
                 ("foo", (4, 5, 6), ('z', 'y', 'x')),
                 ("bar", (40, 50, 60), ('w', 'v', 'u'))))
        self.assertEqual(want,
                         apply_to_all_subcharts(relabel, self.in_data))

    def test_iter_1(self):
        # So is an iterator result.
        def relabel(lbl, top, bottom):
            return iter([lbl + "_changed", top, bottom])
        want = (("lbl1_changed",
                 ("t", (1, 2, 3), ('a', 'b', 'c')),
                 ("b", (10, 20, 30), ('d', 'e', 'f'))),
                ("lbl2_changed",
                 ("foo", (4, 5, 6), ('z', 'y', 'x')),
                 ("bar", (40, 50, 60), ('w', 'v', 'u'))))
        self.assertEqual(want,
                         apply_to_all_subcharts(relabel, self.in_data))

    def test_generator_1(self):
        # And a generator result.
        def relabel(lbl, top, bottom):
            return (piece for piece in [lbl + "_changed", top, bottom])
        want = (("lbl1_changed",
                 ("t", (1, 2, 3), ('a', 'b', 'c')),
                 ("b", (10, 20, 30), ('d', 'e', 'f'))),
                ("lbl2_changed",
                 ("foo", (4, 5, 6), ('z', 'y', 'x')),
                 ("bar", (40, 50, 60), ('w', 'v', 'u'))))
        self.assertEqual(want,
                         apply_to_all_subcharts(relabel, self.in_data))

    def test_empty_1(self):
        # An empty chart maps to an empty result without invoking the
        # mapping function at all.
        def never_called(lbl, top, bottom):
            raise AssertionError("Shouldn't call predicate function")
        self.assertEqual(tuple(),
                         apply_to_all_subcharts(never_called, tuple()))

    
    
            
class t_rebin_all(unittest.TestCase):
    """Tests for rebin_all(), which rebins every subchart's time
    series onto one common set of time bins."""

    def test_1(self):
        # Top series: three samples starting off the hour boundary.
        top_origin = "2000/01/01:01:02:03"
        top_times = tuple(every("PT1H1M1S", top_origin)
                          + until("PT3H", top_origin))
        # Bottom series: also three samples, in different one-hour
        # bins and with a slightly different stride (not enough to
        # skip a bin).
        bottom_origin = "2000/01/01:02:03:04"
        bottom_times = tuple(every("PT1H2M2S", bottom_origin)
                             + until("PT3H", bottom_origin))

        # Both halves should land in the same five clock-aligned
        # hourly bins starting at midnight on 2000/01/01.
        binned_times = tuple(every("PT1H") + until("PT5H"))

        chart = (('lbl1',
                  ('top', top_times, (4, 5, 6)),
                  ('bottom', bottom_times, (5, 6, 7))),)
        want = (('lbl1',
                 ('top', binned_times, (0, 4, 5, 6, 0)),
                 ('bottom', binned_times, (0, 0, 5, 6, 7))),)

        self.assertEqual(
            want,
            rebin_all(chart,
                      make_datetime("2000/01/01:00:00:00"),
                      make_datetime("2000/01/01:05:00:00"),
                      make_timedelta("PT1H")))

    def test_empty(self):
        # Empty series still rebin to a full complement of
        # zero-valued bins.
        chart = (('lbl1',
                  ('top', tuple(), tuple()),
                  ('bottom', tuple(), tuple())),)
        hourly = tuple(every("PT1H") + until("PT5H"))
        want = (('lbl1',
                 ('top', hourly, (0, 0, 0, 0, 0)),
                 ('bottom', hourly, (0, 0, 0, 0, 0))),)
        self.assertEqual(
            want,
            rebin_all(chart,
                      make_datetime("2000/01/01:00:00:00"),
                      make_datetime("2000/01/01:05:00:00"),
                      make_timedelta("PT1H")))


class t_filter_all(unittest.TestCase):
    """Tests for filter_all(), which drops (time, value) pairs that
    fail a predicate from every subchart series."""

    class _(object):
        # Shared fixture: half-hour bins with distinct value ranges
        # for the top and bottom halves.
        times    = tuple(every("PT30M") + until("PT5H"))
        in_vals  = range(10)
        out_vals = range(10, 20)

        in_data = (('lbl1',
                    ('top', times, in_vals),
                    ('bottom', times, out_vals)),)

        @classmethod
        def out_data_from_pred(cls, pred):
            # Independently compute the expected survivors of pred for
            # one (times, values) series.
            def survivors(values):
                kept = [(t, v) for (t, v) in zip(cls.times, values)
                        if pred(t, v)]
                return (tuple(t for (t, v) in kept),
                        tuple(v for (t, v) in kept))
            return (('lbl1',
                     ('top',) + survivors(cls.in_vals),
                     ('bottom',) + survivors(cls.out_vals)),)

        @classmethod
        def passes_1(cls, predicate):
            # Build a test method comparing filter_all() against the
            # independently computed expectation.
            def check(self):
                self.assertEqual(cls.out_data_from_pred(predicate),
                                 filter_all(cls.in_data, predicate))
            return check

    test_filter_bytime = _.passes_1(lambda t, v: t.minute == 0)
    test_filter_byval  = _.passes_1(lambda t, v: v % 3 == 0)

    def test_empty(self):
        # Filtering empty series is a no-op and must not invoke the
        # predicate at all.
        chart = (('lbl1',
                  ('top', tuple(), tuple()),
                  ('bottom', tuple(), tuple())),)
        def predicate(t, v):
            raise AssertionError("Predicate shouldn't be called")
        self.assertEqual(chart, filter_all(chart, predicate))
        

        


def none_or(fn, val):
    """Return fn(val), passing a None val through untouched."""
    return None if val is None else fn(val)

def noval_or(fn, val):
    """Return fn(val), passing the NO_VALUE sentinel through
    untouched."""
    return NO_VALUE if val is NO_VALUE else fn(val)

class t_decipher_time_bounds(unittest.TestCase):
    """Tests for decipher_time_bounds(), which reconciles the
    start-time/end-time/bin-size options with the bounds observed in
    the data, producing the (low, high, bin_size) actually used.

    Conventions: NO_VALUE means "option not supplied on the command
    line"; None means "no data".  Times and durations are given as
    strings and converted by the helpers below.

    Fixed in review: two test attributes were assigned twice under the
    same name, so the first assignment of each pair was shadowed and
    never run.  The exact duplicate of test_opts_bin_size_1 was
    removed, and the unbinned variant formerly (mis)named
    test_opts_start_bin_size_1 is now test_opts_start_bin_size_2.
    """

    class _(object):
        # Factories that build test methods from string-form inputs
        # and expectations.

        @classmethod
        def _format_input_args(cls,
                               opt_start, 
                               opt_end, 
                               opt_bin_size, 
                               data_low, 
                               data_high):
            # Convert string specs into datetime/timedelta objects,
            # letting the NO_VALUE and None sentinels pass through.
            return (noval_or(make_datetime, opt_start),
                    noval_or(make_datetime, opt_end),
                    noval_or(make_timedelta, opt_bin_size),
                    none_or(make_datetime, data_low),
                    none_or(make_datetime, data_high))

        @classmethod
        def passes_1(cls,
                     opt_start, 
                     opt_end, 
                     data_low, 
                     data_high, 
                     is_binned, 
                     opt_bin_size,
                     out_lo, 
                     out_hi, 
                     out_bin_size):
            # Build a test method asserting that decipher_time_bounds
            # returns (out_lo, out_hi, out_bin_size) for these inputs.
            (opt_start, 
             opt_end, 
             opt_bin_size, 
             data_low, 
             data_high) = cls._format_input_args(opt_start, 
                                                 opt_end, 
                                                 opt_bin_size, 
                                                 data_low, 
                                                 data_high)

            def test_(self):
                self.assertEqual(
                    (none_or(make_datetime, out_lo),
                     none_or(make_datetime, out_hi), 
                     none_or(make_timedelta, out_bin_size)),
                    decipher_time_bounds(opt_start, 
                                         opt_end,
                                         data_low,
                                         data_high,
                                         is_binned,
                                         opt_bin_size))

            return test_

        @classmethod
        def fails_1(cls,
                    opt_start,
                    opt_end, 
                    data_low, 
                    data_high, 
                    is_binned, 
                    opt_bin_size,
                    exc):
            # Build a test method asserting that decipher_time_bounds
            # raises exc for these inputs.
            (opt_start, 
             opt_end, 
             opt_bin_size, 
             data_low, 
             data_high) = cls._format_input_args(opt_start, 
                                                 opt_end, 
                                                 opt_bin_size, 
                                                 data_low, 
                                                 data_high)
            def test_(self):
                self.assertRaises(
                    exc,
                    lambda: decipher_time_bounds(opt_start, 
                                                 opt_end,
                                                 data_low,
                                                 data_high,
                                                 is_binned,
                                                 opt_bin_size))
            return test_

    test_all_opts_1 = _.passes_1(
        # --INPUT--
        "2000/01/01:01:00:00", # Start time from options
        "2000/01/01:02:00:00", # End time from options
        "2000/01/01:01:02:14", # Lower bound of data
        "2000/01/01:01:54:36", # Upper bound of data
        True,                  # Is data binned?
        "PT5M",                # Bin size from options
        # --OUTPUT--
        "2000/01/01:01:00:00", # Lower bound of scale
        "2000/01/01:02:00:00", # Upper bound of scale
        "PT5M")                # Bin size

    # No options and no data: nothing to decide.
    test_no_opts_no_data_1 = _.passes_1(
        NO_VALUE, NO_VALUE,
        None, None,
        True,
        NO_VALUE,
        None, None, None)

    # No options: unbinned data keeps its exact bounds...
    test_no_opts_1 = _.passes_1(
        NO_VALUE, NO_VALUE,
        "2000/01/01:01:02:14", "2000/01/01:01:54:36",
        False,
        NO_VALUE,
        "2000/01/01:01:02:14",
        "2000/01/01:01:54:36",
        None)

    # ...while binned data gets bounds widened to a derived bin size.
    test_no_opts_2 = _.passes_1(
        NO_VALUE, NO_VALUE,
        "2000/01/01:01:02:14", "2000/01/01:01:54:36",
        True,
        NO_VALUE,
        "2000/01/01:01:02:00",
        "2000/01/01:01:54:40",
        "PT20S")

    # Only --bin-size given.  (A byte-for-byte duplicate of this
    # assignment was removed here; it shadowed this one harmlessly.)
    test_opts_bin_size_1 = _.passes_1(
        NO_VALUE, NO_VALUE,
        "2000/01/01:01:02:14", "2000/01/01:01:54:36",
        True,
        "PT5M",
        "2000/01/01:01:00:00",
        "2000/01/01:01:55:00",
        "PT5M")

    test_opts_bin_size_2 = _.passes_1(
        NO_VALUE, NO_VALUE,
        "2000/01/01:01:02:14", "2000/01/01:01:54:36",
        False,
        "PT5M",
        "2000/01/01:01:02:14",
        "2000/01/01:01:54:36",
        None)

    # Only --start-time given.
    test_opts_start_1 = _.passes_1(
        "2000/01/01:00:00:00", NO_VALUE,
        "2000/01/01:01:02:14", "2000/01/01:01:54:36",
        True,
        NO_VALUE,
        "2000/01/01:00:00:00",
        "2000/01/01:01:54:40",
        "PT20S")

    test_opts_start_2 = _.passes_1(
        "2000/01/01:00:00:00", NO_VALUE,
        "2000/01/01:01:02:14", "2000/01/01:01:54:36",
        False,
        NO_VALUE,
        "2000/01/01:00:00:00",
        "2000/01/01:01:54:36",
        None)

    # Only --end-time given.
    test_opts_end_1 = _.passes_1(
        NO_VALUE, "2000/01/01:02:00:00",
        "2000/01/01:01:02:14", "2000/01/01:01:54:36",
        True,
        NO_VALUE,
        "2000/01/01:01:02:00",
        "2000/01/01:02:00:00",
        "PT20S")

    test_opts_end_2 = _.passes_1(
        NO_VALUE, "2000/01/01:02:00:00",
        "2000/01/01:01:02:14", "2000/01/01:01:54:36",
        False,
        NO_VALUE,
        "2000/01/01:01:02:14",
        "2000/01/01:02:00:00",
        None)

    # Both --start-time and --end-time given.
    test_opts_start_end_1 = _.passes_1(
        "2000/01/01:01:00:00", "2000/01/01:02:00:00",
        "2000/01/01:01:02:14", "2000/01/01:01:54:36",
        True,
        NO_VALUE,
        "2000/01/01:01:00:00",
        "2000/01/01:02:00:00",
        "PT20S")

    test_opts_start_end_2 = _.passes_1(
        "2000/01/01:01:00:00", "2000/01/01:02:00:00",
        "2000/01/01:01:02:14", "2000/01/01:01:54:36",
        False,
        NO_VALUE,
        "2000/01/01:01:00:00",
        "2000/01/01:02:00:00",
        None)

    # --start-time with --bin-size.
    test_opts_start_bin_size_1 = _.passes_1(
        "2000/01/01:00:00:00", NO_VALUE,
        "2000/01/01:01:02:14", "2000/01/01:01:54:36",
        True,
        "PT5M",
        "2000/01/01:00:00:00",
        "2000/01/01:01:55:00",
        "PT5M")

    # Renamed from a second test_opts_start_bin_size_1, which had
    # been shadowing the binned variant above.
    test_opts_start_bin_size_2 = _.passes_1(
        "2000/01/01:00:00:00", NO_VALUE,
        "2000/01/01:01:02:14", "2000/01/01:01:54:36",
        False,
        "PT5M",
        "2000/01/01:00:00:00",
        "2000/01/01:01:54:36",
        None)

    # --end-time with --bin-size.
    test_opts_end_bin_size_1 = _.passes_1(
        NO_VALUE, "2000/01/01:02:00:00",
        "2000/01/01:01:02:14", "2000/01/01:01:54:36",
        True,
        "PT5M",
        "2000/01/01:01:00:00",
        "2000/01/01:02:00:00",
        "PT5M")

    test_opts_end_bin_size_2 = _.passes_1(
        NO_VALUE, "2000/01/01:02:00:00",
        "2000/01/01:01:02:14", "2000/01/01:01:54:36",
        False,
        "PT5M",
        "2000/01/01:01:02:14",
        "2000/01/01:02:00:00",
        None)

    test_illogical_opts_1 = _.fails_1(
        # opts_start_time > opts_end_time
        "2000/01/01:02:00:00", "2000/01/01:01:00:00",
        "2000/01/01:01:02:14", "2000/01/01:01:54:36",
        True,
        "PT5M",
        ToolRuntimeError)
        
    test_illogical_opts_2 = _.fails_1(
        # does not bin cleanly
        "2000/01/01:01:00:00", "2000/01/01:02:00:00",
        "2000/01/01:01:02:14", "2000/01/01:01:54:36",
        True,
        "PT25M",
        ToolRuntimeError)
        


        

class t_parse_filter_expr(unittest.TestCase):
    """Tests for parse_filter_expr(), which compiles a column
    comparison expression into a row-filtering function."""

    class _(object):
        @staticmethod
        def passes_1(expr, data_in, expected_rows):
            # Build a test method asserting that filtering data_in
            # with expr keeps exactly expected_rows.
            def test_(self):
                filter_fn = parse_filter_expr(expr)
                d_in = rayon.data.Dataset.from_string(dedent(data_in))
                d_expected_rows = rayon.data.Dataset.from_string(
                    dedent(expected_rows))
                d_rows = d_in.filter_pass(filter_fn)
                self.assertEqual(d_expected_rows.to_string(), 
                                 d_rows.to_string())
            return test_

        @staticmethod
        def fails_1(expr, exc):
            # Build a test method asserting that parsing expr raises
            # exc.  Fixed in review: the original neither returned
            # test_ (so fails_1 produced None) nor deferred the
            # parse_filter_expr call, which would have raised outside
            # assertRaises.
            def test_(self):
                self.assertRaises(exc, parse_filter_expr, expr)
            return test_

    # With headers

    data_in = """    |#foo|bar|baz
                     |   1|  a|  A
                     |   1|  b|  B
                     |   1|  c|  C
                     |   0|  d|  D
                     |   0|  e|  E
                     |   0|  f|  F"""

    data_zeroes =""" |#foo|bar|baz
                     |   0|  d|  D
                     |   0|  e|  E
                     |   0|  f|  F"""

    data_ones = """  |#foo|bar|baz
                     |   1|  a|  A
                     |   1|  b|  B
                     |   1|  c|  C"""



    test_eq_1 = _.passes_1("foo==0", data_in, data_zeroes)
    test_eq_2 = _.passes_1("foo==1", data_in, data_ones)
    test_eq_3 = _.passes_1("foo!=0", data_in, data_ones)
    test_eq_4 = _.passes_1("foo!=1", data_in, data_zeroes)

    test_eq_whitespace_1 = _.passes_1("foo == 0", data_in, data_zeroes)
    test_eq_whitespace_2 = _.passes_1("foo == 1", data_in, data_ones)
    test_eq_whitespace_3 = _.passes_1("foo != 0", data_in, data_ones)
    test_eq_whitespace_4 = _.passes_1("foo != 1", data_in, data_zeroes)


    # Without headers: columns are addressed by [index] instead of
    # name.

    data_in_2 = """     |   1|  a|  A
                        |   1|  b|  B
                        |   1|  c|  C
                        |   0|  d|  D
                        |   0|  e|  E
                        |   0|  f|  F"""

    data_zeroes_2 = """ |   0|  d|  D
                        |   0|  e|  E
                        |   0|  f|  F"""

    data_ones_2 = """   |   1|  a|  A
                        |   1|  b|  B
                        |   1|  c|  C"""


    test_eq_nh_1 = _.passes_1("[0]==0", data_in_2, data_zeroes_2)
    test_eq_nh_2 = _.passes_1("[0]==1", data_in_2, data_ones_2)
    test_eq_nh_3 = _.passes_1("[0]!=0", data_in_2, data_ones_2)
    test_eq_nh_4 = _.passes_1("[0]!=1", data_in_2, data_zeroes_2)

    test_eq_whitespace_nh_1 = _.passes_1("[0] == 0", data_in_2, data_zeroes_2)
    test_eq_whitespace_nh_2 = _.passes_1("[0] == 1", data_in_2, data_ones_2)
    test_eq_whitespace_nh_3 = _.passes_1("[0] != 0", data_in_2, data_ones_2)
    test_eq_whitespace_nh_4 = _.passes_1("[0] != 1", data_in_2, data_zeroes_2)



class t_format_ticks(unittest.TestCase):
    """Tests for format_ticks(), which pairs each tick position with a
    label rendered in the requested format/units/precision.

    Fixed in review: test_autofloat_units_1 was assigned twice, so the
    integer-valued variant was shadowed and never run; the second
    assignment is now test_autofloat_units_2.
    """

    class _(object):
        @classmethod
        def passes_1(cls,
                     tick_positions, 
                     tick_labels, 
                     *format_ticks_args, 
                     **format_ticks_kargs):
            # Build a test method asserting that format_ticks() yields
            # the (position, label) pairing zip(tick_positions,
            # tick_labels).
            def test_(self):
                self.assertEqual(
                    zip(tick_positions, tick_labels),
                    format_ticks(tick_positions, 
                                 *format_ticks_args, 
                                 **format_ticks_kargs))
            return test_


    test_autofloat_1 = _.passes_1(
        [1024, 2048, 4096], ["1024", "2048", "4096"],
        "autofloat", "", 0)

    test_autofloat_precision_1 = _.passes_1(
        [1.024, 2.048, 4.096], ["1.02", "2.05", "4.10"],
        "autofloat", "", 2)

    test_autofloat_units_1 = _.passes_1(
        [1024, 2048, 4096], ["1024u", "2048u", "4096u"],
        "autofloat", "u", 0)

    # Renamed from a second test_autofloat_units_1, which had been
    # shadowing the integer-valued variant above.
    test_autofloat_units_2 = _.passes_1(
        [1.024, 2.048, 4.096], ["1.02u", "2.05u", "4.10u"],
        "autofloat", "u", 2)

    # (metric and binary don't use precision)

    test_metric_1 = _.passes_1(
        [1024, 2048, 4096], ["1.02k", "2.05k", "4.10k"],
        "metric", "", 0)

    test_metric_units_1 = _.passes_1(
        [1024, 2048, 4096], ["1.02kb/s", "2.05kb/s", "4.10kb/s"],
        "metric", "b/s", 0)

    test_binary_1 = _.passes_1(
        [1024, 2048, 4096], ["1.00Ki", "2.00Ki", "4.00Ki"],
        "binary", "", 0)

    test_binary_units_1 = _.passes_1(
        [1024, 2048, 4096], ["1.00Kib/s", "2.00Kib/s", "4.00Kib/s"],
        "binary", "b/s", 0)

    # Anything other than the named formats is treated as a template
    # string with %(value)s / %(autofloat_value)s / %(units)s slots.
    test_default_1 = _.passes_1(
        [1024, 2048, 4096], ["?1024!", "?2048!", "?4096!"],
        "?%(value)s!", "", 0)

    test_default_precision_1 = _.passes_1(
        [1.024, 2.048, 4.096], ["?1.02!", "?2.05!", "?4.10!"],
        "?%(autofloat_value)s!", "", 2)

    test_default_units_1 = _.passes_1(
        [1024, 2048, 4096], ["?1024 b/s!", "?2048 b/s!", "?4096 b/s!"],
        "?%(value)s %(units)s!", "b/s", 0)
        

        


                      
                  
        
                  

# Option parsing and execution


# Short human-readable description of this tool.
desc = "Visualize time series data as a strip plot"

# Sentinel constant.  NOTE(review): presumably marks "no --group-by
# columns supplied" -- confirm at use sites.
NO_GROUPS = Constant("NO_GROUPS")

# Automatically assembling option lists for marker, scale, etc. choices
def make_choices(choice_method, exceptions):
    """List the labels reported by choice_method(), excluding any
    that appear in exceptions.

    choice_method returns (label, value) pairs; only the labels are
    kept.
    """
    excluded = set(exceptions)
    return [label for label, unused in choice_method()
            if label not in excluded]


# Selectable labels for the corresponding command-line options, taken
# from toolbox.Toolbox's label listings.
scale_choices        = make_choices(toolbox.Toolbox.list_scale_labels,
                                    [])
marker_shape_choices = make_choices(toolbox.Toolbox.list_marker_labels,
                                    [])
# NOTE(review): 'hsplit' and 'vsplit' are excluded from the border
# styles -- presumably not meaningful as plot edges; confirm.
border_choices       = make_choices(toolbox.Toolbox.list_border_labels,
                                    ['hsplit', 'vsplit'])
line_choices         = make_choices(toolbox.Toolbox.list_line_labels,
                                    [])

def choice_str(choices):
    """Render choices as a comma-separated list of quoted names,
    e.g. "'solid', 'dashed'", for use in option help text."""
    quoted = ["'%s'" % choice for choice in choices]
    return ", ".join(quoted)


# An abbreviation to use in the option definitions below: parses an
# "#rrggbbaa"-style color string into a color value.
cstr = parse_color_string

def colname_from_str(s):
    """Interpret s as an integer column index when it parses as one,
    otherwise as a column name string."""
    try:
        index = int(s)
    except ValueError:
        return str(s)
    return index

class ColnamesOption(ListOption):
    # A ListOption whose metavar advertises that entries may be column
    # names or numeric indices (see colname_from_str above).
    metavar_="COLNAMES_OR_INDICES"


# Default image dimensions (pixels for PNG, otherwise points);
# interpolated into the --width/--height option help strings below.
default_width  = 800
default_height = 450


options = [
    Flag(
        'run-tests'),


    # rytimeseries overrides the default options to supply custom
    # defaults for width and height.
    PathOption(
        "input-path", default_val="-",
        hlp="Path to input data. (Default: STDIN)"),
    PathOption(
        "output-path", default_val=NO_DEFAULT,
        hlp="(REQUIRED) Path to output file"),
    Flag(
        "first-line-colnames", 
        hlp="First line of input contains column names"),
    IntOption(
        "width", default_val=800,
        hlp="image width (pixels for PNG, otherwise points) "
        "(Default: %d)" % default_width),
    IntOption(
        "height", default_val=NO_VALUE,
        hlp="image height (pixels for PNG, otherwise points) "
        "(Default: %d)" % default_height),

    IntOption(
        "height-per-subchart", default_val=250,
         hlp="Height of each subchart in image."),





    ChoiceOption(
        'style', choices=['dots', 'lines', 'bars', 'filled_lines'], 
        default_val='dots',
        hlp="Style of visualization output"),

    TimeDeltaOption(
        'bin-size', default_val=NO_VALUE,
        hlp="Size of time bins, as ISO-8601 duration"),
    Flag(
        'presorted-input', hlp="Input is already sorted by time."),
    Flag(
        'prebinned_input', hlp="Input is already in --bin-size bins"),
    DateOption(
        'start-time', default_val=NO_VALUE,
        hlp="Earliest time to plot"),
    DateOption(
        'end-time', default_val=NO_VALUE,
        hlp="Latest time to plot"),


    ColnameOption(
        'time-column', default_val=0,
        hlp="Column in input to use for time data."),

    StringOption(
        'top-label', default_val="",
        hlp="Label for top halves of all series plots"),
    StringOption(
        'bottom-label', default_val="",
        hlp="Label for bottom halves of all series plots"),

    ColnamesOption(
        'group-by', default_val=[], converter=colname_from_str,
        hlp="Column(s) whose unique values will be series"),
    ColnameOption(
        'top-column', default_val=NO_VALUE,
        hlp="Column from which to take data for top half of series."),
    ColnameOption(
        'bottom-column', default_val=NO_VALUE,
        hlp="Column from which to take data for bottom half of series."),
    StringOption(
        'top-filter', default_val=NO_VALUE,
        hlp="Selection filter for data in the top half of series."),
    StringOption(
        'bottom-filter', default_val=NO_VALUE,
        hlp="Selection filter for data in the bottom half of series."),


    StringOption(
        'classifier-module', default_val=NO_VALUE, 
        hlp=SUPPRESS_HELP),
       #hlp="Python module to use to classify input data into series"),
    StringOption(
        'classifier-fn', default_val=NO_VALUE,
        hlp=SUPPRESS_HELP),
        #hlp="Name of classifier function in --classifier-module"),
    StringOption(
        'initializer-fn', default_val=NO_VALUE,
        hlp=SUPPRESS_HELP),
        #hlp="Name of initializer function in --classifier-module"),


    ListOption(
        'show', default_val=NO_VALUE,
        hlp="Only show these series of input data"),
    ListOption(
        'labels', default_val=NO_VALUE,
        hlp="Labels given to displayed series of input data"),
    Flag('legacy', hlp="Input is in legacy format"),

    # TODO: get choices for scale, etc., from Rayon
    ChoiceOption(
        'value-scale', choices=['linear', 'log', 'clog'] , default_val='linear',
        hlp="Type of scale to use for values "
        "(Values: 'linear', 'log', 'clog') "
        "(Default: 'linear')"),


    NewTickspecOption(
        'value-ticks', default_val=parse_tickspec("value-ticks", "smin,smax"),
        hlp="Positions of tickmarks on value scale. (Default: 'smin,smax')"),

                            

    StringOption(
        'value-tick-label-format', default_val='autofloat',
        hlp="Format of tickmarks on value scale. "
        "(Values: 'autofloat', 'binary', 'metric' or format string)"
        "(Default: 'autofloat')"),
    IntOption(
        'value-tick-size', default_val=8,
        hlp="Size in pts/pixels of tick mark on value "
        "(vertical) axis (Default: 3)"),
    StringOption(
        'value-tick-label-size', default_val="12px",
        hlp="Font size of value tick mark labels. ('12px' == 12 pixels)"
        "(Default: '12px')"),
    IntOption(
        'value-tick-label-spacing', default_val=13,
        hlp="Spacing between text and tickmark, in pts/pixels. (Default: 10)"),
    ChoiceOption(
        'value-tick-label-halign', default_val="right",
        choices=['left', 'center', 'right'],
        hlp="Horizontal alignment of text on value axis relative to tick mark "
        "(Values: 'left', 'center', 'right') "
        "(Default: 'center')"),
    ChoiceOption(
        'value-tick-label-valign', default_val="center",
        choices=['top', 'center', 'bottom'],
        hlp="Vertical alignment of text on value axis relative to tick mark "
        "(Values: 'top', 'center', 'bottom') "
        "(Default: 'center')"),
    IntOption('value-tick-label-angle', default_val=0,
              hlp="Degrees of rotation (0 == horizontal, left-to-right) "
              "to apply to text on value axis. (Default: 0)"),

        
    
    ChoiceOption(
        'time-major-ticks', default_val="auto",
        choices=['auto', 'none'],
        hlp="How often to put major ticks on time axis. "
        "(Values: 'auto', 'none') (Default: 'auto')"),
    ChoiceOption(
        'time-minor-ticks', default_val="auto",
        choices=['auto', 'none'],
        hlp="How often to put minor ticks on time axis. "
        "(Values: 'auto', 'none') (Default: 'auto')"),


    IntOption(
        'time-major-tick-size', default_val=8,
        hlp="Size in pts/pixels of tick mark on time "
        "(horizontal) axis (Default: 3)"),
    StringOption(
        'time-major-tick-label-size', default_val="12px",
        hlp="Font size of time tick mark labels. ('12px' == 12 pixels) "
        "(Default: '12px')"),
    IntOption(
        'time-major-tick-label-spacing', default_val=12,
        hlp="Spacing between text and tickmark, in pts/pixels. (Default: 10)"),

    IntOption(
        'time-minor-tick-size', default_val=3,
        hlp="Size in pts/pixels of tick mark on time "
        "(horizontal) axis (Default: 3)"),
    StringOption(
        'time-minor-tick-label-size', default_val="8px",
        hlp="Font size of time tick mark labels. ('12px' == 12 pixels)"
        "(Default: '8px')"),
    IntOption(
        'time-minor-tick-label-spacing', default_val=10,
        hlp="Spacing between text and tickmark, in pts/pixels. (Default: 10)"),


    ChoiceOption(
        'time-tick-label-halign', default_val="right",
        choices=['left', 'center', 'right'],
        hlp="Horizontal alignment of text on time axis relative to tick mark "
        "(Values: 'left', 'center', 'right') (Default: 'center')"),
    ChoiceOption(
        'time-tick-label-valign', default_val="center",
        choices=['top', 'center', 'bottom'],
        hlp="Vertical alignment of text on time axis relative to tick mark "
        "(Values: 'top', 'center', 'bottom') (Default: 'center')"),
    IntOption('time-tick-label-angle', default_val=45,
              hlp="Degrees of rotation (0 == horizontal, left-to-right) "
              "to apply to text on time axis. (Default: 45)"),


    Flag(
        "annotate-max",
        hlp="Put annotations on the highest point of "
        "visualization (Default: no"),

    Flag(
        "annotate-min",
        hlp="Put annotations on the lowest point of "
        "visualization (Default: no)"),

    StringOption(
        "value-units", default_val="",
        hlp="Units for value input. (Default: None)"),


    

    ColorOption(
        "annotation-marker-color", default_val=cstr("#ff0000ff"),
        hlp="Color of annotation callout marker (Default: red)"),

    IntOption(
        "annotation-marker-size", default_val=7,
        hlp="Size of annotation collout marker (Default: 7-pixel radius)"),

    StringOption(
        'annotation-label-size', default_val="12px",
        hlp="Font size of annotation labels. ('12px' == 12 pixels) "
        "(Default: '12px')"),

    ColorOption(
        "annotation-label-color", default_val=cstr("#000000ff"),
        hlp="Color of annotation text (Default: black)"),

    ColorOption(
        "annotation-label-background-color", default_val=cstr("#ffffff99"),
        hlp="Color of annotation text (Default: clear)"),

    IntOption(
        'annotation-label-spacing', default_val=15,
        hlp="Spacing between text and annotated data, "
        "in pts/pixels. (Default: 1)"),



    StringOption(
        'group-label-size', default_val="large",
        hlp="Font size of group labels (Default: 'large')"),



     
    ChoiceOption(
        'vertical-border-line-style', default_val="solid",
        choices=border_choices,
        hlp="Type of line to draw on left edge of plot. "
        "(Values: %s) "
        "Default: 'solid'" % choice_str(border_choices)),

   
    ChoiceOption(
        'horizontal-border-line-style', default_val="solid",
        choices=border_choices,
        # Help said "left edge" — a copy-paste from the vertical-border
        # option above; this option styles the horizontal (bottom) border.
        hlp="Type of line to draw on bottom edge of plot. "
        "(Values: %s) "
        "Default: 'solid'" % choice_str(border_choices)),

   


    ColorOption(
        'marker-color', default_val=(0, 0, 0),
        hlp="Color of point markers (if style = 'dots')"),
    IntOption(
        'marker-size', default_val=2,
        hlp="Size of point markers (if style = 'dots')"),
    ChoiceOption(
        'marker-shape', choices=marker_shape_choices, default_val="dot",
        # Added the missing space before "(Values:" and corrected the
        # documented default to match default_val ('dot', not 'dots').
        hlp="Shape of point markers (if style = 'dots') "
        "(Values: %s) "
        "(Default: 'dot')" % choice_str(marker_shape_choices)),


    ColorOption(
        'line-color', default_val=cstr("#6495edff"),
        hlp="Color of line (if style = 'lines') (default: blue)"),
    IntOption(
        'line-width', default_val=1,
        hlp="Width of line in points/pixels (if style = 'lines') "
        "(default: 1)"),
    ChoiceOption(
        'line-style', default_val='solid',
        choices=line_choices,
        hlp="Type of line (if style = 'lines'). "
        "(Values: %s) "
        "Default: 'solid'" % choice_str(line_choices)),


    ColorOption(
        'top-field-color', default_val=cstr("#ed9564cc"),
        hlp="Color of field in filled_lines style plot (default: red)"),
        
    ColorOption(
        'bottom-field-color', default_val=cstr("#6495edcc"),
        hlp="Color of field in filled_lines style plot (default: blue)"),
        


    ColorOption(
        "bar-fill-color", default_val=cstr("#0000ccff"),
        hlp="Color of bars in barplots (if style = 'bars') (Default: blue)"),
    ColorOption(
        "bar-border-color", default_val=cstr("#00000000"),
        # NOTE(review): default #00000000 has zero alpha, i.e. fully
        # transparent — not black as the help claimed. If an opaque black
        # border was intended, default_val should be "#000000ff" instead.
        hlp="Color of bar borders in barplots (if style = 'bars') "
        "(Default: transparent)"),
    FloatOption(
        "bar-width", default_val=.8,
        hlp="Bar width in barplots, as proportion of "
        "available (1 == 100%) (if style = 'bars') (Default: .8)"),
    IntOption(
        "bar-border-width", default_val=1,
        # Documented default now matches default_val (help said 2).
        hlp="Width (in pixels) of bar border (if style = 'bars') (Default: 1)"),


    ColorOption(
        'plot-background-color', default_val=(255, 255, 255, 255),
        hlp="Background color of just data plots"),
    ColorOption(
        'chart-background-color', default_val=(255, 255, 255, 255),
        hlp="Background color of entire chart"),

    Flag(
        'vgrid', hlp="Draw vertical grid lines? Default: no"),
    ColorOption(
        'vgrid-color', default_val=cstr('#ccccccff'),
        hlp="Line color of vertical grid lines. (Default: gray)"),
    ChoiceOption(
        'vgrid-style', choices=line_choices, default_val='solid',
        hlp="Line style of vertical grid lines. "
        "(Values: %s) "
        "(Default: 'solid')" % choice_str(line_choices)),
    IntOption(
        "vgrid-width", default_val=2,
        hlp="Line width of vertical grid lines, in pts/pixels. (Default: 2)"),
    NewTickspecOption(
        "vgrid-lines-at",
        default_val=NO_VALUE,
        # Help said "horizontal" — copy-paste from hgrid-lines-at; this
        # option positions the vertical grid lines.
        hlp="position of vertical grid lines."),
    IntOption(
        "vgrid-num-lines", default_val=NO_VALUE,
        hlp="Number of vertical gridlines to draw (Default: 1 per time tick)"),

    Flag(
        'hgrid', hlp="Draw horizontal grid lines? Default: no"),
    ColorOption(
        'hgrid-color', default_val=cstr('#ccccccff'),
        hlp="Line color of horizontal grid lines. (Default: gray)"),
    ChoiceOption(
        'hgrid-style', choices=line_choices, default_val='solid',
        hlp="Line style of horizontal grid lines. "
        "(Values: %s) "
        "(Default: 'solid')" % choice_str(line_choices)),
    IntOption(
        "hgrid-width", default_val=2,
        hlp="Line width of horizontal grid lines, in pts/pixels. (Default: 2)"),
    NewTickspecOption(
        "hgrid-lines-at", 
        default_val=NO_VALUE,
        hlp="position of horizontal grid lines."),
    IntOption(
        "hgrid-num-lines", default_val=2,
        hlp="Number of horizontal gridlines to draw (Default: 2)"),

    


    ChoiceOption(
        "trend-line", default_val=NO_VALUE,
        choices=['kernel', 'ols', 'moving_avg'],
        hlp="Add trend (regression) line of specified type. "
        "(Values: 'kernel', 'ols', 'moving_avg') "
        "(Default: no trend line)"),
    KVOption(
        "trend-args", default_val=None,
        #hlp="(Optional) args to customize --trend-line",
        hlp=SUPPRESS_HELP,
        converter=lambda v:miscutils.infer_type_from_string(v)(v)),
    ColorOption(
        "trend-line-color", default_val=cstr("#ff0000ff"),
        hlp="Color of trend line (Default: red)"),
    FloatOption(
        "trend-line-width", default_val=3,
        hlp="Width of trend line in pixels/points (Default: 3)"),

    ChoiceOption(
        "variation-field", default_val=NO_VALUE,
        choices=['stdev'],
        # Added missing spaces between concatenated help segments;
        # default_val is NO_VALUE, so there is no field by default
        # (help previously claimed "(Default: 'stdev')").
        hlp="Add transparent field showing variation of data from trend. "
        "(Values: 'stdev') "
        "(Default: no variation field)"),
    ColorOption(
        "variation-field-color", default_val=cstr("#6495edcc"),
        # Help text previously lacked its closing parenthesis.
        hlp="Color of variation field "
        "(Default: blue, 25% screen)"),
    ColorOption(
        "variation-line-color", default_val=cstr("#0000007f"),
        hlp="Color of edge line of variation field (Default: gray)"),
    IntOption(
        "variation-line-width", default_val=2,
        hlp="Width of edge line of variation field"), 
    ChoiceOption(
        "variation-line-style", default_val="dashed",
        choices=line_choices,
        hlp="Type of edge line of variation field. "
        "(Values: %s) "
        "Default: 'dashed'" % choice_str(line_choices)),

    Flag(
        "draw-as-multiple", 
        hlp="Draw chart as if it were one of several, "
        "even if there is only one chart in data "
        "(Default: draw singleton charts differently)"),

    FalseFlag(
        "no-timeline", valname="draw_timeline",
        hlp="Don't draw timeline on bottom border"),

    FalseFlag(
        "no-centerline", valname="draw_centerline",
        hlp="Don't draw centerline between plot sides"),


    StringOption("title", default_val=NO_VALUE, hlp="Chart title"),
    StringOption("title-size", default_val="x-large", hlp="Title size"),
    StringOption("caption", default_val=NO_VALUE, hlp="Chart caption"),
    IntOption(
        "padding", default_val=None,
        hlp="Padding to add to all sides of output. (Default: see man page)"),
    IntOption(
        "pad-top", default_val=None,
        hlp="Padding to add to top of output. (Default: see man page)"),
    IntOption(
        "pad-bottom", default_val=None,
        hlp="Padding to add to bottom of output. (Default: see man page)"),
    IntOption(
        "pad-left", default_val=None,
        hlp="Padding to add to left of output. (Default: see man page)"),
    IntOption(
        "pad-right", default_val=None,
        hlp="Padding to add to right of output. (Default: see man page)"),

    FloatOption(
        "value-min", default_val=NO_VALUE,
        hlp="Minimum value for data to be displayed."),
    FloatOption(
        "value-max", default_val=NO_VALUE,
        hlp="Maximum value for data to be displayed."),

    IntOption(
        "value-min-pct", default_val=NO_VALUE,
        hlp="omit points < nth percentile in value data. "
        "(Default: no floor)"),

    IntOption(
        "value-max-pct", default_val=NO_VALUE,
        hlp="omit points > nth percentile in value data. "
        "(Default: no ceiling)"),

    FloatOption(
        "fix-scale-min", default_val=NO_VALUE,
        hlp="Minimum value for ALL visualization value scales."),
    FloatOption(
        "fix-scale-max", default_val=NO_VALUE,
        hlp="Maximum value for ALL visualization value scales."),

    Flag(
        "plot-high-outliers",
        hlp="Display points > --value-max as outliers on visualization"),

    StringOption(
        "outlier-marker-shape", default_val="arrow",
        hlp="Shape of outlier markers (Values: %s) "
        "(Default: 'arrow')" % choice_str(marker_shape_choices)),

    ColorOption(
        'outlier-marker-color', default_val=(0, 255, 0),
        hlp="Color of outlier markers"),
    IntOption(
        'outlier-marker-size', default_val=5,
        hlp="Size of outlier markers"),

]
                 
                 
    
if __name__ == '__main__':
    # Script entry point: run this module's command with its own option
    # set; the shared common options are deliberately excluded because
    # this command declares a complete set of its own.
    execute(
        cmd_func=main,
        cmd_options=options,
        cmd_desc=desc,
        include_common_options=False,
    )

