#!/usr/bin/perl
use strict;
use warnings;
use Getopt::Long qw(:config no_getopt_compat bundling);
use List::Util 'max';
use List::MoreUtils qw(any all);
use FindBin '$Bin';
use lib "$Bin/lib";
use Vnlog::Util 'get_unbuffered_line';
use Text::Balanced 'extract_bracketed';

use feature qw(say state);


# Help/usage text. Printed to STDOUT on --help; printed (via die) on any
# commandline error. Note that $0 is interpolated into the heredoc
my $usage =  <<EOF;
$0 [--has c0,c1,...] [--has c2] [--print|--pick|-p c3,+c4,sum=c5+c6,rel(c7)] [match_expr] [match_expr] ...

    Other available options:
      --function 'f(x) { .... return ... }'
      --eval expr
      --noskipempty
      --skipcomments
      --dumpexprs
      --perl
      --unbuffered
      --stream
      -A/-B/-C

    This tool is a nicer 'awk' that reads and writes vnlog. As a result, it
    can refer to columns by name, not number, like awk does.

    Columns are selected with -p (we select all columns if omitted). We can
    rename columns with '=', and if we do that, we can output arbitrary
    expressions. For instance:

      vnl-filter -p 'a,b,sum=a+b'

    Rows are selected with match expressions given on the commandline. To select
    all rows after a certain time, and within a certain temperature range, do:

      vnl-filter 'time > 100' 'temp > 20 && temp < 30'

    By default, this tool generates an awk script that's then interpreted by
    mawk. Although it is slower, perl can be used instead by passing --perl.
    This makes no difference in output in most cases, but the various
    expressions would be evaluated by perl, which could be desirable.

    --unbuffered flushes each line after each print. Useful for streaming data.

    --stream is a synonym for "--unbuffered"

    -A N/ -B N / -C N prints N lines of context after/before/around all records
     matching the given expressions. Works just like in the 'grep' tool

    For more information, please read the manpage.
EOF

# No arguments at all: always an error; show the usage and quit
if(! @ARGV)
{
    die $usage;
}

# by default we do skip empty records
my %options = (skipempty => 1);
GetOptions(\%options,
           "has=s@",
           "pick|print|p=s@",
           "eval=s",
           "after-context|A=i",
           "before-context|B=i",
           "context|C=i",
           "function|sub=s@",
           "skipempty!",
           "skipcomments!",
           "dumpexprs!",
           "perl",
           "unbuffered",
           "stream",
           "help") or die($usage);
if( defined $options{help} )
{
    print $usage;
    exit 0;
}

# --has and --pick may be repeated ("=s@" above); normalize "never given" to
# an empty list so later code can always dereference them
$options{has}  //= [];
$options{pick} //= [];

# --stream is a synonym for --unbuffered
$options{unbuffered} = $options{unbuffered} || $options{stream};

# anything remaining on the commandline are 'matches' expressions
$options{matches} = \@ARGV;

# --eval produces arbitrary output, so the vnlog legend/comment structure no
# longer applies; it implies --skipcomments
if( defined $options{eval} )
{
    $options{skipcomments} = 1;
}

if( defined $options{eval} && @{$options{pick}} )
{
    say STDERR "--eval is given, so no column selectors should be given also";
    die $usage;
}

if( defined $options{context} &&
    (defined $options{'before-context'} ||
     defined $options{'after-context'}) )
{
    say STDERR "-C is exclusive with -A and -B";
    die $usage;
}

# true if any of -A/-B/-C was requested with a nonzero count
my $any_context_stuff =
  $options{'after-context'}  ||
  $options{'before-context'} ||
  $options{'context'};

if( $any_context_stuff && $options{eval} )
{
    say STDERR "--eval is exclusive with -A/-B/-C";
    die $usage;
}

# how many context lines to print before/after each matching record (0 if
# -A/-B/-C were not given)
my $NcontextBefore = ($options{'before-context'} || $options{'context'}) // 0;
my $NcontextAfter  = ($options{'after-context'}  || $options{'context'}) // 0;


# parse the , in $options{has} and $options{pick}. In --pick use the fancy
# ()-respecting version of split
@{$options{has}} = map split(/,/, $_), @{$options{has}};
@{$options{pick}} = map split_on_comma_respect_parens($_), @{$options{pick}};

# any requested columns preceded with '+' go into --has. And I strip out the '+'
for my $ipick(0..$#{$options{pick}})
{
    # handle extra column syntax here
    if( ${$options{pick}}[$ipick] =~ /^\+(.+)/ )
    {
        ${$options{pick}}[$ipick] = $1;
        push @{$options{has}}, ${$options{pick}}[$ipick];
    }
}

my @picked_exprs_named  = @{$options{pick}};
my @must_have_col_names = @{$options{has}};
# 0-based input-column indices corresponding to @must_have_col_names; filled
# in once we see the legend
my @must_have_col_indices_input;

# if no columns requested, just print everything
if( !@picked_exprs_named  &&
    !@must_have_col_names &&
    !@{$options{matches}} &&
    !defined $options{eval} )
{
    if($options{dumpexprs})
    {

        say "--dumpexprs: No-op special case; printing everything, modulo --skipcomments, --noskipempty";
        exit 0;
    }
    my $gotlegend;
    while(<STDIN>)
    {
        if( $options{skipempty} )
        {
            # a record whose every field is '-' is "empty"; drop it
            next if /^ \s* - (?: \s+ - )* \s* $/x;
        }
        if( $options{skipcomments})
        {
            # always skip hard comments
            next if /^\s*(?:#[#!]|#\s*$|$)/p;

            # skip a single comment only if we need a legend still
            if( /^\s*#/)
            {
                next if $gotlegend;
                $gotlegend = 1;
            }
        }

        print;
        flush STDOUT if $options{unbuffered};
    }
    exit 0;
}

# names of the output columns, in order
my @colnames_output;

# input column-name to index map. This always maps to a listref of indices, even
# if I only have a single index
my %colindices_input;

# highest 0-based input-column index any output expression references; used to
# detect truncated records later. -1 means "no column references needed"
my $colidx_needed_max = -1;



# awk or perl strings representing stuff to output. These are either simple
# column references (such as $1), or more complex expressions
my @langspecific_output_fields;

# How many rel(),diff(),... calls we have. I generate code based on this
my @all_specialops = qw(rel diff sum prev);
my %specialops;
for my $what (@all_specialops)
{
    # N: count of calls of this op seen so far; outer: [index, args-string]
    # for each OUTERMOST call (those get precomputed; see subst_reldiff)
    $specialops{$what} = {N     => 0,
                          outer => []};
}

# Loop searching for the legend.
#
# Here instead of using while(<STDIN>) we read one byte at a time. This means
# that as far as the OS is concerned we never read() past our line. And when we
# exec() to awk, all the data is available. This is inefficient, but we only use
# this function to read up to the legend, which is fine.
#
# Note that perl tries to make while(<STDIN>) work by doing an lseek() before we
# exec(), but if we're reading a pipe, this can't work
while(defined ($_ = get_unbuffered_line(*STDIN)))
{
    # I pass through (don't treat as a legend) ## comments and #! shebang and
    # empty lines and # comments without anything else.
    if(/^\s*(?:#[#!]|#\s*$|$)/p)
    {
        if(!$options{skipcomments} && !$options{dumpexprs})
        {
            print;
            flush STDOUT if $options{unbuffered};
        }
        next;
    }

    if( /^\s*#\s*/p )
    {
        chomp;

        # we got a legend line
        my @cols_all_legend_input = split ' ', ${^POSTMATCH}; # split the field names (sans the #)
        foreach my $idx (0..$#cols_all_legend_input)
        {
            # duplicate column names are allowed: each name maps to a LIST of
            # 0-based input indices
            $colindices_input{$cols_all_legend_input[$idx]} //= [];
            push @{$colindices_input{$cols_all_legend_input[$idx]}}, $idx;
        }

        # each element is a tuple representing a picked field:
        # (output_field, colidx_needed_max_here, colname_output)
        my @picked_fields;

        # If we weren't asked for particular columns, take them all. This isn't
        # a no-op because we can have --has
        if( @picked_exprs_named )
        {
            foreach my $i_picked_exprs_named (0..$#picked_exprs_named)
            {
                # Accept one picked expression: substitute its column
                # references and append the result to @picked_fields
                my $accept = sub
                {
                    my ($expr, $name, $dupindex) = @_;

                    # expr_subst_col_names() returns TWO values
                    # (expr, colidx_needed_max), so this push builds the
                    # 3-element tuple described above
                    push @picked_fields,
                      [ expr_subst_col_names($options{perl} ? 'perl' : 'awk',
                                             $expr,
                                             $dupindex),
                        $name // $expr ];
                };

                # Accept a picked expression that EXACTLY matches an input
                # column name. Returns true if it matched
                my $acceptExactMatch = sub
                {
                    my ($picked_expr, $name) = @_;

                    if (defined $colindices_input{$picked_expr})
                    {
                        # duplicated column names: accept ALL the instances
                        for my $dupindex (0..$#{$colindices_input{$picked_expr}})
                        {
                            $accept->( $picked_expr, $name, $dupindex);
                        }
                        return 1;
                    }
                    return undef;
                };

                # Accept all input columns whose names match a regex. Returns
                # true if anything matched; undef if the regex didn't compile
                # or nothing matched
                my $acceptRegexMatch = sub
                {
                    my ($picked_expr) = @_;

                    my $picked_expr_re;
                    eval { $picked_expr_re = qr/$picked_expr/p; };
                    if ( !$@ )
                    {
                        # compiled regex successfully
                        my $matched_any;
                        my %next_dupindex;

                        # I look through cols_all_legend_input instead of
                        # keys(%colindices_input) to preserve the original order
                        for my $matched_legend_input (@cols_all_legend_input)
                        {
                            $next_dupindex{$matched_legend_input} //= 0;
                            if ( $matched_legend_input =~ /$picked_expr_re/p && length(${^MATCH}) > 0 )
                            {
                                $accept->($matched_legend_input, undef,
                                          $next_dupindex{$matched_legend_input});
                                $matched_any = 1;
                                $next_dupindex{$matched_legend_input}++;
                            }
                        }
                        return $matched_any;
                    }
                    return undef;
                };

                # Drop already-picked fields whose OUTPUT name exactly matches.
                # Returns true if anything was dropped
                my $excludeExactMatch = sub
                {
                    my ($expr) = @_;

                    # $_->[2] is the output column name of a picked tuple
                    my @picked_fields_filtered = grep {$_->[2] ne $expr} @picked_fields;
                    if( scalar(@picked_fields_filtered) == scalar(@picked_fields) )
                    {
                        return undef;
                    }

                    @picked_fields = @picked_fields_filtered;
                    return 1;
                };

                # Drop already-picked fields whose OUTPUT name matches a regex.
                # Returns true if anything was dropped
                my $excludeRegexMatch = sub
                {
                    my ($expr) = @_;

                    my $expr_re;

                    eval { $expr_re = qr/$expr/p; };
                    return undef if $@;

                    my @picked_fields_filtered = grep {! ($_->[2] =~ /$expr_re/p && length(${^MATCH})) } @picked_fields;

                    if( scalar(@picked_fields_filtered) == scalar(@picked_fields) )
                    {
                        return undef;
                    }

                    @picked_fields = @picked_fields_filtered;
                    return 1;
                };




                my $picked_expr_named = $picked_exprs_named[$i_picked_exprs_named];

                next if $acceptExactMatch->($picked_expr_named, undef);

                # maybe this is a named expression: "name=expr"
                my ($name, $picked_expr) = $picked_expr_named =~ /(.*?)=(.*)/;
                $picked_expr //= $picked_expr_named;

                # No exact column match. If this is a named expression, I pass it on
                # to awk/perl
                if ( defined $name )
                {
                    $accept->($picked_expr, $name);
                    next;
                }

                # No exact matches were found, and not a named expression. This
                # is either a regex or an exclusion expression
                if( $picked_expr =~ /^!(.*)/ )
                {
                    # exclusion expression. I apply the same logic as before:
                    # try exact column matches first, and then a regex.
                    #
                    # I accumulate the picked list in order the arguments were
                    # given: each exclusion expression removes columns from the
                    # so-far-picked list. If the picked list BEGINS with an
                    # exclusion expression, we assume that ALL columns have been
                    # previously picked
                    #
                    # Here we match on the names of the OUTPUT columns
                    $picked_expr = $1;

                    if($i_picked_exprs_named == 0) { $acceptRegexMatch->('.'); }

                    next if $excludeExactMatch->($picked_expr);
                    next if $excludeRegexMatch->($picked_expr);

                    my @output_names_have = map {$_->[2]} @picked_fields;
                    die "Couldn't find requested column '$picked_expr' to exclude. Currently have output columns\n" . join('', map { "  $_\n" } @output_names_have);
                }

                next if $acceptRegexMatch->($picked_expr);

                die "Couldn't find requested column '$picked_expr'. Legend has the following columns:\n" . join('', map { "  $_\n" } @cols_all_legend_input);
            }

            if(!@picked_fields)
            {
                die "After processing --pick options, no fields remain!";
            }

            # flatten the accepted tuples into the parallel output arrays
            for my $picked_field (@picked_fields)
            {
                my ($output_field, $colidx_needed_max_here, $colname_output) = @$picked_field;

                push @colnames_output, $colname_output;

                if ( $colidx_needed_max_here > $colidx_needed_max )
                {
                    $colidx_needed_max = $colidx_needed_max_here;
                }
                push @langspecific_output_fields, $output_field;
            }
        }
        else
        {
            # no columns requested. I take ALL the columns. I make sure to not
            # explicitly look at any of the column names, so if we have
            # duplicate columns, things will remain functional
            @colnames_output = @cols_all_legend_input;
            if( !$options{perl} )
            {
                # awk fields are 1-based: $1, $2, ...
                @langspecific_output_fields = map { '$'. $_ } 1..(1+$#cols_all_legend_input);
            }
            else
            {
                # the perl path indexes the 0-based @fields array
                @langspecific_output_fields = map { "\$fields[$_]" } 0..$#cols_all_legend_input;
            }
        }

        # print out the new legend
        unless($options{dumpexprs} || $options{eval})
        {
            print "# @colnames_output\n";
            flush STDOUT if $options{unbuffered};
        }


        if( @must_have_col_names )
        {
            foreach my $col_name (@must_have_col_names)
            {
                # First I try exact matches for column names. Only unique
                # matches accepted
                if( defined $colindices_input{$col_name} )
                {
                    if(1 == @{$colindices_input{$col_name}})
                    {
                        push @must_have_col_indices_input, $colindices_input{$col_name}[0];
                        next;
                    }

                    die "--has found multiple columns named '$col_name'; --has expects unique columns";
                }


                # No exact matches. Try regex matches. Again, only unique
                # matches accepted
                my $col_name_re;
                eval { $col_name_re = qr/$col_name/p; };
                if( $@ )
                {
                    die "--has found no columns matching '$col_name'";
                }

                # compiled regex successfully
                my $matching_col_index;

                for my $matched_legend_input (keys(%colindices_input))
                {
                    if ( $matched_legend_input =~ /$col_name_re/p && length(${^MATCH}) > 0 )
                    {
                        # Found match. Is it unique?
                        if(defined $matching_col_index || 1 != @{$colindices_input{$matched_legend_input}})
                        {
                            die "--has found multiple columns matching '$col_name'; --has expects unique columns";
                        }

                        # Found unique (for now) column
                        $matching_col_index = $colindices_input{$matched_legend_input}[0];
                    }
                }
                if(!defined $matching_col_index)
                {
                    die "--has found no columns matching '$col_name'";
                }
                push @must_have_col_indices_input, $matching_col_index;
            }
        }

        # legend found and fully processed; data handling happens elsewhere
        last;
    }

    die "Got data line before a legend";
}

# If we fell out of the legend-search loop without ever populating the
# column map, we never saw a legend at all
die "No legend received. Is the input file empty?"
  unless %colindices_input;




# At this point I'm done dealing with the legend, and it's time to read in and
# process the data. I can keep going in perl, or I can generate an awk program,
# and let awk do this work. The reason: awk (mawk especialy) runs much faster.
# Both paths should produce the exact same output, and the test suite makes sure
# this is the case

if( !$options{perl} )
{
    my $awkprogram = makeAwkProgram();
    if( $options{dumpexprs} )
    {
        say $awkprogram;
        exit;
    }
    if($options{unbuffered})
    {
        exec 'mawk', '-Winteractive', $awkprogram;
    }
    else
    {
        exec 'mawk', $awkprogram;
    }

    exit; # dummy. We never get here
}

sub makeAwkProgram
{
    # Builds and returns the complete mawk program text implementing the
    # matching, column selection, skipempty and -A/-B/-C context logic.
    #
    # The awk program I generate here is analogous to the logic in the data
    # while() loop above

    # user-supplied --function definitions, with column references substituted
    my $functions = join('', map { my ($sub) = expr_subst_col_names('awk', $_); "function $sub " } @{$options{function}});

    my $awkprogram_preamble = '';


    if($any_context_stuff)
    {
        # context-handling stuff. This is a mirror of the perl implementation
        # below. See the comments there for a description
        $awkprogram_preamble .= 'BEGIN {' .
          '__i1_contextbuffer       = 0; ' .
          '__N_contextbuffer        = 0; ' .
          '__N_printafter           = 0; ' .
          '__just_skipped_something = 0; ' .
          '__printed_something_ever = 0; ' .
          '} ';

        $awkprogram_preamble .= 'function __contextbuffer_output_and_clear() { ' .
          "__i0_contextbuffer = __i1_contextbuffer - __N_contextbuffer; " .
          "if(__i0_contextbuffer < 0){__i0_contextbuffer += $NcontextBefore;} " .
          "while (__N_contextbuffer) { " .
          "    print __contextbuffer[__i0_contextbuffer++]; " .
          "    if(__i0_contextbuffer == $NcontextBefore){__i0_contextbuffer = 0} " .
          "    __N_contextbuffer--; " .
          "} " .
          "} ";

        # pushes to the buffer. Returns TRUE if I did NOT just overwrite an element of
        # the buffer
        $awkprogram_preamble .= 'function __contextbuffer_push(__line) { ' .
          '__contextbuffer[__i1_contextbuffer++] = __line; ' .
          "if(__i1_contextbuffer == $NcontextBefore) {__i1_contextbuffer = 0} " .
          "if(__N_contextbuffer  != $NcontextBefore) {__N_contextbuffer++} " .
          "} ";
    }

    # Deal with comments. If printing, these do not count towards the context
    # stuff (-A/-B/-C)
    $awkprogram_preamble .=
      '/^ *(#|$)/ { ' . ($options{skipcomments} ? '' : 'print; ') . 'next } ';

    # skip incomplete records. Can happen if a log line at the end of a file was
    # cut off in the middle. These are invalid lines, so I don't even bother to
    # handle -A/-B/-C
    if( $colidx_needed_max >= 0)
    {
        # awk columns are 1-based, hence the +1
        $awkprogram_preamble .= (1+$colidx_needed_max) . " > NF { next } ";
    }

    # skip records that have empty input columns that must be non-empty
    if (@must_have_col_indices_input)
    {
        $awkprogram_preamble .=
          join(' || ', map { '$'.($_+1). " == \"-\"" } @must_have_col_indices_input);
        $awkprogram_preamble .= " { next } ";
    }

    # a record is REJECTED when ANY of the match expressions is false
    my $not_matches_condition = join(' || ',
                                     map
                                     {
                                         my ($expr) = expr_subst_col_names('awk', $_);
                                         '!' . "($expr)"
                                     } @{$options{matches}});
    my $awkprogram_matches = '';
    my $awkprogram_print;
    if($options{eval})
    {
        # --eval: the user's expression IS the action; no legend/column output
        if( length($not_matches_condition) )
        {
            $awkprogram_matches .= $not_matches_condition . '{next}';
        }
        my ($expr) = expr_subst_col_names('awk', $options{eval});
        $awkprogram_print .= "$expr ";
    }
    else
    {
        if( length($not_matches_condition) )
        {
            $awkprogram_matches .= $not_matches_condition . '{ ';

            if ($any_context_stuff)
            {
                # get the line
                $awkprogram_matches .= "__line = " . join('" "', @langspecific_output_fields) . "; ";

                # save and skip the record
                $awkprogram_matches .=
                  'if(__N_printafter) { ' .
                  '    print __line; ' .
                  '    __N_printafter--; ' .
                  '} ' .
                  "else { if(__N_contextbuffer == $NcontextBefore){__just_skipped_something = 1;}  ";
                if ($NcontextBefore)
                {
                    $awkprogram_matches .= '__contextbuffer_push(__line); ';
                }
                $awkprogram_matches .= '} ';
            }
            $awkprogram_matches .= ' next } ';
        }


        # past if(!matches) {}

        $awkprogram_print .= '{';


        # context bookkeeping emitted around each accepted record's print
        my $record_accept_pre_print  = '';
        my $record_accept_post_print = '';
        if($any_context_stuff)
        {
            # '##' is the group separator between discontiguous context blocks
            $record_accept_pre_print =
              'if( __just_skipped_something && __printed_something_ever) { print "##" } ' .
              '__just_skipped_something = 0; ' .
              '__printed_something_ever = 1; ';
            if($NcontextBefore)
            {
                $record_accept_pre_print .= '__contextbuffer_output_and_clear(); ';
            }
            ####### now print the thing
            $record_accept_post_print .=
              "__N_printafter = $NcontextAfter; ";
        }

        # skip empty records if we must. I evaluate the fields just one time to
        # not affect the state inside rel() and diff()
        if (!$options{skipempty})
        {
            # Not skipping the null fields. I just print everything I have
            $awkprogram_print .=
              $record_accept_pre_print .
              'print ' . (join(',', @langspecific_output_fields)) . '; ' .
              $record_accept_post_print . ' ';
        }
        else
        {
            # I'm skipping the null fields. I thus look at each field
            # individually, and print only if they're all non-null
            $awkprogram_print .=
              join(' ',
                   map { "__f$_ = $langspecific_output_fields[$_]; " }
                   0..$#langspecific_output_fields) .

                     '__line = ' . join('" "', map {"__f$_"} 0..$#langspecific_output_fields) . '; ';

            # Then I do skipempty. Important to do this after
            # evaluating ALL the fields to tick all the rel(), diff(),
            # ... state
            $awkprogram_print .=
              "if(" . join( ' && ', map { "__f$_  == \"-\""} 0..$#langspecific_output_fields ) .
              ") { next } ";

            # And THEN I print everything
            $awkprogram_print .=
              $record_accept_pre_print .
              'print ' . join(',', map {"__f$_"} 0..$#langspecific_output_fields) . '; ' .
              $record_accept_post_print . ' ';
        }

        $awkprogram_print .= '}';
    }

    # precomputation of outer rel()/diff()/... values; must run on EVERY
    # record, before any match-based skipping
    my $outer_expr = get_reldiff_outer_expr();

    # one numbered awk function per rel()/diff()/sum()/prev() call, each with
    # its own state
    my $awkprogram_reldiff = '';
    for my $i (0..$specialops{rel}{N}-1)
    {
        $awkprogram_reldiff .= "function rel$i(x) { if(!__inited_rel$i) { __state_rel$i = x; __inited_rel$i = 1; } return x - __state_rel$i; } ";
    }
    for my $i (0..$specialops{diff}{N}-1)
    {
        $awkprogram_reldiff .= "function diff$i(x) { retval = __inited_diff$i ? (x - __state_diff$i) : \"-\"; __state_diff$i = x; __inited_diff$i = 1; return retval; } ";
    }
    for my $i (0..$specialops{sum}{N}-1)
    {
        $awkprogram_reldiff .= "function sum$i(x) { __state_sum$i += x; return __state_sum$i; } ";
    }
    for my $i (0..$specialops{prev}{N}-1)
    {
        $awkprogram_reldiff .= "function prev$i(x) { __prev = length(__state_prev$i) ? __state_prev$i : \"-\"; __state_prev$i = x; return __prev; } ";
    }

    my $awkprogram = $functions . $awkprogram_reldiff . $awkprogram_preamble;
    if(length($outer_expr))
    {
        $awkprogram .= "{ $outer_expr } ";
    }
    $awkprogram .= $awkprogram_matches . $awkprogram_print;
    return $awkprogram;
}

# like split(',', $s), but respects (). I.e. splitting "a,b,f(c,d)" produces 3
# tokens, not 4. A trailing ',' produces no empty final field. Unbalanced '('
# characters are kept literally
sub split_on_comma_respect_parens
{
    my ($s) = @_;

    my @f;

  FIELDS: # loop accumulating fields
    while (1)
    {
        my $field_accum = '';

      FIELD_ACCUM: # accumulate THIS field. Keep grabbing tokens until I see an
                   # , or the end
        while(1)
        {
            if (length($s) == 0)
            {
                # end of input. Emit whatever accumulated (if anything)
                if (length($field_accum))
                {
                    push @f, $field_accum;
                }
                last FIELDS;
            }

            if ($s !~ /^         # start of string
                       ([^(,]*?) # some minimal number of non-comma, non-paren
                       ([(,])    # comma or paren
                      /px) {
                # didn't match. The whole thing is the last field
                push @f, $field_accum . $s;
                last FIELDS;
            }

            my ($pre,$sep) = ($1,$2);
            if ($sep eq ',')
            {
                # we have a field
                push @f, $field_accum . $pre;
                $field_accum = '';
                $s = ${^POSTMATCH};
                next FIELD_ACCUM;
            }

            # we have a paren. accumulate
            my ($paren_expr, $rest) = extract_bracketed($sep . ${^POSTMATCH}, '(');
            if ( !defined $paren_expr )
            {
                # non-matched paren. Accum normally. Bug fix: previously $pre
                # was dropped here, so "a(b" split as ("(b") instead of ("a(b")
                $rest =~ /^\((.*)$/ or die "Weird... '$rest' should have started with a '('. Giving up";
                $field_accum .= $pre . '(';
                $s = $1;
                next FIELD_ACCUM;
            }
            $field_accum .= $pre . $paren_expr;
            $s = $rest;
        }
    }

    return @f;
}

sub find_outer_specialop
{
    # Return the name of the FIRST special op (rel/diff/sum/prev) that appears
    # as a function call in $str; undef if there is none
    my ($str) = @_;

    my $alternation = join('|', @all_specialops);

    if ($str =~ /^.*?\b($alternation)\s*\(/s)
    {
        return $1;
    }
    return undef;
}
sub subst_reldiff
{
    # Rewrites calls of one special op ($what: rel/diff/sum/prev) in $expr.
    #
    # Arguments: ($what, $expr, $isouter). Returns the rewritten expression.
    # Side effects: increments $specialops{$what}{N} for each call rewritten;
    # for outer calls, appends [index, args] to $specialops{$what}{outer}
    #
    # This is somewhat convoluted. I want the meaning of rel() and diff() and
    # ... to be preserved regardless of any early-exit expressions. I.e. this
    # sequence is broken:
    #
    # - if(!matches) { continue }
    # - print rel() # update internal state
    #
    # because the internal state will not be updated if(!matches). I thus do
    # this instead:
    #
    # - _rel = rel()
    # - if(!matches) { continue }
    # - print _rel
    #
    # This works. But to make it work, I need to pre-compute all the outermost
    # rel() and diff() expressions. Outermost because rel(rel(x)) should still
    # work properly. I thus do this:
    #
    # rel( rel(xxx) ) ------>
    # function rel1() {} function rel2() {}
    # __rel1 = rel1( rel2(xxx) ); ... __rel1
    #
    # I.e. each rel() gets a function defined with its own state. Only the
    # outermost one is cached. This is done so that I evaluate all the rel()
    # unconditionally, and then do conditional stuff (due to matches or
    # skipempty)

    my ($what, $expr, $isouter) = @_;

    # perl variables need a '$'; awk variables don't
    my $sigil = $options{perl} ? '$' : '';

    my $N          = \$specialops{$what}{N};
    my $outer_list = $specialops{$what}{outer};

    my $whatre = qr/\b$what\s*\(/p;
    while( $expr =~ /$whatre/p )
    {
        if( !$isouter )
        {
            # not an outer one. Simply replace the call with a specific,
            # numbered one
            $expr =~ s/$whatre/$what$$N(/;
        }
        else
        {
            # IS an outer one. Replace the call with a variable that gets
            # precomputed. Save the string for precomputation
            my $prematch = ${^PREMATCH};

            # the '(' was consumed by the match above, so re-prepend it
            my ($paren_expr, $rest) = extract_bracketed("(${^POSTMATCH}", '[({');
            if (!defined $paren_expr)
            {
                die "Giving up: Couldn't parse '$expr'";
            }

            $expr = $prematch . $sigil . "__$what$$N" . $rest;
            push @$outer_list, [$$N, $paren_expr];
        }
        $$N++;
    }

    return $expr;
}

sub get_reldiff_outer_expr
{
    # Generate the per-record precomputation code for all the OUTER
    # rel()/diff()/... calls: "__rel0 = rel0(...); ..." Inner calls inside the
    # saved argument strings still need numbering, which happens here.
    #
    # should be called AFTER all the outer rel/diff/... expressions were
    # encountered. I.e. after the last expr_subst_col_names()
    my $sigil = $options{perl} ? '$' : '';

    my @assignments;
    for my $op (@all_specialops)
    {
        for my $outer (@{$specialops{$op}{outer}})
        {
            my ($idx, $args) = @$outer;

            # Number the inner calls until a fixed point is reached. The loop
            # is needed because the calls can be deeply recursive
            my $before;
            do
            {
                $before = $args;
                foreach my $inner_op (@all_specialops)
                {
                    $args = subst_reldiff($inner_op, $args, 0);
                }
            } until ($args eq $before);

            push @assignments, $sigil . "__$op$idx = $op$idx" . $args . '; ';
        }
    }

    return join('', @assignments);
}

sub expr_subst_col_names
{
    # I take in a string with awk/perl code, and replace field references to
    # column references that the awk/perl program will understand. To minimize
    # the risk of ambiguous matches, I try to match longer strings first
    #
    # Arguments: ($language, $out, $dupindex). $language is 'awk' or 'perl';
    # $out is the expression text; $dupindex (may be undef) selects which
    # instance of a duplicated column name to reference.
    #
    # Returns ($out, $colidx_needed_max_here): the rewritten expression and
    # the highest 0-based input-column index it references (-1 if none)
    my ($language, $out, $dupindex) = @_;

    my $colidx_needed_max_here = -1;

    # NOTE(review): once ANY key matches, "$dupindex //= 0" below leaves it
    # defined for all later keys, so their uniqueness check is skipped.
    # Presumably benign since it then always picks instance 0 — confirm
    for my $key(reverse sort {length($a) <=> length($b)} keys %colindices_input)
    {
        # This looks odd. Mostly, $bound0 = $bound1 = '\b'. This would work to
        # Find "normal" alphanumeric keys in the string. But my keys may have
        # special characters in them. For instance, if I grab keys from the
        # 'top' command, it'll produce a legend including keys '%CPU', 'TIME+',
        # and the point before the '%' or after the '+' would not match \b. I
        # thus expand the regex at the boundary. I match EITHER the normal \b
        # meaning for a word-nonword transition OR a whitespace-nonword
        # transition. This means that whitespace becomes important: '1+%CPU'
        # will not be parsed as expected (but that's the RIGHT behavior here),
        # but '1+ %CPU' will be parsed correctly
        my $bound0 = qr/(?:(?<!\w)(?=\w)|(?:^|(?<=\s))(?!\w))/;
        my $bound1 = qr/(?:(?<=\w)(?!\w)|(?<!\w)(?:(?=\s)|$))/;
        my $re = qr/$bound0\Q$key\E$bound1/;

        next unless $out =~ /$re/;

        if (!defined $dupindex && 1 != @{$colindices_input{$key}})
        {
            die "Asked to operate on key '$key', but this isn't unique";
        }
        $dupindex //= 0;


        if ( $language eq 'perl' )
        {
            # perl references the 0-based @fields array
            my $found = $out =~ s/$re/\$fields[$colindices_input{$key}[$dupindex]]/g;

            if($found && $colindices_input{$key}[$dupindex] > $colidx_needed_max_here)
            {
                $colidx_needed_max_here = $colindices_input{$key}[$dupindex];
            }
        }
        elsif( $language eq 'awk' )
        {
            # column index that awk knows about
            my $colidx = $colindices_input{$key}[$dupindex] + 1;

            my $found = $out =~ s/$re/\$$colidx/g;

            if($found && $colindices_input{$key}[$dupindex] > $colidx_needed_max_here)
            {
                $colidx_needed_max_here = $colindices_input{$key}[$dupindex];
            }
        }
        else
        {
            # bug fix: this message previously lacked its closing quote
            die "Unknown language '$language'";
        }
    }

    # rewrite the outermost rel()/diff()/... calls into precomputed variables
    while(my $what = find_outer_specialop($out))
    {
        $out = subst_reldiff ($what, $out, 1);
    }
    return ($out, $colidx_needed_max_here);
}



# Perl path: build the code string to eval. User-supplied --function subs first
my $evalstr = join('', map { my ($sub) = expr_subst_col_names('perl', $_); "sub $sub\n"} @{$options{function}});

# ALL the match expressions must be true for a record to be selected
my $must_match_expr =
  join ' && ',
  map { my ($outexpr) = expr_subst_col_names( 'perl', $_); $outexpr; }
  @{$options{matches}};
$must_match_expr = 1 if !defined $must_match_expr || '' eq $must_match_expr;

$evalstr .= "sub matches { return $must_match_expr }\n";

if ( $options{eval} )
{
    my ($expr) = expr_subst_col_names( 'perl', $options{eval} );
    $evalstr .= "sub evalexpr { $expr }\n";
}

$evalstr .=
  'sub compute_output_fields { return [' . join(',', @langspecific_output_fields) . ']; }' . "\n";

# per-record precomputation of the outer rel()/diff()/... values
$evalstr .=
  'sub compute_reldiff { ' . get_reldiff_outer_expr() . '}' . "\n";

# I'm defining the rel()/diff()/... functions. These should be global, so if I
# do this inside a for(){}, the functions end up local to that for(). I thus
# have an ugly manual loop
#
# NOTE(review): these loops only prepend text to $evalstr (the subs are
# created later by a single eval), so the stated scoping concern may be stale
# — confirm before simplifying to plain for() loops
my $i = 0;
EVAL_REL_FUNC:
if( $i < $specialops{rel}{N} )
{
    $evalstr = "sub rel$i" . '{ my ($x) = @_; state $state=undef; if(!defined $state) { $state=$x; } return $x - $state; } ' . "\n" . $evalstr;
    $i++;
    goto EVAL_REL_FUNC;
}

$i = 0;
EVAL_DIFF_FUNC:
if( $i < $specialops{diff}{N} )
{
    $evalstr = "sub diff$i" . '{ my ($x) = @_; state $inited=0; state $state=0; my $retval = $inited ? ($x - $state) : undef; $state = $x; $inited=1; return $retval; } ' . $evalstr;
    $i++;
    goto EVAL_DIFF_FUNC;
}

$i = 0;
EVAL_SUM_FUNC:
if( $i < $specialops{sum}{N} )
{
    $evalstr = "sub sum$i" . '{ my ($x) = @_; state $state=0; $state += $x; return $state; } ' . $evalstr;
    $i++;
    goto EVAL_SUM_FUNC;
}

$i = 0;
EVAL_PREV_FUNC:
if( $i < $specialops{prev}{N} )
{
    $evalstr = "sub prev$i" . '{ my ($x) = @_; state $state=undef; my $prev = $state; $state = $x; return $prev; } ' . $evalstr;
    $i++;
    goto EVAL_PREV_FUNC;
}




if( $options{dumpexprs} )
{
    say "Expressions to evaluate:\n\n$evalstr";
    exit;
}

# the current record's fields; the generated code references these as
# $fields[i]
my @fields;

# the generated code references @fields and arbitrary user expressions, so it
# cannot be strict/warnings-clean; relax the checks just for the eval
no strict;
no warnings;
eval $evalstr;
if( $@ )
{
    die "Error evaluating expression '$evalstr':\n$@";
}
use strict;
use warnings;




# The stuff from here until the main while(<STDIN>) loop is all for context
# handling (-A,-B,-C)

# circular buffer containing previous entries. Used for -B. Only allocated
# ($NcontextBefore slots) if before-context was actually requested
my @contextbuffer;
@contextbuffer = (undef) x $NcontextBefore if $NcontextBefore;
my $i1_contextbuffer = 0; # the end; new entries written here
my $N_contextbuffer  = 0; # how many entries are currently buffered

# how many records to print unconditionally. Used for -A
my $N_printafter = 0;

# used for the group separator '##'
my $just_skipped_something = 0; # a non-matching record was dropped since the last match
my $printed_something_ever = 0; # suppresses a leading '##' before the first match

# Prints all buffered before-context lines (oldest first) and empties the
# buffer. No-op unless -B/-C was requested
sub contextbuffer_output_and_clear
{
    return unless $NcontextBefore;

    # the oldest buffered entry sits $N_contextbuffer slots behind the write
    # cursor, modulo the buffer size
    my $i = ($i1_contextbuffer - $N_contextbuffer + $NcontextBefore) % $NcontextBefore;

    for (1 .. $N_contextbuffer)
    {
        say $contextbuffer[$i];
        $i = ($i + 1) % $NcontextBefore;
    }
    $N_contextbuffer = 0;
}

# pushes to the buffer. Returns TRUE if I did NOT just overwrite an element of
# the buffer.
#
# NOTE(review): previously the return value did not actually implement this
# contract (it returned the pre-increment count: false on the very first
# push). The sole caller ignores the return, so making the code match the
# documented contract is backward-compatible
sub contextbuffer_push
{
    return unless $NcontextBefore;

    my ($line) = @_;

    $contextbuffer[$i1_contextbuffer++] = $line;
    $i1_contextbuffer = 0 if $i1_contextbuffer == $NcontextBefore;

    # if the buffer is already full, this write displaced the oldest entry
    my $overwrote = ($N_contextbuffer == $NcontextBefore);
    $N_contextbuffer++ unless $overwrote;
    return !$overwrote;
}


RECORD:
while(<STDIN>)
{
    # Data loop. Each statement here is analogous to the awk program generated
    # by makeAwkProgram();

    # Deal with comments. If printing, these do not count towards the context
    # stuff (-A/-B/-C). (The /p modifier preserves ${^MATCH} et al.; nothing
    # here reads them, so it's presumably vestigial — TODO confirm)
    if(/^\s*(?:#|$)/p)
    {
        unless($options{skipcomments})
        {
            print;
            flush STDOUT if $options{unbuffered};
        }
        next;
    }

    chomp;
    # vnlog uses '-' to denote a null value; represent those as undef
    @fields = map {q{-} eq $_ ? undef : $_ } split;

    # skip incomplete records. Can happen if a log line at the end of a file was
    # cut off in the middle. These are invalid lines, so I don't even bother to
    # handle -A/-B/-C
    next unless $colidx_needed_max <= $#fields;

    # skip records that have empty input columns that must be non-empty
    next if any {!defined $fields[$_]} @must_have_col_indices_input;

    # advance the per-record state of the rel()/diff()/sum()/prev() special
    # functions (all defined by the eval'ed program above)
    compute_reldiff();

    # skip all records that don't match given expressions
    if($options{eval})
    {
        # --eval mode: the user expression produces all the output; we print
        # nothing ourselves
        next unless matches();
        evalexpr();
        next;
    }

    # skip all records that don't match given expressions
    if(!matches())
    {
        # a non-matching record only matters when -A/-B/-C is active: it may
        # have to be printed as context around a nearby match
        next unless $any_context_stuff;

        my $fout = compute_output_fields();
        my $line = join(' ', map {$_ // '-'} @$fout);

        if ($N_printafter)
        {
            # still within N lines after the previous match (-A/-C)
            say $line;
            flush STDOUT if $options{unbuffered};
            $N_printafter--;
        }
        else
        {
            # save for possible before-context output (-B/-C). If the buffer
            # was already full we just lost a line, so the next match must be
            # preceded by a '##' group separator
            $just_skipped_something = 1 if $N_contextbuffer == $NcontextBefore;
            contextbuffer_push($line);
        }
        next;
    }

    my $fout = compute_output_fields();

    # skip empty records if we must
    next if $options{skipempty} && all {!defined $_} @$fout;
    my $line = join(' ', map {$_ // '-'} @$fout);

    if ($any_context_stuff)
    {
        # separator between discontiguous matched chunks, like grep's '--'
        say '##' if $just_skipped_something && $printed_something_ever;
        $just_skipped_something = 0;
        $printed_something_ever = 1;
        contextbuffer_output_and_clear();
        $N_printafter = $NcontextAfter;
    }
    say $line;
    flush STDOUT if $options{unbuffered};
}










__END__

=head1 NAME

vnl-filter - filters vnlogs to select particular rows, fields

=head1 SYNOPSIS

 $ cat run.vnl

 # time x   y   z   temperature
 3      1   2.3 4.8 30
 4      1.1 2.2 4.7 31
 6      1   2.0 4.0 35
 7      1   1.6 3.1 42


 $ <run.vnl vnl-filter -p x,y,z | vnl-align

 # x  y   z
 1   2.3 4.8
 1.1 2.2 4.7
 1   2.0 4.0
 1   1.6 3.1


 $ <run.vnl vnl-filter -p i=NR,time,'dist=sqrt(x*x + y*y + z*z)' | vnl-align

 # i time   dist
 1   3    5.41572
 2   4    5.30471
 3   6    4.58258
 4   7    3.62905


 $ <run.vnl vnl-filter 'temperature >= 35' | vnl-align

 # time x  y   z  temperature
 6      1 2.0 4.0 35
 7      1 1.6 3.1 42



 $ <run.vnl vnl-filter --eval '{s += temperature} END { print "mean temp: " s/NR}'

 mean temp: 34.5


 $ <run.vnl vnl-filter -p x,y | feedgnuplot --terminal 'dumb 80,30' --unset grid --domain --lines --exit

   2.3 +---------------------------------------------------------------------+
       |           +          +          ***************         +           |
       |                                                **************       |
       |                                                              *******|
   2.2 |-+                                                       ************|
       |                                                 ********            |
       |                                         ********                    |
   2.1 |-+                              *********                          +-|
       |                        ********                                     |
       |                ********                                             |
       |            ****                                                     |
     2 |-+         *                                                       +-|
       |           *                                                         |
       |           *                                                         |
       |           *                                                         |
   1.9 |-+         *                                                       +-|
       |           *                                                         |
       |           *                                                         |
       |           *                                                         |
   1.8 |-+         *                                                       +-|
       |           *                                                         |
       |           *                                                         |
   1.7 |-+         *                                                       +-|
       |           *                                                         |
       |           *                                                         |
       |           *          +           +           +          +           |
   1.6 +---------------------------------------------------------------------+
      0.98         1         1.02        1.04        1.06       1.08        1.1



=head1 DESCRIPTION

This tool is largely a frontend for awk to operate on vnlog files. Vnlog
is both an input and an output. This tool makes it very simple to select
specific rows and columns for output and to manipulate the data in various ways.

This is a UNIX-style tool, so the input/output of this tool is strictly
STDIN/STDOUT. Furthermore, in its usual form this tool is a filter, so the
format of the output is I<exactly> the same as the format of the input. The
exception to this is when using C<--eval>, in which the output is dependent on
whatever expression we're evaluating.

This tool is convenient to process both stored data or live data; in the latter
case, it's very useful to pipe the streaming output to C<feedgnuplot --stream>
to get a realtime visualization of the incoming data.

This tool reads enough of the input file to get a legend, at which point it
constructs an awk program to do the main work, and execs to awk (it's possible
to use perl as well, but this isn't as fast).

=head2 Input/output data format

The input/output data is vnlog: a plain-text table of values. Any lines
beginning with C<#> are treated as comments, and are passed through. The first
line that begins with C<#> but not C<##> or C<#!> is a I<legend> line. After the
C<#>, follow whitespace-separated field names. Each subsequent line is
whitespace-separated values matching this legend. For instance, this is a valid
vnlog file:

 #!/usr/bin/something
 ## more comments
 # x y z
 -0.016107 0.004362 0.005369
 -0.017449 0.006711 0.006711
 -0.018456 0.014093 0.006711
 -0.017449 0.018791 0.006376

C<vnl-filter> uses this format for both the input and the output. The
comments are preserved, but the legend is updated to reflect the fields in the
output file.

A string C<-> is used to indicate an undefined value, so this is also a valid
vnlog file:

 # x y z
 1 2 3
 4 - 6
 - - 7

=head2 Filtering

To select specific I<columns>, pass their names to the C<-p> option (short for
C<--print> or C<--pick>, which are synonyms). In its simplest form, to grab only
columns C<x> and C<y>, do

 vnl-filter -p x,y

See the detailed description of C<-p> below for more detail.

To select specific I<rows>, we use I<matches> expressions. Anything on the
C<vnl-filter> commandline and not attached to any C<--xxx> option is such
an expression. For instance

 vnl-filter 'size > 10'

would select only those rows whose C<size> column contains a value E<gt> 10. See
the detailed description of matches expressions below for more detail.

=head2 Context lines

C<vnl-filter> supports the context output options (C<-A>, C<-B> and C<-C>)
exactly like the C<grep> tool. I.e to print out all rows whose C<size> column
contains a value E<gt> 10 I<but also> include the 3 rows immediately before
I<and> after such matching rows, do this:

 vnl-filter -C3 'size > 10'

C<-B> reports the rows I<before> matching ones and C<-A> the rows I<after>
matching ones. C<-C> reports both. Note that this applies I<only> to I<matches>
expressions: records skipped because they fail C<--has> or C<--skipempty> are
I<not> included in contextual output.

=head2 Backend choice

By default, the parsing of arguments and the legend happens in perl, which then
constructs a simple awk script, and invokes C<mawk> to actually read the data
and to process it. This is done because awk is lighter weight and runs faster,
which is important because our data sets could be quite large. We default to
C<mawk> specifically, since this is a simpler implementation than C<gawk>, and
runs much faster. If for whatever reason we want to do everything with perl,
this can be requested with the C<--perl> option.

=head2 Special functions

For convenience we support several special functions in any expression passed on
to awk or perl (named expressions, matches expressions, C<--eval> strings).
These generally maintain some internal state, and vnl-filter makes sure that
this state is consistent. Note that these are evaluated I<after>
C<--skipcomments> and C<--has>. So any record skipped because of a C<--has>
expression, for instance, will I<not> be considered in C<prev()>, C<diff()> and
so on.

=over

=item *

C<rel(x)> returns value of C<x> relative to the first value of C<x>. For
instance we might want to see the time or position relative to the start, not
relative to some absolute beginning. Example:

 $ cat tst.vnl

 # time x
 100    200
 101    212
 102    209


 $ <tst.vnl vnl-filter -p 't=rel(time),x=rel(x)'

 # t x
 0 0
 1 12
 2 9

=item *

C<diff(x)> returns the difference between the current value of C<x> and the
previous value of C<x>. The first row will always be -. Example:

 $ <tst.vnl vnl-filter -p x,'d1=diff(x),d2=diff(diff(x))' | vnl-align

 # x d1 d2
   1  -  -
   8  7  7
  27 19 12
  64 37 18
 125 61 24

=item *

C<sum(x)> returns the cumulative sum of C<x>. As C<diff(x)> can be thought of as
a derivative, C<sum(x)> can be thought of as an integral. So C<diff(sum(x))>
would return the same value as C<x> (except for the first row; C<diff()> always
returns - for the first row).

Example:

 $ <tst.vnl vnl-filter -p 'x,s=sum(x),ds=diff(sum(x))' | vnl-align

 # x  s   ds
   1   1   -
   8   9   8
  27  36  27
  64 100  64
 125 225 125

=item *

C<prev(x)> returns the previous value of C<x>. One could construct C<sum()> and
C<rel()> using this, if they weren't already available.

=back

=head1 ARGUMENTS

=head2 -p|--print|--pick expr

These options provide the mechanism to select specific columns for output. For
instance to pull out columns called C<lat>, C<lon>, and any column whose name
contains the string C<feature_>, do

 vnl-filter -p lat,lon,'feature_.*'

or, equivalently

 vnl-filter --print lat --print lon --print 'feature_.*'

We look for exact column name matches first, and if none are found, we try a
regex. If there was no column called exactly C<feature_>, then the above would
be equivalent to

 vnl-filter -p lat,lon,feature_

This mechanism is much more powerful than just selecting columns. First off, we
can rename chosen fields:

 vnl-filter -p w=feature_width

would pick the C<feature_width> field, but the resulting column in the output
would be named C<w>. When renaming a column in this way regexen are I<not>
supported, and exact field names must be given. But the string to the right of
the C<=> is passed on directly to awk (after replacing field names with column
indices), so any awk expression can be used here. For instance to compute the
length of a vector in separate columns C<x>, C<y>, and C<z> you can do:

 vnl-filter -p 'l=sqrt(x*x + y*y + z*z)'

A single column called C<l> would be produced.

We can also I<exclude> columns by preceding their name with C<!>. This works like
you expect. Rules:

=over

=item *

The pick/exclude directives are processed in order given to produce the output
picked-column list

=item *

If the first C<-p> item is an exclusion, we implicitly pick I<all> the columns
prior to processing the C<-p>.

=item *

The exclusion expressions match the I<output> column names, not the I<input>
names.

=item *

We match the exact column names first. If that fails, we match as a regex

=back

Example. To grab all the columns I<except> the temperature(s) do this:

 vnl-filter -p !temperature

To grab all the columns that describe I<something> about a robot (columns whose
names have the string C<robot_> in them), but I<not> its temperature (i.e.
I<not> "robot_temperature"), do this:

 vnl-filter -p robot_,!temperature

=head2 --has a,b,c,...

Used to select records (rows) that have a non-empty value in a particular field
(column). A I<null> value in a column is designated with a single C<->. If we
want to select only records that have a value in the C<x> column, we pass
C<--has x>. To select records that have data for I<all> of a given set of
columns, the C<--has> option can be repeated, or these multiple columns can be
given in a whitespace-less comma-separated list. For instance if we want only
records that have data in I<both> columns C<x> I<and> C<y> we can pass in
C<--has x,y> or C<--has x --has y>. If we want to combine multiple columns in an
I<or> (select rows that have data in I<any> of a given set of columns), use a
matches expression, as documented below.

If we want to select a column I<and> pick only rows that have a value in this
column, a shorthand syntax exists:

 vnl-filter --has col -p col

is equivalent to

 vnl-filter -p +col

Note that just like the column specifications in C<-p> the columns given to
C<--has> must match exactly I<or> as a regex. In either case, a unique matching
column must be found.

=head2 Matches expressions

Anything on the commandline not attached to any C<--xxx> option is a I<matches>
expression. These are used to select particular records (rows) in a data file.
For each row, we evaluate all the expressions. If I<all> the expressions
evaluate to true, that row is output. This expression is passed directly to the
awk (or perl) backend.

Example: to select all rows that have valid data in column C<a> I<or> column
C<b> I<or> column C<c> you can

 vnl-filter 'a != "-" || b != "-" || c != "-"'

or

 vnl-filter --perl 'defined a || defined b || defined c'

As with the named expressions given to C<-p> (described above), these are passed
directly to awk, so anything that can be done with awk is supported here.

=head2 -A N|--after-context N

Output N lines following each I<matches> expression, even those lines that do
not themselves match. This works just like the C<grep> options of the same name.
See L<Context lines>

=head2 -B N|--before-context N

Output N lines preceding each I<matches> expression, even those lines that do
not themselves match. This works just like the C<grep> options of the same name.
See L<Context lines>

=head2 -C N|--context N

Output N lines preceding and following each I<matches> expression, even those
lines that do not themselves match. This works just like the C<grep> options of
the same name. See L<Context lines>

=head2 --eval expr

Instead of printing out all matching records and picked columns, just run the
given chunk of awk (or perl). In this mode of operation, C<vnl-filter> acts
just like a glorified awk, that allows fields to be accessed by name instead of
by number, as it would be in raw awk.

Since the expression may print I<anything> or nothing at all, the output in this
mode is not necessarily itself a valid vnlog stream. And no column-selecting
arguments should be given, since they make no sense in this mode.

In awk the expr is a full set of pattern/action statements. So to print the sum
of columns C<a> and C<b> in each row, and at the end, print the sum of all
values in the C<a> column

 vnl-filter --eval '{print a+b; suma += a} END {print suma}'

In perl the arbitrary expression fits in like this:

 while(<>) # read each line
 {
   next unless matches; # skip non-matching lines
   eval expression;     # evaluate the arbitrary expression
 }

=head2 --function|--sub

Evaluates the given expression as a function that can be used in other
expressions. This is most useful when you want to print something that can't
trivially be written as a simple expression. For instance:

 $ cat tst.vnl
 # s
 1-2
 3-4
 5-6

 $ < tst.vnl \
   vnl-filter --function 'before(x) { sub("-.*","",x); return x }' \
              --function 'after(x)  { sub(".*-","",x); return x }' \
              -p 'b=before(s),a=after(s)'
 # b a
 1 2
 3 4
 5 6

See the L<CAVEATS> section below if you're doing something
sufficiently-complicated where you need this.

=head2 --[no]skipempty

Do [not] skip records where all fields are blank. By default we I<do> skip all
empty records; to include them, pass C<--noskipempty>

=head2 --skipcomments

Don't output non-legend comments

=head2 --perl

By default all processing is performed by C<mawk>, but if for whatever reason we
want perl instead, pass C<--perl>. Both modes work, but C<mawk> is noticeably
faster. C<--perl> could be useful because it is more powerful, which could be
important since a number of things pass commandline strings directly to the
underlying language (named expressions, matches expressions, C<--eval> strings).
Note that while variables in perl use sigils, column references should I<not>
use sigils. To print the sum of all values in column C<a> you'd do this in awk

 vnl-filter --eval '{suma += a} END {print suma}'

and this in perl

 vnl-filter --perl --eval '{$suma += a} END {say $suma}'

The perl strings are evaluated without C<use strict> or C<use warnings> so I
didn't have to declare C<$suma> in the example.

=head2 --dumpexprs

Used for debugging. This spits out all the final awk (or perl) program we run
for the given commandline options and given input. This is the final program,
with the column references resolved to numeric indices, so one can figure out
what went wrong.

=head2 --unbuffered

Flushes each line after each print. This makes sure each line is output as soon
as it is available, which is crucial for realtime output and streaming plots.

=head2 --stream

Synonym for C<--unbuffered>

=head1 CAVEATS

This tool is very lax in its input validation (on purpose). As a result, columns
with names like C<%CPU> and C<TIME+> do work (i.e. you can more or less feed in
output from C<top -b>). The downside is that shooting yourself in the foot is
possible. This tradeoff is currently set to work well for my use cases, but I'd
be interested in hearing other people's experiences. Potential
pitfalls/unexpected behaviors:

=over

=item *

When substituting column names I match I<either> a word-nonword transition
(C<\b>) I<or> a whitespace-nonword transition. The word boundaries is what would
be used 99% of the time. But the keys may have special characters in them, which
don't work with C<\b>. This means that whitespace becomes important: C<1+%CPU>
will not be parsed as expected, which is correct since C<+%CPU> is also a valid
field name. But C<1+ %CPU> will be parsed correctly, so if you have weird field
names, put the whitespace into your expressions. It'll make them more readable
anyway.

=item *

Strings passed to C<-p> are split on C<,> I<except> if the C<,> is inside
balanced C<()>. This makes it possible to say things like C<vnl-filter --function
'f(a,b) { ... }' -p 'c=f(a,b)'>. This is probably the right behavior, although
some questionable looking field names become potentially impossible: C<f(a> and
C<b)> I<could> otherwise be legal field names, but you're probably asking for
trouble if you do that.

=item *

All column names are replaced in all eval strings without regard to context. The
earlier example that reports the sum of values in a column: C<vnl-filter --eval
'{suma += a} END {print suma}'> will work fine if we I<do> have a column named
C<a> and do I<not> have a column named C<suma>. But will not do the right thing
if any of those are violated. It's the user's responsibility to make sure we're
talking about the right columns. The focus here was one-liners so hopefully
nobody has so many columns, they can't keep track of all of them in their head.
I don't see any way to resolve this without seriously impacting the scope of the
tool, so I'm leaving this alone. Comments welcome.

=item *

Currently there're two modes: a pick/print mode and an C<--eval> mode. Then
there's also C<--function>, which adds bits of C<--eval> to the pick/print mode,
but it feels maybe insufficient. I don't yet have strong feelings about what
this should become. Comments welcome

=back

=head1 REPOSITORY

https://github.com/dkogan/vnlog/

=head1 AUTHOR

Dima Kogan C<< <dima@secretsauce.net> >>

=head1 LICENSE AND COPYRIGHT

Copyright 2016-2017 California Institute of Technology

Copyright 2017-2019 Dima Kogan C<< <dima@secretsauce.net> >>


This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation; either version 2.1 of the License, or (at your option) any
later version.

=cut
