Add new URL Regex Filter.
shadowmoose committed Jan 18, 2018
1 parent 9e31862 commit 9b23537
Showing 8 changed files with 68 additions and 18 deletions.
3 changes: 2 additions & 1 deletion .travis.yml
@@ -10,7 +10,8 @@ install:
   - pip install coveralls
 # command to run tests
 script:
-  - coverage run main.py --test --update --skip_pauses --username=$username --password=$password --c_id=$client_id --c_secret=$client_secret --agent="TestUpdaterAgent"
+  - coverage run main.py --test --update_only --skip_pauses --username=$username --password=$password --c_id=$client_id --c_secret=$client_secret --agent="TestUpdaterAgent"
+  - coverage run main.py --test --skip_pauses --username=$username --password=$password --c_id=$client_id --c_secret=$client_secret --agent="TestUpdaterAgent"
 after_script:
   - COVERALLS_PARALLEL=true coveralls
 notifications:
4 changes: 3 additions & 1 deletion classes/filters/filter.py
@@ -31,6 +31,7 @@ def __init__(self, field, description):
         self.operator = None
         self._limit = None
         self.description = description
+        self.accepts_operator = True
 
 
     def set_operator(self, op):
@@ -76,6 +77,7 @@ def check(self, obj):
             if regexp.search( str(val)):
                 return True
             return False
+        print("Invalid comparator for Filter!")
         assert False # This should never happen.
 
 
@@ -219,7 +221,7 @@ def get_filter_fields():
         'author': 'The author of this element. (Text)',
         'body': 'The text in this element. Blank if this post is a submission without selftext. (Text)',
         'subreddit': 'The subreddit this element is from. (Text)',
-        'over_18': 'If this post is age-limited. (True/False)',
+        'over_18': 'If this post is age-limited, AKA "NSFW". (True/False)',
         'created_utc':'The timestamp, in UTC seconds, that this element was posted. (#)',
         'num_comments': 'The number of comments on this post. (#)',
         'score': 'The number of net upvotes on this post. (#)',
17 changes: 17 additions & 0 deletions classes/filters/url_match_filter.py
@@ -0,0 +1,17 @@
+from filters import filter
+import re
+
+class URLFilter(filter.Filter):
+    def __init__(self):
+        super().__init__(field='url_pattern', description='Individual URLs in each Post, matching a regex pattern.')
+        self.operator = filter.Operators.MATCH
+        self.accepts_operator = False
+
+
+    def check(self, obj):
+        regexp = re.compile(str(self.get_limit()), re.IGNORECASE)
+        urls = obj.get_urls()
+        for ur in urls:
+            if not regexp.search( str(ur).lower()):
+                obj.remove_url(ur)
+        return len(obj.get_urls()) > 0
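To make the behavior concrete, here is a minimal sketch (not part of the commit) of what `check` does to a post. `FakePost` is a hypothetical stand-in for `RedditElement`, and the `imgur\.com` pattern is only an example limit value:

```python
import re

class FakePost:
    """ Hypothetical stand-in for RedditElement, for demonstration only. """
    def __init__(self, urls):
        self._urls = list(urls)

    def get_urls(self):
        return self._urls[:]  # A copy, so removing while iterating is safe.

    def remove_url(self, url):
        self._urls.remove(url)

post = FakePost(['https://imgur.com/abc123', 'https://example.com/cat.gif'])
pattern = re.compile(r'imgur\.com', re.IGNORECASE)  # Example "limit" value.
for url in post.get_urls():
    if not pattern.search(url):
        post.remove_url(url)
print(post.get_urls())  # ['https://imgur.com/abc123'] -- only matches survive.
```

The filter keeps a post only if at least one URL survives the pattern, which is what the `len(obj.get_urls()) > 0` return value above expresses.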
28 changes: 19 additions & 9 deletions classes/redditelement.py
@@ -1,5 +1,6 @@
 import stringutil
 import praw.models
+import copy
 
 class RedditElement(object):
     """
@@ -94,24 +95,34 @@ def add_url(self, url):
         self._urls.append(url)
 
 
+    def remove_url(self, url):
+        if url in self._urls:
+            self._urls.remove(url)
+        else:
+            print("Cannot remove:", url)
+        if url in self._file_map:
+            del self._file_map[url]
+
+
     def set_source(self, source_obj):
         """ Sets this Element's source alias by pulling it directly from the object. """
         self.source_alias = str(source_obj.get_alias())
 
 
     def get_id(self):
-        """get this element's ID. """
+        """ Get this element's ID. """
         return self.id
 
 
     def get_urls(self):
-        """ returns a list of all this element's UNIQUE urls. """
-        return self._urls
+        """ Returns a list of all this element's UNIQUE urls. """
+        return self._urls[:]
 
 
     def get_completed_files(self):
-        """ Returns the [url]=[files] array build for the completed URLs of this element. """
-        return self._file_map
+        """ Returns a deep copy of the [url]=[files] dict built for the completed URLs of this element.
+            Can be a bit expensive to call. """
+        return copy.deepcopy(self._file_map)
 
 
     def get_json_url(self):
@@ -121,13 +132,12 @@ def get_json_url(self):
 
     def contains_url(self, url):
         """ if this element contains the given URL. """
-        return url in self.get_completed_files()
+        return url in self._file_map
 
 
     def contains_file(self, file_name):
         """ if this element contains the given file name. """
-        files = self.get_completed_files()
-        return any(file_name in str(files[key]) for key in files)
+        return any(file_name in str(self._file_map[key]) for key in self._file_map)
 
 
     def remap_file(self, filename_old, filename_new):
@@ -146,7 +156,7 @@ def to_obj(self):
             'id': self.id,
             'title': self.title,
             'author': self.author,
-            'urls': self._urls,
+            'urls': self.get_urls(),
             'source_alias': self.source_alias,
         }
         return ob
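Worth noting: `get_urls()` now returns a shallow copy and `get_completed_files()` a deep copy, so callers can no longer mutate an element's internal state by accident. The copy from `get_urls()` is also what makes the new `URLFilter.check` safe, since it removes URLs while looping over the list it was handed. A quick sketch of the pitfall the copy avoids:

```python
# Mutating a list while iterating over the SAME list skips elements:
urls = ['a', 'b', 'c']
for u in urls:
    urls.remove(u)
print(urls)  # ['b'] -- 'b' was never visited.

# Iterating over a copy (what get_urls() now returns) behaves correctly:
urls = ['a', 'b', 'c']
for u in urls[:]:
    urls.remove(u)
print(urls)  # []
```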
13 changes: 7 additions & 6 deletions classes/wizards/source_wizard.py
@@ -4,7 +4,7 @@
 
 import console
 import stringutil as su
-import wizard_functions
+import wizards.wizard_functions as wizard_functions
 
 class SourceEditor:
     def __init__(self, source, settings):
@@ -88,11 +88,12 @@ def _add_filter(self):
         if new_filter is None:
             print('Not adding Filter.')
             return
-        comp = console.prompt_list(
-            'How should we compare this field to the value you set?',
-            [(fv.value.replace('.', ''), fv) for fv in filter.Operators]
-        )
-        new_filter.set_operator(comp)
+        if new_filter.accepts_operator:
+            comp = console.prompt_list(
+                'How should we compare this field to the value you set?',
+                [(fv.value.replace('.', ''), fv) for fv in filter.Operators]
+            )
+            new_filter.set_operator(comp)
         limit = console.string('Value to compare to', auto_strip=False)
         if limit is None:
             print('Aborted filter setup.')
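The effect: filters that fix their own operator, like the new `URLFilter` (which hardcodes `Operators.MATCH` and sets `accepts_operator = False`), skip the comparator prompt and only ask for a value. A condensed sketch of that branch, with `ask_operator` and `ask_value` as hypothetical prompt helpers:

```python
def configure(new_filter, ask_operator, ask_value):
    # Only prompt for a comparator when the filter accepts one;
    # fixed-operator filters already set theirs in __init__.
    if new_filter.accepts_operator:
        new_filter.set_operator(ask_operator())
    # Every filter still needs a limit value to compare against.
    return ask_value()
```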
7 changes: 7 additions & 0 deletions docs/release notes/Release 1.52.md
@@ -0,0 +1,7 @@
+# Happy New Year, Everyone!
+
+While I was away, a few of you found a couple of bugs that I deemed important enough to push a small release for.
+
+I'm back now, and a few new features are in the works. In the meantime, this update will make sure the client can fully launch itself from scratch, as well as download posts with names that normally exceed the Windows filename limit.
+
+Special thanks to GreysenEvans & will76 for reporting and helping fix these issues.
12 changes: 12 additions & 0 deletions docs/release notes/Release 1.53.md
@@ -0,0 +1,12 @@
+# Release 1.53:
+
+This version patches a few small bits of code and implements a new Filter option.
+
+Want to limit the sites you download from? Now, with pattern support, you can do just that!
+
+Thanks to *will76* for the suggestion.
+
+### Change Log:
+
++ Now supports filtering out any URLs that don't match a given pattern.
++ Updating the program and running without a restart will now always properly reload the main file.
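As implemented in `classes/filters/url_match_filter.py` above, the pattern is a regular expression tested case-insensitively against each of a post's URLs: non-matching URLs are dropped, and posts left with no URLs are skipped entirely. A few illustrative patterns (the URLs and patterns here are examples only):

```python
import re

def keeps(pattern, url):
    # Mirrors the filter's test: a case-insensitive regex search.
    return bool(re.search(pattern, url, re.IGNORECASE))

print(keeps(r'imgur\.com', 'https://imgur.com/abc'))    # True
print(keeps(r'\.gif$', 'https://example.com/cat.gif'))  # True
print(keeps(r'\.gif$', 'https://imgur.com/abc'))        # False
```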
2 changes: 1 addition & 1 deletion docs/site/User_Guide.md
@@ -101,5 +101,5 @@ There are a few things to take note of, if you're interested in the nitty-gritty
 + "Unlimited" generally isn't a thing. Reddit limits most resources to the first 1000 results.
 + RMD will likely be limited to the first 1000 results for any given Source.
 + Due to Reddit API limitations, signing in to RMD is absolutely required.
-+ For Filter comparisons, numeric comparison is only done if both values can be converted from Strings.
++ For Filter comparisons, numeric comparison (for "min"/"max") is only done if both values can be converted to numbers.
 + I'm always open to bug reports or feature requests, so hit me up!
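A minimal sketch of the numeric-comparison rule above, assuming a fallback to plain text comparison when either value fails to parse (that fallback is an assumption, not shown in this commit); `compare_min` is a hypothetical helper, not RMD's actual code:

```python
def compare_min(field_value, limit):
    """ Compare numerically only when BOTH values parse as numbers. """
    try:
        return float(field_value) >= float(limit)
    except (TypeError, ValueError):
        # Assumed fallback: compare as plain text instead.
        return str(field_value) >= str(limit)

print(compare_min('100', '25'))   # True: both parse, so 100 >= 25 numerically.
print(compare_min('100', 'n/a'))  # Falls back: '100' >= 'n/a' compares as text.
```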
