diff --git a/docs/conf.py b/docs/conf.py
index 4f125865..55462ac6 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -336,10 +336,12 @@
intersphinx_mapping = {
'flask': ('https://flask.palletsprojects.com/en/latest', None),
'flask_caching': ('https://flask-caching.readthedocs.io/en/latest', None),
- 'oauth_dropins': ('https://oauth-dropins.readthedocs.io/en/latest', None),
+ 'oauth_dropins': ('https://oauth-dropins.readthedocs.io/en/stable', None),
+ 'praw': ('https://praw.readthedocs.io/en/stable', None),
'python': ('https://docs.python.org/3/', None),
- 'requests': ('https://requests.readthedocs.io/en/stable/', None),
- 'urllib3': ('https://urllib3.readthedocs.io/en/latest', None),
- 'webob': ('https://webob.readthedocs.io/en/latest', None),
- 'werkzeug': ('https://werkzeug.palletsprojects.com/en/latest/', None),
+ 'requests': ('https://requests.readthedocs.io/en/stable', None),
+ 'urllib3': ('https://urllib3.readthedocs.io/en/stable', None),
+ 'webob': ('https://webob.readthedocs.io/en/stable', None),
+ 'websockets': ('https://websockets.readthedocs.io/en/stable', None),
+ 'werkzeug': ('https://werkzeug.palletsprojects.com/en/latest', None),
}
diff --git a/docs/source/granary.rst b/docs/source/granary.rst
index 6583a81a..47cd927a 100644
--- a/docs/source/granary.rst
+++ b/docs/source/granary.rst
@@ -8,72 +8,89 @@ Reference documentation.
as1
---
.. automodule:: granary.as1
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
as2
---
.. automodule:: granary.as2
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
atom
----
.. automodule:: granary.atom
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
bluesky
-------
.. automodule:: granary.bluesky
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
facebook
--------
.. automodule:: granary.facebook
- :exclude-members: __getnewargs__, __getstate__, __new__, __repr__
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
flickr
------
.. automodule:: granary.flickr
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
github
------
.. automodule:: granary.github
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
instagram
---------
.. automodule:: granary.instagram
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
jsonfeed
--------
.. automodule:: granary.jsonfeed
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
mastodon
--------
.. automodule:: granary.mastodon
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
meetup
------
.. automodule:: granary.meetup
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
microformats2
-------------
.. automodule:: granary.microformats2
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
nostr
-----
.. automodule:: granary.nostr
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
pixelfed
--------
.. automodule:: granary.pixelfed
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
reddit
------
.. automodule:: granary.reddit
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
rss
---
.. automodule:: granary.rss
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
source
------
.. automodule:: granary.source
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
twitter
-------
.. automodule:: granary.twitter
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
diff --git a/granary/as1.py b/granary/as1.py
index f48402dd..c2da7add 100644
--- a/granary/as1.py
+++ b/granary/as1.py
@@ -76,7 +76,7 @@ def get_object(obj, field='object'):
field (str)
Returns:
- dict
+ dict:
"""
if not obj:
return {}
@@ -95,7 +95,7 @@ def get_objects(obj, field='object'):
field (str)
Returns:
- sequence of dict
+ sequence of dict:
"""
if not obj:
return []
@@ -117,7 +117,7 @@ def get_owner(obj):
obj (dict): decoded JSON ActivityStreams object
Returns:
- str
+ str:
"""
if not obj:
return None
@@ -188,11 +188,11 @@ def is_public(obj):
http://activitystrea.ms/specs/json/targeting/1.0/
Expects values generated by this library: ``objectType`` ``group``, ``alias``
- @public``, ``@unlisted``, or ``@private``.
+ ``@public``, ``@unlisted``, or ``@private``.
Also, important point: this defaults to True, ie public. Bridgy depends on
that and prunes the to field from stored activities in Response objects (in
- ``bridgy/util.prune_activity()``). If the default here ever changes, be sure to
+ ``bridgy/util.prune_activity``). If the default here ever changes, be sure to
update Bridgy's code.
"""
to = obj.get('to') or get_object(obj).get('to') or []
@@ -209,7 +209,7 @@ def add_rsvps_to_event(event, rsvps):
Args:
event (dict): ActivityStreams event object
- rsvps (sequence dict): ActivityStreams RSVP activity objects
+ rsvps (sequence of dict): ActivityStreams RSVP activity objects
"""
for rsvp in rsvps:
field = RSVP_VERB_TO_COLLECTION.get(rsvp.get('verb'))
@@ -264,7 +264,7 @@ def activity_changed(before, after, log=False):
"""Returns whether two activities or objects differ meaningfully.
Only compares a few fields: ``objectType``, ``verb``, ``content``,
- ``location``, and ``image`. Notably does *not* compare ``author``,
+ ``location``, and ``image``. Notably does *not* compare ``author``,
``published``, or ``updated``.
Args:
@@ -272,7 +272,7 @@ def activity_changed(before, after, log=False):
after: dict, ActivityStreams activity or object
Returns:
- bool
+ bool:
"""
def changed(b, a, field, label, ignore=None):
b_val = b.get(field)
@@ -306,11 +306,11 @@ def changed(b, a, field, label, ignore=None):
def append_in_reply_to(before, after):
- """Appends before's ``inReplyTo``s to after, in place.
+ """Appends before's ``inReplyTo`` to ``after``, in place.
Args:
- before: dict, ActivityStreams activity or object
- after: dict, ActivityStreams activity or object
+ before (dict): ActivityStreams activity or object
+ after (dict): ActivityStreams activity or object
"""
obj_b = get_object(before) or before
obj_a = get_object(after) or after
@@ -358,7 +358,7 @@ def original_post_discovery(
kwargs: passed to :func:`requests.head` when following redirects
Returns:
- (list of str, list of str): original post URLs, mentions tuple
+ (list of str, list of str) tuple: (original post URLs, mentions)
"""
obj = get_object(activity) or activity
content = obj.get('content', '').strip()
diff --git a/granary/atom.py b/granary/atom.py
index 03af3568..34d6500e 100644
--- a/granary/atom.py
+++ b/granary/atom.py
@@ -52,7 +52,7 @@ def _text(elem, field=None):
field (str)
Returns:
- str or None
+ str or None:
"""
if field:
if ':' not in field:
@@ -78,7 +78,7 @@ def _as1_value(elem, field):
field (str)
Returns:
- str or None
+ str or None:
"""
type = _text(elem, f'activity:{field}')
if type:
diff --git a/granary/bluesky.py b/granary/bluesky.py
index 1ff5386a..1eea3f31 100644
--- a/granary/bluesky.py
+++ b/granary/bluesky.py
@@ -71,7 +71,7 @@ def url_to_did_web(url):
url (str)
Returns:
- str
+ str:
"""
parsed = urllib.parse.urlparse(url)
if not parsed.hostname:
@@ -102,7 +102,7 @@ def did_web_to_url(did):
did (str)
Returns:
- str
+ str:
"""
if not did or not DID_WEB_PATTERN.match(did):
raise ValueError(f'Invalid did:web: {did}')
diff --git a/granary/facebook.py b/granary/facebook.py
index 61c6f9b7..e928934d 100644
--- a/granary/facebook.py
+++ b/granary/facebook.py
@@ -533,7 +533,7 @@ def get_share(self, activity_user_id, activity_id, share_id, activity=None):
activity (dict): activity object, optional
Returns:
- dict
+ dict:
"""
orig_id = f'{activity_user_id}_{activity_id}'
@@ -581,7 +581,7 @@ def get_reaction(self, activity_user_id, activity_id, reaction_user_id,
activity (dict): activity object (optional)
Returns:
- dict
+ dict:
"""
if '_' not in reaction_id: # handle just name of reaction type
reaction_id = f'{activity_id}_{reaction_id}_by_{reaction_user_id}'
@@ -920,7 +920,7 @@ def base_id(cls, url):
"""Guesses the id of the object in the given URL.
Returns:
- str or None
+ str or None:
"""
params = urllib.parse.parse_qs(urllib.parse.urlparse(url).query)
event_id = params.get('event_time_id')
@@ -2195,7 +2195,7 @@ def _scraped_content(cls, tag):
tag (bs4.Tag)
Returns:
- str
+ str:
"""
# TODO: distinguish between text elements with actual whitespace
# before/after and without. this adds space to all of them, including
@@ -2602,7 +2602,7 @@ def urlopen_batch_full(self, requests):
]
Returns:
- (sequence of dict): responses in Facebook's batch format, except that body
+ sequence of dict: responses in Facebook's batch format, except that body
is JSON-decoded if possible, and headers is a single dict, not a list of
dicts, e.g.::
diff --git a/granary/flickr.py b/granary/flickr.py
index 9bc135a2..b3b153b5 100644
--- a/granary/flickr.py
+++ b/granary/flickr.py
@@ -366,7 +366,7 @@ def preview_delete(self, id):
id (int or str): photo id to delete
Returns:
- ``CreationResult``
+ CreationResult:
"""
return source.creation_result(
description=f'delete this photo.')
@@ -732,7 +732,7 @@ def user_id(self):
https://www.flickr.com/services/api/flickr.people.getLimits.html
Returns:
- str
+ str:
"""
if not self._user_id:
resp = self.call_api_method('flickr.people.getLimits')
@@ -748,7 +748,7 @@ def path_alias(self):
https://www.flickr.com/services/api/flickr.people.getInfo.html
Returns:
- str
+ str:
"""
if not self._path_alias:
resp = self.call_api_method('flickr.people.getInfo', {
@@ -764,7 +764,7 @@ def user_url(self, user_id):
user_id (str): user's alphanumeric ``nsid`` or path alias
Returns:
- str, a profile URL
+ str: a profile URL
"""
return user_id and f'https://www.flickr.com/people/{user_id}/'
diff --git a/granary/github.py b/granary/github.py
index 1fefdced..8d7c1297 100644
--- a/granary/github.py
+++ b/granary/github.py
@@ -266,7 +266,7 @@ def base_id(cls, url):
url (str):
Returns:
- str or None
+ str or None:
"""
parts = urllib.parse.urlparse(url).path.strip('/').split('/')
if len(parts) == 4 and util.is_int(parts[3]):
@@ -752,7 +752,7 @@ def existing_labels(self, owner, repo):
repo (str)
Returns:
- set of str
+ set of str:
"""
resp = self.graphql(GRAPHQL_REPO_LABELS, locals())
diff --git a/granary/microformats2.py b/granary/microformats2.py
index cbfe7709..9e955865 100644
--- a/granary/microformats2.py
+++ b/granary/microformats2.py
@@ -1212,7 +1212,7 @@ def tags_to_html(tags, classname, visible=True):
visible (bool): whether to visibly include ``displayName``
Returns:
- str
+ str:
"""
urls = {} # stores (url, displayName) tuples
for tag in tags:
@@ -1275,7 +1275,7 @@ def img(src, alt=''):
alt (str): ``alt`` attribute value, or None
Returns:
- str
+ str:
"""
if isinstance(src, dict):
assert not alt
@@ -1292,7 +1292,7 @@ def vid(src, poster=''):
poster (str): optional URL of the poster or preview image
Returns:
- str
+ str:
"""
poster_img = f'' if poster else ''
@@ -1308,7 +1308,7 @@ def aud(src):
src (str): URL of the audio
Returns:
- str
+ str:
"""
return f''
@@ -1323,7 +1323,7 @@ def maybe_linked(text, url=None, linked_classname=None, unlinked_classname=None)
unlinked_classname (str): optional ``class`` attribute to use if not ``url``
Returns:
- str
+ str:
"""
if url:
classname = f' class="{linked_classname}"' if linked_classname else ''
@@ -1341,7 +1341,7 @@ def maybe_datetime(dt, classname):
classname (str): class name
Returns:
- str
+ str:
"""
if dt:
return f''
diff --git a/granary/nostr.py b/granary/nostr.py
index 26960f8c..6a5e0b2e 100644
--- a/granary/nostr.py
+++ b/granary/nostr.py
@@ -76,10 +76,10 @@ def id_for(event):
"""Generates an id for a Nostr event.
Args:
- event: dict, JSON Nostr event
+ event (dict): Nostr event
Returns:
- str, 32-character hex-encoded sha256 hash of the event, serialized
+ str: 32-character hex-encoded sha256 hash of the event, serialized
according to NIP-01
"""
event.setdefault('tags', [])
@@ -110,10 +110,10 @@ def uri_to_id(uri):
Based on NIP-19 and NIP-21.
Args:
- uri: str
+ uri (str)
Returns:
- str
+ str:
"""
if not uri or not is_bech32(uri):
return uri
@@ -128,11 +128,11 @@ def id_to_uri(prefix, id):
Based on NIP-19 and NIP-21.
Args:
- prefix: str
- id: str
+ prefix (str)
+ id (str)
Returns:
- str
+ str:
"""
if not id:
return id
@@ -145,9 +145,10 @@ def from_as1(obj):
"""Converts an ActivityStreams 1 activity or object to a Nostr event.
Args:
- obj: dict, AS1 activity or object
+ obj (dict): AS1 activity or object
- Returns: dict, JSON Nostr event
+ Returns:
+ dict: Nostr event
"""
type = as1.object_type(obj)
inner_obj = as1.get_object(obj)
@@ -270,9 +271,10 @@ def to_as1(event):
"""Converts a Nostr event to an ActivityStreams 2 activity or object.
Args:
- event: dict, JSON Nostr event
+ event (dict): Nostr event
- Returns: dict, AS1 activity or object
+ Returns:
+ dict: AS1 activity or object
"""
if not event:
return {}
@@ -400,10 +402,10 @@ def to_as1(event):
class Nostr(Source):
- """Nostr source class. See file docstring and Source class for details.
+ """Nostr source class. See file docstring and :class:`Source` for details.
Attributes:
- relays: sequence of str, relay hostnames
+ relays (sequence of str): relay hostnames
"""
DOMAIN = None
@@ -419,10 +421,10 @@ def get_actor(self, user_id=None):
"""Fetches and returns a Nostr user profile.
Args:
- user_id: str, NIP-21 'nostr:npub...'
+ user_id (str): NIP-21 ``nostr:npub...``
Returns:
- dict, AS1 actor object
+ dict: AS1 actor object
"""
if not user_id or not user_id.removeprefix('nostr:').startswith('npub'):
raise ValueError(f'Expected nostr:npub..., got {user_id}')
@@ -512,21 +514,21 @@ def get_activities_response(self, user_id=None, group_id=None, app_id=None,
return self.make_activities_base_response(util.trim_nulls(activities.values()))
def query(self, websocket, filter):
- """Runs a Nostr REQ query on an open websocket.
+ """Runs a Nostr ``REQ`` query on an open websocket.
- Sends the query, collects the responses, and closes the REQ subscription.
- If `limit` is not set on the filter, defaults it to 20
+ Sends the query, collects the responses, and closes the ``REQ`` subscription.
+ If ``limit`` is not set on the filter, it defaults to 20.
Args:
- websocket: :class:`websockets.ClientConnection`
- filter: dict NIP-01 REQ filter
- limit: int
+ websocket (websockets.sync.client.ClientConnection)
+ filter (dict): NIP-01 ``REQ`` filter
+ limit (int)
Returns:
- list of dict Nostr events
+ list of dict: Nostr events
Raises:
- AssertionError if the filter 'limit' field is not set.
+ AssertionError: if the filter ``limit`` field is not set.
"""
limit = filter.setdefault('limit', 20)
diff --git a/granary/reddit.py b/granary/reddit.py
index 5cd5252f..31214209 100644
--- a/granary/reddit.py
+++ b/granary/reddit.py
@@ -3,9 +3,10 @@
Not thread safe!
Reddit API docs:
-https://github.com/reddit-archive/reddit/wiki/API
-https://www.reddit.com/dev/api
-https://www.reddit.com/prefs/apps
+
+* https://github.com/reddit-archive/reddit/wiki/API
+* https://www.reddit.com/dev/api
+* https://www.reddit.com/prefs/apps
PRAW API docs:
https://praw.readthedocs.io/
@@ -30,7 +31,7 @@
class Reddit(source.Source):
- """Reddit source class. See file docstring and Source class for details."""
+ """Reddit source class. See file docstring and :class:`source.Source` for details."""
DOMAIN = 'reddit.com'
BASE_URL = 'https://reddit.com'
@@ -52,10 +53,10 @@ def post_id(self, url):
"""Guesses the post id of the given URL.
Args:
- url: string
+ url (str)
Returns:
- string, or None
+ str or None:
"""
path_parts = urllib.parse.urlparse(url).path.rstrip('/').split('/')
if len(path_parts) >= 2:
@@ -75,14 +76,15 @@ def praw_to_actor(self, praw_user):
https://github.com/snarfed/bridgy/issues/1021
Ideally this would be part of PRAW, but they seem uninterested:
- https://github.com/praw-dev/praw/issues/131
- https://github.com/praw-dev/praw/issues/1140
+
+ * https://github.com/praw-dev/praw/issues/131
+ * https://github.com/praw-dev/praw/issues/1140
Args:
- user: PRAW Redditor object
+ user (praw.models.Redditor)
Returns:
- an ActivityStreams actor dict, ready to be JSON-encoded
+ dict: ActivityStreams actor
"""
try:
user = reddit.praw_to_user(praw_user)
@@ -96,10 +98,10 @@ def user_to_actor(self, user):
"""Converts a dict user to an actor.
Args:
- user: JSON user
+ user (dict): Reddit user
Returns:
- an ActivityStreams actor dict, ready to be JSON-encoded
+ dict: ActivityStreams actor
"""
username = user.get('name')
if not username:
@@ -142,11 +144,11 @@ def praw_to_object(self, thing, type):
Note that this will make external API calls to lazily load some attributes.
Args:
- thing: a PRAW object, Submission or Comment
- type: string to denote whether to get submission or comment content
+ thing (praw.models.Submission or praw.models.Comment)
+ type (str): either ``submission`` or ``comment``, which content to get
Returns:
- an ActivityStreams object dict, ready to be JSON-encoded
+ dict: ActivityStreams object
"""
id = getattr(thing, 'id', None)
if not id:
@@ -206,15 +208,15 @@ def praw_to_activity(self, thing, type):
Note that this will make external API calls to lazily load some attributes.
- https://praw.readthedocs.io/en/latest/code_overview/models/submission.html
- https://praw.readthedocs.io/en/latest/code_overview/models/comment.html
+ * https://praw.readthedocs.io/en/latest/code_overview/models/submission.html
+ * https://praw.readthedocs.io/en/latest/code_overview/models/comment.html
Args:
- thing: a PRAW object, Submission or Comment
- type: string to denote whether to get submission or comment content
+ thing (praw.models.Submission or praw.models.Comment)
+ type (str): whether to get submission or comment content
Returns:
- an ActivityStreams activity dict, ready to be JSON-encoded
+ dict: ActivityStreams activity
"""
obj = self.praw_to_object(thing, type)
if not obj:
@@ -235,8 +237,8 @@ def _fetch_replies(self, activities, cache=None):
Only includes top level comments!
Args:
- activities: list of activity dicts
- cache: dict, cache as described in get_activities_response()
+ activities (list of dict)
+ cache (dict): cache as described in :meth:`Source.get_activities_response`
"""
for activity in activities:
id = util.parse_tag_uri(activity.get('id'))[1]
@@ -267,7 +269,8 @@ def get_activities_response(self, user_id=None, group_id=None, app_id=None,
fetch_mentions=False, search_query=None, **kwargs):
"""Fetches submissions and ActivityStreams activities.
- Currently only implements activity_id, search_query and fetch_replies.
+ Currently only implements ``activity_id``, ``search_query`` and
+ ``fetch_replies``.
"""
if activity_id:
submissions = [self.api.submission(id=activity_id)]
@@ -287,9 +290,10 @@ def get_actor(self, user_id=None):
"""Fetches a Reddit user and converts them to an AS1 actor.
Args:
- user_id: str
+ user_id (str)
- Returns: dict, AS1 actor, or {} if the user isn't found
+ Returns:
+ dict: AS1 actor, or ``{}`` if the user isn't found
"""
return self.praw_to_actor(self._redditor(user_id=user_id))
@@ -298,10 +302,13 @@ def get_comment(self, comment_id, activity_id=None, activity_author_id=None,
"""Returns an ActivityStreams comment object.
Args:
- comment_id: string comment id
- activity_id: string activity id, Ignored
- activity_author_id: string activity author id. Ignored.
- activity: activity object, Ignored
+ comment_id (str): comment id
+ activity_id (str): activity id; ignored!
+ activity_author_id (str): activity author id; ignored!
+ activity (dict): activity object; ignored!
+
+ Returns:
+ dict: ActivityStreams object
"""
return self.praw_to_object(self.api.comment(id=comment_id), 'comment')
diff --git a/granary/rss.py b/granary/rss.py
index a04663b2..7b6c2755 100644
--- a/granary/rss.py
+++ b/granary/rss.py
@@ -1,14 +1,16 @@
"""Convert between ActivityStreams and RSS 2.0.
RSS 2.0 spec: http://www.rssboard.org/rss-specification
+
Feedgen docs: https://feedgen.kiesow.be/
Apple iTunes Podcasts feed requirements:
https://help.apple.com/itc/podcasts_connect/#/itc1723472cb
Notably:
+
* Valid RSS 2.0.
-* Each podcast item requires .
+* Each podcast item requires ````.
* Images should be JPEG or PNG, 1400x1400 to 3000x3000.
* HTTP server that hosts assets and files should support range requests.
"""
@@ -36,15 +38,15 @@ def from_activities(activities, actor=None, title=None, feed_url=None,
"""Converts ActivityStreams activities to an RSS 2.0 feed.
Args:
- activities: sequence of ActivityStreams activity dicts
- actor: ActivityStreams actor dict, the author of the feed
- title: string, the feed title
- feed_url: string, the URL for this RSS feed
- home_page_url: string, the home page URL
- hfeed: dict, parsed mf2 h-feed, if available
+ activities (sequence of dict): ActivityStreams activities
+ actor (dict): ActivityStreams actor, author of the feed
+ title (str): the feed title
+ feed_url (str): the URL for this RSS feed
+ home_page_url (str): the home page URL
+ hfeed (dict): parsed mf2 ``h-feed``, if available
Returns:
- str with RSS 2.0 XML
+ str: RSS 2.0 XML
"""
try:
iter(activities)
@@ -184,10 +186,10 @@ def to_activities(rss):
"""Converts an RSS feed to ActivityStreams 1 activities.
Args:
- rss: str, RSS document with top-level element
+ rss (str): RSS document with top-level ```` element
Returns:
- list of ActivityStreams activity dicts
+ list of dict: ActivityStreams activities
"""
parsed = feedparser.parse(rss)
activities = []
diff --git a/granary/source.py b/granary/source.py
index 26b20b2c..dfc5ec47 100644
--- a/granary/source.py
+++ b/granary/source.py
@@ -47,15 +47,15 @@
'content', 'description', 'abort', 'error_plain', 'error_html'])
"""Result of creating a new object in a silo.
- :meth:`create()` and :meth:`preview_create()` use this to provide a detailed
+ :meth:`create` and :meth:`preview_create` use this to provide a detailed
description of publishing failures. If ``abort`` is False, we should continue
looking for an entry to publish; if True, we should immediately inform the
user. ``error_plain`` text is sent in response to failed publish webmentions;
``error_html`` will be displayed to the user when publishing interactively.
Attributes:
- content (str or dict): str HTML snippet for :meth:`preview_create()`, dict for
- :meth:`create()`
+ content (str or dict): str HTML snippet for :meth:`preview_create`, dict for
+ :meth:`create`
description (str): HTML snippet describing the publish action, e.g.
``@-reply`` or ``RSVP yes to this event``. The verb itself is surrounded by a
```` to allow styling. May also include ```` link(s) and
@@ -81,7 +81,7 @@ def html_to_text(html, baseurl='', **kwargs):
Args:
baseurl (str): base URL to use when resolving relative URLs. Passed through
- to ``HTML2Text()``.
+ to ``HTML2Text``.
kwargs: html2text options:
https://github.com/Alir3z4/html2text/blob/master/docs/usage.md#available-options
"""
@@ -138,7 +138,7 @@ class Source(object, metaclass=SourceMeta):
"""Abstract base class for a source (e.g. Facebook, Twitter).
Concrete subclasses must override the class constants below and implement
- :meth:`get_activities()`.
+ :meth:`get_activities`.
Attributes:
DOMAIN (str): the source's domain
@@ -157,8 +157,8 @@ class Source(object, metaclass=SourceMeta):
Defaults to Twitter's limit, 280 characters as of 2019-10-12.
TRUNCATE_URL_LENGTH (int): optional number of characters that URLs count
for. Defaults to Twitter's, 23 as of 2019-10-12.
- OPTIMIZED_COMMENTS (bool): whether :meth:`get_comment()` is optimized and
- only fetches the requested comment. If False, :meth:`get_comment()` fetches
+ OPTIMIZED_COMMENTS (bool): whether :meth:`get_comment` is optimized and
+ only fetches the requested comment. If False, :meth:`get_comment` fetches
many or all of the post's comments to find the requested one.
"""
POST_ID_RE = None
@@ -178,17 +178,17 @@ def get_actor(self, user_id=None):
user_id: str, defaults to current user
Returns:
- dict: ActivityStreams actor object
+ dict: ActivityStreams actor
"""
raise NotImplementedError()
def get_activities(self, *args, **kwargs):
"""Fetches and returns a list of activities.
- See get_activities_response() for args and kwargs.
+ See :meth:`get_activities_response` for args and kwargs.
Returns:
- list: ActivityStreams activity dicts
+ list of dict: ActivityStreams activities
"""
return self.get_activities_response(*args, **kwargs)['items']
@@ -200,7 +200,7 @@ def get_activities_response(
search_query=None, scrape=False, **kwargs):
"""Fetches and returns ActivityStreams activities and response details.
- Subclasses should override this. See :meth:`get_activities()` for an
+ Subclasses should override this. See :meth:`get_activities` for an
alternative that just returns the list of activities.
If user_id is provided, only that user's activity(s) are included.
@@ -213,22 +213,22 @@ def get_activities_response(
group id is string id of group or @self, @friends, @all, @search:
http://opensocial-resources.googlecode.com/svn/spec/2.0/Social-Data.xml#Group-ID
- The fetch_* kwargs all default to False because they often require extra API
- round trips. Some sources return replies, likes, and shares in the same
+ The ``fetch_*`` kwargs all default to False because they often require extra
+ API round trips. Some sources return replies, likes, and shares in the same
initial call, so they may be included even if you don't set their kwarg to
True.
Args:
user_id (str): defaults to the currently authenticated user
- group_id (str): one of '@self', '@all', '@friends', '@search'. defaults
- to '@friends'
+ group_id (str): one of ``@self``, ``@all``, ``@friends``, ``@search``. defaults
+ to ``@friends``
app_id (str):
activity_id (str):
start_index (int): >= 0
count (int): >= 0
etag (str): optional ETag to send with the API request. Results will
only be returned if the ETag has changed. Should include enclosing
- double quotes, e.g. '"ABC123"'
+ double quotes, e.g. ``"ABC123"``
min_id (only): return activities with ids greater than this
cache (dict): optional, used to cache metadata like comment and like counts
per activity across calls. Used to skip expensive API calls that haven't
@@ -253,27 +253,27 @@ def get_activities_response(
The returned dict has at least these keys:
- * items: list of activity dicts
- * startIndex: int or None
- * itemsPerPage: int
- * totalResults: int or None (e.g. if it can 't be calculated efficiently)
- * filtered: False
- * sorted: False
- * updatedSince: False
- * etag: str etag returned by the API's initial call to get activities
+ * ``items`` (list of dict): activities
+ * ``startIndex`` (int or None)
+ * ``itemsPerPage`` (int)
+ * ``totalResults`` (int or None, eg if it can't be calculated efficiently)
+ * ``filtered``: False
+ * ``sorted``: False
+ * ``updatedSince``: False
+ * ``etag`` (str): ETag returned by the API's initial call to get activities
Raises:
- :class:`ValueError`: if any argument is invalid for this source
- :class:`NotImplementedError`: if the source doesn't support the requested
- operation, e.g. Facebook doesn't support search.
+ ValueError: if any argument is invalid for this source
+ NotImplementedError: if the source doesn't support the requested
+ operation, eg Facebook doesn't support search.
"""
raise NotImplementedError()
@classmethod
def make_activities_base_response(cls, activities, *args, **kwargs):
- """Generates a base response dict for :meth:`get_activities_response()`.
+ """Generates a base response dict for :meth:`get_activities_response`.
- See :meth:`get_activities()` for args and kwargs.
+ See :meth:`get_activities` for args and kwargs.
"""
activities = list(activities)
return {
@@ -305,7 +305,7 @@ def scraped_to_activities(self, scraped, count=None, fetch_extras=False,
fetches, if necessary.
Returns:
- tuple: ([AS activities], AS logged in actor (ie viewer))
+ (list of dict, dict) tuple: ([AS activities], AS logged in actor (ie viewer))
"""
raise NotImplementedError()
@@ -319,7 +319,7 @@ def scraped_to_activity(self, scraped):
scraped (str): scraped data from a single post permalink
Returns:
- tuple: (AS activity or None, AS logged in actor (ie viewer))
+ (dict, dict) tuple: (AS activity or None, AS logged in actor (ie viewer))
"""
raise NotImplementedError()
@@ -366,16 +366,16 @@ def create(self, obj, include_link=OMIT_LINK, ignore_formatting=False):
converting its HTML to plain text styling (newlines, etc.)
Returns:
- :class:`CreationResult`: The result. `content` will be a dict or ``None``.
- If the newly created object has an id or permalink, they'll be provided in
- the values for ``id`` and ``url``.
+ CreationResult: The result. ``content`` will be a dict or None. If the
+ newly created object has an id or permalink, they'll be provided in the
+ values for ``id`` and ``url``.
"""
raise NotImplementedError()
def preview_create(self, obj, include_link=OMIT_LINK, ignore_formatting=False):
"""Previews creating a new object: a post, comment, like, share, or RSVP.
- Returns HTML that previews what :meth:`create()` with the same object will
+ Returns HTML that previews what :meth:`create` with the same object will
do.
Subclasses should override this. Different sites will support different
@@ -392,7 +392,7 @@ def preview_create(self, obj, include_link=OMIT_LINK, ignore_formatting=False):
converting its HTML to plain text styling (newlines, etc.)
Returns:
- :class:`CreationResult`: The result. `content` will be a dict or ``None``.
+ CreationResult: The result. ``content`` will be a dict or None.
"""
raise NotImplementedError()
@@ -405,7 +405,7 @@ def delete(self, id):
id (str): silo object id
Returns:
- :class:`CreationResult`
+ CreationResult:
"""
raise NotImplementedError()
@@ -416,7 +416,7 @@ def preview_delete(self, id):
id (str): silo object id
Returns:
- :class:`CreationResult`
+ CreationResult:
"""
raise NotImplementedError()
@@ -427,7 +427,7 @@ def get_event(self, event_id):
id (str): site-specific event id
Returns:
- dict: decoded ActivityStreams activity, or ``None``
+ dict: decoded ActivityStreams activity, or None
"""
raise NotImplementedError()
@@ -449,7 +449,7 @@ def get_comment(self, comment_id, activity_id=None, activity_author_id=None,
dict: ActivityStreams comment object
Raises:
- :class:`ValueError`: if any argument is invalid for this source
+ ValueError: if any argument is invalid for this source
"""
raise NotImplementedError()
@@ -540,7 +540,7 @@ def get_blocklist(self):
"""Fetches and returns the current user's block list.
...ie the users that the current user is blocking. The exact semantics of
- "blocking" vary from silo to silo.
+ blocking vary from silo to silo.
Returns:
sequence of dict: actor objects
@@ -566,7 +566,7 @@ def user_to_actor(self, user):
user (dict): a decoded JSON silo user object
Returns:
- dict: ActivityStreams actor, ready to be JSON-encoded
+ dict: ActivityStreams actor
"""
raise NotImplementedError()
@@ -689,7 +689,7 @@ def tag_uri(self, name):
return util.tag_uri(self.DOMAIN, name)
def base_object(self, obj):
- """Returns the 'base' silo object that an object operates on.
+ """Returns the ``base`` silo object that an object operates on.
For example, if the object is a comment, this returns the post that it's a
comment on. If it's an RSVP, this returns the event. The id in the returned
@@ -762,7 +762,7 @@ def post_id(cls, url):
def _content_for_create(self, obj, ignore_formatting=False, prefer_name=False,
strip_first_video_tag=False, strip_quotations=False):
- """Returns content text for :meth:`create()` and :meth:`preview_create()`.
+ """Returns content text for :meth:`create` and :meth:`preview_create`.
Returns ``summary`` if available, then ``content``, then ``displayName``.
@@ -833,8 +833,8 @@ def _content_for_create(self, obj, ignore_formatting=False, prefer_name=False,
def truncate(self, content, url, include_link, type=None, quote_url=None):
"""Shorten text content to fit within a character limit.
- Character limit and URL character length are taken from the
- ``TRUNCATE_TEXT_LENGTH`` and ``TRUNCATE_URL_LENGTH`` class constants.
+ Character limit and URL character length are taken from
+ :const:`TRUNCATE_TEXT_LENGTH` and :const:`TRUNCATE_URL_LENGTH`.
Args:
content (str)
diff --git a/granary/twitter.py b/granary/twitter.py
index ee7ce204..4f189b1f 100644
--- a/granary/twitter.py
+++ b/granary/twitter.py
@@ -2,9 +2,10 @@
Uses the v1.1 REST API: https://developer.twitter.com/en/docs/api-reference-index
-The Audience Targeting 'to' field is set to @public or @private based on whether
-the tweet author's 'protected' field is true or false.
+The Audience Targeting ``to`` field is set to ``@public`` or ``@private`` based
+on whether the tweet author's ``protected`` field is true or false.
https://dev.twitter.com/docs/platform-objects/users
+
"""
import collections
import datetime
@@ -137,7 +138,7 @@ def dst(self, dt):
class Twitter(source.Source):
- """Twitter source class. See file docstring and Source class for details."""
+ """Twitter source class. See file docstring and :class:`Source` for details."""
DOMAIN = 'twitter.com'
BASE_URL = 'https://twitter.com/'
@@ -174,10 +175,10 @@ def __init__(self, access_token_key, access_token_secret, username=None,
OAuth access token by creating an app here: https://dev.twitter.com/apps/new
Args:
- access_token_key: string, OAuth access token key
- access_token_secret: string, OAuth access token secret
- username: string, optional, the current user. Used in e.g. preview/create.
- scrape_headers: dict, optional, with string HTTP header keys and values to
+ access_token_key (str): OAuth access token key
+ access_token_secret (str): OAuth access token secret
+ username (str): optional, the current user. Used in e.g. preview/create.
+ scrape_headers (dict): optional, with string HTTP header keys and values to
use when scraping likes
"""
self.access_token_key = access_token_key
@@ -189,7 +190,7 @@ def get_actor(self, screen_name=None):
"""Returns a user as a JSON ActivityStreams actor dict.
Args:
- screen_name: string username. Defaults to the current user.
+ screen_name (str): username. Defaults to the current user.
"""
url = API_CURRENT_USER if screen_name is None else API_USER % screen_name
return self.user_to_actor(self.urlopen(url))
@@ -203,24 +204,18 @@ def get_activities_response(self, user_id=None, group_id=None, app_id=None,
search_query=None, scrape=False, **kwargs):
"""Fetches posts and converts them to ActivityStreams activities.
- XXX HACK: this is currently hacked for bridgy to NOT pass min_id to the
- request for fetching activity tweets themselves, but to pass it to all of
- the requests for filling in replies, retweets, etc. That's because we want
- to find new replies and retweets of older initial tweets.
- TODO: find a better way.
-
- See :meth:`source.Source.get_activities_response()` for details. app_id is
- ignored. min_id is translated to Twitter's since_id.
+ See :meth:`source.Source.get_activities_response` for details. ``app_id``
+ is ignored. ``min_id`` is translated to Twitter's ``since_id``.
The code for handling ETags (and 304 Not Changed responses and setting
- If-None-Match) is here, but unused right now since Twitter evidently doesn't
- support ETags. From https://dev.twitter.com/discussions/5800 :
- "I've confirmed with our team that we're not explicitly supporting this
- family of features."
-
- Likes (nee favorites) are scraped from twitter.com, since Twitter's REST
- API doesn't offer a way to fetch them. You can also get them from the
- Streaming API, though, and convert them with streaming_event_to_object().
+ ``If-None-Match``) is here, but unused right now since Twitter evidently
+ doesn't support ETags. From https://dev.twitter.com/discussions/5800 : "I've
+ confirmed with our team that we're not explicitly supporting this family of
+ features."
+
+ Likes (nee favorites) are scraped from twitter.com, since Twitter's REST API
+ doesn't offer a way to fetch them. You can also get them from the Streaming
+ API, though, and convert them with :meth:`streaming_event_to_object`.
https://dev.twitter.com/docs/streaming-apis/messages#Events_event
Shares (ie retweets) are fetched with a separate API call per tweet:
@@ -232,17 +227,17 @@ def get_activities_response(self, user_id=None, group_id=None, app_id=None,
Quote tweets are fetched by searching for the possibly quoted tweet's ID,
using the OR operator to search up to 5 IDs at a time, and then checking
- the quoted_status_id_str field
+ the ``quoted_status_id_str`` field:
https://dev.twitter.com/overview/api/tweets#quoted_status_id_str
- Use the group_id @self to retrieve a user_id’s timeline. If user_id is None
- or @me, it will return tweets for the current API user.
+ Use the ``group_id`` ``@self`` to retrieve a ``user_id``'s timeline. If ``user_id`` is
+ None or ``@me``, it will return tweets for the current API user.
group_id can be used to specify the slug of a list for which to return tweets.
By default the current API user’s lists will be used, but lists owned by other
- users can be fetched by explicitly passing a username to user_id, e.g. to
- fetch tweets from the list @exampleuser/example-list you would call
- get_activities(user_id='exampleuser', group_id='example-list').
+ users can be fetched by explicitly passing a username to ``user_id``, e.g. to
+ fetch tweets from the list ``@exampleuser/example-list`` you would call
+ ``get_activities(user_id='exampleuser', group_id='example-list')``.
Twitter replies default to including a mention of the user they're replying
to, which overloads mentions a bit. When fetch_mentions is True, we determine
@@ -250,11 +245,18 @@ def get_activities_response(self, user_id=None, group_id=None, app_id=None,
* it's not a reply, OR
* it's a reply, but not to the current user, AND
- * the tweet it's replying to doesn't @-mention the current user
+ * the tweet it's replying to doesn't @-mention the current user
Raises:
- NotImplementedError: if fetch_likes is True but scrape_headers was not
+ NotImplementedError: if ``fetch_likes`` is True but ``scrape_headers`` was not
provided to the constructor.
+
+ XXX HACK: this is currently hacked for Bridgy to NOT pass ``min_id`` to the
+ request for fetching activity tweets themselves, but to pass it to all of
+ the requests for filling in replies, retweets, etc. That's because we want
+ to find new replies and retweets of older initial tweets.
+ TODO: find a better way.
+
"""
if fetch_likes and not self.scrape_headers:
raise NotImplementedError('fetch_likes requires scrape_headers')
@@ -432,13 +434,14 @@ def fetch_replies(self, activities, min_id=None):
Includes indirect replies ie reply chains, not just direct replies. Searches
for @-mentions, matches them to the original tweets with
- in_reply_to_status_id_str, and recurses until it's walked the entire tree.
+ ``in_reply_to_status_id_str``, and recurses until it's walked the entire
+ tree.
Args:
- activities: list of activity dicts
+ activities (list of dict)
Returns:
- same activities list
+ list of dict: same activities
"""
# cache searches for @-mentions for individual users. maps username to dict
@@ -492,15 +495,15 @@ def fetch_mentions(self, username, tweets, min_id=None):
"""Fetches a user's @-mentions and returns them as ActivityStreams.
Tries to only include explicit mentions, not mentions automatically created
- by @-replying. See the :meth:`get_activities()` docstring for details.
+ by @-replying. See :meth:`get_activities_response` for details.
Args:
- username: string
- tweets: list of Twitter API objects. used to find quote tweets quoting them.
- min_id: only return activities with ids greater than this
+ username (str)
+ tweets (list): Twitter API objects, used to find quote tweets quoting them.
+ min_id (str): only return activities with ids greater than this
Returns:
- list of activity dicts
+ list of dict: activities
"""
# get @-name mentions
url = API_SEARCH % {
@@ -564,10 +567,10 @@ def get_comment(self, comment_id, activity_id=None, activity_author_id=None,
"""Returns an ActivityStreams comment object.
Args:
- comment_id: string comment id
- activity_id: string activity id, optional
- activity_author_id: string activity author id. Ignored.
- activity: activity object, optional
+ comment_id (str): comment id
+ activity_id (str): activity id, optional
+ activity_author_id (str): activity author id; ignored
+ activity (dict): original object, optional
"""
self._validate_id(comment_id)
url = API_STATUS % comment_id
@@ -577,10 +580,10 @@ def get_share(self, activity_user_id, activity_id, share_id, activity=None):
"""Returns an ActivityStreams 'share' activity object.
Args:
- activity_user_id: string id of the user who posted the original activity
- activity_id: string activity id
- share_id: string id of the share object
- activity: activity object, optional
+ activity_user_id (str): id of the user who posted the original activity
+ activity_id (str): activity id
+ share_id (str): id of the share object
+ activity (dict): original object, optional
"""
self._validate_id(share_id)
url = API_STATUS % share_id
@@ -598,12 +601,11 @@ def get_blocklist(self):
at once. :(
Returns:
- sequence of actor objects
+ list of dict: actors
Raises:
- :class:`source.RateLimited` if we hit the rate limit. The partial
- attribute will have the list of user ids we fetched before hitting the
- limit.
+ source.RateLimited: if we hit the rate limit. The partial attribute will
+ have the list of user ids we fetched before hitting the limit.
"""
return self._get_blocklist_fn(API_BLOCKS,
lambda resp: (self.user_to_actor(user) for user in resp.get('users', [])))
@@ -614,17 +616,16 @@ def get_blocklist_ids(self):
May make multiple API calls, using cursors, to fully fetch large blocklists.
https://dev.twitter.com/overview/api/cursoring
- Subject to the same rate limiting as get_blocklist(), but each API call
- returns ~4k ids, so realistically this can actually fetch blocklists of up
- to 75k users at once. Beware though, many Twitter users have even more!
+ Subject to the same rate limiting as :meth:`get_blocklist`, but each API
+ call returns ~4k ids, so realistically this can actually fetch blocklists of
+ up to 75k users at once. Beware though, many Twitter users have even more!
Returns:
- sequence of string Twitter user ids
+ sequence of str: Twitter user ids
Raises:
- :class:`source.RateLimited` if we hit the rate limit. The partial
- attribute will have the list of user ids we fetched before hitting the
- limit.
+ source.RateLimited: if we hit the rate limit. The partial attribute will
+ have the list of user ids we fetched before hitting the limit.
"""
return self._get_blocklist_fn(API_BLOCK_IDS, lambda resp: resp.get('ids', []))
@@ -648,15 +649,13 @@ def create(self, obj, include_link=source.OMIT_LINK,
"""Creates a tweet, reply tweet, retweet, or favorite.
Args:
- obj: ActivityStreams object
- include_link: string
- ignore_formatting: bool
+ obj (dict): ActivityStreams object
+ include_link (str)
+ ignore_formatting (bool)
Returns:
- a CreationResult whose content will be a dict with 'id', 'url',
- and 'type' keys (all optional) for the newly created Twitter
- object (or None)
-
+ CreationResult: content will be a dict with ``id``, ``url``, and ``type``
+ keys (all optional) for the newly created Twitter object (or None)
"""
return self._create(obj, preview=False, include_link=include_link,
ignore_formatting=ignore_formatting)
@@ -667,12 +666,11 @@ def preview_create(self, obj, include_link=source.OMIT_LINK,
Args:
obj: ActivityStreams object
- include_link: string
- ignore_formatting: bool
+ include_link (str):
+ ignore_formatting (bool):
Returns:
- a CreationResult whose content will be a str HTML
- snippet (or None)
+ CreationResult: content will be an HTML snippet (or None)
"""
return self._create(obj, preview=True, include_link=include_link,
ignore_formatting=ignore_formatting)
@@ -681,22 +679,20 @@ def _create(self, obj, preview=None, include_link=source.OMIT_LINK,
ignore_formatting=False):
"""Creates or previews creating a tweet, reply tweet, retweet, or favorite.
- https://dev.twitter.com/docs/api/1.1/post/statuses/update
- https://dev.twitter.com/docs/api/1.1/post/statuses/retweet/:id
- https://dev.twitter.com/docs/api/1.1/post/favorites/create
+ * https://dev.twitter.com/docs/api/1.1/post/statuses/update
+ * https://dev.twitter.com/docs/api/1.1/post/statuses/retweet/:id
+ * https://dev.twitter.com/docs/api/1.1/post/favorites/create
Args:
- obj: ActivityStreams object
- preview: bool
- include_link: string
- ignore_formatting: bool
+ obj (dict): ActivityStreams object
+ preview (bool)
+ include_link (str)
+ ignore_formatting (bool)
Returns:
- a CreationResult
-
- If preview is True, the content will be a str HTML
- snippet. If False, it will be a dict with 'id' and 'url' keys
- for the newly created Twitter object.
+ CreationResult: If ``preview`` is True, ``content`` will be an HTML
+ snippet. If False, it will be a dict with ``id`` and ``url`` keys for the
+ newly created Twitter object.
"""
assert preview in (False, True)
type = obj.get('objectType')
@@ -896,11 +892,12 @@ def upload_images(self, images):
https://developer.twitter.com/en/docs/media/upload-media/uploading-media/media-best-practices
Args:
- images: sequence of AS image objects, eg:
- [{'url': 'http://picture', 'displayName': 'a thing'}, ...]
+ images (sequence of dict): AS image objects, eg::
+
+ [{'url': 'http://picture', 'displayName': 'a thing'}, ...]
Returns:
- list of string media ids or :class:`CreationResult` on error
+ list of str, or CreationResult on error: media ids
"""
ids = []
for image in images:
@@ -943,17 +940,17 @@ def upload_video(self, url):
Chunked upload consists of multiple API calls:
- * command=INIT, which allocates the media id
- * command=APPEND for each 5MB block, up to 15MB total
- * command=FINALIZE
+ * ``command=INIT``, which allocates the media id
+ * ``command=APPEND`` for each 5MB block, up to 15MB total
+ * ``command=FINALIZE``
https://developer.twitter.com/en/docs/media/upload-media/uploading-media/chunked-media-upload
Args:
- url: string URL of images
+ url (str): URL of images
Returns:
- string media id or :class:`CreationResult` on error
+ str, or CreationResult on error: media id
"""
video_resp = util.urlopen(url)
error = self._check_media(url, video_resp, VIDEO_MIME_TYPES, 'MP4 videos',
@@ -1028,16 +1025,16 @@ def _check_media(url, resp, types, label, max_size):
"""Checks that an image or video is an allowed type and size.
Args:
- url: string
- resp: urlopen result object
- types: sequence of allowed string MIME types
- label: string, human-readable description of the allowed MIME types, to be
+ url (str)
+ resp (urllib.response.addinfourl): :func:`urllib.request.urlopen` response
+ types (sequence of str): allowed str MIME types
+ label (str): human-readable description of the allowed MIME types, to be
used in an error message
- max_size: int, maximum allowed size, in bytes
+ max_size (int): maximum allowed size, in bytes
Returns:
- None if the url's type and size are valid, :class:`CreationResult`
- with abort=True otherwise
+ None or CreationResult: None if the url's type and size are valid,
+ :class:`CreationResult` with ``abort=True`` otherwise
"""
type = resp.headers.get('Content-Type')
if not type:
@@ -1060,9 +1057,10 @@ def delete(self, id):
"""Deletes a tweet. The authenticated user must have authored it.
Args:
- id: int or string, tweet id to delete
+ id (int or str): tweet id to delete
- Returns: CreationResult, content is Twitter API response dict
+ Returns:
+ CreationResult: content is Twitter API response dict
"""
resp = self.urlopen(API_DELETE_TWEET, data=urllib.parse.urlencode({'id': id}))
return source.creation_result(resp)
@@ -1071,9 +1069,10 @@ def preview_delete(self, id):
"""Previews deleting a tweet.
Args:
- id: int or string, tweet id to delete
+ id (int or str): tweet id to delete
- Returns: CreationResult
+ Returns:
+ CreationResult:
"""
url = self.status_url(self.username or '_', id)
return source.creation_result(description=f"""delete
@@ -1081,7 +1080,7 @@ def preview_delete(self, id):
{self.embed_post({'url': url})}""")
def urlopen(self, url, parse_response=True, **kwargs):
- """Wraps :func:`urllib2.urlopen()` and adds an OAuth signature."""
+ """Wraps :func:`urllib.request.urlopen` and adds an OAuth signature."""
if not url.startswith('http'):
url = API_BASE + url
@@ -1111,18 +1110,19 @@ def request():
return request()
def base_object(self, obj):
- """Returns the 'base' silo object that an object operates on.
+ """Returns the "base" silo object that an object operates on.
+
+ Includes special handling for Twitter photo and video URLs, eg:
- Includes special handling for Twitter photo and video URLs, e.g.
- https://twitter.com/nelson/status/447465082327298048/photo/1
- https://twitter.com/nelson/status/447465082327298048/video/1
+ * ``https://twitter.com/nelson/status/447465082327298048/photo/1``
+ * ``https://twitter.com/nelson/status/447465082327298048/video/1``
Args:
- obj: ActivityStreams object
+ obj (dict): ActivityStreams object
Returns:
- dict, minimal ActivityStreams object. Usually has at least id and
- url fields; may also have author.
+ dict: minimal ActivityStreams object. Usually has at least ``id`` and
+ ``url`` fields; may also have author.
"""
base_obj = super(Twitter, self).base_object(obj)
url = base_obj.get('url')
@@ -1146,10 +1146,10 @@ def tweet_to_activity(self, tweet):
"""Converts a tweet to an activity.
Args:
- tweet: dict, a decoded JSON tweet
+ tweet (dict): a decoded JSON tweet
Returns:
- an ActivityStreams activity dict, ready to be JSON-encoded
+ dict: ActivityStreams activity
"""
obj = self.tweet_to_object(tweet)
activity = {
@@ -1183,7 +1183,7 @@ def tweet_to_object(self, tweet):
"""Converts a tweet to an object.
Args:
- tweet: dict, a decoded JSON tweet
+ tweet (dict): a decoded JSON tweet
Returns:
an ActivityStreams object dict, ready to be JSON-encoded
@@ -1384,14 +1384,15 @@ def tweet_to_object(self, tweet):
@staticmethod
def _get_entities(tweet):
- """Merges and returns a tweet's entities and extended_entities.
+ """Merges and returns a tweet's ``entities`` and ``extended_entities``.
- Most entities are in the entities field - urls, hashtags, user_mentions,
- symbols, etc. Media are special though: extended_entities is always
- preferred. It has videos, animated gifs, and multiple photos. entities only
- has one photo at most, either the first or a thumbnail from the video, and
- its type is always 'photo' even for videos and animated gifs. (The id and
- id_str will be the same.) So ignore it unless extended_entities is missing.
+ Most entities are in the ``entities`` field - urls, hashtags, user_mentions,
+ symbols, etc. Media are special though: ``extended_entities`` is always
+ preferred. It has videos, animated gifs, and multiple photos. ``entities``
+ only has one photo at most, either the first or a thumbnail from the video,
+ and its type is always ``photo`` even for videos and animated gifs. (The
+ ``id`` and ``id_str`` will be the same.) So ignore it unless
+ ``extended_entities`` is missing.
https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/extended-entities-object
"""
@@ -1418,12 +1419,12 @@ def _get_entities(tweet):
def _video_url(self, media):
"""Returns the best video URL from a media object.
- Prefers MIME types that start with video/, then falls back to others.
+ Prefers MIME types that start with ``video/``, then falls back to others.
https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/extended-entities-object
- Twitter videos in extended entities currently often have both .m3u8 (HLS)
- and .mp4 variants. Twitter threatened to drop the MP4s in Aug 2016, but
+ Twitter videos in extended entities currently often have both ``.m3u8`` (HLS)
+ and ``.mp4`` variants. Twitter threatened to drop the MP4s in Aug 2016, but
they're still there as of Dec 2017.
https://twittercommunity.com/t/retiring-mp4-video-output-support-on-august-1st-2016/66045
@@ -1431,9 +1432,10 @@ def _video_url(self, media):
https://twittercommunity.com/t/mp4-still-appears-despite-of-retiring-announcment/78894
Args:
- media: dict, Twitter media object
+ media (dict): Twitter media object
- Returns: string URL
+ Returns:
+ str: URL
"""
variants = media.get('video_info', {}).get('variants')
if not variants:
@@ -1455,10 +1457,10 @@ def user_to_actor(self, user):
"""Converts a user to an actor.
Args:
- user: dict, a decoded JSON Twitter user
+ user (dict): a decoded JSON Twitter user
Returns:
- an ActivityStreams actor dict, ready to be JSON-encoded
+ dict: ActivityStreams actor
"""
username = user.get('screen_name')
if not username:
@@ -1494,10 +1496,10 @@ def retweet_to_object(self, retweet):
"""Converts a retweet to a share activity object.
Args:
- retweet: dict, a decoded JSON tweet
+ retweet (dict): a decoded JSON tweet
Returns:
- an ActivityStreams object dict
+ dict: ActivityStreams object
"""
orig = retweet.get('retweeted_status')
if not orig:
@@ -1522,10 +1524,10 @@ def streaming_event_to_object(self, event):
Right now, only converts favorite events to like objects.
Args:
- event: dict, a decoded JSON Streaming API event
+ event (dict): a decoded JSON Streaming API event
Returns:
- an ActivityStreams object dict
+ dict: ActivityStreams object
"""
source = event.get('source')
tweet = event.get('target_object')
@@ -1538,11 +1540,11 @@ def _make_like(self, tweet, liker):
"""Generates and returns a ActivityStreams like object.
Args:
- tweet: Twitter tweet dict
- liker: Twitter user dict
+ tweet (dict): Twitter tweet
+ liker (dict): Twitter user
Returns:
- ActivityStreams object dict
+ dict: ActivityStreams object
"""
# TODO: unify with Mastodon._make_like()
tweet_id = tweet.get('id_str')
@@ -1568,10 +1570,10 @@ def rfc2822_to_iso8601(time_str):
"""Converts a timestamp string from RFC 2822 format to ISO 8601.
Example RFC 2822 timestamp string generated by Twitter:
- 'Wed May 23 06:01:13 +0000 2007'
+ ``Wed May 23 06:01:13 +0000 2007``
Resulting ISO 8610 timestamp string:
- '2007-05-23T06:01:13'
+ ``2007-05-23T06:01:13``
"""
if not time_str:
return None