allura
Revision | 6b404c226f19a34f8a782ebb3e0737bffbc7b1f6 (tree) |
---|---|
Date | 2012-06-22 23:34:39 |
Author | Cory Johns <johnsca@geek...> |
Committer | Cory Johns |
[#4345] Fixed / expanded tests, re-enabled text/html feed processing
Signed-off-by: Cory Johns <johnsca@geek.net>
@@ -175,15 +175,10 @@ class RssFeedsCommand(base.BlogCommand):
                 if ct.type != 'text/html':
                     content += '[plain]%s[/plain]' % ct.value
                 else:
-                    if False:
-                        # FIXME: disabled until https://sourceforge.net/p/allura/tickets/4345
-                        # because the bad formatting from [plain] is worse than bad formatting from unintentional markdown syntax
-                        parser = MDHTMLParser()
-                        parser.feed(ct.value)
-                        parser.close() # must be before using the result_doc
-                        markdown_content = html2text.html2text(parser.result_doc, baseurl=e.link)
-                    else:
-                        markdown_content = html2text.html2text(ct.value, baseurl=e.link)
+                    parser = MDHTMLParser()
+                    parser.feed(ct.value)
+                    parser.close() # must be before using the result_doc
+                    markdown_content = html2text.html2text(parser.result_doc, baseurl=e.link)
 
                     content += markdown_content
             else:
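The hunk above removes the `if False:` guard from the ticket #4345 FIXME, so HTML feed content is once again run through `MDHTMLParser` before `html2text`. A rough sketch of that re-enabled path, using the names from `forgeblog.command.rssfeeds`; the sample HTML and link below are invented stand-ins for a feedparser content item (`ct.value`) and entry link (`e.link`):

```python
from forgeblog.command import rssfeeds
import html2text

# Invented sample data for illustration only.
entry_html = "<p>1. foo</p>\n<p>#foo bar <a href='baz'>baz</a></p>"
entry_link = 'http://example.com/'

# MDHTMLParser wraps literal text in [plain]...[/plain] so characters like
# "1." or "#" are not later re-interpreted as markdown syntax.
parser = rssfeeds.MDHTMLParser()
parser.feed(entry_html)
parser.close()  # must be called before reading result_doc

# html2text then turns the annotated HTML into markdown, resolving relative
# links such as href='baz' against the entry's URL.
markdown_content = html2text.html2text(parser.result_doc, baseurl=entry_link)
print(markdown_content)
```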
@@ -1,9 +1,12 @@
+from datetime import datetime, timedelta
 import pylons
 pylons.c = pylons.tmpl_context
 pylons.g = pylons.app_globals
 from pylons import c, g
 from nose.tools import assert_equal
 
+from html2text import html2text
+
 from ming.orm.ormsession import ThreadLocalORMSession
 
 from alluratest.controller import setup_basic_test, setup_global_objects
@@ -13,13 +16,60 @@ from allura.lib import helpers as h
 from forgeblog import model as BM
 from forgeblog.command import rssfeeds
 
+import mock
+
+
 test_config = 'test.ini#main'
 
 def setUp():
     setup_basic_test()
     setup_global_objects()
 
-def test_pull_rss_feeds():
+def _mock_feed(*entries):
+    class attrdict(dict):
+        def __getattr__(self, name):
+            return self[name]
+
+    feed = mock.Mock()
+    feed.bozo = False
+    feed.entries = []
+    for e in entries:
+        _mock_feed.i += 1
+        entry = attrdict(
+            content_type='text/plain',
+            title='Default Title %d' % _mock_feed.i,
+            subtitle='',
+            summary='',
+            link='http://example.com/',
+            updated=datetime.now()+timedelta(days=_mock_feed.i - 100))
+        entry.update(e)
+        entry['updated_parsed'] = entry['updated'].timetuple()
+        if 'content' in entry:
+            entry['content'] = [attrdict(type=entry['content_type'], value=entry['content'])]
+        feed.entries.append(entry)
+
+    return feed
+_mock_feed.i = 0
+
+@mock.patch.object(rssfeeds.feedparser, 'parse')
+def test_pull_rss_feeds(parsefeed):
+    parsefeed.return_value = _mock_feed(
+        dict(title='Test', subtitle='test', summary='This is a test'),
+        dict(content_type='text/plain', content='Test feed'),
+        dict(content_type='text/html', content=
+            "<p>1. foo</p>\n"
+            "\n"
+            "<p>\n"
+            "#foo bar <a href='baz'>baz</a>\n"
+            "foo bar\n"
+            "</p>\n"
+            "\n"
+            "<p>#foo bar <a href='baz'>\n"
+            "baz\n"
+            "</a></p>\n"
+        ),
+    )
+
     base_app = M.AppConfig.query.find().all()[0]
     tmp_app = M.AppConfig(tool_name=u'Blog', discussion_id=base_app.discussion_id,
                           project_id=base_app.project_id,
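With `feedparser.parse` patched out, the test no longer depends on a live feed (the old `wordpress.org` URL). The pattern is plain `mock.patch.object`: replace the attribute on the module the code under test actually uses, drive it through `return_value`, and verify the call afterwards. A self-contained illustration of the same pattern, with a hypothetical `client`/`fetch_titles` pair standing in for `rssfeeds.feedparser` and the command:

```python
import mock

class client(object):
    """Hypothetical network-facing dependency, standing in for feedparser."""
    @staticmethod
    def get(url):
        raise RuntimeError('would hit the network')

def fetch_titles(url):
    # Hypothetical code under test: calls the dependency and post-processes the result.
    return [item['title'] for item in client.get(url)]

@mock.patch.object(client, 'get')
def test_fetch_titles(get):
    # The decorator swaps client.get for a Mock and passes it in as `get`.
    get.return_value = [{'title': 'Test'}]
    assert fetch_titles('http://example.com/feed/') == ['Test']
    get.assert_called_with('http://example.com/feed/')
```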
@@ -27,14 +77,31 @@ def test_pull_rss_feeds():
                u'project_name': base_app.project.name,
                u'mount_point': u'blog',
                u'mount_label': u'Blog'})
-    new_external_feeds = ['http://wordpress.org/news/feed/']
+    new_external_feeds = ['http://example.com/news/feed/']
     BM.Globals(app_config_id=tmp_app._id, external_feeds=new_external_feeds)
     ThreadLocalORMSession.flush_all()
 
     cmd = rssfeeds.RssFeedsCommand('pull-rss-feeds')
     cmd.run([test_config, '-a', tmp_app._id])
     cmd.command()
-    assert len(BM.BlogPost.query.find({'app_config_id': tmp_app._id}).all()) > 0
+    parsefeed.assert_called_with('http://example.com/news/feed/')
+    posts = BM.BlogPost.query.find({'app_config_id': tmp_app._id}).sort('timestamp', 1)
+    assert_equal(posts.count(), 3)
+    posts = posts.all()
+    assert_equal(posts[0].title, 'Test')
+    assert_equal(posts[0].text, '[plain]This is a test[/plain] [link](http://example.com/)')
+    assert_equal(posts[1].title, 'Default Title 2')
+    assert_equal(posts[1].text, '[plain]Test feed[/plain] [link](http://example.com/)')
+    assert_equal(posts[2].title, 'Default Title 3')
+    assert_equal(posts[2].text,
+        "[plain]1. foo[/plain]\n"
+        "\n"
+        "[plain]#foo bar [/plain][[plain]baz[/plain]](baz) "
+        "[plain]foo bar[/plain] \n"
+        "\n"
+        "[plain]#foo bar [/plain][ [plain]baz[/plain] ](baz)\n "
+        "[link](http://example.com/)"
+    )
 
 def test_plaintext_parser():
     parser = rssfeeds.MDHTMLParser()
@@ -89,7 +156,7 @@ def test_plaintext_parser_wrapped():
     )
 
 def test_plaintext_preprocessor():
-    html = g.markdown.convert(
+    text = html2text(
         "[plain]1. foo[/plain]\n"
         "\n"
         "[plain]#foo bar [/plain]<a href='baz'>[plain]baz[/plain]</a>\n"
@@ -99,22 +166,15 @@ def test_plaintext_preprocessor():
         "[plain]baz[/plain]\n"
         "</a>\n"
     )
+    html = g.markdown.convert(text)
     assert_equal(html,
-        '<div class="markdown_content">'
-        '1. foo\n'
-        '\n'
-        '<p>'
-        '#foo bar <a href="../baz">baz</a><br />'
-        'foo bar'
-        '</p><p>'
-        '#foo bar <a href="../baz"><br />'
-        'baz<br />'
-        '</a>'
-        '</p></div>'
+        '<div class="markdown_content"><p>1. foo '
+        '#foo bar <a href="../baz">baz</a> foo bar '
+        '#foo bar <a href="../baz"> baz </a></p></div>'
     )
 
 def test_plaintext_preprocessor_wrapped():
-    html = g.markdown.convert(
+    text = html2text(
         "<p>[plain]1. foo[/plain]</p>\n"
         "\n"
         "<p>\n"
@@ -126,16 +186,10 @@ def test_plaintext_preprocessor_wrapped():
         "[plain]baz[/plain]\n"
         "</a></p>\n"
     )
+    html = g.markdown.convert(text)
     assert_equal(html,
-        '<div class="markdown_content">'
-        '<p>1. foo</p>\n'
+        '<div class="markdown_content">1. foo\n'
         '\n'
-        '<p>\n'
-        '#foo bar <a href="../baz">baz</a>\n'
-        'foo bar\n'
-        '</p>\n'
-        '<p>#foo bar <a href="../baz">\n'
-        'baz\n'
-        '</a></p>\n'
-        '</div>'
+        '<p>#foo bar <a href="../baz">baz</a> foo bar </p>\n'
+        '<p>#foo bar <a href="../baz"> baz </a></p></div>'
     )
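The preprocessor tests above now exercise the same two-step path the command uses: `[plain]`-annotated HTML is first flattened with `html2text`, and only the result is fed to Allura's markdown converter. A condensed sketch of that round trip, assuming an initialized Allura test environment (`setup_basic_test()` / `setup_global_objects()`) so that `g.markdown` is available; the input string is abbreviated from the tests:

```python
import pylons
pylons.g = pylons.app_globals   # same aliasing the test module uses
from pylons import g
from html2text import html2text

# [plain]-annotated HTML, as MDHTMLParser would emit it for a feed entry.
annotated = (
    "<p>[plain]1. foo[/plain]</p>\n"
    "<p>[plain]#foo bar [/plain]<a href='baz'>[plain]baz[/plain]</a></p>\n"
)

text = html2text(annotated)       # HTML -> markdown-ish text; [plain] markers survive
html = g.markdown.convert(text)   # Allura markdown renders it, honoring the [plain] escapes
assert html.startswith('<div class="markdown_content">')
```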