From d91604a51960c3e031a9053f37f501419347fcd3 Mon Sep 17 00:00:00 2001 From: keepcosmos Date: Fri, 15 Apr 2016 20:51:29 +0900 Subject: [PATCH] initial commit --- .gitignore | 5 + README.md | 20 + config/config.exs | 30 + lib/document.ex | 58 ++ lib/readability.ex | 10 + lib/test.js | 1835 ++++++++++++++++++++++++++++++++++++ lib/test.rb | 522 ++++++++++ lib/title_finder.ex | 64 ++ mix.exs | 34 + mix.lock | 2 + test/features/nytimes.html | 1198 +++++++++++++++++++++++ test/readability_test.exs | 8 + test/test_helper.exs | 1 + test/title_finder_test.exs | 45 + 14 files changed, 3832 insertions(+) create mode 100644 .gitignore create mode 100644 README.md create mode 100644 config/config.exs create mode 100644 lib/document.ex create mode 100644 lib/readability.ex create mode 100644 lib/test.js create mode 100644 lib/test.rb create mode 100644 lib/title_finder.ex create mode 100644 mix.exs create mode 100644 mix.lock create mode 100644 test/features/nytimes.html create mode 100644 test/readability_test.exs create mode 100644 test/test_helper.exs create mode 100644 test/title_finder_test.exs diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..755b605 --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +/_build +/cover +/deps +erl_crash.dump +*.ez diff --git a/README.md b/README.md new file mode 100644 index 0000000..136e3a3 --- /dev/null +++ b/README.md @@ -0,0 +1,20 @@ +# Readability + +**TODO: Add description** + +## Installation + +If [available in Hex](https://hex.pm/docs/publish), the package can be installed as: + + 1. Add readability to your list of dependencies in `mix.exs`: + + def deps do + [{:readability, "~> 0.0.1"}] + end + + 2. Ensure readability is started before your application: + + def application do + [applications: [:readability]] + end + diff --git a/config/config.exs b/config/config.exs new file mode 100644 index 0000000..ad0fe89 --- /dev/null +++ b/config/config.exs @@ -0,0 +1,30 @@ +# This file is responsible for configuring your application +# and its dependencies with the aid of the Mix.Config module. +use Mix.Config + +# This configuration is loaded before any dependency and is restricted +# to this project. If another project depends on this project, this +# file won't be loaded nor affect the parent project. For this reason, +# if you want to provide default values for your application for +# 3rd-party users, it should be done in your "mix.exs" file. + +# You can configure for your application as: +# +# config :readability, key: :value +# +# And access this configuration in your application as: +# +# Application.get_env(:readability, :key) +# +# Or configure a 3rd-party app: +# +# config :logger, level: :info +# + +# It is also possible to import configuration files, relative to this +# directory. For example, you can emulate configuration per environment +# by uncommenting the line below and defining dev.exs, test.exs and such. +# Configuration from the imported file will override the ones defined +# here (which is why it is important to import them last). 
+# +# import_config "#{Mix.env}.exs" diff --git a/lib/document.ex b/lib/document.ex new file mode 100644 index 0000000..5016756 --- /dev/null +++ b/lib/document.ex @@ -0,0 +1,58 @@ +defmodule Readability.Document do + @default_options [retry_length: 250, + min_text_length: 25, + remove_unlikely_candidates: true, + weight_classes: true, + clean_conditionally: true, + remove_empty_nodes: true, + min_image_width: 130, + min_image_height: 80, + ignore_image_format: [], + blacklist: nil, + whitelist: nil + ] + + @regexes [ unlikelyCandidatesRe: ~r/combx|comment|community|disqus|extra|foot|header|lightbox|modal|menu|meta|nav|remark|rss|shoutbox|sidebar|sponsor|ad-break|agegate|pagination|pager|popup/i, + okMaybeItsACandidateRe: ~r/and|article|body|column|main|shadow/i, + positiveRe: ~r/article|body|content|entry|hentry|main|page|pagination|post|text|blog|story/i, + negativeRe: ~r/combx|comment|com-|contact|foot|footer|footnote|link|masthead|media|meta|outbrain|promo|related|scroll|shoutbox|sidebar|sponsor|shopping|tags|tool|utility|widget/i, + divToPElementsRe: ~r/<(a|blockquote|dl|div|img|ol|p|pre|table|ul)/i, + replaceBrsRe: ~r/(<br[^>]*>[ \n\r\t]*){2,}/i, + replaceFontsRe: ~r/<(\/?)font[^>]*>/i, + trimRe: ~r/^\s+|\s+$/, + normalizeRe: ~r/\s{2,}/, + killBreaksRe: ~r/(<br\s*\/?>(\s|&nbsp;?)*){1,}/, + videoRe: ~r/http:\/\/(www\.)?(youtube|vimeo)\.com/i + ] + + def html do + page + |> String.replace(@regexes[:replaceBrsRe], "
</p><p>
") + |> String.replace(@regexes[:replaceFontsRe], "<\1span>") + |> Floki.find("html") + |> Floki.filter_out(:comment) + end + + def title do + html |> Floki.find("title") |> Floki.text + end + + def content do + html + |> Floki.filter_out("script") + |> Floki.filter_out("style") + end + + def page do + {:ok, f} = File.read("test/features/nytimes.html") + f + end + + def default_options do + @default_options + end + + def regexes do + @regexes + end +end diff --git a/lib/readability.ex b/lib/readability.ex new file mode 100644 index 0000000..fc7121b --- /dev/null +++ b/lib/readability.ex @@ -0,0 +1,10 @@ +defmodule Readability do + alias Readability.TitleFinder + + @type html_tree :: tuple | list + + def title(html) when is_binary(html), do: parse(html) |> title + def title(html_tree), do: TitleFinder.title(html_tree) + + defp parse(raw_html), do: Floki.parse(raw_html) +end diff --git a/lib/test.js b/lib/test.js new file mode 100644 index 0000000..53b4051 --- /dev/null +++ b/lib/test.js @@ -0,0 +1,1835 @@ +/*eslint-env es6:false*/ +/* + * Copyright (c) 2010 Arc90 Inc + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * This code is heavily based on Arc90's readability.js (1.7.1) script + * available at: http://code.google.com/p/arc90labs-readability + */ +var root = this; + +/** + * Public constructor. + * @param {Object} uri The URI descriptor object. + * @param {HTMLDocument} doc The document to parse. + * @param {Object} options The options object. + */ +var Readability = function(uri, doc, options) { + options = options || {}; + + this._uri = uri; + this._doc = doc; + this._biggestFrame = false; + this._articleByline = null; + this._articleDir = null; + + // Configureable options + this._debug = !!options.debug; + this._maxElemsToParse = options.maxElemsToParse || this.DEFAULT_MAX_ELEMS_TO_PARSE; + this._nbTopCandidates = options.nbTopCandidates || this.DEFAULT_N_TOP_CANDIDATES; + this._maxPages = options.maxPages || this.DEFAULT_MAX_PAGES; + + // Start with all flags set + this._flags = this.FLAG_STRIP_UNLIKELYS | + this.FLAG_WEIGHT_CLASSES | + this.FLAG_CLEAN_CONDITIONALLY; + + // The list of pages we've parsed in this call of readability, + // for autopaging. As a key store for easier searching. + this._parsedPages = {}; + + // A list of the ETag headers of pages we've parsed, in case they happen to match, + // we'll know it's a duplicate. + this._pageETags = {}; + + // Make an AJAX request for each page and append it to the document. + this._curPageNum = 1; + + var logEl; + + // Control whether log messages are sent to the console + if (this._debug) { + logEl = function(e) { + var rv = e.nodeName + " "; + if (e.nodeType == e.TEXT_NODE) { + return rv + '("' + e.textContent + '")'; + } + var classDesc = e.className && ("." 
+ e.className.replace(/ /g, ".")); + var elDesc = ""; + if (e.id) + elDesc = "(#" + e.id + classDesc + ")"; + else if (classDesc) + elDesc = "(" + classDesc + ")"; + return rv + elDesc; + }; + this.log = function () { + if ("dump" in root) { + var msg = Array.prototype.map.call(arguments, function(x) { + return (x && x.nodeName) ? logEl(x) : x; + }).join(" "); + dump("Reader: (Readability) " + msg + "\n"); + } else if ("console" in root) { + var args = ["Reader: (Readability) "].concat(arguments); + console.log.apply(console, args); + } + }; + } else { + this.log = function () {}; + } +} + +Readability.prototype = { + FLAG_STRIP_UNLIKELYS: 0x1, + FLAG_WEIGHT_CLASSES: 0x2, + FLAG_CLEAN_CONDITIONALLY: 0x4, + + // Max number of nodes supported by this parser. Default: 0 (no limit) + DEFAULT_MAX_ELEMS_TO_PARSE: 0, + + // The number of top candidates to consider when analysing how + // tight the competition is among candidates. + DEFAULT_N_TOP_CANDIDATES: 5, + + // The maximum number of pages to loop through before we call + // it quits and just show a link. + DEFAULT_MAX_PAGES: 5, + + // Element tags to score by default. + DEFAULT_TAGS_TO_SCORE: "section,h2,h3,h4,h5,h6,p,td,pre".toUpperCase().split(","), + + // All of the regular expressions in use within readability. + // Defined up here so we don't instantiate them repeatedly in loops. + REGEXPS: { + unlikelyCandidates: /banner|combx|comment|community|disqus|extra|foot|header|menu|modal|related|remark|rss|share|shoutbox|sidebar|skyscraper|sponsor|ad-break|agegate|pagination|pager|popup/i, + okMaybeItsACandidate: /and|article|body|column|main|shadow/i, + positive: /article|body|content|entry|hentry|main|page|pagination|post|text|blog|story/i, + negative: /hidden|^hid$| hid$| hid |^hid |banner|combx|comment|com-|contact|foot|footer|footnote|masthead|media|meta|modal|outbrain|promo|related|scroll|share|shoutbox|sidebar|skyscraper|sponsor|shopping|tags|tool|widget/i, + extraneous: /print|archive|comment|discuss|e[\-]?mail|share|reply|all|login|sign|single|utility/i, + byline: /byline|author|dateline|writtenby/i, + replaceFonts: /<(\/?)font[^>]*>/gi, + normalize: /\s{2,}/g, + videos: /\/\/(www\.)?(dailymotion|youtube|youtube-nocookie|player\.vimeo)\.com/i, + nextLink: /(next|weiter|continue|>([^\|]|$)|»([^\|]|$))/i, + prevLink: /(prev|earl|old|new|<|«)/i, + whitespace: /^\s*$/, + hasContent: /\S$/, + }, + + DIV_TO_P_ELEMS: [ "A", "BLOCKQUOTE", "DL", "DIV", "IMG", "OL", "P", "PRE", "TABLE", "UL", "SELECT" ], + + ALTER_TO_DIV_EXCEPTIONS: ["DIV", "ARTICLE", "SECTION", "P"], + + /** + * Run any post-process modifications to article content as necessary. + * + * @param Element + * @return void + **/ + _postProcessContent: function(articleContent) { + // Readability cannot open relative uris so we convert them to absolute uris. + this._fixRelativeUris(articleContent); + }, + + /** + * Iterate over a NodeList, which doesn't natively fully implement the Array + * interface. + * + * For convenience, the current object context is applied to the provided + * iterate function. + * + * @param NodeList nodeList The NodeList. + * @param Function fn The iterate function. + * @return void + */ + _forEachNode: function(nodeList, fn) { + return Array.prototype.forEach.call(nodeList, fn, this); + }, + + /** + * Iterate over a NodeList, return true if any of the provided iterate + * function calls returns true, false otherwise. + * + * For convenience, the current object context is applied to the + * provided iterate function. 
+ * + * @param NodeList nodeList The NodeList. + * @param Function fn The iterate function. + * @return Boolean + */ + _someNode: function(nodeList, fn) { + return Array.prototype.some.call(nodeList, fn, this); + }, + + /** + * Concat all nodelists passed as arguments. + * + * @return ...NodeList + * @return Array + */ + _concatNodeLists: function() { + var slice = Array.prototype.slice; + var args = slice.call(arguments); + var nodeLists = args.map(function(list) { + return slice.call(list); + }); + return Array.prototype.concat.apply([], nodeLists); + }, + + _getAllNodesWithTag: function(node, tagNames) { + if (node.querySelectorAll) { + return node.querySelectorAll(tagNames.join(',')); + } + return [].concat.apply([], tagNames.map(function(tag) { + return node.getElementsByTagName(tag); + })); + }, + + /** + * Converts each and uri in the given element to an absolute URI, + * ignoring #ref URIs. + * + * @param Element + * @return void + */ + _fixRelativeUris: function(articleContent) { + var scheme = this._uri.scheme; + var prePath = this._uri.prePath; + var pathBase = this._uri.pathBase; + + function toAbsoluteURI(uri) { + // If this is already an absolute URI, return it. + if (/^[a-zA-Z][a-zA-Z0-9\+\-\.]*:/.test(uri)) + return uri; + + // Scheme-rooted relative URI. + if (uri.substr(0, 2) == "//") + return scheme + "://" + uri.substr(2); + + // Prepath-rooted relative URI. + if (uri[0] == "/") + return prePath + uri; + + // Dotslash relative URI. + if (uri.indexOf("./") === 0) + return pathBase + uri.slice(2); + + // Ignore hash URIs: + if (uri[0] == "#") + return uri; + + // Standard relative URI; add entire path. pathBase already includes a + // trailing "/". + return pathBase + uri; + } + + var links = articleContent.getElementsByTagName("a"); + this._forEachNode(links, function(link) { + var href = link.getAttribute("href"); + if (href) { + // Replace links with javascript: URIs with text content, since + // they won't work after scripts have been removed from the page. + if (href.indexOf("javascript:") === 0) { + var text = this._doc.createTextNode(link.textContent); + link.parentNode.replaceChild(text, link); + } else { + link.setAttribute("href", toAbsoluteURI(href)); + } + } + }); + + var imgs = articleContent.getElementsByTagName("img"); + this._forEachNode(imgs, function(img) { + var src = img.getAttribute("src"); + if (src) { + img.setAttribute("src", toAbsoluteURI(src)); + } + }); + }, + + /** + * Get the article title as an H1. + * + * @return void + **/ + _getArticleTitle: function() { + var doc = this._doc; + var curTitle = ""; + var origTitle = ""; + + try { + curTitle = origTitle = doc.title; + + // If they had an element with id "title" in their HTML + if (typeof curTitle !== "string") + curTitle = origTitle = this._getInnerText(doc.getElementsByTagName('title')[0]); + } catch(e) {} + + if (curTitle.match(/ [\|\-] /)) { + curTitle = origTitle.replace(/(.*)[\|\-] .*/gi,'$1'); + + if (curTitle.split(' ').length < 3) + curTitle = origTitle.replace(/[^\|\-]*[\|\-](.*)/gi,'$1'); + } else if (curTitle.indexOf(': ') !== -1) { + // Check if we have an heading containing this exact string, so we + // could assume it's the full title. + var headings = this._concatNodeLists( + doc.getElementsByTagName('h1'), + doc.getElementsByTagName('h2') + ); + var match = this._someNode(headings, function(heading) { + return heading.textContent === curTitle; + }); + + // If we don't, let's extract the title out of the original title string. 
+ if (!match) { + curTitle = origTitle.substring(origTitle.lastIndexOf(':') + 1); + + // If the title is now too short, try the first colon instead: + if (curTitle.split(' ').length < 3) + curTitle = origTitle.substring(origTitle.indexOf(':') + 1); + } + } else if (curTitle.length > 150 || curTitle.length < 15) { + var hOnes = doc.getElementsByTagName('h1'); + + if (hOnes.length === 1) + curTitle = this._getInnerText(hOnes[0]); + } + + curTitle = curTitle.trim(); + + if (curTitle.split(' ').length <= 4) + curTitle = origTitle; + + return curTitle; + }, + + /** + * Prepare the HTML document for readability to scrape it. + * This includes things like stripping javascript, CSS, and handling terrible markup. + * + * @return void + **/ + _prepDocument: function() { + var doc = this._doc; + + // Remove all style tags in head + this._forEachNode(doc.getElementsByTagName("style"), function(styleNode) { + styleNode.parentNode.removeChild(styleNode); + }); + + if (doc.body) { + this._replaceBrs(doc.body); + } + + this._forEachNode(doc.getElementsByTagName("font"), function(fontNode) { + this._setNodeTag(fontNode, "SPAN"); + }); + }, + + /** + * Finds the next element, starting from the given node, and ignoring + * whitespace in between. If the given node is an element, the same node is + * returned. + */ + _nextElement: function (node) { + var next = node; + while (next + && (next.nodeType != Node.ELEMENT_NODE) + && this.REGEXPS.whitespace.test(next.textContent)) { + next = next.nextSibling; + } + return next; + }, + + /** + * Replaces 2 or more successive
<br> elements with a single <p>. + * Whitespace between <br> elements are ignored. For example: + * <div>foo<br>bar<br> <br><br>abc</div> + * will become: + * <div>foo<br>bar<p>abc</p></div>
+ */ + _replaceBrs: function (elem) { + this._forEachNode(elem.getElementsByTagName("br"), function(br) { + var next = br.nextSibling; + + // Whether 2 or more
<br> elements have been found and replaced with a + // <p>
block. + var replaced = false; + + // If we find a
<br> chain, remove the <br>s until we hit another element + // or non-whitespace. This leaves behind the first <br> in the chain + // (which will be replaced with a <p>
later). + while ((next = this._nextElement(next)) && (next.tagName == "BR")) { + replaced = true; + var brSibling = next.nextSibling; + next.parentNode.removeChild(next); + next = brSibling; + } + + // If we removed a
<br> chain, replace the remaining <br> with a <p>. Add + // all sibling nodes as children of the <p> until we hit another <br>
+ // chain. + if (replaced) { + var p = this._doc.createElement("p"); + br.parentNode.replaceChild(p, br); + + next = p.nextSibling; + while (next) { + // If we've hit another
<br><br>, we're done adding children to this <p>
. + if (next.tagName == "BR") { + var nextElem = this._nextElement(next); + if (nextElem && nextElem.tagName == "BR") + break; + } + + // Otherwise, make this node a child of the new
<p>
. + var sibling = next.nextSibling; + p.appendChild(next); + next = sibling; + } + } + }); + }, + + _setNodeTag: function (node, tag) { + this.log("_setNodeTag", node, tag); + if (node.__JSDOMParser__) { + node.localName = tag.toLowerCase(); + node.tagName = tag.toUpperCase(); + return node; + } + + var replacement = node.ownerDocument.createElement(tag); + while (node.firstChild) { + replacement.appendChild(node.firstChild); + } + node.parentNode.replaceChild(replacement, node); + if (node.readability) + replacement.readability = node.readability; + + for (var i = 0; i < node.attributes.length; i++) { + replacement.setAttribute(node.attributes[i].name, node.attributes[i].value); + } + return replacement; + }, + + /** + * Prepare the article node for display. Clean out any inline styles, + * iframes, forms, strip extraneous
<p>
tags, etc. + * + * @param Element + * @return void + **/ + _prepArticle: function(articleContent) { + this._cleanStyles(articleContent); + + // Clean out junk from the article content + this._cleanConditionally(articleContent, "form"); + this._clean(articleContent, "object"); + this._clean(articleContent, "embed"); + this._clean(articleContent, "h1"); + this._clean(articleContent, "footer"); + + // If there is only one h2, they are probably using it as a header + // and not a subheader, so remove it since we already have a header. + if (articleContent.getElementsByTagName('h2').length === 1) + this._clean(articleContent, "h2"); + + this._clean(articleContent, "iframe"); + this._cleanHeaders(articleContent); + + // Do these last as the previous stuff may have removed junk + // that will affect these + this._cleanConditionally(articleContent, "table"); + this._cleanConditionally(articleContent, "ul"); + this._cleanConditionally(articleContent, "div"); + + // Remove extra paragraphs + this._forEachNode(articleContent.getElementsByTagName('p'), function(paragraph) { + var imgCount = paragraph.getElementsByTagName('img').length; + var embedCount = paragraph.getElementsByTagName('embed').length; + var objectCount = paragraph.getElementsByTagName('object').length; + // At this point, nasty iframes have been removed, only remain embedded video ones. + var iframeCount = paragraph.getElementsByTagName('iframe').length; + var totalCount = imgCount + embedCount + objectCount + iframeCount; + + if (totalCount === 0 && !this._getInnerText(paragraph, false)) + paragraph.parentNode.removeChild(paragraph); + }); + + this._forEachNode(articleContent.getElementsByTagName("br"), function(br) { + var next = this._nextElement(br.nextSibling); + if (next && next.tagName == "P") + br.parentNode.removeChild(br); + }); + }, + + /** + * Initialize a node with the readability object. Also checks the + * className/id for special names to add to its score. + * + * @param Element + * @return void + **/ + _initializeNode: function(node) { + node.readability = {"contentScore": 0}; + + switch(node.tagName) { + case 'DIV': + node.readability.contentScore += 5; + break; + + case 'PRE': + case 'TD': + case 'BLOCKQUOTE': + node.readability.contentScore += 3; + break; + + case 'ADDRESS': + case 'OL': + case 'UL': + case 'DL': + case 'DD': + case 'DT': + case 'LI': + case 'FORM': + node.readability.contentScore -= 3; + break; + + case 'H1': + case 'H2': + case 'H3': + case 'H4': + case 'H5': + case 'H6': + case 'TH': + node.readability.contentScore -= 5; + break; + } + + node.readability.contentScore += this._getClassWeight(node); + }, + + _removeAndGetNext: function(node) { + var nextNode = this._getNextNode(node, true); + node.parentNode.removeChild(node); + return nextNode; + }, + + /** + * Traverse the DOM from node to node, starting at the node passed in. + * Pass true for the second parameter to indicate this node itself + * (and its kids) are going away, and we want the next node over. + * + * Calling this in a loop will traverse the DOM depth-first. + */ + _getNextNode: function(node, ignoreSelfAndKids) { + // First check for kids if those aren't being ignored + if (!ignoreSelfAndKids && node.firstElementChild) { + return node.firstElementChild; + } + // Then for siblings... + if (node.nextElementSibling) { + return node.nextElementSibling; + } + // And finally, move up the parent chain *and* find a sibling + // (because this is depth-first traversal, we will have already + // seen the parent nodes themselves). 
+ do { + node = node.parentNode; + } while (node && !node.nextElementSibling); + return node && node.nextElementSibling; + }, + + /** + * Like _getNextNode, but for DOM implementations with no + * firstElementChild/nextElementSibling functionality... + */ + _getNextNodeNoElementProperties: function(node, ignoreSelfAndKids) { + function nextSiblingEl(n) { + do { + n = n.nextSibling; + } while (n && n.nodeType !== n.ELEMENT_NODE); + return n; + } + // First check for kids if those aren't being ignored + if (!ignoreSelfAndKids && node.children[0]) { + return node.children[0]; + } + // Then for siblings... + var next = nextSiblingEl(node); + if (next) { + return next; + } + // And finally, move up the parent chain *and* find a sibling + // (because this is depth-first traversal, we will have already + // seen the parent nodes themselves). + do { + node = node.parentNode; + if (node) + next = nextSiblingEl(node); + } while (node && !next); + return node && next; + }, + + _checkByline: function(node, matchString) { + if (this._articleByline) { + return false; + } + + if (node.getAttribute !== undefined) { + var rel = node.getAttribute("rel"); + } + + if ((rel === "author" || this.REGEXPS.byline.test(matchString)) && this._isValidByline(node.textContent)) { + this._articleByline = node.textContent.trim(); + return true; + } + + return false; + }, + + _getNodeAncestors: function(node, maxDepth) { + maxDepth = maxDepth || 0; + var i = 0, ancestors = []; + while (node.parentNode) { + ancestors.push(node.parentNode) + if (maxDepth && ++i === maxDepth) + break; + node = node.parentNode; + } + return ancestors; + }, + + /*** + * grabArticle - Using a variety of metrics (content score, classname, element types), find the content that is + * most likely to be the stuff a user wants to read. Then return it wrapped up in a div. + * + * @param page a document to run upon. Needs to be a full document, complete with body. + * @return Element + **/ + _grabArticle: function (page) { + this.log("**** grabArticle ****"); + var doc = this._doc; + var isPaging = (page !== null ? true: false); + page = page ? page : this._doc.body; + + // We can't grab an article if we don't have a page! + if (!page) { + this.log("No body found in document. Abort."); + return null; + } + + var pageCacheHtml = page.innerHTML; + + // Check if any "dir" is set on the toplevel document element + this._articleDir = doc.documentElement.getAttribute("dir"); + + while (true) { + var stripUnlikelyCandidates = this._flagIsActive(this.FLAG_STRIP_UNLIKELYS); + + // First, node prepping. Trash nodes that look cruddy (like ones with the + // class name "comment", etc), and turn divs into P tags where they have been + // used inappropriately (as in, where they contain no other block level elements.) + var elementsToScore = []; + var node = this._doc.documentElement; + + while (node) { + var matchString = node.className + " " + node.id; + + // Check to see if this node is a byline, and remove it if it is. 
+ if (this._checkByline(node, matchString)) { + node = this._removeAndGetNext(node); + continue; + } + + // Remove unlikely candidates + if (stripUnlikelyCandidates) { + if (this.REGEXPS.unlikelyCandidates.test(matchString) && + !this.REGEXPS.okMaybeItsACandidate.test(matchString) && + node.tagName !== "BODY" && + node.tagName !== "A") { + this.log("Removing unlikely candidate - " + matchString); + node = this._removeAndGetNext(node); + continue; + } + } + + if (this.DEFAULT_TAGS_TO_SCORE.indexOf(node.tagName) !== -1) { + elementsToScore.push(node); + } + + // Turn all divs that don't have children block level elements into p's + if (node.tagName === "DIV") { + // Sites like http://mobile.slate.com encloses each paragraph with a DIV + // element. DIVs with only a P element inside and no text content can be + // safely converted into plain P elements to avoid confusing the scoring + // algorithm with DIVs with are, in practice, paragraphs. + if (this._hasSinglePInsideElement(node)) { + var newNode = node.children[0]; + node.parentNode.replaceChild(newNode, node); + node = newNode; + } else if (!this._hasChildBlockElement(node)) { + node = this._setNodeTag(node, "P"); + elementsToScore.push(node); + } else { + // EXPERIMENTAL + this._forEachNode(node.childNodes, function(childNode) { + if (childNode.nodeType === Node.TEXT_NODE) { + var p = doc.createElement('p'); + p.textContent = childNode.textContent; + p.style.display = 'inline'; + p.className = 'readability-styled'; + node.replaceChild(p, childNode); + } + }); + } + } + node = this._getNextNode(node); + } + + /** + * Loop through all paragraphs, and assign a score to them based on how content-y they look. + * Then add their score to their parent node. + * + * A score is determined by things like number of commas, class names, etc. Maybe eventually link density. + **/ + var candidates = []; + this._forEachNode(elementsToScore, function(elementToScore) { + if (!elementToScore.parentNode || typeof(elementToScore.parentNode.tagName) === 'undefined') + return; + + // If this paragraph is less than 25 characters, don't even count it. + var innerText = this._getInnerText(elementToScore); + if (innerText.length < 25) + return; + + // Exclude nodes with no ancestor. + var ancestors = this._getNodeAncestors(elementToScore, 3); + if (ancestors.length === 0) + return; + + var contentScore = 0; + + // Add a point for the paragraph itself as a base. + contentScore += 1; + + // Add points for any commas within this paragraph. + contentScore += innerText.split(',').length; + + // For every 100 characters in this paragraph, add another point. Up to 3 points. + contentScore += Math.min(Math.floor(innerText.length / 100), 3); + + // Initialize and score ancestors. + this._forEachNode(ancestors, function(ancestor, level) { + if (!ancestor.tagName) + return; + + if (typeof(ancestor.readability) === 'undefined') { + this._initializeNode(ancestor); + candidates.push(ancestor); + } + + // Node score divider: + // - parent: 1 (no division) + // - grandparent: 2 + // - great grandparent+: ancestor level * 3 + if (level === 0) + var scoreDivider = 1; + else if (level === 1) + scoreDivider = 2; + else + scoreDivider = level * 3; + ancestor.readability.contentScore += contentScore / scoreDivider; + }); + }); + + // After we've calculated scores, loop through all of the possible + // candidate nodes we found and find the one with the highest score. 
+ var topCandidates = []; + for (var c = 0, cl = candidates.length; c < cl; c += 1) { + var candidate = candidates[c]; + + // Scale the final candidates score based on link density. Good content + // should have a relatively small link density (5% or less) and be mostly + // unaffected by this operation. + var candidateScore = candidate.readability.contentScore * (1 - this._getLinkDensity(candidate)); + candidate.readability.contentScore = candidateScore; + + this.log('Candidate:', candidate, "with score " + candidateScore); + + for (var t = 0; t < this._nbTopCandidates; t++) { + var aTopCandidate = topCandidates[t]; + + if (!aTopCandidate || candidateScore > aTopCandidate.readability.contentScore) { + topCandidates.splice(t, 0, candidate); + if (topCandidates.length > this._nbTopCandidates) + topCandidates.pop(); + break; + } + } + } + + var topCandidate = topCandidates[0] || null; + var neededToCreateTopCandidate = false; + + // If we still have no top candidate, just use the body as a last resort. + // We also have to copy the body node so it is something we can modify. + if (topCandidate === null || topCandidate.tagName === "BODY") { + // Move all of the page's children into topCandidate + topCandidate = doc.createElement("DIV"); + neededToCreateTopCandidate = true; + // Move everything (not just elements, also text nodes etc.) into the container + // so we even include text directly in the body: + var kids = page.childNodes; + while (kids.length) { + this.log("Moving child out:", kids[0]); + topCandidate.appendChild(kids[0]); + } + + page.appendChild(topCandidate); + + this._initializeNode(topCandidate); + } else if (topCandidate) { + // Because of our bonus system, parents of candidates might have scores + // themselves. They get half of the node. There won't be nodes with higher + // scores than our topCandidate, but if we see the score going *up* in the first + // few steps up the tree, that's a decent sign that there might be more content + // lurking in other places that we want to unify in. The sibling stuff + // below does some of that - but only if we've looked high enough up the DOM + // tree. + var parentOfTopCandidate = topCandidate.parentNode; + var lastScore = topCandidate.readability.contentScore; + // The scores shouldn't get too low. + var scoreThreshold = lastScore / 3; + while (parentOfTopCandidate && parentOfTopCandidate.readability) { + var parentScore = parentOfTopCandidate.readability.contentScore; + if (parentScore < scoreThreshold) + break; + if (parentScore > lastScore) { + // Alright! We found a better parent to use. + topCandidate = parentOfTopCandidate; + break; + } + lastScore = parentOfTopCandidate.readability.contentScore; + parentOfTopCandidate = parentOfTopCandidate.parentNode; + } + } + + // Now that we have the top candidate, look through its siblings for content + // that might also be related. Things like preambles, content split by ads + // that we removed, etc. + var articleContent = doc.createElement("DIV"); + if (isPaging) + articleContent.id = "readability-content"; + + var siblingScoreThreshold = Math.max(10, topCandidate.readability.contentScore * 0.2); + var siblings = topCandidate.parentNode.children; + + for (var s = 0, sl = siblings.length; s < sl; s++) { + var sibling = siblings[s]; + var append = false; + + this.log("Looking at sibling node:", sibling, sibling.readability ? ("with score " + sibling.readability.contentScore) : ''); + this.log("Sibling has score", sibling.readability ? 
sibling.readability.contentScore : 'Unknown'); + + if (sibling === topCandidate) { + append = true; + } else { + var contentBonus = 0; + + // Give a bonus if sibling nodes and top candidates have the example same classname + if (sibling.className === topCandidate.className && topCandidate.className !== "") + contentBonus += topCandidate.readability.contentScore * 0.2; + + if (sibling.readability && + ((sibling.readability.contentScore + contentBonus) >= siblingScoreThreshold)) { + append = true; + } else if (sibling.nodeName === "P") { + var linkDensity = this._getLinkDensity(sibling); + var nodeContent = this._getInnerText(sibling); + var nodeLength = nodeContent.length; + + if (nodeLength > 80 && linkDensity < 0.25) { + append = true; + } else if (nodeLength < 80 && nodeLength > 0 && linkDensity === 0 && + nodeContent.search(/\.( |$)/) !== -1) { + append = true; + } + } + } + + if (append) { + this.log("Appending node:", sibling); + + if (this.ALTER_TO_DIV_EXCEPTIONS.indexOf(sibling.nodeName) === -1) { + // We have a node that isn't a common block level element, like a form or td tag. + // Turn it into a div so it doesn't get filtered out later by accident. + this.log("Altering sibling:", sibling, 'to div.'); + + sibling = this._setNodeTag(sibling, "DIV"); + } + + articleContent.appendChild(sibling); + // siblings is a reference to the children array, and + // sibling is removed from the array when we call appendChild(). + // As a result, we must revisit this index since the nodes + // have been shifted. + s -= 1; + sl -= 1; + } + } + + if (this._debug) + this.log("Article content pre-prep: " + articleContent.innerHTML); + // So we have all of the content that we need. Now we clean it up for presentation. + this._prepArticle(articleContent); + if (this._debug) + this.log("Article content post-prep: " + articleContent.innerHTML); + + if (this._curPageNum === 1) { + if (neededToCreateTopCandidate) { + // We already created a fake div thing, and there wouldn't have been any siblings left + // for the previous loop, so there's no point trying to create a new div, and then + // move all the children over. Just assign IDs and class names here. No need to append + // because that already happened anyway. + topCandidate.id = "readability-page-1"; + topCandidate.className = "page"; + } else { + var div = doc.createElement("DIV"); + div.id = "readability-page-1"; + div.className = "page"; + var children = articleContent.childNodes; + while (children.length) { + div.appendChild(children[0]); + } + articleContent.appendChild(div); + } + } + + if (this._debug) + this.log("Article content after paging: " + articleContent.innerHTML); + + // Now that we've gone through the full algorithm, check to see if + // we got any meaningful content. If we didn't, we may need to re-run + // grabArticle with different flags set. This gives us a higher likelihood of + // finding the content, and the sieve approach gives us a higher likelihood of + // finding the -right- content. 
+ if (this._getInnerText(articleContent, true).length < 500) { + page.innerHTML = pageCacheHtml; + + if (this._flagIsActive(this.FLAG_STRIP_UNLIKELYS)) { + this._removeFlag(this.FLAG_STRIP_UNLIKELYS); + } else if (this._flagIsActive(this.FLAG_WEIGHT_CLASSES)) { + this._removeFlag(this.FLAG_WEIGHT_CLASSES); + } else if (this._flagIsActive(this.FLAG_CLEAN_CONDITIONALLY)) { + this._removeFlag(this.FLAG_CLEAN_CONDITIONALLY); + } else { + return null; + } + } else { + return articleContent; + } + } + }, + + /** + * Check whether the input string could be a byline. + * This verifies that the input is a string, and that the length + * is less than 100 chars. + * + * @param possibleByline {string} - a string to check whether its a byline. + * @return Boolean - whether the input string is a byline. + */ + _isValidByline: function(byline) { + if (typeof byline == 'string' || byline instanceof String) { + byline = byline.trim(); + return (byline.length > 0) && (byline.length < 100); + } + return false; + }, + + /** + * Attempts to get excerpt and byline metadata for the article. + * + * @return Object with optional "excerpt" and "byline" properties + */ + _getArticleMetadata: function() { + var metadata = {}; + var values = {}; + var metaElements = this._doc.getElementsByTagName("meta"); + + // Match "description", or Twitter's "twitter:description" (Cards) + // in name attribute. + var namePattern = /^\s*((twitter)\s*:\s*)?(description|title)\s*$/gi; + + // Match Facebook's Open Graph title & description properties. + var propertyPattern = /^\s*og\s*:\s*(description|title)\s*$/gi; + + // Find description tags. + this._forEachNode(metaElements, function(element) { + var elementName = element.getAttribute("name"); + var elementProperty = element.getAttribute("property"); + + if ([elementName, elementProperty].indexOf("author") !== -1) { + metadata.byline = element.getAttribute("content"); + return; + } + + var name = null; + if (namePattern.test(elementName)) { + name = elementName; + } else if (propertyPattern.test(elementProperty)) { + name = elementProperty; + } + + if (name) { + var content = element.getAttribute("content"); + if (content) { + // Convert to lowercase and remove any whitespace + // so we can match below. + name = name.toLowerCase().replace(/\s/g, ''); + values[name] = content.trim(); + } + } + }); + + if ("description" in values) { + metadata.excerpt = values["description"]; + } else if ("og:description" in values) { + // Use facebook open graph description. + metadata.excerpt = values["og:description"]; + } else if ("twitter:description" in values) { + // Use twitter cards description. + metadata.excerpt = values["twitter:description"]; + } + + if ("og:title" in values) { + // Use facebook open graph title. + metadata.title = values["og:title"]; + } else if ("twitter:title" in values) { + // Use twitter cards title. + metadata.title = values["twitter:title"]; + } + + return metadata; + }, + + /** + * Removes script tags from the document. 
+ * + * @param Element + **/ + _removeScripts: function(doc) { + this._forEachNode(doc.getElementsByTagName('script'), function(scriptNode) { + scriptNode.nodeValue = ""; + scriptNode.removeAttribute('src'); + + if (scriptNode.parentNode) + scriptNode.parentNode.removeChild(scriptNode); + }); + this._forEachNode(doc.getElementsByTagName('noscript'), function(noscriptNode) { + if (noscriptNode.parentNode) + noscriptNode.parentNode.removeChild(noscriptNode); + }); + }, + + /** + * Check if this node has only whitespace and a single P element + * Returns false if the DIV node contains non-empty text nodes + * or if it contains no P or more than 1 element. + * + * @param Element + **/ + _hasSinglePInsideElement: function(element) { + // There should be exactly 1 element child which is a P: + if (element.children.length != 1 || element.children[0].tagName !== "P") { + return false; + } + + // And there should be no text nodes with real content + return !this._someNode(element.childNodes, function(node) { + return node.nodeType === Node.TEXT_NODE && + this.REGEXPS.hasContent.test(node.textContent); + }); + }, + + /** + * Determine whether element has any children block level elements. + * + * @param Element + */ + _hasChildBlockElement: function (element) { + return this._someNode(element.childNodes, function(node) { + return this.DIV_TO_P_ELEMS.indexOf(node.tagName) !== -1 || + this._hasChildBlockElement(node); + }); + }, + + /** + * Get the inner text of a node - cross browser compatibly. + * This also strips out any excess whitespace to be found. + * + * @param Element + * @param Boolean normalizeSpaces (default: true) + * @return string + **/ + _getInnerText: function(e, normalizeSpaces) { + normalizeSpaces = (typeof normalizeSpaces === 'undefined') ? true : normalizeSpaces; + var textContent = e.textContent.trim(); + + if (normalizeSpaces) { + return textContent.replace(this.REGEXPS.normalize, " "); + } else { + return textContent; + } + }, + + /** + * Get the number of times a string s appears in the node e. + * + * @param Element + * @param string - what to split on. Default is "," + * @return number (integer) + **/ + _getCharCount: function(e,s) { + s = s || ","; + return this._getInnerText(e).split(s).length - 1; + }, + + /** + * Remove the style attribute on every e and under. + * TODO: Test if getElementsByTagName(*) is faster. + * + * @param Element + * @return void + **/ + _cleanStyles: function(e) { + e = e || this._doc; + if (!e) + return; + var cur = e.firstChild; + + // Remove any root styles, if we're able. + if (typeof e.removeAttribute === 'function' && e.className !== 'readability-styled') + e.removeAttribute('style'); + + // Go until there are no more child nodes + while (cur !== null) { + if (cur.nodeType === cur.ELEMENT_NODE) { + // Remove style attribute(s) : + if (cur.className !== "readability-styled") + cur.removeAttribute("style"); + + this._cleanStyles(cur); + } + + cur = cur.nextSibling; + } + }, + + /** + * Get the density of links as a percentage of the content + * This is the amount of text that is inside a link divided by the total text in the node. + * + * @param Element + * @return number (float) + **/ + _getLinkDensity: function(element) { + var textLength = this._getInnerText(element).length; + if (textLength === 0) + return 0; + + var linkLength = 0; + + // XXX implement _reduceNodeList? 
+ this._forEachNode(element.getElementsByTagName("a"), function(linkNode) { + linkLength += this._getInnerText(linkNode).length; + }); + + return linkLength / textLength; + }, + + /** + * Find a cleaned up version of the current URL, to use for comparing links for possible next-pageyness. + * + * @author Dan Lacy + * @return string the base url + **/ + _findBaseUrl: function() { + var uri = this._uri; + var noUrlParams = uri.path.split("?")[0]; + var urlSlashes = noUrlParams.split("/").reverse(); + var cleanedSegments = []; + var possibleType = ""; + + for (var i = 0, slashLen = urlSlashes.length; i < slashLen; i += 1) { + var segment = urlSlashes[i]; + + // Split off and save anything that looks like a file type. + if (segment.indexOf(".") !== -1) { + possibleType = segment.split(".")[1]; + + // If the type isn't alpha-only, it's probably not actually a file extension. + if (!possibleType.match(/[^a-zA-Z]/)) + segment = segment.split(".")[0]; + } + + // EW-CMS specific segment replacement. Ugly. + // Example: http://www.ew.com/ew/article/0,,20313460_20369436,00.html + if (segment.indexOf(',00') !== -1) + segment = segment.replace(',00', ''); + + // If our first or second segment has anything looking like a page number, remove it. + if (segment.match(/((_|-)?p[a-z]*|(_|-))[0-9]{1,2}$/i) && ((i === 1) || (i === 0))) + segment = segment.replace(/((_|-)?p[a-z]*|(_|-))[0-9]{1,2}$/i, ""); + + var del = false; + + // If this is purely a number, and it's the first or second segment, + // it's probably a page number. Remove it. + if (i < 2 && segment.match(/^\d{1,2}$/)) + del = true; + + // If this is the first segment and it's just "index", remove it. + if (i === 0 && segment.toLowerCase() === "index") + del = true; + + // If our first or second segment is smaller than 3 characters, + // and the first segment was purely alphas, remove it. + if (i < 2 && segment.length < 3 && !urlSlashes[0].match(/[a-z]/i)) + del = true; + + // If it's not marked for deletion, push it to cleanedSegments. + if (!del) + cleanedSegments.push(segment); + } + + // This is our final, cleaned, base article URL. + return uri.scheme + "://" + uri.host + cleanedSegments.reverse().join("/"); + }, + + /** + * Look for any paging links that may occur within the document. + * + * @param body + * @return object (array) + **/ + _findNextPageLink: function(elem) { + var uri = this._uri; + var possiblePages = {}; + var allLinks = elem.getElementsByTagName('a'); + var articleBaseUrl = this._findBaseUrl(); + + // Loop through all links, looking for hints that they may be next-page links. + // Things like having "page" in their textContent, className or id, or being a child + // of a node with a page-y className or id. + // + // Also possible: levenshtein distance? longest common subsequence? + // + // After we do that, assign each page a score, and + for (var i = 0, il = allLinks.length; i < il; i += 1) { + var link = allLinks[i]; + var linkHref = allLinks[i].href.replace(/#.*$/, '').replace(/\/$/, ''); + + // If we've already seen this page, ignore it. + if (linkHref === "" || + linkHref === articleBaseUrl || + linkHref === uri.spec || + linkHref in this._parsedPages) { + continue; + } + + // If it's on a different domain, skip it. + if (uri.host !== linkHref.split(/\/+/g)[1]) + continue; + + var linkText = this._getInnerText(link); + + // If the linkText looks like it's not the next page, skip it. 
+ if (linkText.match(this.REGEXPS.extraneous) || linkText.length > 25) + continue; + + // If the leftovers of the URL after removing the base URL don't contain + // any digits, it's certainly not a next page link. + var linkHrefLeftover = linkHref.replace(articleBaseUrl, ''); + if (!linkHrefLeftover.match(/\d/)) + continue; + + if (!(linkHref in possiblePages)) { + possiblePages[linkHref] = {"score": 0, "linkText": linkText, "href": linkHref}; + } else { + possiblePages[linkHref].linkText += ' | ' + linkText; + } + + var linkObj = possiblePages[linkHref]; + + // If the articleBaseUrl isn't part of this URL, penalize this link. It could + // still be the link, but the odds are lower. + // Example: http://www.actionscript.org/resources/articles/745/1/JavaScript-and-VBScript-Injection-in-ActionScript-3/Page1.html + if (linkHref.indexOf(articleBaseUrl) !== 0) + linkObj.score -= 25; + + var linkData = linkText + ' ' + link.className + ' ' + link.id; + if (linkData.match(this.REGEXPS.nextLink)) + linkObj.score += 50; + + if (linkData.match(/pag(e|ing|inat)/i)) + linkObj.score += 25; + + if (linkData.match(/(first|last)/i)) { + // -65 is enough to negate any bonuses gotten from a > or » in the text, + // If we already matched on "next", last is probably fine. + // If we didn't, then it's bad. Penalize. + if (!linkObj.linkText.match(this.REGEXPS.nextLink)) + linkObj.score -= 65; + } + + if (linkData.match(this.REGEXPS.negative) || linkData.match(this.REGEXPS.extraneous)) + linkObj.score -= 50; + + if (linkData.match(this.REGEXPS.prevLink)) + linkObj.score -= 200; + + // If a parentNode contains page or paging or paginat + var parentNode = link.parentNode; + var positiveNodeMatch = false; + var negativeNodeMatch = false; + + while (parentNode) { + var parentNodeClassAndId = parentNode.className + ' ' + parentNode.id; + + if (!positiveNodeMatch && parentNodeClassAndId && parentNodeClassAndId.match(/pag(e|ing|inat)/i)) { + positiveNodeMatch = true; + linkObj.score += 25; + } + + if (!negativeNodeMatch && parentNodeClassAndId && parentNodeClassAndId.match(this.REGEXPS.negative)) { + // If this is just something like "footer", give it a negative. + // If it's something like "body-and-footer", leave it be. + if (!parentNodeClassAndId.match(this.REGEXPS.positive)) { + linkObj.score -= 25; + negativeNodeMatch = true; + } + } + + parentNode = parentNode.parentNode; + } + + // If the URL looks like it has paging in it, add to the score. + // Things like /page/2/, /pagenum/2, ?p=3, ?page=11, ?pagination=34 + if (linkHref.match(/p(a|g|ag)?(e|ing|ination)?(=|\/)[0-9]{1,2}/i) || linkHref.match(/(page|paging)/i)) + linkObj.score += 25; + + // If the URL contains negative values, give a slight decrease. + if (linkHref.match(this.REGEXPS.extraneous)) + linkObj.score -= 15; + + /** + * Minor punishment to anything that doesn't match our current URL. + * NOTE: I'm finding this to cause more harm than good where something is exactly 50 points. + * Dan, can you show me a counterexample where this is necessary? + * if (linkHref.indexOf(window.location.href) !== 0) { + * linkObj.score -= 1; + * } + **/ + + // If the link text can be parsed as a number, give it a minor bonus, with a slight + // bias towards lower numbered pages. This is so that pages that might not have 'next' + // in their text can still get scored, and sorted properly by score. + var linkTextAsNumber = parseInt(linkText, 10); + if (linkTextAsNumber) { + // Punish 1 since we're either already there, or it's probably + // before what we want anyways. 
+ if (linkTextAsNumber === 1) { + linkObj.score -= 10; + } else { + linkObj.score += Math.max(0, 10 - linkTextAsNumber); + } + } + } + + // Loop thrugh all of our possible pages from above and find our top + // candidate for the next page URL. Require at least a score of 50, which + // is a relatively high confidence that this page is the next link. + var topPage = null; + for (var page in possiblePages) { + if (possiblePages.hasOwnProperty(page)) { + if (possiblePages[page].score >= 50 && + (!topPage || topPage.score < possiblePages[page].score)) + topPage = possiblePages[page]; + } + } + + if (topPage) { + var nextHref = topPage.href.replace(/\/$/,''); + + this.log('NEXT PAGE IS ' + nextHref); + this._parsedPages[nextHref] = true; + return nextHref; + } else { + return null; + } + }, + + _successfulRequest: function(request) { + return (request.status >= 200 && request.status < 300) || + request.status === 304 || + (request.status === 0 && request.responseText); + }, + + _ajax: function(url, options) { + var request = new XMLHttpRequest(); + + function respondToReadyState(readyState) { + if (request.readyState === 4) { + if (this._successfulRequest(request)) { + if (options.success) + options.success(request); + } else { + if (options.error) + options.error(request); + } + } + } + + if (typeof options === 'undefined') + options = {}; + + request.onreadystatechange = respondToReadyState; + + request.open('get', url, true); + request.setRequestHeader('Accept', 'text/html'); + + try { + request.send(options.postBody); + } catch (e) { + if (options.error) + options.error(); + } + + return request; + }, + + _appendNextPage: function(nextPageLink) { + var doc = this._doc; + this._curPageNum += 1; + + var articlePage = doc.createElement("DIV"); + articlePage.id = 'readability-page-' + this._curPageNum; + articlePage.className = 'page'; + articlePage.innerHTML = '
<p class="page-separator" title="Page ' + this._curPageNum + '">&sect;</p>
'; + + doc.getElementById("readability-content").appendChild(articlePage); + + if (this._curPageNum > this._maxPages) { + var nextPageMarkup = "
<div style='text-align: center'><a href='" + nextPageLink + "'>View Next Page</a></div>
"; + articlePage.innerHTML = articlePage.innerHTML + nextPageMarkup; + return; + } + + // Now that we've built the article page DOM element, get the page content + // asynchronously and load the cleaned content into the div we created for it. + (function(pageUrl, thisPage) { + this._ajax(pageUrl, { + success: function(r) { + + // First, check to see if we have a matching ETag in headers - if we do, this is a duplicate page. + var eTag = r.getResponseHeader('ETag'); + if (eTag) { + if (eTag in this._pageETags) { + this.log("Exact duplicate page found via ETag. Aborting."); + articlePage.style.display = 'none'; + return; + } else { + this._pageETags[eTag] = 1; + } + } + + // TODO: this ends up doubling up page numbers on NYTimes articles. Need to generically parse those away. + var page = doc.createElement("DIV"); + + // Do some preprocessing to our HTML to make it ready for appending. + // - Remove any script tags. Swap and reswap newlines with a unicode + // character because multiline regex doesn't work in javascript. + // - Turn any noscript tags into divs so that we can parse them. This + // allows us to find any next page links hidden via javascript. + // - Turn all double br's into p's - was handled by prepDocument in the original view. + // Maybe in the future abstract out prepDocument to work for both the original document + // and AJAX-added pages. + var responseHtml = r.responseText.replace(/\n/g,'\uffff').replace(/.*?<\/script>/gi, ''); + responseHtml = responseHtml.replace(/\n/g,'\uffff').replace(/.*?<\/script>/gi, ''); + responseHtml = responseHtml.replace(/\uffff/g,'\n').replace(/<(\/?)noscript/gi, '<$1div'); + responseHtml = responseHtml.replace(this.REGEXPS.replaceFonts, '<$1span>'); + + page.innerHTML = responseHtml; + this._replaceBrs(page); + + // Reset all flags for the next page, as they will search through it and + // disable as necessary at the end of grabArticle. + this._flags = 0x1 | 0x2 | 0x4; + + var nextPageLink = this._findNextPageLink(page); + + // NOTE: if we end up supporting _appendNextPage(), we'll need to + // change this call to be async + var content = this._grabArticle(page); + + if (!content) { + this.log("No content found in page to append. Aborting."); + return; + } + + // Anti-duplicate mechanism. Essentially, get the first paragraph of our new page. + // Compare it against all of the the previous document's we've gotten. If the previous + // document contains exactly the innerHTML of this first paragraph, it's probably a duplicate. + var firstP = content.getElementsByTagName("P").length ? content.getElementsByTagName("P")[0] : null; + if (firstP && firstP.innerHTML.length > 100) { + for (var i = 1; i <= this._curPageNum; i += 1) { + var rPage = doc.getElementById('readability-page-' + i); + if (rPage && rPage.innerHTML.indexOf(firstP.innerHTML) !== -1) { + this.log('Duplicate of page ' + i + ' - skipping.'); + articlePage.style.display = 'none'; + this._parsedPages[pageUrl] = true; + return; + } + } + } + + this._removeScripts(content); + + thisPage.innerHTML = thisPage.innerHTML + content.innerHTML; + + // After the page has rendered, post process the content. This delay is necessary because, + // in webkit at least, offsetWidth is not set in time to determine image width. We have to + // wait a little bit for reflow to finish before we can fix floating images. 
+ setTimeout((function() { + this._postProcessContent(thisPage); + }).bind(this), 500); + + + if (nextPageLink) + this._appendNextPage(nextPageLink); + } + }); + }).bind(this)(nextPageLink, articlePage); + }, + + /** + * Get an elements class/id weight. Uses regular expressions to tell if this + * element looks good or bad. + * + * @param Element + * @return number (Integer) + **/ + _getClassWeight: function(e) { + if (!this._flagIsActive(this.FLAG_WEIGHT_CLASSES)) + return 0; + + var weight = 0; + + // Look for a special classname + if (typeof(e.className) === 'string' && e.className !== '') { + if (this.REGEXPS.negative.test(e.className)) + weight -= 25; + + if (this.REGEXPS.positive.test(e.className)) + weight += 25; + } + + // Look for a special ID + if (typeof(e.id) === 'string' && e.id !== '') { + if (this.REGEXPS.negative.test(e.id)) + weight -= 25; + + if (this.REGEXPS.positive.test(e.id)) + weight += 25; + } + + return weight; + }, + + /** + * Clean a node of all elements of type "tag". + * (Unless it's a youtube/vimeo video. People love movies.) + * + * @param Element + * @param string tag to clean + * @return void + **/ + _clean: function(e, tag) { + var isEmbed = ["object", "embed", "iframe"].indexOf(tag) !== -1; + + this._forEachNode(e.getElementsByTagName(tag), function(element) { + // Allow youtube and vimeo videos through as people usually want to see those. + if (isEmbed) { + var attributeValues = [].map.call(element.attributes, function(attr) { + return attr.value; + }).join("|"); + + // First, check the elements attributes to see if any of them contain youtube or vimeo + if (this.REGEXPS.videos.test(attributeValues)) + return; + + // Then check the elements inside this element for the same. + if (this.REGEXPS.videos.test(element.innerHTML)) + return; + } + + element.parentNode.removeChild(element); + }); + }, + + /** + * Check if a given node has one of its ancestor tag name matching the + * provided one. + * @param HTMLElement node + * @param String tagName + * @param Number maxDepth + * @return Boolean + */ + _hasAncestorTag: function(node, tagName, maxDepth) { + maxDepth = maxDepth || 3; + tagName = tagName.toUpperCase(); + var depth = 0; + while (node.parentNode) { + if (depth > maxDepth) + return false; + if (node.parentNode.tagName === tagName) + return true; + node = node.parentNode; + depth++; + } + return false; + }, + + /** + * Clean an element of all tags of type "tag" if they look fishy. + * "Fishy" is an algorithm based on content length, classnames, link density, number of images & embeds, etc. + * + * @return void + **/ + _cleanConditionally: function(e, tag) { + if (!this._flagIsActive(this.FLAG_CLEAN_CONDITIONALLY)) + return; + + var tagsList = e.getElementsByTagName(tag); + var curTagsLength = tagsList.length; + var isList = tag === "ul" || tag === "ol"; + + // Gather counts for other typical elements embedded within. + // Traverse backwards so we can remove nodes at the same time + // without effecting the traversal. + // + // TODO: Consider taking into account original contentScore here. 
+ for (var i = curTagsLength-1; i >= 0; i -= 1) { + var weight = this._getClassWeight(tagsList[i]); + var contentScore = 0; + + this.log("Cleaning Conditionally", tagsList[i]); + + if (weight + contentScore < 0) { + tagsList[i].parentNode.removeChild(tagsList[i]); + } else if (this._getCharCount(tagsList[i],',') < 10) { + // If there are not very many commas, and the number of + // non-paragraph elements is more than paragraphs or other + // ominous signs, remove the element. + var p = tagsList[i].getElementsByTagName("p").length; + var img = tagsList[i].getElementsByTagName("img").length; + var li = tagsList[i].getElementsByTagName("li").length-100; + var input = tagsList[i].getElementsByTagName("input").length; + + var embedCount = 0; + var embeds = tagsList[i].getElementsByTagName("embed"); + for (var ei = 0, il = embeds.length; ei < il; ei += 1) { + if (!this.REGEXPS.videos.test(embeds[ei].src)) + embedCount += 1; + } + + var linkDensity = this._getLinkDensity(tagsList[i]); + var contentLength = this._getInnerText(tagsList[i]).length; + var toRemove = false; + if (img > p && !this._hasAncestorTag(tagsList[i], "figure")) { + toRemove = true; + } else if (!isList && li > p) { + toRemove = true; + } else if (input > Math.floor(p/3)) { + toRemove = true; + } else if (!isList && contentLength < 25 && (img === 0 || img > 2)) { + toRemove = true; + } else if (!isList && weight < 25 && linkDensity > 0.2) { + toRemove = true; + } else if (weight >= 25 && linkDensity > 0.5) { + toRemove = true; + } else if ((embedCount === 1 && contentLength < 75) || embedCount > 1) { + toRemove = true; + } + + if (toRemove) { + tagsList[i].parentNode.removeChild(tagsList[i]); + } + } + } + }, + + /** + * Clean out spurious headers from an Element. Checks things like classnames and link density. + * + * @param Element + * @return void + **/ + _cleanHeaders: function(e) { + for (var headerIndex = 1; headerIndex < 3; headerIndex += 1) { + var headers = e.getElementsByTagName('h' + headerIndex); + for (var i = headers.length - 1; i >= 0; i -= 1) { + if (this._getClassWeight(headers[i]) < 0) + headers[i].parentNode.removeChild(headers[i]); + } + } + }, + + _flagIsActive: function(flag) { + return (this._flags & flag) > 0; + }, + + _addFlag: function(flag) { + this._flags = this._flags | flag; + }, + + _removeFlag: function(flag) { + this._flags = this._flags & ~flag; + }, + + /** + * Decides whether or not the document is reader-able without parsing the whole thing. + * + * @return boolean Whether or not we suspect parse() will suceeed at returning an article object. + */ + isProbablyReaderable: function(helperIsVisible) { + var nodes = this._getAllNodesWithTag(this._doc, ["p", "pre"]); + + // FIXME we should have a fallback for helperIsVisible, but this is + // problematic because of jsdom's elem.style handling - see + // https://github.com/mozilla/readability/pull/186 for context. 
+ + var score = 0; + // This is a little cheeky, we use the accumulator 'score' to decide what to return from + // this callback: + return this._someNode(nodes, function(node) { + if (helperIsVisible && !helperIsVisible(node)) + return false; + var matchString = node.className + " " + node.id; + + if (this.REGEXPS.unlikelyCandidates.test(matchString) && + !this.REGEXPS.okMaybeItsACandidate.test(matchString)) { + return false; + } + + if (node.matches && node.matches("li p")) { + return false; + } + + var textContentLength = node.textContent.trim().length; + if (textContentLength < 140) { + return false; + } + + score += Math.sqrt(textContentLength - 140); + + if (score > 20) { + return true; + } + return false; + }); + }, + + /** + * Runs readability. + * + * Workflow: + * 1. Prep the document by removing script tags, css, etc. + * 2. Build readability's DOM tree. + * 3. Grab the article content from the current dom tree. + * 4. Replace the current DOM tree with the new one. + * 5. Read peacefully. + * + * @return void + **/ + parse: function () { + // Avoid parsing too large documents, as per configuration option + if (this._maxElemsToParse > 0) { + var numTags = this._doc.getElementsByTagName("*").length; + if (numTags > this._maxElemsToParse) { + throw new Error("Aborting parsing document; " + numTags + " elements found"); + } + } + + if (typeof this._doc.documentElement.firstElementChild === "undefined") { + this._getNextNode = this._getNextNodeNoElementProperties; + } + // Remove script tags from the document. + this._removeScripts(this._doc); + + // FIXME: Disabled multi-page article support for now as it + // needs more work on infrastructure. + + // Make sure this document is added to the list of parsed pages first, + // so we don't double up on the first page. + // this._parsedPages[uri.spec.replace(/\/$/, '')] = true; + + // Pull out any possible next page link first. + // var nextPageLink = this._findNextPageLink(doc.body); + + this._prepDocument(); + + var metadata = this._getArticleMetadata(); + var articleTitle = metadata.title || this._getArticleTitle(); + + var articleContent = this._grabArticle(); + if (!articleContent) + return null; + + this.log("Grabbed: " + articleContent.innerHTML); + + this._postProcessContent(articleContent); + + // if (nextPageLink) { + // // Append any additional pages after a small timeout so that people + // // can start reading without having to wait for this to finish processing. + // setTimeout((function() { + // this._appendNextPage(nextPageLink); + // }).bind(this), 500); + // } + + // If we haven't found an excerpt in the article's metadata, use the article's + // first paragraph as the excerpt. This is used for displaying a preview of + // the article's content. 
+ if (!metadata.excerpt) { + var paragraphs = articleContent.getElementsByTagName("p"); + if (paragraphs.length > 0) { + metadata.excerpt = paragraphs[0].textContent.trim(); + } + } + + return { uri: this._uri, + title: articleTitle, + byline: metadata.byline || this._articleByline, + dir: this._articleDir, + content: articleContent.innerHTML, + length: articleContent.textContent.length, + excerpt: metadata.excerpt }; + } +}; diff --git a/lib/test.rb b/lib/test.rb new file mode 100644 index 0000000..813de6b --- /dev/null +++ b/lib/test.rb @@ -0,0 +1,522 @@ +# encoding: utf-8 + +require 'rubygems' +require 'nokogiri' +require 'guess_html_encoding' + +module Readability + class Document + DEFAULT_OPTIONS = { + :retry_length => 250, + :min_text_length => 25, + :remove_unlikely_candidates => true, + :weight_classes => true, + :clean_conditionally => true, + :remove_empty_nodes => true, + :min_image_width => 130, + :min_image_height => 80, + :ignore_image_format => [], + :blacklist => nil, + :whitelist => nil + }.freeze + + REGEXES = { + :unlikelyCandidatesRe => /combx|comment|community|disqus|extra|foot|header|menu|remark|rss|shoutbox|sidebar|sponsor|ad-break|agegate|pagination|pager|popup/i, + :okMaybeItsACandidateRe => /and|article|body|column|main|shadow/i, + :positiveRe => /article|body|content|entry|hentry|main|page|pagination|post|text|blog|story/i, + :negativeRe => /combx|comment|com-|contact|foot|footer|footnote|masthead|media|meta|outbrain|promo|related|scroll|shoutbox|sidebar|sponsor|shopping|tags|tool|widget/i, + :divToPElementsRe => /<(a|blockquote|dl|div|img|ol|p|pre|table|ul)/i, + :replaceBrsRe => /(]*>[ \n\r\t]*){2,}/i, + :replaceFontsRe => /<(\/?)font[^>]*>/i, + :trimRe => /^\s+|\s+$/, + :normalizeRe => /\s{2,}/, + :killBreaksRe => /((\s| ?)*){1,}/, + :videoRe => /http:\/\/(www\.)?(youtube|vimeo)\.com/i + } + + attr_accessor :options, :html, :best_candidate, :candidates, :best_candidate_has_image + + def initialize(input, options = {}) + @options = DEFAULT_OPTIONS.merge(options) + @input = input + + if RUBY_VERSION =~ /^(1\.9|2)/ && !@options[:encoding] + @input = GuessHtmlEncoding.encode(@input, @options[:html_headers]) unless @options[:do_not_guess_encoding] + @options[:encoding] = @input.encoding.to_s + end + + @input = @input.gsub(REGEXES[:replaceBrsRe], '
</p><p>
').gsub(REGEXES[:replaceFontsRe], '<\1span>') + @remove_unlikely_candidates = @options[:remove_unlikely_candidates] + @weight_classes = @options[:weight_classes] + @clean_conditionally = @options[:clean_conditionally] + @best_candidate_has_image = true + make_html + handle_exclusions!(@options[:whitelist], @options[:blacklist]) + end + + def images(content=nil, reload=false) + begin + require 'fastimage' + rescue LoadError + raise "Please install fastimage in order to use the #images feature." + end + + @best_candidate_has_image = false if reload + + prepare_candidates + list_images = [] + tested_images = [] + content = @best_candidate[:elem] unless reload + + return list_images if content.nil? + elements = content.css("img").map(&:attributes) + + elements.each do |element| + next unless element["src"] + + url = element["src"].value + height = element["height"].nil? ? 0 : element["height"].value.to_i + width = element["width"].nil? ? 0 : element["width"].value.to_i + + if url =~ /\Ahttps?:\/\//i && (height.zero? || width.zero?) + image = get_image_size(url) + next unless image + else + image = {:width => width, :height => height} + end + + image[:format] = File.extname(url).gsub(".", "") + + if tested_images.include?(url) + debug("Image was tested: #{url}") + next + end + + tested_images.push(url) + if image_meets_criteria?(image) + list_images << url + else + debug("Image discarded: #{url} - height: #{image[:height]} - width: #{image[:width]} - format: #{image[:format]}") + end + end + + (list_images.empty? and content != @html) ? images(@html, true) : list_images + end + + def images_with_fqdn_uris!(source_uri) + images_with_fqdn_uris(@html, source_uri) + end + + def images_with_fqdn_uris(document = @html.dup, source_uri) + uri = URI.parse(source_uri) + host = uri.host + scheme = uri.scheme + port = uri.port # defaults to 80 + + base = "#{scheme}://#{host}:#{port}/" + + images = [] + document.css("img").each do |elem| + begin + elem['src'] = URI.join(base,elem['src']).to_s if URI.parse(elem['src']).host == nil + images << elem['src'].to_s + rescue URI::InvalidURIError => exc + elem.remove + end + end + + images(document,true) + end + + def get_image_size(url) + w, h = FastImage.size(url) + raise "Couldn't get size." if w.nil? || h.nil? + {:width => w, :height => h} + rescue => e + debug("Image error: #{e}") + nil + end + + def image_meets_criteria?(image) + return false if options[:ignore_image_format].include?(image[:format].downcase) + image[:width] >= (options[:min_image_width] || 0) && image[:height] >= (options[:min_image_height] || 0) + end + + def title + title = @html.css("title").first + title ? title.text : nil + end + + # Look through the @html document looking for the author + # Precedence Information here on the wiki: (TODO attach wiki URL if it is accepted) + # Returns nil if no author is detected + def author + # Let's grab this author: + # + author_elements = @html.xpath('//meta[@name = "dc.creator"]') + unless author_elements.empty? + author_elements.each do |element| + return element['content'].strip if element['content'] + end + end + + # Now let's try to grab this + # + #

<div class="vcard"><span class="fn">By ...</span></div>
+ author_elements = @html.xpath('//*[contains(@class, "vcard")]//*[contains(@class, "fn")]') + unless author_elements.empty? + author_elements.each do |element| + return element.text.strip if element.text + end + end + + # Now let's try to grab this + # + # TODO: strip out the (rel)? + author_elements = @html.xpath('//a[@rel = "author"]') + unless author_elements.empty? + author_elements.each do |element| + return element.text.strip if element.text + end + end + + author_elements = @html.xpath('//*[@id = "author"]') + unless author_elements.empty? + author_elements.each do |element| + return element.text.strip if element.text + end + end + end + + def content(remove_unlikely_candidates = :default) + @remove_unlikely_candidates = false if remove_unlikely_candidates == false + + prepare_candidates + article = get_article(@candidates, @best_candidate) + + cleaned_article = sanitize(article, @candidates, options) + if article.text.strip.length < options[:retry_length] + if @remove_unlikely_candidates + @remove_unlikely_candidates = false + elsif @weight_classes + @weight_classes = false + elsif @clean_conditionally + @clean_conditionally = false + else + # nothing we can do + return cleaned_article + end + + make_html + content + else + cleaned_article + end + end + + def get_article(candidates, best_candidate) + # Now that we have the top candidate, look through its siblings for content that might also be related. + # Things like preambles, content split by ads that we removed, etc. + + sibling_score_threshold = [10, best_candidate[:content_score] * 0.2].max + output = Nokogiri::XML::Node.new('div', @html) + best_candidate[:elem].parent.children.each do |sibling| + append = false + append = true if sibling == best_candidate[:elem] + append = true if candidates[sibling] && candidates[sibling][:content_score] >= sibling_score_threshold + + if sibling.name.downcase == "p" + link_density = get_link_density(sibling) + node_content = sibling.text + node_length = node_content.length + + append = if node_length > 80 && link_density < 0.25 + true + elsif node_length < 80 && link_density == 0 && node_content =~ /\.( |$)/ + true + end + end + + if append + sibling_dup = sibling.dup # otherwise the state of the document in processing will change, thus creating side effects + sibling_dup.name = "div" unless %w[div p].include?(sibling.name.downcase) + output << sibling_dup + end + end + + output + end + + def select_best_candidate(candidates) + sorted_candidates = candidates.values.sort { |a, b| b[:content_score] <=> a[:content_score] } + + debug("Top 5 candidates:") + sorted_candidates[0...5].each do |candidate| + debug("Candidate #{candidate[:elem].name}##{candidate[:elem][:id]}.#{candidate[:elem][:class]} with score #{candidate[:content_score]}") + end + + best_candidate = sorted_candidates.first || { :elem => @html.css("body").first, :content_score => 0 } + debug("Best candidate #{best_candidate[:elem].name}##{best_candidate[:elem][:id]}.#{best_candidate[:elem][:class]} with score #{best_candidate[:content_score]}") + + best_candidate + end + + def get_link_density(elem) + link_length = elem.css("a").map(&:text).join("").length + text_length = elem.text.length + link_length / text_length.to_f + end + + def class_weight(e) + weight = 0 + return weight unless @weight_classes + + if e[:class] && e[:class] != "" + weight -= 25 if e[:class] =~ REGEXES[:negativeRe] + weight += 25 if e[:class] =~ REGEXES[:positiveRe] + end + + if e[:id] && e[:id] != "" + weight -= 25 if e[:id] =~ REGEXES[:negativeRe] + weight 
+= 25 if e[:id] =~ REGEXES[:positiveRe] + end + + weight + end + + ELEMENT_SCORES = { + 'div' => 5, + 'blockquote' => 3, + 'form' => -3, + 'th' => -5 + }.freeze + + def score_node(elem) + content_score = class_weight(elem) + content_score += ELEMENT_SCORES.fetch(elem.name.downcase, 0) + { :content_score => content_score, :elem => elem } + end + + def debug(str) + puts str if options[:debug] + end + + def sanitize(node, candidates, options = {}) + node.css("h1, h2, h3, h4, h5, h6").each do |header| + header.remove if class_weight(header) < 0 || get_link_density(header) > 0.33 + end + + node.css("form, object, iframe, embed").each do |elem| + elem.remove + end + + if @options[:remove_empty_nodes] + # remove

<p> tags that have no text content - this will also remove p tags that contain only images.
+ node.css("p").each do |elem|
+ elem.remove if elem.content.strip.empty?
+ end
+ end
+
+ # Conditionally clean <table>s, <ul>s, and <div>s
+ clean_conditionally(node, candidates, "table, ul, div")
+
+ # We'll sanitize all elements using a whitelist
+ base_whitelist = @options[:tags] || %w[div p]
+ # We'll add whitespace instead of block elements,
+ # so a <br>
    b will have a nice space between them + base_replace_with_whitespace = %w[br hr h1 h2 h3 h4 h5 h6 dl dd ol li ul address blockquote center] + + # Use a hash for speed (don't want to make a million calls to include?) + whitelist = Hash.new + base_whitelist.each {|tag| whitelist[tag] = true } + replace_with_whitespace = Hash.new + base_replace_with_whitespace.each { |tag| replace_with_whitespace[tag] = true } + + ([node] + node.css("*")).each do |el| + # If element is in whitelist, delete all its attributes + if whitelist[el.node_name] + el.attributes.each { |a, x| el.delete(a) unless @options[:attributes] && @options[:attributes].include?(a.to_s) } + + # Otherwise, replace the element with its contents + else + # If element is root, replace the node as a text node + if el.parent.nil? + node = Nokogiri::XML::Text.new(el.text, el.document) + break + else + if replace_with_whitespace[el.node_name] + el.swap(Nokogiri::XML::Text.new(' ' << el.text << ' ', el.document)) + else + el.swap(Nokogiri::XML::Text.new(el.text, el.document)) + end + end + end + + end + + s = Nokogiri::XML::Node::SaveOptions + save_opts = s::NO_DECLARATION | s::NO_EMPTY_TAGS | s::AS_XHTML + html = node.serialize(:save_with => save_opts) + + # Get rid of duplicate whitespace + return html.gsub(/[\r\n\f]+/, "\n" ) + end + + def clean_conditionally(node, candidates, selector) + return unless @clean_conditionally + node.css(selector).each do |el| + weight = class_weight(el) + content_score = candidates[el] ? candidates[el][:content_score] : 0 + name = el.name.downcase + + if weight + content_score < 0 + el.remove + debug("Conditionally cleaned #{name}##{el[:id]}.#{el[:class]} with weight #{weight} and content score #{content_score} because score + content score was less than zero.") + elsif el.text.count(",") < 10 + counts = %w[p img li a embed input].inject({}) { |m, kind| m[kind] = el.css(kind).length; m } + counts["li"] -= 100 + + # For every img under a noscript tag discount one from the count to avoid double counting + counts["img"] -= el.css("noscript").css("img").length + + content_length = el.text.strip.length # Count the text length excluding any surrounding whitespace + link_density = get_link_density(el) + + reason = clean_conditionally_reason?(name, counts, content_length, options, weight, link_density) + if reason + debug("Conditionally cleaned #{name}##{el[:id]}.#{el[:class]} with weight #{weight} and content score #{content_score} because it has #{reason}.") + el.remove + end + end + end + end + + def clean_conditionally_reason?(name, counts, content_length, options, weight, link_density) + if (counts["img"] > counts["p"]) && (counts["img"] > 1) + "too many images" + elsif counts["li"] > counts["p"] && name != "ul" && name != "ol" + "more
<li>s than <p>s"
+ elsif counts["input"] > (counts["p"] / 3).to_i
+ "less than 3x <p>s than <input>s"
+ elsif (content_length < options[:min_text_length]) && (counts["img"] != 1)
+ "too short a content length without a single image"
+ elsif weight < 25 && link_density > 0.2
+ "too many links for its weight (#{weight})"
+ elsif weight >= 25 && link_density > 0.5
+ "too many links for its weight (#{weight})"
+ elsif (counts["embed"] == 1 && content_length < 75) || counts["embed"] > 1
+ "<embed>s with too short a content length, or too many <embed>s"
+ else
+ nil
+ end
+ end
+
+ private
+
+ # Specify the elements to remove (blacklist) and the elements to keep (whitelist).
+ def handle_exclusions!(whitelist, blacklist)
+ return unless whitelist || blacklist
+
+ if blacklist
+ elems = @html.css(blacklist)
+ if elems
+ elems.each do |e|
+ e.remove
+ end
+ end
+ end
+
+ if whitelist
+ elems = @html.css(whitelist).to_s
+
+ if body = @html.at_css('body')
+ body.inner_html = elems
+ end
+ end
+
+ @input = @html.to_s
+ end
+
+ # Return the base HTML node with comments removed.
+ def make_html(whitelist=nil, blacklist=nil)
+ @html = Nokogiri::HTML(@input, nil, @options[:encoding])
+ # In case document has no body, such as from empty string or redirect
+ @html = Nokogiri::HTML('<body />', nil, @options[:encoding]) if @html.css('body').length == 0
+ # Remove html comment tags
+ @html.xpath('//comment()').each { |i| i.remove }
+ end
+
+
+ def prepare_candidates
+ @html.css("script, style").each { |i| i.remove }
+ remove_unlikely_candidates! if @remove_unlikely_candidates
+ transform_misused_divs_into_paragraphs!
+
+ @candidates = score_paragraphs(options[:min_text_length])
+ @best_candidate = select_best_candidate(@candidates)
+ end
+
+ # Remove unlikely candidates (elements that clearly may be candidates are kept).
+ def remove_unlikely_candidates!
+ @html.css("*").each do |elem|
+ str = "#{elem[:class]}#{elem[:id]}"
+ if str =~ REGEXES[:unlikelyCandidatesRe] && str !~ REGEXES[:okMaybeItsACandidateRe] && (elem.name.downcase != 'html') && (elem.name.downcase != 'body')
+ debug("Removing unlikely candidate - #{str}")
+ elem.remove
+ end
+ end
+ end
+
+ # Transform misused DIVs into paragraphs.
+ def transform_misused_divs_into_paragraphs!
+ @html.css("*").each do |elem|
+ if elem.name.downcase == "div"
+ # transform <div>s that do not contain other block elements into <p>s
+ if elem.inner_html !~ REGEXES[:divToPElementsRe]
+ debug("Altering div(##{elem[:id]}.#{elem[:class]}) to p");
+ elem.name = "p"
+ end
+ else
+ # wrap text nodes in p tags
+# elem.children.each do |child|
+# if child.text?
+# debug("wrapping text node with a p")
+# child.swap("<p>#{child.text}</p>
    ") +# end +# end + end + end + end + + # 가능노드에 점수를 매긴다. + def score_paragraphs(min_text_length) + candidates = {} + @html.css("p,td").each do |elem| + parent_node = elem.parent + grand_parent_node = parent_node.respond_to?(:parent) ? parent_node.parent : nil + inner_text = elem.text + + # If this paragraph is less than 25 characters, don't even count it. + next if inner_text.length < min_text_length + + candidates[parent_node] ||= score_node(parent_node) + candidates[grand_parent_node] ||= score_node(grand_parent_node) if grand_parent_node + + content_score = 1 + content_score += inner_text.split(',').length + content_score += [(inner_text.length / 100).to_i, 3].min + + candidates[parent_node][:content_score] += content_score + candidates[grand_parent_node][:content_score] += content_score / 2.0 if grand_parent_node + end + + # Scale the final candidates score based on link density. Good content should have a + # relatively small link density (5% or less) and be mostly unaffected by this operation. + candidates.each do |elem, candidate| + candidate[:content_score] = candidate[:content_score] * (1 - get_link_density(elem)) + end + + candidates + end + end +end diff --git a/lib/title_finder.ex b/lib/title_finder.ex new file mode 100644 index 0000000..8a1305f --- /dev/null +++ b/lib/title_finder.ex @@ -0,0 +1,64 @@ +defmodule Readability.TitleFinder do + @moduledoc """ + The TitleFinder engine traverse the HTML tree searching for finding title. + """ + + @title_suffix ~r/(\-)|(\:\:)|(\|)/ + @h_tag_selector "h1, h2, h3" + + @type html_tree :: tuple | list + + def title(html_tree) do + maybe_title = tag_title(html_tree) + if length(String.split(maybe_title, " ")) <= 4 do + maybe_title = og_title(html_tree) + end + maybe_title || h_tag_title(html_tree) + end + + @doc """ + Find title from title tag + """ + + @spec tag_title(html_tree) :: binary + + def tag_title(html_tree) do + html_tree + |> Floki.find("title") + |> to_clean_text + end + + @doc """ + Find title from og:title property of meta tag + """ + + @spec og_title(html_tree) :: binary + + def og_title(html_tree) do + html_tree + |> Floki.find("meta[property=og:title]") + |> Floki.attribute("content") + |> to_clean_text + end + + @doc """ + Find title from h tag + """ + + @spec h_tag_title(html_tree, String.t) :: binary + + def h_tag_title(html_tree, selector \\@h_tag_selector) do + html_tree + |> Floki.find(selector) + |> hd + |> to_clean_text + end + + defp to_clean_text(html_tree) do + title_text = html_tree + |> Floki.text + |> String.split(@title_suffix) + |> hd + |> String.strip + end +end diff --git a/mix.exs b/mix.exs new file mode 100644 index 0000000..2720fad --- /dev/null +++ b/mix.exs @@ -0,0 +1,34 @@ +defmodule Readability.Mixfile do + use Mix.Project + + def project do + [app: :readability, + version: "0.0.1", + elixir: "~> 1.2", + build_embedded: Mix.env == :prod, + start_permanent: Mix.env == :prod, + deps: deps] + end + + # Configuration for the OTP application + # + # Type "mix help compile.app" for more information + def application do + [applications: [:logger, + :floki + ]] + end + + # Dependencies can be Hex packages: + # + # {:mydep, "~> 0.3.0"} + # + # Or git/path repositories: + # + # {:mydep, git: "https://github.com/elixir-lang/mydep.git", tag: "0.1.0"} + # + # Type "mix help deps" for more examples and options + defp deps do + [{:floki, "~> 0.8.0"}] + end +end diff --git a/mix.lock b/mix.lock new file mode 100644 index 0000000..7874674 --- /dev/null +++ b/mix.lock @@ -0,0 +1,2 @@ +%{"floki": {:hex, :floki, 
"0.8.0"}, + "mochiweb_html": {:hex, :mochiweb_html, "2.13.0"}} diff --git a/test/features/nytimes.html b/test/features/nytimes.html new file mode 100644 index 0000000..a46d649 --- /dev/null +++ b/test/features/nytimes.html @@ -0,0 +1,1198 @@ + + + + + + + + + + + +Pence questions Obama at House GOP conference - washingtonpost.com + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + +
    +

+ + + +
+ +
+ +
+
+
+ + + + + +
+
+

Pence questions Obama at House GOP conference

+ +
+
+ +
+ + + + + + + + + + + + +
+
+ + +
+
+
Discussion Policy
+ + +
+
+ +
+ Comments that include profanity or personal attacks or other inappropriate comments or material will be removed from the site. Additionally, entries that are unsigned or contain "signatures" by someone other than the actual author will be removed. Finally, we will take steps to block users who violate any of our posting standards, terms of use or privacy policies or any other policies governing this site. Please review the full rules governing commentaries and discussions. You are fully responsible for the content that you post. +
+ +
+
+ + + +
+ +
+ + +
+ +Friday, January 29, 2010; 1:47 PM +

+

+
+ +

+After addressing the GOP House Issues Conference in Baltimore on Friday, President Obama took a series of questions from the lawmakers. Here is a transcript of one of the questions posed to the president:

+
+

REP. MIKE PENCE (R-Ind.): We are pleased to have you return (inaudible) a year ago. House Republicans said then we would make you two promises. Number one, that most people in this room and their families would pray for you and your beautiful family just about every day for the four years. I want to assure you we're keeping that promise.

+

+OBAMA: I appreciate that. +

+

+PENCE: Number two, (inaudible) to you, Mr. President, was that door (ph) was always open. And we hope that by evidence of our invitation to you that we can demonstrate that (inaudible). +

+

+Mr. President, (inaudible) us in this conference yesterday, on the way into Baltimore, stopped by the Salvation Army homeless facility here in Baltimore yesterday. +

+

+I met a little boy, an African-American boy, in the 8th grade, named David Carter Jr. +

+

+When he heard that I would be seeing you today, his eyes lit up like I haven't seen. And I told him if he wrote you a letter, I'd give it to you. And I have. +

+

+But I had a conversation with little David Jr. and David Sr. And their families are struggling in this economy. His dad said words to me, Mr. President, that I'll never forget. About my age, and he said -- he said, "Congressman, it's not like it was when we were coming up." He said, "There's just no jobs." +

+

+Now, last year, about the time you met with us, unemployment was 7.5 percent in this country. Your administration and your party in Congress told us that we'd have to borrow more than $700 billion to pay for a so-called stimulus bill that was a piecemeal list of projects and boutique tax cuts, all of which we were told had to be passed or unemployment would go to 8 percent, as your administration said. +

+

+Well, unemployment is 10 percent now, as you well know, Mr. President. Here in Baltimore, it's considerably higher. +

+

+Now, Republicans offered a stimulus bill at the same time. It cost half as much as the Democratic proposal in Congress. And using your economic analyst models, it would have created twice the jobs at half the cost. It essentially was across-the-board tax relief, Mr. President. +

+

+Now, we know you've come to Baltimore today and you've -- you've raised this -- a tax credit which was last promoted by President Jimmy Carter. +

+
+ +
+
+ +
+
+CONTINUED     + +1 +   + +   + +   + + +    > +

+
+
+
+
+
+
+
+

+
+
+
+ + +
+ + + +
+ +

More in the Politics Section

+ +
+
+
+ +Campaign Finance -- Presidential Race +

2008 Fundraising

+

See who is giving to the '08 presidential candidates.

+ + +
+
+
+
+ +

Latest Politics Blog Updates

+ + +
+
+
+
+
+ + +
+ +
© 2010 The Washington Post Company
+
+
+
+ + + + + + + + + +
+ + + + + + + + + + + + + + + +
+
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/test/readability_test.exs b/test/readability_test.exs new file mode 100644 index 0000000..7623e59 --- /dev/null +++ b/test/readability_test.exs @@ -0,0 +1,8 @@ +defmodule ReadabilityTest do + use ExUnit.Case + doctest Readability + + test "the truth" do + assert 1 + 1 == 2 + end +end diff --git a/test/test_helper.exs b/test/test_helper.exs new file mode 100644 index 0000000..869559e --- /dev/null +++ b/test/test_helper.exs @@ -0,0 +1 @@ +ExUnit.start() diff --git a/test/title_finder_test.exs b/test/title_finder_test.exs new file mode 100644 index 0000000..e9a8b2d --- /dev/null +++ b/test/title_finder_test.exs @@ -0,0 +1,45 @@ +defmodule Readability.TitleFinderTest do + use ExUnit.Case, async: true + + doctest Readability + + @html """ + + + Tag title - test + + + +

+

+ <meta property="og:title" content="og title">
+ </head>
+ <body>
+ <p>
+ <h1>h1 title</h1>
+ <h2>h2 title</h2>
+ </p>
+ </body>
+ </html>
+ """
+
+ test "extract og title" do
+ title = Readability.TitleFinder.og_title(@html)
+ assert title == "og title"
+ end
+
+ test "extract tag title" do
+ title = Readability.TitleFinder.tag_title(@html)
+ assert title == "Tag title"
+ end
+
+ test "extract h1 tag title" do
+ title = Readability.TitleFinder.h_tag_title(@html)
+ assert title == "h1 title"
+ end
+
+ test "extract h2 tag title" do
+ title = Readability.TitleFinder.h_tag_title(@html, "h2")
+ assert title == "h2 title"
+ end
+
+ test "extract most proper title" do
+ title = Readability.TitleFinder.title(@html)
+ assert title == "og title"
+ end
+end
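A quick usage sketch of the Readability.TitleFinder module introduced above, following lib/title_finder.ex and the expectations in test/title_finder_test.exs. The markup below paraphrases the test fixture and the session is illustrative only; it is not part of the patch.

    # Illustrative usage only, not part of this commit.
    # Run with `iex -S mix`; requires the :floki dependency declared in mix.exs.
    html = """
    <html>
      <head>
        <title>Tag title - test</title>
        <meta property="og:title" content="og title">
      </head>
      <body>
        <p><h1>h1 title</h1><h2>h2 title</h2></p>
      </body>
    </html>
    """

    Readability.TitleFinder.tag_title(html)   #=> "Tag title"  (text of <title>, cut at the first "-", "::" or "|")
    Readability.TitleFinder.og_title(html)    #=> "og title"   (content attribute of meta[property=og:title])
    Readability.TitleFinder.h_tag_title(html) #=> "h1 title"   (first element matching "h1, h2, h3")
    Readability.TitleFinder.title(html)       #=> "og title"   (prefers og:title for short <title> text)

title/1 starts from the <title> text and, when that text splits into four words or fewer, prefers the og:title value instead, keeping the first h1/h2/h3 heading only as a last resort when no other title is found.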