feat: natgeo parser (#89)
* feat: natgeo parser

Same as the news.nationalgeographic.com parser - for some reason the
author name doesn’t appear to be getting pulled into the local copy of
the file.

* fix: content assertion

* fix: generalize author byline

* disable: author assertion

* rm: author assertion

* fix: image lead, handles image-group

* fix: guard against missing img url

* fix: generalize dek and title selectors
janetleekim authored and dviramontes committed Feb 8, 2017
1 parent 08b5bb7 commit 2279c2d
Showing 4 changed files with 166 additions and 0 deletions.
3 changes: 3 additions & 0 deletions fixtures/www.nationalgeographic.com/1481921323654.html

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions src/extractors/custom/index.js
@@ -37,6 +37,7 @@ export * from './www.reuters.com';
export * from './mashable.com';
export * from './www.chicagotribune.com';
export * from './www.vox.com';
export * from './www.nationalgeographic.com';
export * from './www.latimes.com';
export * from './pagesix.com';
export * from './thefederalistpapers.org';
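This one-line export is what makes the new extractor discoverable: Mercury's `getExtractor` (imported in the test file below) resolves a custom extractor from the request URL. As a rough sketch only — not the actual Mercury source — a domain-keyed lookup like the following would be consistent with the test's assertion that `extractor.domain === URL.parse(url).hostname`:

// Sketch: build a map keyed by each extractor's `domain`, then look up
// the URL's hostname. Assumed structure, not the real get-extractor.js.
import URL from 'url';
import * as CustomExtractors from 'extractors/custom';

const Extractors = Object.keys(CustomExtractors).reduce((acc, key) => {
  const extractor = CustomExtractors[key];
  return { ...acc, [extractor.domain]: extractor };
}, {});

export default function getExtractor(url) {
  const { hostname } = URL.parse(url);
  // Fall back to generic behavior when no custom extractor matches
  return Extractors[hostname] || { domain: hostname };
}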
75 changes: 75 additions & 0 deletions src/extractors/custom/www.nationalgeographic.com/index.js
@@ -0,0 +1,75 @@
export const WwwNationalgeographicComExtractor = {
  domain: 'www.nationalgeographic.com',

  title: {
    selectors: [
      'h1',
      'h1.main-title',
    ],
  },

  author: {
    selectors: [
      '.byline-component__contributors b span',
    ],
  },

  date_published: {
    selectors: [
      ['meta[name="article:published_time"]', 'value'],
    ],
  },

  dek: {
    selectors: [
      '.article__deck',
    ],
  },

  lead_image_url: {
    selectors: [
      ['meta[name="og:image"]', 'value'],
    ],
  },

  content: {
    selectors: [
      ['.parsys.content', '.__image-lead__'],
      '.content',
    ],

    // Is there anything in the content you selected that needs transformed
    // before it's consumable content? E.g., unusual lazy loaded images
    transforms: {
      '.parsys.content': ($node, $) => {
        const $imageParent = $node.children().first();
        if ($imageParent.hasClass('imageGroup')) {
          const $dataAttrContainer = $imageParent.find('.media--medium__container').children().first();
          const imgPath1 = $dataAttrContainer.data('platform-image1-path');
          const imgPath2 = $dataAttrContainer.data('platform-image2-path');
          if (imgPath2 && imgPath1) {
            $node.prepend($(`<div class="__image-lead__">
              <img src="${imgPath1}"/>
              <img src="${imgPath2}"/>
            </div>`));
          }
        } else {
          const $imgSrc = $node.find('.image.parbase.section')
            .find('.picturefill')
            .first()
            .data('platform-src');
          if ($imgSrc) {
            $node.prepend($(`<img class="__image-lead__" src="${$imgSrc}"/>`));
          }
        }
      },
    },

    // Is there anything that is in the result that shouldn't be?
    // The clean selectors will remove anything that matches from
    // the result
    clean: [
      '.pull-quote.pull-quote--small',
    ],
  },
};
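The `.parsys.content` transform above covers the two lead-image layouts named in the commit message: an `imageGroup` block whose image paths live in `data-platform-image1-path`/`data-platform-image2-path` attributes, and a single `.picturefill` image carrying `data-platform-src`. In both cases it prepends a `.__image-lead__` node, which is what the content selector `['.parsys.content', '.__image-lead__']` keys on. A minimal sketch of the `imageGroup` branch against simplified, made-up markup (the real page structure is far more involved):

import cheerio from 'cheerio';

// Hypothetical, simplified markup exercising the imageGroup branch.
const html = `
  <div class="parsys content">
    <div class="imageGroup">
      <div class="media--medium__container">
        <div data-platform-image1-path="/img/lead-1.jpg"
             data-platform-image2-path="/img/lead-2.jpg"></div>
      </div>
    </div>
    <p>Article body</p>
  </div>`;

const $ = cheerio.load(html);
const $node = $('.parsys.content');

// Same logic as the transform: read both data attributes and prepend a
// .__image-lead__ wrapper so the content selector can pick it up.
const $dataAttrContainer = $node.find('.media--medium__container').children().first();
const imgPath1 = $dataAttrContainer.data('platform-image1-path');
const imgPath2 = $dataAttrContainer.data('platform-image2-path');
if (imgPath1 && imgPath2) {
  $node.prepend(`<div class="__image-lead__"><img src="${imgPath1}"/><img src="${imgPath2}"/></div>`);
}

console.log($node.find('.__image-lead__ img').length); // 2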
87 changes: 87 additions & 0 deletions src/extractors/custom/www.nationalgeographic.com/index.test.js
@@ -0,0 +1,87 @@
import assert from 'assert';
import fs from 'fs';
import URL from 'url';
import cheerio from 'cheerio';

import Mercury from 'mercury';
import getExtractor from 'extractors/get-extractor';
import { excerptContent } from 'utils/text';

describe('WwwNationalgeographicComExtractor', () => {
  describe('initial test case', () => {
    let result;
    let url;
    beforeAll(() => {
      url =
        'http://www.nationalgeographic.com/magazine/2017/01/gender-toys-departments-piece/';
      const html =
        fs.readFileSync('./fixtures/www.nationalgeographic.com/1481921323654.html');
      result =
        Mercury.parse(url, html, { fallback: false });
    });

    it('is selected properly', () => {
      // This test should be passing by default.
      // It sanity checks that the correct parser
      // is being selected for URLs from this domain
      const extractor = getExtractor(url);
      assert.equal(extractor.domain, URL.parse(url).hostname);
    });

    it('returns the title', async () => {
      // To pass this test, fill out the title selector
      // in ./src/extractors/custom/www.nationalgeographic.com/index.js.
      const { title } = await result;

      // Update these values with the expected values from
      // the article.
      assert.equal(title, 'How Today\'s Toys May Be Harming Your Daughter');
    });

    it('returns the date_published', async () => {
      // To pass this test, fill out the date_published selector
      // in ./src/extractors/custom/www.nationalgeographic.com/index.js.
      const { date_published } = await result;

      // Update these values with the expected values from
      // the article.
      assert.equal(date_published, '2016-12-15T16:39:00.000Z');
    });

    it('returns the dek', async () => {
      // To pass this test, fill out the dek selector
      // in ./src/extractors/custom/www.nationalgeographic.com/index.js.
      const { dek } = await result;

      // Update these values with the expected values from
      // the article.
      assert.equal(dek, 'The long history of separate toys for girls and boys shows that marketing by gender has a profound impact on children.');
    });

    it('returns the lead_image_url', async () => {
      // To pass this test, fill out the lead_image_url selector
      // in ./src/extractors/custom/www.nationalgeographic.com/index.js.
      const { lead_image_url } = await result;

      // Update these values with the expected values from
      // the article.
      assert.equal(lead_image_url, 'http://www.nationalgeographic.com/content/dam/magazine/rights-exempt/2017/01/Departments/gendertoys/gendertoysOG.ngsversion.1481823676336.png');
    });

    it('returns the content', async () => {
      // To pass this test, fill out the content selector
      // in ./src/extractors/custom/www.nationalgeographic.com/index.js.
      // You may also want to make use of the clean and transform
      // options.
      const { content } = await result;

      const $ = cheerio.load(content || '');

      const first13 = excerptContent($('*').first().text(), 13);

      // Update these values with the expected values from
      // the article.
      assert.equal(first13, 'This story appears in the January 2017 issue of National Geographic magazine. Read');
    });
  });
});
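Outside the test runner, the same fixture can be fed through `Mercury.parse` directly to see the extractor end to end. A sketch, assuming the same Mercury build the test imports (its return value is awaitable, so `.then` works) and reusing the test's `{ fallback: false }` option; the field values in the comments are taken from the assertions above, and `author` is left unasserted, matching the byline issue noted in the commit message:

import fs from 'fs';
import Mercury from 'mercury';

const url = 'http://www.nationalgeographic.com/magazine/2017/01/gender-toys-departments-piece/';
const html = fs.readFileSync('./fixtures/www.nationalgeographic.com/1481921323654.html');

// Sketch only: { fallback: false } keeps the generic parser out of the way
// so the result reflects the custom extractor alone.
Mercury.parse(url, html, { fallback: false }).then((result) => {
  console.log(result.title);          // How Today's Toys May Be Harming Your Daughter
  console.log(result.date_published); // 2016-12-15T16:39:00.000Z
  console.log(result.lead_image_url); // ...gendertoysOG.ngsversion.1481823676336.png
});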
