[fix] google & youtube - set EU consent cookie
This changes the previous method of bypassing the Google consent page,
``ucbcb=1`` (6face21), to accepting the consent with a ``CONSENT=YES+``
cookie.

The youtube_noapi and google engines share a similar API, at least for the
consent handling [1].

Get the CONSENT cookie from a google request::

    curl -i "https://www.google.com/search?q=time&tbm=isch" \
         -A "Mozilla/5.0 (X11; Linux i686; rv:102.0) Gecko/20100101 Firefox/102.0" \
         | grep -i consent
    ...
    location: https://consent.google.com/m?continue=https://www.google.com/search?q%3Dtime%26tbm%3Disch&gl=DE&m=0&pc=irp&uxe=eomtm&hl=en-US&src=1
    set-cookie: CONSENT=PENDING+936; expires=Wed, 24-Jul-2024 11:26:20 GMT; path=/; domain=.google.com; Secure
    ...
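
With the cookie set up front, the redirect to consent.google.com should not
happen.  A minimal sketch to verify this (assuming ``httpx``, the HTTP
client SearXNG uses; URL and user agent as in the curl example above)::

    import httpx

    resp = httpx.get(
        'https://www.google.com/search?q=time&tbm=isch',
        headers={
            'User-Agent': 'Mozilla/5.0 (X11; Linux i686; rv:102.0)'
                          ' Gecko/20100101 Firefox/102.0'
        },
        cookies={'CONSENT': 'YES+'},
        follow_redirects=False,
    )
    # expected: 200 with the results page, not a 302 to consent.google.com
    print(resp.status_code, resp.headers.get('location'))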

PENDING & YES [2]:

  Google changed the way consent to the YouTube cookies agreement is given
  in EU countries.  Instead of showing a popup on the website, YouTube
  redirects the user to a new webpage at the consent.youtube.com domain ...
  The fix for this is to set a cookie CONSENT with the value YES+ for every
  YouTube request.
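
All seven engines touched by this commit apply the same one-line fix in
their ``request()`` hook.  Schematically (a sketch, not one of the real
engine modules)::

    def request(query, params):
        # ... build params['url'] as before ...

        # send the EU consent cookie up front, so Google/YouTube do not
        # redirect to consent.google.com / consent.youtube.com
        params['cookies']['CONSENT'] = "YES+"
        return params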

[1] iv-org/invidious#2207
[2] TeamNewPipe/NewPipeExtractor#592

Closes: searxng/searxng#1432
unixfox authored and kvch committed Jul 30, 2022
1 parent 86bd82d commit 7123aa1
Showing 7 changed files with 83 additions and 7 deletions.
1 change: 1 addition & 0 deletions searx/engines/google.py
@@ -235,6 +235,7 @@ def request(query, params):
     params['url'] = query_url
 
     logger.debug("HTTP header Accept-Language --> %s", lang_info.get('Accept-Language'))
+    params['cookies']['CONSENT'] = "YES+"
     params['headers'].update(lang_info['headers'])
     if use_mobile_ui:
         params['headers']['Accept'] = '*/*'
1 change: 1 addition & 0 deletions searx/engines/google_images.py
@@ -121,6 +121,7 @@ def request(query, params):
     params['url'] = query_url
 
     logger.debug("HTTP header Accept-Language --> %s", lang_info.get('Accept-Language'))
+    params['cookies']['CONSENT'] = "YES+"
     params['headers'].update(lang_info['headers'])
     params['headers']['Accept'] = (
         'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
2 changes: 2 additions & 0 deletions searx/engines/google_news.py
@@ -111,6 +111,8 @@ def request(query, params):
     params['url'] = query_url
 
     logger.debug("HTTP header Accept-Language --> %s", lang_info.get('Accept-Language'))
+
+    params['cookies']['CONSENT'] = "YES+"
     params['headers'].update(lang_info['headers'])
     params['headers']['Accept'] = (
         'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
69 changes: 69 additions & 0 deletions searx/engines/google_play_apps.py
@@ -0,0 +1,69 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Google Play Apps
"""

from urllib.parse import urlencode
from lxml import html
from searx.utils import (
    eval_xpath,
    extract_url,
    extract_text,
    eval_xpath_list,
    eval_xpath_getindex,
)

about = {
    "website": "https://play.google.com/",
    "wikidata_id": "Q79576",
    "use_official_api": False,
    "require_api_key": False,
    "results": "HTML",
}

categories = ["files", "apps"]
search_url = "https://play.google.com/store/search?{query}&c=apps"


def request(query, params):
    params["url"] = search_url.format(query=urlencode({"q": query}))
    params['cookies']['CONSENT'] = "YES+"

    return params


def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    if eval_xpath(dom, '//div[@class="v6DsQb"]'):
        return []

    spot = eval_xpath_getindex(dom, '//div[@class="ipRz4"]', 0, None)
    if spot is not None:
        url = extract_url(eval_xpath(spot, './a[@class="Qfxief"]/@href'), search_url)
        title = extract_text(eval_xpath(spot, './/div[@class="vWM94c"]'))
        content = extract_text(eval_xpath(spot, './/div[@class="LbQbAe"]'))
        img = extract_text(eval_xpath(spot, './/img[@class="T75of bzqKMd"]/@src'))

        results.append({"url": url, "title": title, "content": content, "img_src": img})

    more = eval_xpath_list(dom, '//c-wiz[@jsrenderer="RBsfwb"]//div[@role="listitem"]', min_len=1)
    for result in more:
        url = extract_url(eval_xpath(result, ".//a/@href"), search_url)
        title = extract_text(eval_xpath(result, './/span[@class="DdYX5"]'))
        content = extract_text(eval_xpath(result, './/span[@class="wMUdtb"]'))
        img = extract_text(
            eval_xpath(
                result,
                './/img[@class="T75of stzEZd" or @class="T75of etjhNc Q8CSx "]/@src',
            )
        )

        results.append({"url": url, "title": title, "content": content, "img_src": img})

    for suggestion in eval_xpath_list(dom, '//c-wiz[@jsrenderer="qyd4Kb"]//div[@class="ULeU3b neq64b"]'):
        results.append({"suggestion": extract_text(eval_xpath(suggestion, './/div[@class="Epkrse "]'))})

    return results
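
A note on the helpers used above: ``eval_xpath_getindex(dom, xpath, 0, None)``
returns its fourth argument as a default when the XPath yields no element at
that index, which is what makes the ``if spot is not None`` guard work.  A
minimal illustration with hypothetical markup::

    from lxml import html
    from searx.utils import eval_xpath_getindex

    dom = html.fromstring('<div><p class="a">hit</p></div>')
    eval_xpath_getindex(dom, '//p[@class="a"]', 0, None)  # -> the <p> element
    eval_xpath_getindex(dom, '//p[@class="b"]', 0, None)  # -> None, no raise
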
15 changes: 8 additions & 7 deletions searx/engines/google_scholar.py
@@ -85,20 +85,21 @@ def request(query, params):
     # subdomain is: scholar.google.xy
     lang_info['subdomain'] = lang_info['subdomain'].replace("www.", "scholar.")
 
-    query_url = 'https://'+ lang_info['subdomain'] + '/scholar' + "?" + urlencode({
-        'q': query,
-        **lang_info['params'],
-        'ie': "utf8",
-        'oe': "utf8",
-        'start' : offset,
-    })
+    query_url = (
+        'https://'
+        + lang_info['subdomain']
+        + '/scholar'
+        + "?"
+        + urlencode({'q': query, **lang_info['params'], 'ie': "utf8", 'oe': "utf8", 'start': offset})
+    )
 
     query_url += time_range_url(params)
 
     logger.debug("query_url --> %s", query_url)
     params['url'] = query_url
 
     logger.debug("HTTP header Accept-Language --> %s", lang_info.get('Accept-Language'))
+    params['cookies']['CONSENT'] = "YES+"
     params['headers'].update(lang_info['headers'])
     params['headers']['Accept'] = (
         'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
1 change: 1 addition & 0 deletions searx/engines/google_videos.py
@@ -138,6 +138,7 @@ def request(query, params):
     params['url'] = query_url
 
     logger.debug("HTTP header Accept-Language --> %s", lang_info.get('Accept-Language'))
+    params['cookies']['CONSENT'] = "YES+"
     params['headers'].update(lang_info['headers'])
     params['headers']['Accept'] = (
         'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
1 change: 1 addition & 0 deletions searx/engines/youtube_noapi.py
@@ -44,6 +44,7 @@
 
 # do search-request
 def request(query, params):
+    params['cookies']['CONSENT'] = "YES+"
     if not params['engine_data'].get('next_page_token'):
         params['url'] = search_url.format(query=quote_plus(query), page=params['pageno'])
         if params['time_range'] in time_range_dict:
