use app.zyte.com now (#432)
* use app.zyte.com now.

* fixes compatibility with pipenv>=2023.10.24

---------

Co-authored-by: hraza07 <[email protected]>
Co-authored-by: Adrián Chaves <[email protected]>
3 people authored Nov 7, 2023
1 parent e6f35c0 commit 41c4b1a
Showing 14 changed files with 26 additions and 23 deletions.
docs/deploying.rst (4 changes: 2 additions & 2 deletions)

@@ -20,7 +20,7 @@ your ``scrapinghub.yml``, you can leave out the parameter completely::
Packing version 3af023e-master
Deploying to Scrapy Cloud project "12345"
{"status": "ok", "project": 12345, "version": "3af023e-master", "spiders": 1}
-Run your spiders at: https://app.scrapinghub.com/p/12345/
+Run your spiders at: https://app.zyte.com/p/12345/

You can also deploy your project from a Python egg, or build one without
deploying::
@@ -29,7 +29,7 @@ deploying::
Using egg: egg_name
Deploying to Scrapy Cloud project "12345"
{"status": "ok", "project": 12345, "version": "1.0.0", "spiders": 1}
-Run your spiders at: https://app.scrapinghub.com/p/12345/
+Run your spiders at: https://app.zyte.com/p/12345/

::

docs/scheduling.rst (6 changes: 3 additions & 3 deletions)

@@ -24,7 +24,7 @@ and ``-s`` options::
or print items as they are being scraped:
shub items -f 2/15
or watch it running in Scrapinghub's web interface:
-https://app.scrapinghub.com/p/12345/job/2/15
+https://app.zyte.com/p/12345/job/2/15

::

@@ -35,7 +35,7 @@ and ``-s`` options::
or print items as they are being scraped:
shub items -f 2/15
or watch it running in Scrapinghub's web interface:
-https://app.scrapinghub.com/p/33333/job/2/15
+https://app.zyte.com/p/33333/job/2/15

You can also specify the amount of Scrapy Cloud units (``-u``) and the priority (``-p``)::

@@ -46,7 +46,7 @@ You can also specify the amount of Scrapy Cloud units (``-u``) and the priority
or print items as they are being scraped:
shub items -f 2/16
or watch it running in Scrapinghub's web interface:
-https://app.scrapinghub.com/p/12345/job/2/16
+https://app.zyte.com/p/12345/job/2/16

shub provides commands to retrieve log entries, scraped items, or requests from
jobs. If the job is still running, you can provide the ``-f`` (follow) option
shub/config.py (6 changes: 3 additions & 3 deletions)

@@ -28,7 +28,7 @@

class ShubConfig(object):

-DEFAULT_ENDPOINT = 'https://app.scrapinghub.com/api/'
+DEFAULT_ENDPOINT = 'https://app.zyte.com/api/'

# Dictionary option name: Shortcut to set 'default' key
SHORTCUTS = {
@@ -57,11 +57,11 @@ def _check_endpoints(self):
parsed = six.moves.urllib.parse.urlparse(url)
if parsed.netloc == 'staging.scrapinghub.com':
self.endpoints[endpoint] = six.moves.urllib.parse.urlunparse(
-parsed._replace(netloc='app.scrapinghub.com')
+parsed._replace(netloc='app.zyte.com')
)
click.echo(
'WARNING: Endpoint "%s" is still using %s which has been '
-'obsoleted. shub has updated it to app.scrapinghub.com '
+'obsoleted. shub has updated it to app.zyte.com '
'for this time only. Please update your configuration.' % (
endpoint, parsed.netloc,
),
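
In isolation, the rewrite above is just a netloc swap on the parsed URL. A minimal sketch without the six shim (the function name is illustrative, not part of shub):

    from urllib.parse import urlparse, urlunparse

    def migrate_endpoint(url):
        # Rewrite the obsoleted staging host to the current app host.
        parsed = urlparse(url)
        if parsed.netloc == 'staging.scrapinghub.com':
            return urlunparse(parsed._replace(netloc='app.zyte.com'))
        return url

    # migrate_endpoint('https://staging.scrapinghub.com/api/')
    # -> 'https://app.zyte.com/api/'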
shub/deploy.py (5 changes: 4 additions & 1 deletion)

@@ -107,7 +107,7 @@ def deploy_cmd(target, version, debug, egg, build_egg, verbose, keep_log,
version, auth, verbose, keep_log, targetconf.stack,
targetconf.requirements_file, targetconf.eggs, tmpdir)
click.echo("Run your spiders at: "
"https://app.scrapinghub.com/p/%s/"
"https://app.zyte.com/p/%s/"
"" % targetconf.project_id)
finally:
if tmpdir:
@@ -201,6 +201,9 @@ def _add_sources(
# Keep backward compatibility with pipenv<=2022.8.30
if isinstance(_requirements, list):
tmp.write('\n'.join(_requirements).encode('utf-8'))
+# Keep compatible with pipenv>=v2023.10.24
+elif isinstance(_requirements, dict):
+tmp.write('\n'.join(_requirements.values()).encode('utf-8'))
else:
with open(_requirements, 'rb') as f:
tmp.write(f.read())
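
For context, the new branch exists because pipenv changed the shape of the requirements data this code consumes: pipenv<=2022.8.30 produces a list of requirement lines, while pipenv>=2023.10.24 produces a dict whose values are the requirement lines (assumed here to be keyed by package name). A minimal sketch of the normalization, with an illustrative helper name:

    def requirements_text(reqs):
        # pipenv<=2022.8.30 returns a list of requirement lines;
        # pipenv>=2023.10.24 returns a dict of name -> requirement line.
        if isinstance(reqs, dict):
            reqs = reqs.values()
        return '\n'.join(reqs)

    assert requirements_text(['scrapy==2.11.0']) == 'scrapy==2.11.0'
    assert requirements_text({'scrapy': 'scrapy==2.11.0'}) == 'scrapy==2.11.0'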
shub/items.py (2 changes: 1 addition & 1 deletion)

@@ -15,7 +15,7 @@
You can also provide the Scrapinghub job URL instead:
-shub items https://app.scrapinghub.com/p/12345/2/15
+shub items https://app.zyte.com/p/12345/2/15
You can omit the project ID if you have a default target defined in your
scrapinghub.yml:
shub/log.py (2 changes: 1 addition & 1 deletion)

@@ -17,7 +17,7 @@
You can also provide the Scrapinghub job URL instead:
-shub log https://app.scrapinghub.com/p/12345/2/15
+shub log https://app.zyte.com/p/12345/2/15
You can omit the project ID if you have a default target defined in your
scrapinghub.yml:
shub/login.py (4 changes: 2 additions & 2 deletions)

@@ -16,7 +16,7 @@
with your Scrapinghub account.
You can find your API key in Scrapinghub's dashboard:
-https://app.scrapinghub.com/account/apikey
+https://app.zyte.com/account/apikey
"""

SHORT_HELP = "Save your Scrapinghub API key"
@@ -41,7 +41,7 @@ def cli():
def _get_apikey(suggestion='', endpoint=None):
suggestion_txt = ' (%s)' % suggestion if suggestion else ''
click.echo(
"Enter your API key from https://app.scrapinghub.com/account/apikey"
"Enter your API key from https://app.zyte.com/account/apikey"
)
while True:
key = input('API key%s: ' % suggestion_txt) or suggestion
shub/requests.py (2 changes: 1 addition & 1 deletion)

@@ -15,7 +15,7 @@
You can also provide the Scrapinghub job URL instead:
-shub requests https://app.scrapinghub.com/p/12345/2/15
+shub requests https://app.zyte.com/p/12345/2/15
You can omit the project ID if you have a default target defined in your
scrapinghub.yml:
shub/utils.py (2 changes: 1 addition & 1 deletion)

@@ -397,7 +397,7 @@ def _deploy_dependency_egg(project, endpoint, apikey, name=None, version=None, e
files = {'egg': (egg_name, egg_fp)}
make_deploy_request(url, data, files, auth, False, False)

success = "Deployed eggs list at: https://app.scrapinghub.com/p/%s/deploy/"
success = "Deployed eggs list at: https://app.zyte.com/p/%s/deploy/"
click.echo(success % project)


tests/image/test_deploy.py (4 changes: 2 additions & 2 deletions)

@@ -45,7 +45,7 @@ def test_cli(list_mocked, post_mocked, get_mocked):

auth_cfg = '{"email": null, "password": " ", "username": "abcdef"}'
post_mocked.assert_called_with(
-'https://app.scrapinghub.com/api/releases/deploy.json',
+'https://app.zyte.com/api/releases/deploy.json',
allow_redirects=False,
auth=('abcdef', ''),
data={
@@ -78,7 +78,7 @@ def test_cli_insecure_registry(list_mocked, post_mocked, get_mocked):
assert result.exit_code == 0

post_mocked.assert_called_with(
-'https://app.scrapinghub.com/api/releases/deploy.json',
+'https://app.zyte.com/api/releases/deploy.json',
allow_redirects=False,
auth=('abcdef', ''),
data={
tests/image/test_list.py (2 changes: 1 addition & 1 deletion)

@@ -62,7 +62,7 @@ def test_cli(requests_get_mock, get_docker_client_mock, is_binary_logs):
assert result.exit_code == 0
assert result.output.endswith('abc\ndef\n')
requests_get_mock.assert_called_with(
-'https://app.scrapinghub.com/api/settings/get.json',
+'https://app.zyte.com/api/settings/get.json',
allow_redirects=False, auth=('abcdef', ''),
params={'project': 12345}, timeout=300)

tests/test_config.py (4 changes: 2 additions & 2 deletions)

@@ -254,7 +254,7 @@ def _get_conf(scrapycfg_default_target):
[deploy:otherurl]
project = 444
-url = http://app.scrapinghub.com/api/scrapyd/
+url = http://app.zyte.com/api/scrapyd/
[deploy:external]
project = 555
@@ -283,7 +283,7 @@ def _get_conf(scrapycfg_default_target):
expected_endpoints = {
'default': ShubConfig.DEFAULT_ENDPOINT,
'external': 'external_endpoint',
-'otherurl': 'http://app.scrapinghub.com/api/'
+'otherurl': 'http://app.zyte.com/api/'
}
expected_apikeys = {
'otheruser': 'otherkey',
tests/test_deploy_reqs.py (2 changes: 1 addition & 1 deletion)

@@ -33,7 +33,7 @@ def test_can_decompress_downloaded_packages_and_call_deploy_reqs(self):
for args, kwargs in m.call_args_list:
project, endpoint, apikey = args
self.assertEqual(project, 1)
-self.assertIn('https://app.scrapinghub.com', endpoint)
+self.assertIn('https://app.zyte.com', endpoint)
self.assertEqual(apikey, self.conf.apikeys['default'])

def _write_tmp_requirements_file(self):
tests/test_utils.py (4 changes: 2 additions & 2 deletions)

@@ -132,12 +132,12 @@ def _test_specs(job, expected_job_id, expected_endpoint):
_test_specs('prod/2/3', '2/2/3', 'default')
_test_specs('vagrant/2/3', '3/2/3', 'vagrant')
_test_specs(
-'https://app.scrapinghub.com/p/7389/259/1/#/log/line/0',
+'https://app.zyte.com/p/7389/259/1/#/log/line/0',
'7389/259/1',
'default',
)
_test_specs(
-'https://app.scrapinghub.com/p/7389/job/259/1/',
+'https://app.zyte.com/p/7389/job/259/1/',
'7389/259/1',
'default',
)
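
The specs above exercise job URLs both with and without the ``job`` path segment. A rough sketch of the extraction they expect (the regex is an illustrative guess, not shub's actual parser):

    import re

    def job_id_from_url(url):
        # Accepts .../p/7389/259/1/... and .../p/7389/job/259/1/ alike.
        m = re.search(r'/p/(\d+)/(?:job/)?(\d+)/(\d+)', url)
        if not m:
            raise ValueError('not a job URL: %s' % url)
        return '/'.join(m.groups())

    # job_id_from_url('https://app.zyte.com/p/7389/job/259/1/') -> '7389/259/1'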
