feat: Bump version to v0.2.73 and update documentation
This commit updates the version number to v0.2.73 and makes corresponding changes in the README.md and Dockerfile.

The Dockerfile now installs the default mode, which resolves many of the installation issues.

Additionally, the installation instructions are updated to cover the different installation modes, and setup.py no longer depends on spaCy.

The change log is also updated to reflect these changes.

Support is added for websites that require a non-headless (with-head) browser.
unclecode committed Jul 3, 2024
1 parent 88d8cd8 commit 9926eb9
Showing 8 changed files with 34 additions and 25 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -1,4 +1,4 @@
# Crawl4AI v0.2.72 🕷️🤖
# Crawl4AI v0.2.73 🕷️🤖

[![GitHub Stars](https://img.shields.io/github/stars/unclecode/crawl4ai?style=social)](https://github.com/unclecode/crawl4ai/stargazers)
[![GitHub Forks](https://img.shields.io/github/forks/unclecode/crawl4ai?style=social)](https://github.com/unclecode/crawl4ai/network/members)
11 changes: 7 additions & 4 deletions crawl4ai/crawler_strategy.py
@@ -9,6 +9,7 @@
from selenium.webdriver.chrome.service import Service as ChromeService
from webdriver_manager.chrome import ChromeDriverManager

from .config import *
import logging, time
import base64
from PIL import Image, ImageDraw, ImageFont
@@ -181,7 +182,7 @@ def _ensure_page_load(self, max_checks=6, check_interval=0.01):
initial_length = len(self.driver.page_source)

for ix in range(max_checks):
print(f"Checking page load: {ix}")
# print(f"Checking page load: {ix}")
time.sleep(check_interval)
current_length = len(self.driver.page_source)

@@ -190,7 +191,7 @@ def _ensure_page_load(self, max_checks=6, check_interval=0.01):

return self.driver.page_source

def crawl(self, url: str) -> str:
def crawl(self, url: str, **kwargs) -> str:
# Create md5 hash of the URL
import hashlib
url_hash = hashlib.md5(url.encode()).hexdigest()
@@ -213,15 +214,17 @@ def crawl(self, url: str) -> str:
WebDriverWait(self.driver, 10).until(
EC.presence_of_all_elements_located((By.TAG_NAME, "body"))
)
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
html = self._ensure_page_load() # self.driver.page_source
can_not_be_done_headless = False # Look at my creativity for naming variables
# TODO: Very ugly way for now but it works
if html == "<html><head></head><body></body></html>":
if not kwargs.get('bypass_headless', False) and html == "<html><head></head><body></body></html>":
print("[LOG] 🙌 Page could not be loaded in headless mode. Trying non-headless mode...")
can_not_be_done_headless = True
options = Options()
options.headless = False
# set window size very small
options.add_argument("--window-size=10,10")
options.add_argument("--window-size=5,5")
driver = webdriver.Chrome(service=self.service, options=options)
driver.get(url)
html = driver.page_source
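The middle of the `_ensure_page_load` loop is collapsed in the hunks above. A minimal, self-contained sketch of the stability check it appears to implement, polling until the page source stops growing, with the elided comparison logic assumed rather than copied from the repo:

```python
import time

def ensure_page_load(get_page_source, max_checks: int = 6, check_interval: float = 0.01) -> str:
    # Poll the page source and stop early once its length no longer grows
    # between checks (the early-exit condition is an assumption here).
    initial_length = len(get_page_source())
    for _ in range(max_checks):
        time.sleep(check_interval)
        current_length = len(get_page_source())
        if current_length == initial_length:
            break  # DOM appears to have stopped changing
        initial_length = current_length
    return get_page_source()

# Toy usage: a static "page" is already stable, so the loop exits quickly.
print(len(ensure_page_load(lambda: "<html><body>done</body></html>")))
```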
8 changes: 6 additions & 2 deletions crawl4ai/extraction_strategy.py
@@ -101,15 +101,15 @@ def extract(self, url: str, ix:int, html: str) -> List[Dict[str, Any]]:
prompt_with_variables = PROMPT_EXTRACT_BLOCKS_WITH_INSTRUCTION

if self.extract_type == "schema":
variable_values["SCHEMA"] = json.dumps(self.schema)
variable_values["SCHEMA"] = json.dumps(self.schema, indent=2)
prompt_with_variables = PROMPT_EXTRACT_SCHEMA_WITH_INSTRUCTION

for variable in variable_values:
prompt_with_variables = prompt_with_variables.replace(
"{" + variable + "}", variable_values[variable]
)

response = perform_completion_with_backoff(self.provider, prompt_with_variables, self.api_token)
response = perform_completion_with_backoff(self.provider, prompt_with_variables, self.api_token) # , json_response=self.extract_type == "schema")
try:
blocks = extract_xml_data(["blocks"], response.choices[0].message.content)['blocks']
blocks = json.loads(blocks)
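For the schema path, the strategy now pretty-prints the user schema before substituting it into the prompt. A small illustrative example of that substitution, using the same replace pattern as `extract()` above; the schema fields are made up, and `{SCHEMA}` / `<schema_block>` follow the placeholder convention referenced in prompts.py:

```python
import json

schema = {  # illustrative schema only
    "type": "object",
    "properties": {
        "title": {"type": "string"},
        "price": {"type": "number"},
    },
    "required": ["title"],
}

prompt_template = "<schema_block>\n{SCHEMA}\n</schema_block>"
variable_values = {"SCHEMA": json.dumps(schema, indent=2)}

prompt = prompt_template
for variable in variable_values:
    # same "{NAME}" -> value replacement used in extract() above
    prompt = prompt.replace("{" + variable + "}", variable_values[variable])

print(prompt)
```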
@@ -196,6 +196,10 @@ def run(self, url: str, sections: List[str]) -> List[Dict[str, Any]]:
time.sleep(0.5) # 500 ms delay between each processing
else:
# Parallel processing using ThreadPoolExecutor
# extract_func = partial(self.extract, url)
# for ix, section in enumerate(merged_sections):
# extracted_content.append(extract_func(ix, section))

with ThreadPoolExecutor(max_workers=4) as executor:
extract_func = partial(self.extract, url)
futures = [executor.submit(extract_func, ix, section) for ix, section in enumerate(merged_sections)]
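The hunk above sets up the executor, but the result collection is collapsed. A standalone sketch of the same fan-out pattern; the function and variable names are stand-ins rather than the actual extraction-strategy code:

```python
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import partial
from typing import Any, Dict, List

def extract(url: str, ix: int, section: str) -> List[Dict[str, Any]]:
    # Stand-in for the per-section LLM extraction call.
    return [{"index": ix, "url": url, "content": section}]

def run_parallel(url: str, merged_sections: List[str]) -> List[Dict[str, Any]]:
    extracted_content: List[Dict[str, Any]] = []
    extract_func = partial(extract, url)
    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(extract_func, ix, section)
                   for ix, section in enumerate(merged_sections)]
        for future in as_completed(futures):  # completion order, not submission order
            extracted_content.extend(future.result())
    return extracted_content

print(run_parallel("https://example.com", ["section one", "section two"]))
```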
10 changes: 8 additions & 2 deletions crawl4ai/prompts.py
@@ -186,13 +186,19 @@
Please carefully read the URL content and the user's request. If the user provided a desired JSON schema in the <schema_block> above, extract the requested information from the URL content according to that schema. If no schema was provided, infer an appropriate JSON schema based on the user's request that will best capture the key information they are looking for.
Extraction instructions:
Return the extracted information as a list of JSON objects, with each object in the list corresponding to a block of content from the URL, in the same order as it appears on the page. Wrap the entire JSON list in <blocks> tags.
Return the extracted information as a list of JSON objects, with each object in the list corresponding to a block of content from the URL, in the same order as it appears on the page. Wrap the entire JSON list in <blocks>...</blocks> XML tags.
Quality Reflection:
Before outputting your final answer, double check that the JSON you are returning is complete, containing all the information requested by the user, and is valid JSON that could be parsed by json.loads() with no errors or omissions. The outputted JSON objects should fully match the schema, either provided or inferred.
Quality Score:
After reflecting, score the quality and completeness of the JSON data you are about to return on a scale of 1 to 5. Write the score inside <score> tags.
Avoid Common Mistakes:
- Do NOT add any comments using "//" or "#" in the JSON output. It causes parsing errors.
- Make sure the JSON is properly formatted with curly braces, square brackets, and commas in the right places.
- Do not miss closing </blocks> tag at the end of the JSON output.
- Do not generate Python code that shows how to do the task; your task is to extract the information and return it in JSON format.
Result
Output the final list of JSON objects, wrapped in <blocks> tags."""
Output the final list of JSON objects, wrapped in <blocks>...</blocks> XML tags. Make sure to close the tag properly."""
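The stricter prompt wording pairs with the parsing step seen in extraction_strategy.py (`extract_xml_data` followed by `json.loads`). A minimal stand-in for that round trip, assuming `extract_xml_data` simply pulls the text between the tags:

```python
import json
import re

def extract_blocks(llm_output: str):
    # Assumed behaviour of utils.extract_xml_data for the "blocks" tag:
    # grab everything between <blocks> and </blocks>, then parse it as JSON.
    match = re.search(r"<blocks>(.*?)</blocks>", llm_output, re.DOTALL)
    if match is None:
        raise ValueError("No <blocks>...</blocks> section in LLM output")
    return json.loads(match.group(1))

sample = '<blocks>[{"index": 0, "tags": ["intro"], "content": "Hello"}]</blocks>\n<score>5</score>'
print(extract_blocks(sample))  # [{'index': 0, 'tags': ['intro'], 'content': 'Hello'}]
```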
11 changes: 7 additions & 4 deletions crawl4ai/utils.py
@@ -419,7 +419,6 @@ def flatten_nested_elements(node):
print('Error processing HTML content:', str(e))
raise InvalidCSSSelectorError(f"Invalid CSS selector: {css_selector}") from e


def get_content_of_website_optimized(url: str, html: str, word_count_threshold: int = MIN_WORD_THRESHOLD, css_selector: str = None, **kwargs) -> Dict[str, Any]:
if not html:
return None
@@ -544,7 +543,6 @@ def flatten_nested_elements(node):
'metadata': meta
}


def extract_metadata(html, soup = None):
metadata = {}

@@ -603,12 +601,16 @@ def extract_xml_data(tags, string):
return data

# Function to perform the completion with exponential backoff
def perform_completion_with_backoff(provider, prompt_with_variables, api_token):
def perform_completion_with_backoff(provider, prompt_with_variables, api_token, json_response = False):
from litellm import completion
from litellm.exceptions import RateLimitError
max_attempts = 3
base_delay = 2 # Base delay in seconds, you can adjust this based on your needs

extra_args = {}
if json_response:
extra_args["response_format"] = { "type": "json_object" }

for attempt in range(max_attempts):
try:
response = completion(
@@ -617,7 +619,8 @@ def perform_completion_with_backoff(provider, prompt_with_variables, api_token):
{"role": "user", "content": prompt_with_variables}
],
temperature=0.01,
api_key=api_token
api_key=api_token,
**extra_args
)
return response # Return the successful response
except RateLimitError as e:
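A usage sketch for the new `json_response` flag. The provider string and key below are placeholders, and whether a given model honours `response_format={"type": "json_object"}` depends on the provider behind litellm:

```python
from crawl4ai.utils import perform_completion_with_backoff

response = perform_completion_with_backoff(
    provider="openai/gpt-4o-mini",       # placeholder litellm provider/model id
    prompt_with_variables="Extract the title and price and answer as JSON.",
    api_token="sk-...",                  # placeholder API key
    json_response=True,                  # adds response_format={"type": "json_object"}
)
print(response.choices[0].message.content)
```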
4 changes: 3 additions & 1 deletion crawl4ai/web_crawler.py
@@ -11,6 +11,8 @@
from typing import List
from concurrent.futures import ThreadPoolExecutor
from .config import *
import warnings
warnings.filterwarnings("ignore", message='Field "model_name" has conflict with protected namespace "model_".')


class WebCrawler:
@@ -164,7 +166,7 @@ def run(
if user_agent:
self.crawler_strategy.update_user_agent(user_agent)
t1 = time.time()
html = self.crawler_strategy.crawl(url)
html = self.crawler_strategy.crawl(url, **kwargs)
t2 = time.time()
if verbose:
print(f"[LOG] 🚀 Crawling done for {url}, success: {bool(html)}, time taken: {t2 - t1} seconds")
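Because `run` now forwards `**kwargs` to `crawler_strategy.crawl`, options such as `bypass_headless` can be set from the high-level API. A hedged usage sketch; the other `run` parameters and the return shape are not shown in this diff:

```python
from crawl4ai.web_crawler import WebCrawler

crawler = WebCrawler()
# bypass_headless=True skips the non-headless retry added in crawler_strategy.py
result = crawler.run(url="https://example.com", bypass_headless=True)
```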
2 changes: 1 addition & 1 deletion docs/md/index.md
@@ -1,4 +1,4 @@
# Crawl4AI v0.2.72
# Crawl4AI v0.2.73

Welcome to the official documentation for Crawl4AI! 🕷️🤖 Crawl4AI is an open-source Python library designed to simplify web crawling and extract useful information from web pages. This documentation will guide you through the features, usage, and customization of Crawl4AI.

11 changes: 1 addition & 10 deletions setup.py
@@ -18,15 +18,9 @@
torch_requirements = [req for req in requirements if req.startswith(("torch", "nltk", "spacy", "scikit-learn", "numpy"))]
transformer_requirements = [req for req in requirements if req.startswith(("transformers", "tokenizers", "onnxruntime"))]

class CustomInstallCommand(install):
"""Customized setuptools install command to install spacy without dependencies."""
def run(self):
install.run(self)
subprocess.check_call([os.sys.executable, '-m', 'pip', 'install', 'spacy', '--no-deps'])

setup(
name="Crawl4AI",
version="0.2.72",
version="0.2.73",
description="🔥🕷️ Crawl4AI: Open-source LLM Friendly Web Crawler & Scrapper",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
@@ -41,9 +35,6 @@ def run(self):
"transformer": transformer_requirements,
"all": requirements,
},
cmdclass={
'install': CustomInstallCommand,
},
entry_points={
'console_scripts': [
'crawl4ai-download-models=crawl4ai.model_loader:main',
