I need to extract the reviews for a product on this site from its URL, specifically the username, date, review text, and score. However, I keep running into an error:

Failed to retrieve reviews for page 1. Error: ("Connection broken: InvalidChunkLength(got length b'', 0 bytes read)", InvalidChunkLength(got length b'', 0 bytes read))

I tried adding a time delay, but it still doesn't work. How can I modify this?
import json
import requests
from bs4 import BeautifulSoup  # imported but not actually used below

url = "https://www.emag.ro/covor-antiderapant-negru-poliester-80-x-300-cm-c027-80x300/pd/DBY5YJMBM/?ref=sponsored_products_fill_a_b_5_3&provider=rec&recid=rec_73_c449bb3e50b63cc8f6da4a42a31af359f6cbfb3c547bc5748cb6d45501a29685_1684315709&scenario_ID=73&aid=034a897a-956c-11ed-9004-0ab644dfda7c&oid=89847310"
review_url = "https://www.emag.ro/review/get-review-listing-page?id={product_id}&page={page}"
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/112.0'}

# The product id is the path segment right after /pd/ in the product URL.
product_id = url.split("/pd/")[1].split("/")[0]

reviews = []
page = 1

while True:
    r_url = review_url.format(product_id=product_id, page=page)
    try:
        response = requests.get(r_url, headers=headers)
        response.raise_for_status()
        data = response.json()
    except (requests.RequestException, json.JSONDecodeError) as e:
        print(f"Failed to retrieve reviews for page {page}. Error: {str(e)}")
        break

    # Stop when a page comes back with no reviews.
    if not data['reviews']:
        break

    for r in data['reviews']:
        review_text = r['content']
        author = r['author']['name']
        date = r['date']
        score = r['rating']
        reviews.append({"author": author, "date": date, "review_text": review_text, "score": score})

    page += 1

with open('reviews.json', 'w') as f:
    json.dump(reviews, f, indent=4)
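For what it's worth, the InvalidChunkLength error surfaces in requests as requests.exceptions.ChunkedEncodingError (a subclass of RequestException, which is why my except block already catches it and prints the message above). Below is a minimal retry sketch I've been considering, assuming the broken chunked response is transient and a fresh request might get past it; get_json_with_retry is just a helper name I made up, and I don't know whether retrying actually addresses the root cause.

import time
import requests

def get_json_with_retry(url, headers, attempts=3, delay=2):
    # Sketch only: assumes the broken chunked response is transient
    # and that simply re-issuing the request can succeed.
    for attempt in range(1, attempts + 1):
        try:
            response = requests.get(url, headers=headers, timeout=30)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.ChunkedEncodingError as e:
            # This is the exception that wraps urllib3's InvalidChunkLength.
            print(f"Attempt {attempt} hit a broken chunked response: {e}")
            if attempt == attempts:
                raise
            time.sleep(delay)

Inside the while loop, the requests.get / response.json() lines would then be replaced with data = get_json_with_retry(r_url, headers), keeping the existing try/except around it.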
Source: https://stackoverflow.com/questions/76276163/scraping-a-url-address-for-reviews