일부 데이터를 스크랩하기 위해 Python과 함께 셀레늄을 사용하고 있습니다. 코드는 단일 URL에 대해 잘 작동합니다 (URL을 하드 코딩하면), 우리의 경우에는 많은 URL이 있으며 데이터베이스에서 웹 드라이버로 URL을 전달할 계획입니다.
하지만 내가했을 때 예외가 발생하면 아래 코드와 예외가 있습니다. 누구 내가 뭘 잘못하고 있는지 알려줄 수 있습니까?
이 줄에서 예외가 발생합니다. browser.get (passed_url) 하지만 아래와 같이 문자열로 전달하면 작동하는 browser.get ( ' https://www.google.com/search?q=vitamin+b12 ')
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import psycopg2
import os
import glob
import datetime
option = webdriver.ChromeOptions()
option.add_argument(" — incognito")
#browser = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver/', chrome_options=option)
browser = webdriver.Chrome(executable_path='/users/user_123/downloads/chrome_driver/chromedriver', chrome_options=option)
try:
#Database connection string
DSN = "dbname='postgres' user='postgres' host='localhost' password='postgres' port='5432'"
#DWH table to which data is ported
TABLE_NAME = 'staging.search_url'
#Connecting DB..
conn = psycopg2.connect(DSN)
print("Database connected...")
conn.set_client_encoding('latin-1')
cur = conn.cursor()
cur.execute("SET datestyle='German'")
except (Exception, psycopg2.Error) as error:
print('database connection failed')
quit()
search_url_fetch="""select url_to_be_searched from staging.search_url"""
psql_cursor = conn.cursor()
psql_cursor.execute(search_url_fetch)
serach_url_list = psql_cursor.fetchall()
print('Fetched DB values')
for row in serach_url_list:
passed_url=''
passed_url=str(row)
passed_url=passed_url.replace(',)','')
passed_url=passed_url.replace('(','')
print(passed_url)
print("\n")
browser.get('https://www.google.com/search?q=vitamin+b12')
#browser.get(passed_url)
full_titles_element = browser.find_elements_by_xpath("//div[@class='mnr-c pla-unit']")
# use list comprehension to get the actual repo titles and not the selenium objects.
full_text_title = [x.text for x in full_titles_element]
# print out all the titles.
print('Whole names that appear in site:')
print(full_text_title, '\n')
product_name_list = browser.find_elements_by_xpath("//span[@class='pymv4e']")
# use list comprehension to get the actual repo titles and not the selenium objects.
#upd_product_name_list=list(filter(None, product_name_list))
upd_product_name_list=list(filter(None, product_name_list))
product_name = [x.text for x in product_name_list]
print('Product names:')
print(product_name, '\n')
filtered = [x for x in product_name if len(x.strip()) > 0]
print(filtered)
element_length=(len(filtered))
print(element_length)
print("\n")
positions=[]
for x in range(1, element_length+1):
positions.append(x)
print(positions)
print("\n")
company_name_list = browser.find_elements_by_xpath("//div[@class='LbUacb']")
# use list comprehension to get the actual repo titles and not the selenium objects.
company = [x.text for x in company_name_list]
# print out all the titles.
print('Company Name:')
print(company, '\n')
urls=[]
find_href = browser.find_elements_by_xpath("//a[@class='plantl pla-unit-single-clickable-target clickable-card']")
for my_href in find_href:
url_list=my_href.get_attribute("href")
urls.append(url_list)
#print(my_href.get_attribute("href"))
print(urls)
print("\n")
result = zip(positions,filtered, urls, company)
print(tuple(result))
예외 :
Warning (from warnings module):
File "/Users/user_123/Documents/PLA/selenium_chrome_with_beautiful_soup.py", line 16
browser = webdriver.Chrome(executable_path='/users/user_123/downloads/chrome_driver/chromedriver', chrome_options=option)
DeprecationWarning: use options instead of chrome_options
Database connected...
Fetched DB values
'https://www.google.com/search?q=vitamin+b12'
Traceback (most recent call last):
File "/Users/user_123/Documents/PLA/selenium_chrome_with_beautiful_soup.py", line 49, in <module>
browser.get(passed_url)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/selenium/webdriver/remote/webdriver.py", line 333, in get
self.execute(Command.GET, {'url': url})
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/selenium/webdriver/remote/webdriver.py", line 321, in execute
self.error_handler.check_response(response)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/selenium/webdriver/remote/errorhandler.py", line 242, in check_response
raise exception_class(message, screen, stacktrace)
selenium.common.exceptions.InvalidArgumentException: Message: invalid argument
(Session info: chrome=79.0.3945.130)
'
따옴표와 문자열의 시작과 끝을 전달 합니다. 아래를 참조하여 문자열을 잘라 내고 새 변수를 할당했습니다 new_url
.
대답 :
new_url = passed_url[1:len(passed_url)-1]
browser.get(new_url)
예:
a = "'https://www.google.com/search?q=vitamin+b12'"
b = a[1:len(a)-1]
print(a)
print(b)
아래 수정 된 코드 :
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import psycopg2
import os
import glob
import datetime
option = webdriver.ChromeOptions()
option.add_argument(" — incognito")
#browser = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver/', chrome_options=option)
browser = webdriver.Chrome(executable_path='/users/user_123/downloads/chrome_driver/chromedriver', chrome_options=option)
try:
#Database connection string
DSN = "dbname='postgres' user='postgres' host='localhost' password='postgres' port='5432'"
#DWH table to which data is ported
TABLE_NAME = 'staging.search_url'
#Connecting DB..
conn = psycopg2.connect(DSN)
print("Database connected...")
conn.set_client_encoding('latin-1')
cur = conn.cursor()
cur.execute("SET datestyle='German'")
except (Exception, psycopg2.Error) as error:
print('database connection failed')
quit()
search_url_fetch="""select url_to_be_searched from staging.search_url"""
psql_cursor = conn.cursor()
psql_cursor.execute(search_url_fetch)
serach_url_list = psql_cursor.fetchall()
print('Fetched DB values')
for row in serach_url_list:
passed_url=''
passed_url=str(row)
passed_url=passed_url.replace(',)','')
passed_url=passed_url.replace('(','')
new_url = passed_url[1:len(passed_url)-1]
print(passed_url)
print("\n")
#browser.get('https://www.google.com/search?q=vitamin+b12')
browser.get(new_url)
full_titles_element = browser.find_elements_by_xpath("//div[@class='mnr-c pla-unit']")
# use list comprehension to get the actual repo titles and not the selenium objects.
full_text_title = [x.text for x in full_titles_element]
# print out all the titles.
print('Whole names that appear in site:')
print(full_text_title, '\n')
product_name_list = browser.find_elements_by_xpath("//span[@class='pymv4e']")
# use list comprehension to get the actual repo titles and not the selenium objects.
#upd_product_name_list=list(filter(None, product_name_list))
upd_product_name_list=list(filter(None, product_name_list))
product_name = [x.text for x in product_name_list]
print('Product names:')
print(product_name, '\n')
filtered = [x for x in product_name if len(x.strip()) > 0]
print(filtered)
element_length=(len(filtered))
print(element_length)
print("\n")
positions=[]
for x in range(1, element_length+1):
positions.append(x)
print(positions)
print("\n")
company_name_list = browser.find_elements_by_xpath("//div[@class='LbUacb']")
# use list comprehension to get the actual repo titles and not the selenium objects.
company = [x.text for x in company_name_list]
# print out all the titles.
print('Company Name:')
print(company, '\n')
urls=[]
find_href = browser.find_elements_by_xpath("//a[@class='plantl pla-unit-single-clickable-target clickable-card']")
for my_href in find_href:
url_list=my_href.get_attribute("href")
urls.append(url_list)
#print(my_href.get_attribute("href"))
print(urls)
print("\n")
result = zip(positions,filtered, urls, company)
print(tuple(result))
이 기사는 인터넷에서 수집됩니다. 재 인쇄 할 때 출처를 알려주십시오.
침해가 발생한 경우 연락 주시기 바랍니다[email protected] 삭제
몇 마디 만하겠습니다