I am building this web scraper in Python with Anaconda, working in PyCharm. My script was working fine, but somehow I messed something up on my computer and I am now getting this error: ImportError: cannot import name viewkeys.
I also can't seem to install the mechanize module into my Anaconda directory.
Below is my code; it would be great if someone could help me solve this!
from bs4 import BeautifulSoup
import re
import sys
import urllib
import urlparse

import lxml
import mechanize
import numpy as np
import requests
#import selenium.webdriver as webdriver
def get_Sample_URL(Sample_Name):
    br = mechanize.Browser()
    br.open("https://www.lpi.usra.edu/meteor/")
    # Select the search form on the page
    br.select_form(nr=0)
    # Enter the sample name into the search field
    br.form['sea'] = Sample_Name
    br.submit()
    Link_List = list()
    for i in br.links():
        # Join the base URL and the relative URL of the sample
        new_url = urlparse.urljoin(i.base_url, i.url)
        # Insert the URLs into a list
        Link_List.append(new_url)
    br.close()
    if len(Link_List) > 9999999999:
        # print len(Link_List)
        print ("Can't find data for: "), Sample_Name, "Perhaps try searching this one manually"
        #Manual_Search_File = np.loadtxt("/Users/edenmolina/Desktop/Metiorite_Manual_Search.txt", dtype='string', unpack=True)
        Sample_URL = 0
        return Sample_URL
    # 9 is the number of links returned for a sample that is not in the online database
    elif len(Link_List) <= 9:
        #print len(Link_List)
        #print ("No results found on database for: "), Sample_Name
        Sample_URL = 0
        return Sample_URL
    else:
        #print len(Link_List)
        #print len(Link_List), Sample_Name
        Sample_URL = Link_List[-4]
        return Sample_URL
"""Get the Classification"""
def get_Classification(URL):
source = urllib.urlopen("%s"%URL).read()
soup = BeautifulSoup(source, "html.parser")
Classification_List = []
for k, td in enumerate(soup.find_all("td", {'class', 'inside'})):
Classification = td.find_all("b")
Classification_List.append(Classification)
#print Classification_List[3]
print (Classification_List[3][1].text)
"Define a fucntion that get the name, weathering, and shock stage of the sample"
def get_Name_Weathering_Shock(url):
"Get the source code from the website"
source = urllib.urlopen("%s" % url).read()
# Convert the data to a Beautiful Soup object
soup = BeautifulSoup(source, "html.parser")
"""
Print out the title of the webpage"""
#print soup.title.string
""""Getting the name"""
Name_List = []
for i, td in enumerate(soup.find_all("th", {'class', 'insidehead'})):
Name = td.find_all("b")
Name_List.append(Name)
print ("Name Check: ", Name_List[0][0].text)
# Get the data in the td subsections from the website
data = soup.find_all('tr')
# Getting the website data
Website_Data = list()
for tr in data:
td = tr.find_all('td')
row = [i.text for i in td]
Website_Data.append(row)
Weathering_Grade = [w for w in Website_Data if "Weathering grade:" in w]
Shock_Stage = [s for s in Website_Data if "Shock stage:" in s]
#Prints out the weathering and shock stage of the sample
try:
print (Weathering_Grade[1][1])
np.savetxt("/Users/edenmolina/Desktop/Meteorite Data/%s.txt" % (Name[0][0].text), Weathering_Grade[1][1])
except:
print ("No Weathering")
try:
print (Shock_Stage[1][1])
except:
print ("No Shock Stage")
def get_Info(url, weatheringOrshock):
    """Return the table value for the given row label (e.g. "Weathering grade:")."""
    # Get the source code of the page
    source = urllib.urlopen(url).read()
    # Convert the data to a BeautifulSoup object
    soup = BeautifulSoup(source, "html.parser")
    rows = soup.find_all('tr')
    data = list()
    for tr in rows:
        td = tr.find_all('td')
        row = [cell.text for cell in td]
        data.append(row)
    information = [w for w in data if weatheringOrshock in w]
    try:
        print (information[1][1])
        return information[1][1]
    except IndexError:
        print ("No %s" % weatheringOrshock)
#get_SampleData("https://www.lpi.usra.edu/meteor/metbull.php?sea=NWA+001&sfor=names&ants=&falls=&valids=&stype=contains&lrec=50&map=ge&browse=&country=All&srt=name&categ=All&mblist=All&rect=&phot=&snew=0&pnt=Normal%20table&code=17011")
#AllData("NWA 002")

# Load the sample names from a text file
SampleNames_Text = np.loadtxt("/Users/edenmolina/Desktop/MetioriteNames.txt", delimiter="\n", dtype=np.str)
Number_of_Loops = len(SampleNames_Text)

# Iterate through each sample in the text file and print the name, weathering,
# and shock stage (if applicable), saving the data as it goes
for i in range(Number_of_Loops):
    print (SampleNames_Text[i])
    Sample_URL = get_Sample_URL("%s" % SampleNames_Text[i])
    if Sample_URL == 0:
        print ("")
    elif len(Sample_URL) < 80:
        print ("Try Searching This Manually")
    else:
        Weathering = get_Info(Sample_URL, "Weathering grade:")
        Shock = get_Info(Sample_URL, "Shock stage:")
        Classification = get_Classification(Sample_URL)
    print ("\n")
Stack trace of the error:
Traceback (most recent call last):
File "/Users/edenmolina/PycharmProjects/Meteorite/DataBase_Data_Extractor_V3.py", line 163, in <module>
Sample_URL = get_Sample_URL("%s" % SampleNames_Text[i])
File "/Users/edenmolina/PycharmProjects/Meteorite/DataBase_Data_Extractor_V3.py", line 34, in get_Sample_URL
br.select_form(nr=0)
File "/Users/edenmolina/Library/Python/2.7/lib/python/site-packages/mechanize/_mechanize.py", line 619, in select_form
global_form = self._factory.global_form
File "/Users/edenmolina/Library/Python/2.7/lib/python/site-packages/mechanize/_html.py", line 260, in global_form
self.forms()
File "/Users/edenmolina/Library/Python/2.7/lib/python/site-packages/mechanize/_html.py", line 267, in forms
self._current_forms, self._current_global_form = self._get_forms()
File "/Users/edenmolina/Library/Python/2.7/lib/python/site-packages/mechanize/_html.py", line 282, in _get_forms
if self.root is None:
File "/Users/edenmolina/Library/Python/2.7/lib/python/site-packages/mechanize/_html.py", line 247, in root
response, verify=False))
File "/Users/edenmolina/Library/Python/2.7/lib/python/site-packages/mechanize/_html.py", line 145, in content_parser
from html5lib import parse
File "/Users/edenmolina/Library/Python/2.7/lib/python/site-packages/html5lib/__init__.py", line 16, in <module>
from .html5parser import HTMLParser, parse, parseFragment
File "/Users/edenmolina/Library/Python/2.7/lib/python/site-packages/html5lib/html5parser.py", line 2, in <module>
from six import with_metaclass, viewkeys, PY3
ImportError: cannot import name viewkeys
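
One thing I notice in the traceback is that mechanize and html5lib are both loaded from /Users/edenmolina/Library/Python/2.7/lib/python/site-packages rather than from the Anaconda environment, so my guess (unconfirmed) is that an old or broken user-installed copy of six is being picked up there and does not provide viewkeys. This is a quick check of which six the interpreter actually imports and whether it has viewkeys; if it does turn out to be outdated, I assume upgrading that user install (pip install --user --upgrade six) would fix it, but I have not verified this yet.

import six

# Show where six is imported from and which version it is
print (six.__file__)
print (six.__version__)

# viewkeys should exist on any reasonably recent six;
# False here would explain the ImportError raised inside html5lib
print (hasattr(six, "viewkeys"))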
Please update your post with the stack trace of the error. I would also recommend reading the PEP 8 (https://www.python.org/dev/peps/pep-0008/) Python coding standards so that your code looks more Pythonic to Python developers. – MedhatGayed
@MedhatGayed I've updated the post with the stack trace of the error. – Eden