Have a look at the script below. Mine is written for CC_transcript, but you can adapt it for any StockTwits account (see the note after the script):
###########################################################################
### This script is a web scraper for stocktwits. ###
### It is applied specifically to cc_transcripts. ###
### To use it, you first need to install Python 3.5.2 on your computer. ###
### Install the module "Selenium" 3.1.1, and "chromedriver.exe". ###
###########################################################################
from selenium import webdriver
import sys
import time
from selenium.webdriver.common.keys import Keys
#Only for Chrome; Firefox needs a different driver (geckodriver)
print("Loading... Please wait")
Pathwebdriver="D:\\Programs\\Python\\Python35-32\\Scripts\\chromedriver.exe"
driver = webdriver.Chrome(Pathwebdriver)
#website to analyse
driver.get("https://stocktwits.com/cctranscripts?q=cctranscripts")
#Scrolling of the webpage
ScrollNumber=3
print(str(ScrollNumber)+ " scrolldowns will be done.")
for i in range(1, ScrollNumber+1): #scroll down X times
    print("Scrolling... #"+str(i))
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    time.sleep(2) #delay between two scrolls so the page can finish loading; 1s is too short, some loads take longer
#retrieving source code
html_source = driver.page_source
data = str(html_source.encode('utf-8'))
#driver.close() #close Chrome to allow opening new windows and saving the source code
#Saving source code (in the same folder as this script)
SaveSource = False
if SaveSource:
    text_file = open("SourceCode.html", "w")
    text_file.write(data)
    text_file.close()
#Analysis of the source code
PosScanning=1
GlobalList=[]
print("Processing data")
while data[PosScanning:].find("picked")>0:
    PosPick=data[PosScanning:].find("picked") +PosScanning
    List = [0, 0, 0, 0, 0, 0, 0] #Ticker, Nb of shares, Text of stocktwits, Link, Price of buying, Date, Text of CC_transcript
    #Quote
    dataBis=data[PosPick::-1] #read the string backwards
    PosBegin=PosPick - dataBis.find(">") +1 #look for the beginning of the text
    data=data[PosBegin:] #shorten the string on each loop to speed up processing
    PosEnd=data.find("<") #look for the end of the text
    #print(data[:PosEnd])
    List[2]=data[:PosEnd].replace(","," ")
    #Nb of shares
    List[1]=List[2].split(' up', 1)[1]
    List[1]=List[1].split('share', 1)[0]
    List[1]=List[1].replace(" ","")
    #Link to the transcript
    PosLinkBegin=data.find("href=")+6
    PosLinkend=data.find("\"",PosLinkBegin,PosLinkBegin+3000)
    #print(data[PosLinkBegin:PosLinkend])
    List[3]=data[PosLinkBegin:PosLinkend]
    #Symbol
    PosSymbolBegin=data.find("data-symbol=")+13
    PosSymbolEnd=data.find("\"",PosSymbolBegin,PosSymbolBegin+300)
    #print(data[PosSymbolBegin:PosSymbolEnd])
    List[0]=data[PosSymbolBegin:PosSymbolEnd]
    #data-body: "picked" is repeated twice, so skip past both occurrences
    PosBody1=data.find("picked",PosSymbolEnd,PosSymbolEnd+10000)+100
    PosBody2=data.find("picked",PosBody1,PosBody1+10000)
    PosScanning=PosBody2 +100
    GlobalList.append(List)
#Opening each link to retrieve detailed information from the CC_transcript
print("Opening links to retrieve detailed information from CC_transcript")
j=1
for item in GlobalList:
    print("Retrieving data: " +str(j)+"/"+str(len(GlobalList)))
    driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 't') #open a new tab (use Keys.COMMAND on macOS)
    driver.get(item[3])
    html_source2 = driver.page_source
    data2 = str(html_source2.encode('utf-8'))
    #text of CC_transcript
    TextePos=data2.find("$(\"#meaning\").popover();")
    item[6] = data2[TextePos+40:TextePos+1000].replace(","," ")
    #price of shares
    BuyPos=item[6].find("place at")+10
    BuyPosend=item[6][BuyPos:].find("share")+BuyPos +6
    item[4]=item[6][BuyPos:BuyPosend]
    #date
    DatePos=item[6].find(" on ")
    DatePosEnd=item[6][DatePos:].find(".")+DatePos
    item[5]=item[6][DatePos+4:DatePosEnd]
    j=j+1
driver.close()
#output of final data
print("Writting data in .csv file")
f = open('stocktwits.csv','w')
f.write("Ticker")
f.write(' , ')
f.write("Nb of shares")
f.write(' , ')
f.write("Text of stocktwits")
f.write(' , ')
f.write("Link")
f.write(' , ')
f.write("Price of buying")
f.write(' , ')
f.write("Date")
f.write(' , ')
f.write("Text of CC_transcript")
f.write('\n')
for item in GlobalList:
    for elem in item:
        f.write(elem)
        f.write(' , ') # excel change of column
    f.write('\n') # excel change of line
f.close()
time.sleep(5)
print("Done")
Hey Eric, thanks for your answer! I changed the code in "api.py" to "wl = R().get_json(ST_BASE_URL + 'watchlists/show/{}.json'.format(wl_id), params=ST_BASE_PARAMS)". Now I get the following error message: TypeError: "got multiple values for keyword argument 'params'". I really don't know how to deal with this :/ – annach
I've updated the answer to reflect the simple change needed. Use that code to make it work as designed. –
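For reference, that TypeError usually means the params argument is being supplied twice: once positionally (or inside the helper itself) and once through the explicit params= keyword. A minimal, hypothetical sketch of the mechanism (the function and URL below are illustrative only, not the actual api.py code):

#Illustrative only: not the real api.py helper.
def get_json(url, params=None):
    return (url, params)

url = "https://api.stocktwits.com/api/2/watchlists/show/1.json"  #hypothetical example URL
get_json(url, params={"access_token": "TOKEN"})        #fine: 'params' passed once
#get_json(url, {"access_token": "TOKEN"}, params={})   #raises the "multiple values for 'params'" TypeError

The general fix is to make sure params is passed only once along the call chain.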