0
大きなファイルを処理するために pandas を使用しています。get_chunk メソッドを使ってみましたが、正しくロードされません。pandas の read_table で読み込んだ JSON データから、単一の列を指定して取り出すにはどうすればよいですか。
def load_data(path='/Users/fiz/Desktop/xad', chunk_size=10000):
    """Read a large delimited text file in chunks and return one DataFrame.

    Parameters
    ----------
    path : str, optional
        File to read. Defaults to the original hard-coded location so
        existing no-argument callers keep working.
    chunk_size : int, optional
        Number of rows fetched per ``get_chunk()`` call.

    Returns
    -------
    pandas.DataFrame
        All chunks concatenated with a fresh integer index.
        (Bug fix: the original built this DataFrame but never returned it.)
    """
    reader = pd.read_table(path, iterator=True, encoding='utf-8')
    chunks = []
    while True:
        try:
            # get_chunk raises StopIteration once the file is exhausted.
            chunk = reader.get_chunk(chunk_size)
        except StopIteration:
            print("Iteration is stopped.")
            break
        chunks.append(chunk)
        print(chunk)
    return pd.concat(chunks, ignore_index=True)
上記のコードを試してみましたが、次のような出力が得られます:
{"_index":"pos_journals-2017.03.24","_type":"pos_journals","_id":"AVr_peEqanqtRFPuszUt","_score":null,"_source": {"message":"Chk 4040 Gst 0\r\n1005 Yangshuying PCWS02\r\nCE: 1005 CC: 0 TC: 0\r\nTrn 8704 24Mar'17 17:28\r\n--------------------------------\r\n To Go \r\n SR:6010381353541922\r\n SR:ONLINE\r\n 1 Mocha Ice V 37.00\r\n Cash 102.00\r\n Subtotal 37.00\r\n Paid 37.00\r\n Change Due 65.00\r\n================================","@version":"1","@timestamp":"2017-03-24T09:28:56.787Z","type":"pos_journals","count":1,"beat":{"hostname":"S24376","name":"S24376"},"source":"d:\\MICROS\\Res\\Pos\\Journals\\POS02.txt","offset":40621,"input_type":"log","fields":null,"host":"S24376","tags":["beats_input_codec_plain_applied"]},"fields":{"@timestamp":[1490347736787]},"highlight":{"message":["Chk 4040 Gst 0\r\n1005 Yangshuying PCWS02\r\nCE: 1005 CC: 0 TC: 0\r\nTrn 8704 24Mar'17 17:28\r\n--------------------------------\r\n To Go \r\n SR:6010381353541922\r\n SR:ONLINE\r\n 1 Mocha Ice V 37.00\r\n @[email protected]@/[email protected] 102.00\r\n Subtotal 37.00\r\n Paid 37.00\r\n Change Due 65.00\r\n================================"]},"sort":[1490347736787]}
0 {"_index":"pos_journals-2017.03.24","_type":"p...
1 {"_index":"pos_journals-2017.03.24","_type":"p...
2 {"_index":"pos_journals-2017.03.24","_type":"p...
3 {"_index":"pos_journals-2017.03.24","_type":"p...