import requests 
from requests import Session 
from bs4 import BeautifulSoup 
import re 
from multiprocessing.dummy import Pool as ThreadPool 

#s = Session() 

def get_photo_from_page(): 
    tut = [] 
    r = requests.get('https://vk.com/uporols_you').text 
    soup = BeautifulSoup(r, 'lxml') 
    im = soup.find_all('img', class_="ph_img") 
    for a in im: 
        s = a.get('data-src_big').split('|')[0] 
        tut.append(s) 
    y = "img%s.jpg" 
    for t, im in tut, [y % i for i in range(1,5)]: 
        p = requests.get(t) 
        out = open(im, "wb") 
        out.write(p.content) 
        out.close() 

def main(): 
    get_photo_from_page() 

if __name__ == '__main__': 
    main() 

Error from cmd, on the line for t, im in tut, [y % i for i in range(1,5)]:

ValueError: too many values to unpack (expected 2)

I need to pair each URL in the list one-to-one with a file name and, while going through the links, save every image under its new name. With separate loops it always takes the last available link and saves it as many times as the loop runs. The error is in the loop; the parsing is done with Python 3.

Answer

import requests 
from bs4 import BeautifulSoup 

def get_photo_from_page(): 
    tut = [] 
    r = requests.get('https://m.vk.com/uporols_you').text 
    soup = BeautifulSoup(r, 'lxml') 
    im = soup.find_all('img', class_="ph_img") 
    try: 
        for a in im: 
            # the first part of data-src_big is the direct image URL 
            s = a.get('data-src_big').split('|')[0] 
            tut.append(s) 
        print(tut) 
    except AttributeError: 
        print('no links found') 

    # enumerate pairs every link with a running number, so each image 
    # gets its own file name instead of the last link being saved repeatedly 
    for num, link in enumerate(tut, start=1): 
        p = requests.get(link) 
        out = open("img%s.jpg" % num, 'wb') 
        out.write(p.content) 
        out.close() 

def main(): 
    get_photo_from_page() 

if __name__ == '__main__': 
    main()
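
The ValueError in the question comes from the expression after in: tut, [y % i for i in range(1,5)] builds a two-element tuple of lists, so the loop runs only twice and each pass tries to unpack an entire list into t and im. enumerate above fixes this by numbering the links; if you specifically want the one-to-one pairing of URLs and names, zip gives that element by element. A minimal sketch, assuming the image URLs are already collected (save_images here is just an illustrative helper, not part of the original code):

import requests

def save_images(urls):
    # one file name per URL: img1.jpg, img2.jpg, ...
    names = ["img%s.jpg" % i for i in range(1, len(urls) + 1)]
    # zip yields (url, name) pairs element by element,
    # so every link is written to its own file
    for link, name in zip(urls, names):
        p = requests.get(link)
        with open(name, "wb") as out:
            out.write(p.content)

Called as save_images(tut) after the scraping step, this downloads each image once under its own name instead of saving the last link over and over.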