我已经根据在这里的伟大民间人士提出的以下解决办法修改了该守则;我在这里看到了以下错误。
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from dmoz2.items import DmozItem
class DmozSpider(BaseSpider):
name = "namastecopy2"
allowed_domains = ["namastefoods.com"]
start_urls = [
"http://www.namastefoods.com/products/cgi-bin/products.cgi?Function=show&Category_Id=4&Id=1",
"http://www.namastefoods.com/products/cgi-bin/products.cgi?Function=show&Category_Id=4&Id=12",
]
def parse(self, response):
hxs = HtmlXPathSelector(response)
sites = hxs.select( /html/body/div/div[2]/table/tr/td[2]/table/tr )
items = []
for site in sites:
item = DmozItem()
item[ manufacturer ] = Namaste Foods
item[ productname ] = site.select( td/h1/text() ).extract()
item[ description ] = site.select( //*[@id="info-col"]/p[7]/strong/text() ).extract()
item[ ingredients ] = site.select( td[1]/table/tr/td[2]/text() ).extract()
item[ ninfo ] = site.select( td[2]/ul/li[3]/img/@src ).extract()
#insert code that will save the above image path for ninfo as an absolute path
base_url = get_base_url(response)
relative_url = site.select( //*[@id="showImage"]/@src ).extract()
item[ image_urls ] = urljoin_rfc(base_url, relative_url)
items.append(item)
return items
我的项目就是这样:
from scrapy.item import Item, Field
class DmozItem(Item):
# define the fields for your item here like:
productid = Field()
manufacturer = Field()
productname = Field()
description = Field()
ingredients = Field()
ninfo = Field()
imagename = Field()
image_paths = Field()
relative_images = Field()
image_urls = Field()
pass
我需要一个相对的道路,那就是,spi子正在为改用绝对道路和安放的物品[相对价值]提取;在物品中节省了[图像_urls],以便我能够从该间谍中下载图像。 例如,间谍网的相对价格途径是/.files/images/small/8270-BrowniesHiResClip.jpg,应转换为http://namastefoods.com/files/images/small/8270-BrowniesHiResClip.jpg, &储存在物品中[图像_urls]。
我还需要这些物品作为一条绝对的道路。
采用上述法典的错误:
2011-06-28 17:18:11-0400 [scrapy] INFO: Scrapy 0.12.0.2541 started (bot: dmoz2)
2011-06-28 17:18:11-0400 [scrapy] DEBUG: Enabled extensions: TelnetConsole, SpiderContext, WebService, CoreStats, CloseSpider
2011-06-28 17:18:11-0400 [scrapy] DEBUG: Enabled scheduler middlewares: DuplicatesFilterMiddleware
2011-06-28 17:18:11-0400 [scrapy] DEBUG: Enabled downloader middlewares: HttpAuthMiddleware, DownloadTimeoutMiddleware, UserAgentMiddleware, RetryMiddleware, DefaultHeadersMiddleware, RedirectMiddleware, CookiesMiddleware, HttpCompressionMiddleware, DownloaderStats
2011-06-28 17:18:11-0400 [scrapy] DEBUG: Enabled spider middlewares: HttpErrorMiddleware, OffsiteMiddleware, RefererMiddleware, UrlLengthMiddleware, DepthMiddleware
2011-06-28 17:18:11-0400 [scrapy] DEBUG: Enabled item pipelines: MyImagesPipeline
2011-06-28 17:18:11-0400 [scrapy] DEBUG: Telnet console listening on 0.0.0.0:6023
2011-06-28 17:18:11-0400 [scrapy] DEBUG: Web service listening on 0.0.0.0:6080
2011-06-28 17:18:11-0400 [namastecopy2] INFO: Spider opened
2011-06-28 17:18:12-0400 [namastecopy2] DEBUG: Crawled (200) <GET http://www.namastefoods.com/products/cgi-bin/products.cgi?Function=show&Category_Id=4&Id=12> (referer: None)
2011-06-28 17:18:12-0400 [namastecopy2] ERROR: Spider error processing <http://www.namastefoods.com/products/cgi-bin/products.cgi?Function=show&Category_Id=4&Id=12> (referer: <None>)
Traceback (most recent call last):
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/base.py", line 1137, in mainLoop
self.runUntilCurrent()
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/base.py", line 757, in runUntilCurrent
call.func(*call.args, **call.kw)
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/defer.py", line 243, in callback
self._startRunCallbacks(result)
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/defer.py", line 312, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/defer.py", line 328, in _runCallbacks
self.result = callback(self.result, *args, **kw)
File "/***/***/***/***/***/***/spiders/namaste_copy2.py", line 30, in parse
item[ image_urls ] = urljoin_rfc(base_url, relative_url)
File "/Library/Python/2.6/site-packages/Scrapy-0.12.0.2541-py2.6.egg/scrapy/utils/url.py", line 37, in urljoin_rfc
unicode_to_str(ref, encoding))
File "/Library/Python/2.6/site-packages/Scrapy-0.12.0.2541-py2.6.egg/scrapy/utils/python.py", line 96, in unicode_to_str
raise TypeError( unicode_to_str must receive a unicode or str object, got %s % type(text).__name__)
exceptions.TypeError: unicode_to_str must receive a unicode or str object, got list
2011-06-28 17:18:15-0400 [namastecopy2] DEBUG: Crawled (200) <GET http://www.namastefoods.com/products/cgi-bin/products.cgi?Function=show&Category_Id=4&Id=1> (referer: None)
2011-06-28 17:18:15-0400 [namastecopy2] ERROR: Spider error processing <http://www.namastefoods.com/products/cgi-bin/products.cgi?Function=show&Category_Id=4&Id=1> (referer: <None>)
Traceback (most recent call last):
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/base.py", line 1137, in mainLoop
self.runUntilCurrent()
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/base.py", line 757, in runUntilCurrent
call.func(*call.args, **call.kw)
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/defer.py", line 243, in callback
self._startRunCallbacks(result)
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/defer.py", line 312, in _startRunCallbacks
self._runCallbacks()
--- <exception caught here> ---
File "/System/Library/Frameworks/Python.framework/Versions/2.6/Extras/lib/python/twisted/internet/defer.py", line 328, in _runCallbacks
self.result = callback(self.result, *args, **kw)
File "/***/***/***/***/***/***/spiders/namaste_copy2.py", line 30, in parse
item[ image_urls ] = urljoin_rfc(base_url, relative_url)
File "/Library/Python/2.6/site-packages/Scrapy-0.12.0.2541-py2.6.egg/scrapy/utils/url.py", line 37, in urljoin_rfc
unicode_to_str(ref, encoding))
File "/Library/Python/2.6/site-packages/Scrapy-0.12.0.2541-py2.6.egg/scrapy/utils/python.py", line 96, in unicode_to_str
raise TypeError( unicode_to_str must receive a unicode or str object, got %s % type(text).__name__)
exceptions.TypeError: unicode_to_str must receive a unicode or str object, got list
2 011-06-28 17:18:15-0400 [namastecopy2] INFO: Closing spider (finished)
2011-06-28 17:18:15-0400 [namastecopy2] INFO: Spider closed (finished)
感谢-TM